code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def start(info):
    """Run the dev server.

    Uses `django_extensions <http://pypi.python.org/pypi/django-extensions/0.5>`,
    if available, to provide `runserver_plus`.

    Set the command to use with `options.paved.django.runserver`
    Set the port to use with `options.paved.django.runserver_port`
    """
    command = options.paved.django.runserver
    if command == 'runserver_plus':
        try:
            # Availability probe only; the import itself is unused.
            import django_extensions
        except ImportError:
            info("Could not import django_extensions. Using default runserver.")
            command = 'runserver'
    port = options.paved.django.runserver_port
    if port:
        command = '%s %s' % (command, port)
    call_manage(command)
def function[start, parameter[info]]: constant[Run the dev server. Uses `django_extensions <http://pypi.python.org/pypi/django-extensions/0.5>`, if available, to provide `runserver_plus`. Set the command to use with `options.paved.django.runserver` Set the port to use with `options.paved.django.runserver_port` ] variable[cmd] assign[=] name[options].paved.django.runserver if compare[name[cmd] equal[==] constant[runserver_plus]] begin[:] <ast.Try object at 0x7da1b004bc10> variable[port] assign[=] name[options].paved.django.runserver_port if name[port] begin[:] variable[cmd] assign[=] binary_operation[constant[%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b004ada0>, <ast.Name object at 0x7da1b0048b80>]]] call[name[call_manage], parameter[name[cmd]]]
keyword[def] identifier[start] ( identifier[info] ): literal[string] identifier[cmd] = identifier[options] . identifier[paved] . identifier[django] . identifier[runserver] keyword[if] identifier[cmd] == literal[string] : keyword[try] : keyword[import] identifier[django_extensions] keyword[except] identifier[ImportError] : identifier[info] ( literal[string] ) identifier[cmd] = literal[string] identifier[port] = identifier[options] . identifier[paved] . identifier[django] . identifier[runserver_port] keyword[if] identifier[port] : identifier[cmd] = literal[string] %( identifier[cmd] , identifier[port] ) identifier[call_manage] ( identifier[cmd] )
def start(info): """Run the dev server. Uses `django_extensions <http://pypi.python.org/pypi/django-extensions/0.5>`, if available, to provide `runserver_plus`. Set the command to use with `options.paved.django.runserver` Set the port to use with `options.paved.django.runserver_port` """ cmd = options.paved.django.runserver if cmd == 'runserver_plus': try: import django_extensions # depends on [control=['try'], data=[]] except ImportError: info('Could not import django_extensions. Using default runserver.') cmd = 'runserver' # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['cmd']] port = options.paved.django.runserver_port if port: cmd = '%s %s' % (cmd, port) # depends on [control=['if'], data=[]] call_manage(cmd)
def skill_configuration(self):
    # type: () -> SkillConfiguration
    """Create the skill configuration object using the registered
    components.
    """
    config = super(StandardSkillBuilder, self).skill_configuration
    config.api_client = DefaultApiClient()
    if self.table_name is not None:
        adapter_kwargs = {"table_name": self.table_name}  # type: Dict[str, Any]
        # Only forward the optional settings that are actually set (truthy),
        # so DynamoDbAdapter's own defaults apply otherwise.
        optional_settings = (
            ("create_table", self.auto_create_table),
            ("partition_keygen", self.partition_keygen),
            ("dynamodb_resource", self.dynamodb_client),
        )
        for key, value in optional_settings:
            if value:
                adapter_kwargs[key] = value
        config.persistence_adapter = DynamoDbAdapter(**adapter_kwargs)
    return config
def function[skill_configuration, parameter[self]]: constant[Create the skill configuration object using the registered components. ] variable[skill_config] assign[=] call[name[super], parameter[name[StandardSkillBuilder], name[self]]].skill_configuration name[skill_config].api_client assign[=] call[name[DefaultApiClient], parameter[]] if compare[name[self].table_name is_not constant[None]] begin[:] variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b19efe20>], [<ast.Attribute object at 0x7da1b19ef3d0>]] if name[self].auto_create_table begin[:] call[name[kwargs]][constant[create_table]] assign[=] name[self].auto_create_table if name[self].partition_keygen begin[:] call[name[kwargs]][constant[partition_keygen]] assign[=] name[self].partition_keygen if name[self].dynamodb_client begin[:] call[name[kwargs]][constant[dynamodb_resource]] assign[=] name[self].dynamodb_client name[skill_config].persistence_adapter assign[=] call[name[DynamoDbAdapter], parameter[]] return[name[skill_config]]
keyword[def] identifier[skill_configuration] ( identifier[self] ): literal[string] identifier[skill_config] = identifier[super] ( identifier[StandardSkillBuilder] , identifier[self] ). identifier[skill_configuration] identifier[skill_config] . identifier[api_client] = identifier[DefaultApiClient] () keyword[if] identifier[self] . identifier[table_name] keyword[is] keyword[not] keyword[None] : identifier[kwargs] ={ literal[string] : identifier[self] . identifier[table_name] } keyword[if] identifier[self] . identifier[auto_create_table] : identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[auto_create_table] keyword[if] identifier[self] . identifier[partition_keygen] : identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[partition_keygen] keyword[if] identifier[self] . identifier[dynamodb_client] : identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[dynamodb_client] identifier[skill_config] . identifier[persistence_adapter] = identifier[DynamoDbAdapter] (** identifier[kwargs] ) keyword[return] identifier[skill_config]
def skill_configuration(self): # type: () -> SkillConfiguration 'Create the skill configuration object using the registered\n components.\n ' skill_config = super(StandardSkillBuilder, self).skill_configuration skill_config.api_client = DefaultApiClient() if self.table_name is not None: kwargs = {'table_name': self.table_name} # type: Dict[str, Any] if self.auto_create_table: kwargs['create_table'] = self.auto_create_table # depends on [control=['if'], data=[]] if self.partition_keygen: kwargs['partition_keygen'] = self.partition_keygen # depends on [control=['if'], data=[]] if self.dynamodb_client: kwargs['dynamodb_resource'] = self.dynamodb_client # depends on [control=['if'], data=[]] skill_config.persistence_adapter = DynamoDbAdapter(**kwargs) # depends on [control=['if'], data=[]] return skill_config
def apply_tfms(self, tfms:TfmList, do_resolve:bool=True, xtra:Optional[Dict[Callable,dict]]=None,
               size:Optional[Union[int,TensorImageSize]]=None, resize_method:ResizeMethod=None,
               mult:int=None, padding_mode:str='reflection', mode:str='bilinear',
               remove_out:bool=True)->TensorImage:
    "Apply all `tfms` to the `Image`, if `do_resolve` picks value for random args."
    # Nothing to do: no transforms, no extra kwargs, no target size.
    if not (tfms or xtra or size): return self
    tfms = listify(tfms)
    xtra = ifnone(xtra, {})
    # A listy `size` fixes both dimensions, so squish to it; a scalar size
    # leaves the aspect ratio free, so crop instead.
    default_rsz = ResizeMethod.SQUISH if (size is not None and is_listy(size)) else ResizeMethod.CROP
    resize_method = ifnone(resize_method, default_rsz)
    # NOTE(review): `<= 2` presumably matches the CROP/PAD members of
    # ResizeMethod by enum value — confirm against the enum definition.
    if resize_method <= 2 and size is not None: tfms = self._maybe_add_crop_pad(tfms)
    # Respect each transform's declared ordering before applying.
    tfms = sorted(tfms, key=lambda o: o.tfm.order)
    # Resolve random transform arguments to concrete values for this call.
    if do_resolve: _resolve_tfms(tfms)
    # Work on a copy so the original image is never mutated.
    x = self.clone()
    x.set_sample(padding_mode=padding_mode, mode=mode, remove_out=remove_out)
    if size is not None:
        crop_target = _get_crop_target(size, mult=mult)
        if resize_method in (ResizeMethod.CROP,ResizeMethod.PAD):
            # Resize so the crop/pad target fits; crop shrinks, pad grows.
            target = _get_resize_target(x, crop_target, do_crop=(resize_method==ResizeMethod.CROP))
            x.resize(target)
        elif resize_method==ResizeMethod.SQUISH:
            # Force both spatial dims to the target, distorting aspect ratio.
            x.resize((x.shape[0],) + crop_target)
    else: size = x.size
    # Crop-type transforms need the size/padding arguments injected below.
    size_tfms = [o for o in tfms if isinstance(o.tfm,TfmCrop)]
    for tfm in tfms:
        # Caller-supplied extra kwargs for this specific transform win.
        if tfm.tfm in xtra: x = tfm(x, **xtra[tfm.tfm])
        elif tfm in size_tfms:
            if resize_method in (ResizeMethod.CROP,ResizeMethod.PAD):
                x = tfm(x, size=_get_crop_target(size,mult=mult), padding_mode=padding_mode)
        else: x = tfm(x)
    return x.refresh()
def function[apply_tfms, parameter[self, tfms, do_resolve, xtra, size, resize_method, mult, padding_mode, mode, remove_out]]: constant[Apply all `tfms` to the `Image`, if `do_resolve` picks value for random args.] if <ast.UnaryOp object at 0x7da1b1eb4a00> begin[:] return[name[self]] variable[tfms] assign[=] call[name[listify], parameter[name[tfms]]] variable[xtra] assign[=] call[name[ifnone], parameter[name[xtra], dictionary[[], []]]] variable[default_rsz] assign[=] <ast.IfExp object at 0x7da1b1eb47c0> variable[resize_method] assign[=] call[name[ifnone], parameter[name[resize_method], name[default_rsz]]] if <ast.BoolOp object at 0x7da20cabe890> begin[:] variable[tfms] assign[=] call[name[self]._maybe_add_crop_pad, parameter[name[tfms]]] variable[tfms] assign[=] call[name[sorted], parameter[name[tfms]]] if name[do_resolve] begin[:] call[name[_resolve_tfms], parameter[name[tfms]]] variable[x] assign[=] call[name[self].clone, parameter[]] call[name[x].set_sample, parameter[]] if compare[name[size] is_not constant[None]] begin[:] variable[crop_target] assign[=] call[name[_get_crop_target], parameter[name[size]]] if compare[name[resize_method] in tuple[[<ast.Attribute object at 0x7da20cabf160>, <ast.Attribute object at 0x7da20cabfc70>]]] begin[:] variable[target] assign[=] call[name[_get_resize_target], parameter[name[x], name[crop_target]]] call[name[x].resize, parameter[name[target]]] variable[size_tfms] assign[=] <ast.ListComp object at 0x7da20cabc7c0> for taget[name[tfm]] in starred[name[tfms]] begin[:] if compare[name[tfm].tfm in name[xtra]] begin[:] variable[x] assign[=] call[name[tfm], parameter[name[x]]] return[call[name[x].refresh, parameter[]]]
keyword[def] identifier[apply_tfms] ( identifier[self] , identifier[tfms] : identifier[TfmList] , identifier[do_resolve] : identifier[bool] = keyword[True] , identifier[xtra] : identifier[Optional] [ identifier[Dict] [ identifier[Callable] , identifier[dict] ]]= keyword[None] , identifier[size] : identifier[Optional] [ identifier[Union] [ identifier[int] , identifier[TensorImageSize] ]]= keyword[None] , identifier[resize_method] : identifier[ResizeMethod] = keyword[None] , identifier[mult] : identifier[int] = keyword[None] , identifier[padding_mode] : identifier[str] = literal[string] , identifier[mode] : identifier[str] = literal[string] , identifier[remove_out] : identifier[bool] = keyword[True] )-> identifier[TensorImage] : literal[string] keyword[if] keyword[not] ( identifier[tfms] keyword[or] identifier[xtra] keyword[or] identifier[size] ): keyword[return] identifier[self] identifier[tfms] = identifier[listify] ( identifier[tfms] ) identifier[xtra] = identifier[ifnone] ( identifier[xtra] ,{}) identifier[default_rsz] = identifier[ResizeMethod] . identifier[SQUISH] keyword[if] ( identifier[size] keyword[is] keyword[not] keyword[None] keyword[and] identifier[is_listy] ( identifier[size] )) keyword[else] identifier[ResizeMethod] . identifier[CROP] identifier[resize_method] = identifier[ifnone] ( identifier[resize_method] , identifier[default_rsz] ) keyword[if] identifier[resize_method] <= literal[int] keyword[and] identifier[size] keyword[is] keyword[not] keyword[None] : identifier[tfms] = identifier[self] . identifier[_maybe_add_crop_pad] ( identifier[tfms] ) identifier[tfms] = identifier[sorted] ( identifier[tfms] , identifier[key] = keyword[lambda] identifier[o] : identifier[o] . identifier[tfm] . identifier[order] ) keyword[if] identifier[do_resolve] : identifier[_resolve_tfms] ( identifier[tfms] ) identifier[x] = identifier[self] . identifier[clone] () identifier[x] . 
identifier[set_sample] ( identifier[padding_mode] = identifier[padding_mode] , identifier[mode] = identifier[mode] , identifier[remove_out] = identifier[remove_out] ) keyword[if] identifier[size] keyword[is] keyword[not] keyword[None] : identifier[crop_target] = identifier[_get_crop_target] ( identifier[size] , identifier[mult] = identifier[mult] ) keyword[if] identifier[resize_method] keyword[in] ( identifier[ResizeMethod] . identifier[CROP] , identifier[ResizeMethod] . identifier[PAD] ): identifier[target] = identifier[_get_resize_target] ( identifier[x] , identifier[crop_target] , identifier[do_crop] =( identifier[resize_method] == identifier[ResizeMethod] . identifier[CROP] )) identifier[x] . identifier[resize] ( identifier[target] ) keyword[elif] identifier[resize_method] == identifier[ResizeMethod] . identifier[SQUISH] : identifier[x] . identifier[resize] (( identifier[x] . identifier[shape] [ literal[int] ],)+ identifier[crop_target] ) keyword[else] : identifier[size] = identifier[x] . identifier[size] identifier[size_tfms] =[ identifier[o] keyword[for] identifier[o] keyword[in] identifier[tfms] keyword[if] identifier[isinstance] ( identifier[o] . identifier[tfm] , identifier[TfmCrop] )] keyword[for] identifier[tfm] keyword[in] identifier[tfms] : keyword[if] identifier[tfm] . identifier[tfm] keyword[in] identifier[xtra] : identifier[x] = identifier[tfm] ( identifier[x] ,** identifier[xtra] [ identifier[tfm] . identifier[tfm] ]) keyword[elif] identifier[tfm] keyword[in] identifier[size_tfms] : keyword[if] identifier[resize_method] keyword[in] ( identifier[ResizeMethod] . identifier[CROP] , identifier[ResizeMethod] . identifier[PAD] ): identifier[x] = identifier[tfm] ( identifier[x] , identifier[size] = identifier[_get_crop_target] ( identifier[size] , identifier[mult] = identifier[mult] ), identifier[padding_mode] = identifier[padding_mode] ) keyword[else] : identifier[x] = identifier[tfm] ( identifier[x] ) keyword[return] identifier[x] . 
identifier[refresh] ()
def apply_tfms(self, tfms: TfmList, do_resolve: bool=True, xtra: Optional[Dict[Callable, dict]]=None, size: Optional[Union[int, TensorImageSize]]=None, resize_method: ResizeMethod=None, mult: int=None, padding_mode: str='reflection', mode: str='bilinear', remove_out: bool=True) -> TensorImage: """Apply all `tfms` to the `Image`, if `do_resolve` picks value for random args.""" if not (tfms or xtra or size): return self # depends on [control=['if'], data=[]] tfms = listify(tfms) xtra = ifnone(xtra, {}) default_rsz = ResizeMethod.SQUISH if size is not None and is_listy(size) else ResizeMethod.CROP resize_method = ifnone(resize_method, default_rsz) if resize_method <= 2 and size is not None: tfms = self._maybe_add_crop_pad(tfms) # depends on [control=['if'], data=[]] tfms = sorted(tfms, key=lambda o: o.tfm.order) if do_resolve: _resolve_tfms(tfms) # depends on [control=['if'], data=[]] x = self.clone() x.set_sample(padding_mode=padding_mode, mode=mode, remove_out=remove_out) if size is not None: crop_target = _get_crop_target(size, mult=mult) if resize_method in (ResizeMethod.CROP, ResizeMethod.PAD): target = _get_resize_target(x, crop_target, do_crop=resize_method == ResizeMethod.CROP) x.resize(target) # depends on [control=['if'], data=['resize_method']] elif resize_method == ResizeMethod.SQUISH: x.resize((x.shape[0],) + crop_target) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['size']] else: size = x.size size_tfms = [o for o in tfms if isinstance(o.tfm, TfmCrop)] for tfm in tfms: if tfm.tfm in xtra: x = tfm(x, **xtra[tfm.tfm]) # depends on [control=['if'], data=['xtra']] elif tfm in size_tfms: if resize_method in (ResizeMethod.CROP, ResizeMethod.PAD): x = tfm(x, size=_get_crop_target(size, mult=mult), padding_mode=padding_mode) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['tfm']] else: x = tfm(x) # depends on [control=['for'], data=['tfm']] return x.refresh()
def set_pwm(self, channel, on, off):
    """Sets a single PWM channel."""
    # Each channel owns four consecutive registers starting 4*channel past
    # the channel-0 base: ON low byte, ON high byte, OFF low byte, OFF high byte.
    offset = 4 * channel
    register_values = (
        (LED0_ON_L, on & 0xFF),
        (LED0_ON_H, on >> 8),
        (LED0_OFF_L, off & 0xFF),
        (LED0_OFF_H, off >> 8),
    )
    for register, value in register_values:
        self.i2c.write8(register + offset, value)
def function[set_pwm, parameter[self, channel, on, off]]: constant[Sets a single PWM channel.] call[name[self].i2c.write8, parameter[binary_operation[name[LED0_ON_L] + binary_operation[constant[4] * name[channel]]], binary_operation[name[on] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]] call[name[self].i2c.write8, parameter[binary_operation[name[LED0_ON_H] + binary_operation[constant[4] * name[channel]]], binary_operation[name[on] <ast.RShift object at 0x7da2590d6a40> constant[8]]]] call[name[self].i2c.write8, parameter[binary_operation[name[LED0_OFF_L] + binary_operation[constant[4] * name[channel]]], binary_operation[name[off] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]] call[name[self].i2c.write8, parameter[binary_operation[name[LED0_OFF_H] + binary_operation[constant[4] * name[channel]]], binary_operation[name[off] <ast.RShift object at 0x7da2590d6a40> constant[8]]]]
keyword[def] identifier[set_pwm] ( identifier[self] , identifier[channel] , identifier[on] , identifier[off] ): literal[string] identifier[self] . identifier[i2c] . identifier[write8] ( identifier[LED0_ON_L] + literal[int] * identifier[channel] , identifier[on] & literal[int] ) identifier[self] . identifier[i2c] . identifier[write8] ( identifier[LED0_ON_H] + literal[int] * identifier[channel] , identifier[on] >> literal[int] ) identifier[self] . identifier[i2c] . identifier[write8] ( identifier[LED0_OFF_L] + literal[int] * identifier[channel] , identifier[off] & literal[int] ) identifier[self] . identifier[i2c] . identifier[write8] ( identifier[LED0_OFF_H] + literal[int] * identifier[channel] , identifier[off] >> literal[int] )
def set_pwm(self, channel, on, off): """Sets a single PWM channel.""" self.i2c.write8(LED0_ON_L + 4 * channel, on & 255) self.i2c.write8(LED0_ON_H + 4 * channel, on >> 8) self.i2c.write8(LED0_OFF_L + 4 * channel, off & 255) self.i2c.write8(LED0_OFF_H + 4 * channel, off >> 8)
def get_variables_path(export_dir):
    """Returns the path for storing variables checkpoints."""
    # All path components go through as_bytes so mixed str/bytes inputs join cleanly.
    as_bytes = tf.compat.as_bytes
    constants = tf_v1.saved_model.constants
    return os.path.join(
        as_bytes(export_dir),
        as_bytes(constants.VARIABLES_DIRECTORY),
        as_bytes(constants.VARIABLES_FILENAME),
    )
def function[get_variables_path, parameter[export_dir]]: constant[Returns the path for storing variables checkpoints.] return[call[name[os].path.join, parameter[call[name[tf].compat.as_bytes, parameter[name[export_dir]]], call[name[tf].compat.as_bytes, parameter[name[tf_v1].saved_model.constants.VARIABLES_DIRECTORY]], call[name[tf].compat.as_bytes, parameter[name[tf_v1].saved_model.constants.VARIABLES_FILENAME]]]]]
keyword[def] identifier[get_variables_path] ( identifier[export_dir] ): literal[string] keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[tf] . identifier[compat] . identifier[as_bytes] ( identifier[export_dir] ), identifier[tf] . identifier[compat] . identifier[as_bytes] ( identifier[tf_v1] . identifier[saved_model] . identifier[constants] . identifier[VARIABLES_DIRECTORY] ), identifier[tf] . identifier[compat] . identifier[as_bytes] ( identifier[tf_v1] . identifier[saved_model] . identifier[constants] . identifier[VARIABLES_FILENAME] ))
def get_variables_path(export_dir): """Returns the path for storing variables checkpoints.""" return os.path.join(tf.compat.as_bytes(export_dir), tf.compat.as_bytes(tf_v1.saved_model.constants.VARIABLES_DIRECTORY), tf.compat.as_bytes(tf_v1.saved_model.constants.VARIABLES_FILENAME))
async def restore_storage_configuration(self):
    """
    Restore machine's storage configuration to its initial state.
    """
    # Delegate to the handler and cache its response (presumably the
    # refreshed machine representation — confirm against the handler API)
    # so later attribute reads reflect the restored state.
    self._data = await self._handler.restore_storage_configuration(
        system_id=self.system_id)
<ast.AsyncFunctionDef object at 0x7da20c992bc0>
keyword[async] keyword[def] identifier[restore_storage_configuration] ( identifier[self] ): literal[string] identifier[self] . identifier[_data] = keyword[await] identifier[self] . identifier[_handler] . identifier[restore_storage_configuration] ( identifier[system_id] = identifier[self] . identifier[system_id] )
async def restore_storage_configuration(self): """ Restore machine's storage configuration to its initial state. """ self._data = await self._handler.restore_storage_configuration(system_id=self.system_id)
def run_continuously(self, interval=1):
    """Continuously run, while executing pending jobs at each
    elapsed time interval.

    @return cease_continuous_run: threading.Event which can
    be set to cease continuous run.

    Please note that it is *intended behavior that run_continuously()
    does not run missed jobs*. For example, if you've registered a job
    that should run every minute and you set a continuous run interval
    of one hour then your job won't be run 60 times at each interval but
    only once.
    """
    stop_event = threading.Event()
    scheduler = self  # captured by the worker thread below

    class _RunnerThread(threading.Thread):
        def run(self):
            # Loop until the caller sets the stop event; sleep between polls
            # rather than catching up on missed runs (see docstring).
            while not stop_event.is_set():
                scheduler.run_pending()
                time.sleep(interval)

    _RunnerThread().start()
    return stop_event
def function[run_continuously, parameter[self, interval]]: constant[Continuously run, while executing pending jobs at each elapsed time interval. @return cease_continuous_run: threading.Event which can be set to cease continuous run. Please note that it is *intended behavior that run_continuously() does not run missed jobs*. For example, if you've registered a job that should run every minute and you set a continuous run interval of one hour then your job won't be run 60 times at each interval but only once. ] variable[cease_continuous_run] assign[=] call[name[threading].Event, parameter[]] class class[ScheduleThread, parameter[]] begin[:] def function[run, parameter[cls]]: while <ast.UnaryOp object at 0x7da1b15b7eb0> begin[:] call[name[self].run_pending, parameter[]] call[name[time].sleep, parameter[name[interval]]] variable[continuous_thread] assign[=] call[name[ScheduleThread], parameter[]] call[name[continuous_thread].start, parameter[]] return[name[cease_continuous_run]]
keyword[def] identifier[run_continuously] ( identifier[self] , identifier[interval] = literal[int] ): literal[string] identifier[cease_continuous_run] = identifier[threading] . identifier[Event] () keyword[class] identifier[ScheduleThread] ( identifier[threading] . identifier[Thread] ): @ identifier[classmethod] keyword[def] identifier[run] ( identifier[cls] ): keyword[while] keyword[not] identifier[cease_continuous_run] . identifier[is_set] (): identifier[self] . identifier[run_pending] () identifier[time] . identifier[sleep] ( identifier[interval] ) identifier[continuous_thread] = identifier[ScheduleThread] () identifier[continuous_thread] . identifier[start] () keyword[return] identifier[cease_continuous_run]
def run_continuously(self, interval=1): """Continuously run, while executing pending jobs at each elapsed time interval. @return cease_continuous_run: threading.Event which can be set to cease continuous run. Please note that it is *intended behavior that run_continuously() does not run missed jobs*. For example, if you've registered a job that should run every minute and you set a continuous run interval of one hour then your job won't be run 60 times at each interval but only once. """ cease_continuous_run = threading.Event() class ScheduleThread(threading.Thread): @classmethod def run(cls): while not cease_continuous_run.is_set(): self.run_pending() time.sleep(interval) # depends on [control=['while'], data=[]] continuous_thread = ScheduleThread() continuous_thread.start() return cease_continuous_run
def plot_attention(attention_matrix: np.ndarray, source_tokens: List[str], target_tokens: List[str], filename: str):
    """
    Uses matplotlib for creating a visualization of the attention matrix.

    :param attention_matrix: The attention matrix.
    :param source_tokens: A list of source tokens.
    :param target_tokens: A list of target tokens.
    :param filename: The file to which the attention visualization will be written to.
    """
    try:
        import matplotlib
    except ImportError:
        raise RuntimeError("Please install matplotlib.")
    # Headless backend: render straight to file, no display required.
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt
    assert attention_matrix.shape[0] == len(target_tokens)
    # Transpose so targets run along x and sources along y.
    plt.imshow(attention_matrix.transpose(), interpolation="nearest", cmap="Greys")
    plt.xlabel("target")
    plt.ylabel("source")
    plt.gca().set_xticks(list(range(len(target_tokens))))
    plt.gca().set_yticks(list(range(len(source_tokens))))
    plt.gca().set_xticklabels(target_tokens, rotation='vertical')
    plt.gca().set_yticklabels(source_tokens)
    plt.tight_layout()
    plt.savefig(filename)
    # Fix: close the figure after saving. Without this, every call leaks an
    # open figure and matplotlib eventually warns about too many open figures.
    plt.close()
    logger.info("Saved alignment visualization to " + filename)
def function[plot_attention, parameter[attention_matrix, source_tokens, target_tokens, filename]]: constant[ Uses matplotlib for creating a visualization of the attention matrix. :param attention_matrix: The attention matrix. :param source_tokens: A list of source tokens. :param target_tokens: A list of target tokens. :param filename: The file to which the attention visualization will be written to. ] <ast.Try object at 0x7da2044c20e0> call[name[matplotlib].use, parameter[constant[Agg]]] import module[matplotlib.pyplot] as alias[plt] assert[compare[call[name[attention_matrix].shape][constant[0]] equal[==] call[name[len], parameter[name[target_tokens]]]]] call[name[plt].imshow, parameter[call[name[attention_matrix].transpose, parameter[]]]] call[name[plt].xlabel, parameter[constant[target]]] call[name[plt].ylabel, parameter[constant[source]]] call[call[name[plt].gca, parameter[]].set_xticks, parameter[<ast.ListComp object at 0x7da1b1d5c1f0>]] call[call[name[plt].gca, parameter[]].set_yticks, parameter[<ast.ListComp object at 0x7da1b1d5fee0>]] call[call[name[plt].gca, parameter[]].set_xticklabels, parameter[name[target_tokens]]] call[call[name[plt].gca, parameter[]].set_yticklabels, parameter[name[source_tokens]]] call[name[plt].tight_layout, parameter[]] call[name[plt].savefig, parameter[name[filename]]] call[name[logger].info, parameter[binary_operation[constant[Saved alignment visualization to ] + name[filename]]]]
keyword[def] identifier[plot_attention] ( identifier[attention_matrix] : identifier[np] . identifier[ndarray] , identifier[source_tokens] : identifier[List] [ identifier[str] ], identifier[target_tokens] : identifier[List] [ identifier[str] ], identifier[filename] : identifier[str] ): literal[string] keyword[try] : keyword[import] identifier[matplotlib] keyword[except] identifier[ImportError] : keyword[raise] identifier[RuntimeError] ( literal[string] ) identifier[matplotlib] . identifier[use] ( literal[string] ) keyword[import] identifier[matplotlib] . identifier[pyplot] keyword[as] identifier[plt] keyword[assert] identifier[attention_matrix] . identifier[shape] [ literal[int] ]== identifier[len] ( identifier[target_tokens] ) identifier[plt] . identifier[imshow] ( identifier[attention_matrix] . identifier[transpose] (), identifier[interpolation] = literal[string] , identifier[cmap] = literal[string] ) identifier[plt] . identifier[xlabel] ( literal[string] ) identifier[plt] . identifier[ylabel] ( literal[string] ) identifier[plt] . identifier[gca] (). identifier[set_xticks] ([ identifier[i] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[target_tokens] ))]) identifier[plt] . identifier[gca] (). identifier[set_yticks] ([ identifier[i] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[source_tokens] ))]) identifier[plt] . identifier[gca] (). identifier[set_xticklabels] ( identifier[target_tokens] , identifier[rotation] = literal[string] ) identifier[plt] . identifier[gca] (). identifier[set_yticklabels] ( identifier[source_tokens] ) identifier[plt] . identifier[tight_layout] () identifier[plt] . identifier[savefig] ( identifier[filename] ) identifier[logger] . identifier[info] ( literal[string] + identifier[filename] )
def plot_attention(attention_matrix: np.ndarray, source_tokens: List[str], target_tokens: List[str], filename: str): """ Uses matplotlib for creating a visualization of the attention matrix. :param attention_matrix: The attention matrix. :param source_tokens: A list of source tokens. :param target_tokens: A list of target tokens. :param filename: The file to which the attention visualization will be written to. """ try: import matplotlib # depends on [control=['try'], data=[]] except ImportError: raise RuntimeError('Please install matplotlib.') # depends on [control=['except'], data=[]] matplotlib.use('Agg') import matplotlib.pyplot as plt assert attention_matrix.shape[0] == len(target_tokens) plt.imshow(attention_matrix.transpose(), interpolation='nearest', cmap='Greys') plt.xlabel('target') plt.ylabel('source') plt.gca().set_xticks([i for i in range(0, len(target_tokens))]) plt.gca().set_yticks([i for i in range(0, len(source_tokens))]) plt.gca().set_xticklabels(target_tokens, rotation='vertical') plt.gca().set_yticklabels(source_tokens) plt.tight_layout() plt.savefig(filename) logger.info('Saved alignment visualization to ' + filename)
def _validate_dt64_dtype(dtype):
    """
    Check that a dtype, if passed, represents either a numpy datetime64[ns]
    dtype or a pandas DatetimeTZDtype.

    Parameters
    ----------
    dtype : object

    Returns
    -------
    dtype : None, numpy.dtype, or DatetimeTZDtype

    Raises
    ------
    ValueError : invalid dtype

    Notes
    -----
    Unlike validate_tz_from_dtype, this does _not_ allow non-existent
    tz errors to go through
    """
    if dtype is None:
        return dtype

    dtype = pandas_dtype(dtype)
    if is_dtype_equal(dtype, np.dtype("M8")):
        # Bare 'datetime64' with no unit: coerce to nanosecond precision, but warn.
        dtype = _NS_DTYPE
        msg = textwrap.dedent("""\
            Passing in 'datetime64' dtype with no precision is deprecated
            and will raise in a future version. Please pass in
            'datetime64[ns]' instead.""")
        warnings.warn(msg, FutureWarning, stacklevel=5)

    # Accept exactly two forms: the numpy ns-precision dtype, or a
    # timezone-aware pandas DatetimeTZDtype (the two are disjoint types).
    is_numpy_ns = isinstance(dtype, np.dtype) and dtype == _NS_DTYPE
    is_tz_aware = isinstance(dtype, DatetimeTZDtype)
    if not (is_numpy_ns or is_tz_aware):
        raise ValueError("Unexpected value for 'dtype': '{dtype}'. "
                         "Must be 'datetime64[ns]' or DatetimeTZDtype'."
                         .format(dtype=dtype))
    return dtype
def function[_validate_dt64_dtype, parameter[dtype]]: constant[ Check that a dtype, if passed, represents either a numpy datetime64[ns] dtype or a pandas DatetimeTZDtype. Parameters ---------- dtype : object Returns ------- dtype : None, numpy.dtype, or DatetimeTZDtype Raises ------ ValueError : invalid dtype Notes ----- Unlike validate_tz_from_dtype, this does _not_ allow non-existent tz errors to go through ] if compare[name[dtype] is_not constant[None]] begin[:] variable[dtype] assign[=] call[name[pandas_dtype], parameter[name[dtype]]] if call[name[is_dtype_equal], parameter[name[dtype], call[name[np].dtype, parameter[constant[M8]]]]] begin[:] variable[dtype] assign[=] name[_NS_DTYPE] variable[msg] assign[=] call[name[textwrap].dedent, parameter[constant[ Passing in 'datetime64' dtype with no precision is deprecated and will raise in a future version. Please pass in 'datetime64[ns]' instead.]]] call[name[warnings].warn, parameter[name[msg], name[FutureWarning]]] if <ast.BoolOp object at 0x7da1b1dd8fd0> begin[:] <ast.Raise object at 0x7da1b1dda9b0> return[name[dtype]]
keyword[def] identifier[_validate_dt64_dtype] ( identifier[dtype] ): literal[string] keyword[if] identifier[dtype] keyword[is] keyword[not] keyword[None] : identifier[dtype] = identifier[pandas_dtype] ( identifier[dtype] ) keyword[if] identifier[is_dtype_equal] ( identifier[dtype] , identifier[np] . identifier[dtype] ( literal[string] )): identifier[dtype] = identifier[_NS_DTYPE] identifier[msg] = identifier[textwrap] . identifier[dedent] ( literal[string] ) identifier[warnings] . identifier[warn] ( identifier[msg] , identifier[FutureWarning] , identifier[stacklevel] = literal[int] ) keyword[if] (( identifier[isinstance] ( identifier[dtype] , identifier[np] . identifier[dtype] ) keyword[and] identifier[dtype] != identifier[_NS_DTYPE] ) keyword[or] keyword[not] identifier[isinstance] ( identifier[dtype] ,( identifier[np] . identifier[dtype] , identifier[DatetimeTZDtype] ))): keyword[raise] identifier[ValueError] ( literal[string] literal[string] . identifier[format] ( identifier[dtype] = identifier[dtype] )) keyword[return] identifier[dtype]
def _validate_dt64_dtype(dtype): """ Check that a dtype, if passed, represents either a numpy datetime64[ns] dtype or a pandas DatetimeTZDtype. Parameters ---------- dtype : object Returns ------- dtype : None, numpy.dtype, or DatetimeTZDtype Raises ------ ValueError : invalid dtype Notes ----- Unlike validate_tz_from_dtype, this does _not_ allow non-existent tz errors to go through """ if dtype is not None: dtype = pandas_dtype(dtype) if is_dtype_equal(dtype, np.dtype('M8')): # no precision, warn dtype = _NS_DTYPE msg = textwrap.dedent(" Passing in 'datetime64' dtype with no precision is deprecated\n and will raise in a future version. Please pass in\n 'datetime64[ns]' instead.") warnings.warn(msg, FutureWarning, stacklevel=5) # depends on [control=['if'], data=[]] if isinstance(dtype, np.dtype) and dtype != _NS_DTYPE or not isinstance(dtype, (np.dtype, DatetimeTZDtype)): raise ValueError("Unexpected value for 'dtype': '{dtype}'. Must be 'datetime64[ns]' or DatetimeTZDtype'.".format(dtype=dtype)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['dtype']] return dtype
async def update_current_price_info(self): """Update current price info async.""" query = gql( """ { viewer { home(id: "%s") { currentSubscription { priceInfo { current { energy tax total startsAt } } } } } } """ % self.home_id ) price_info_temp = await self._tibber_control.execute(query) if not price_info_temp: _LOGGER.error("Could not find current price info.") return try: home = price_info_temp["viewer"]["home"] current_subscription = home["currentSubscription"] price_info = current_subscription["priceInfo"]["current"] except (KeyError, TypeError): _LOGGER.error("Could not find current price info.") return if price_info: self._current_price_info = price_info
<ast.AsyncFunctionDef object at 0x7da1affc1a80>
keyword[async] keyword[def] identifier[update_current_price_info] ( identifier[self] ): literal[string] identifier[query] = identifier[gql] ( literal[string] % identifier[self] . identifier[home_id] ) identifier[price_info_temp] = keyword[await] identifier[self] . identifier[_tibber_control] . identifier[execute] ( identifier[query] ) keyword[if] keyword[not] identifier[price_info_temp] : identifier[_LOGGER] . identifier[error] ( literal[string] ) keyword[return] keyword[try] : identifier[home] = identifier[price_info_temp] [ literal[string] ][ literal[string] ] identifier[current_subscription] = identifier[home] [ literal[string] ] identifier[price_info] = identifier[current_subscription] [ literal[string] ][ literal[string] ] keyword[except] ( identifier[KeyError] , identifier[TypeError] ): identifier[_LOGGER] . identifier[error] ( literal[string] ) keyword[return] keyword[if] identifier[price_info] : identifier[self] . identifier[_current_price_info] = identifier[price_info]
async def update_current_price_info(self): """Update current price info async.""" query = gql('\n {\n viewer {\n home(id: "%s") {\n currentSubscription {\n priceInfo {\n current {\n energy\n tax\n total\n startsAt\n }\n }\n }\n }\n }\n }\n ' % self.home_id) price_info_temp = await self._tibber_control.execute(query) if not price_info_temp: _LOGGER.error('Could not find current price info.') return # depends on [control=['if'], data=[]] try: home = price_info_temp['viewer']['home'] current_subscription = home['currentSubscription'] price_info = current_subscription['priceInfo']['current'] # depends on [control=['try'], data=[]] except (KeyError, TypeError): _LOGGER.error('Could not find current price info.') return # depends on [control=['except'], data=[]] if price_info: self._current_price_info = price_info # depends on [control=['if'], data=[]]
def update_main_table(self): """Write generator settings to database. """ data = (json.dumps(self.settings),) self.cursor.execute(''' CREATE TABLE IF NOT EXISTS main ( settings TEXT NOT NULL DEFAULT "{}" ) ''') self.cursor.execute('SELECT * FROM main') if self.cursor.fetchall() == []: self.cursor.execute('INSERT INTO main (settings) VALUES (?)', data) else: self.cursor.execute('UPDATE main SET settings=?', data)
def function[update_main_table, parameter[self]]: constant[Write generator settings to database. ] variable[data] assign[=] tuple[[<ast.Call object at 0x7da20cabf640>]] call[name[self].cursor.execute, parameter[constant[ CREATE TABLE IF NOT EXISTS main ( settings TEXT NOT NULL DEFAULT "{}" ) ]]] call[name[self].cursor.execute, parameter[constant[SELECT * FROM main]]] if compare[call[name[self].cursor.fetchall, parameter[]] equal[==] list[[]]] begin[:] call[name[self].cursor.execute, parameter[constant[INSERT INTO main (settings) VALUES (?)], name[data]]]
keyword[def] identifier[update_main_table] ( identifier[self] ): literal[string] identifier[data] =( identifier[json] . identifier[dumps] ( identifier[self] . identifier[settings] ),) identifier[self] . identifier[cursor] . identifier[execute] ( literal[string] ) identifier[self] . identifier[cursor] . identifier[execute] ( literal[string] ) keyword[if] identifier[self] . identifier[cursor] . identifier[fetchall] ()==[]: identifier[self] . identifier[cursor] . identifier[execute] ( literal[string] , identifier[data] ) keyword[else] : identifier[self] . identifier[cursor] . identifier[execute] ( literal[string] , identifier[data] )
def update_main_table(self): """Write generator settings to database. """ data = (json.dumps(self.settings),) self.cursor.execute('\n CREATE TABLE IF NOT EXISTS main (\n settings TEXT NOT NULL DEFAULT "{}"\n )\n ') self.cursor.execute('SELECT * FROM main') if self.cursor.fetchall() == []: self.cursor.execute('INSERT INTO main (settings) VALUES (?)', data) # depends on [control=['if'], data=[]] else: self.cursor.execute('UPDATE main SET settings=?', data)
def box(text): r"""Wrap a chunk of text in a box. Example: >>> print(box('line1\nline2')) ┌───────┐ │ line1 │ │ line2 │ └───────┘ """ lines = text.split('\n') width = max(len(l) for l in lines) top_bar = (TOP_LEFT_CORNER + HORIZONTAL_BAR * (2 + width) + TOP_RIGHT_CORNER) bottom_bar = (BOTTOM_LEFT_CORNER + HORIZONTAL_BAR * (2 + width) + BOTTOM_RIGHT_CORNER) lines = [LINES_FORMAT_STR.format(line=line, width=width) for line in lines] return top_bar + '\n' + '\n'.join(lines) + '\n' + bottom_bar
def function[box, parameter[text]]: constant[Wrap a chunk of text in a box. Example: >>> print(box('line1\nline2')) ┌───────┐ │ line1 │ │ line2 │ └───────┘ ] variable[lines] assign[=] call[name[text].split, parameter[constant[ ]]] variable[width] assign[=] call[name[max], parameter[<ast.GeneratorExp object at 0x7da1b2344fd0>]] variable[top_bar] assign[=] binary_operation[binary_operation[name[TOP_LEFT_CORNER] + binary_operation[name[HORIZONTAL_BAR] * binary_operation[constant[2] + name[width]]]] + name[TOP_RIGHT_CORNER]] variable[bottom_bar] assign[=] binary_operation[binary_operation[name[BOTTOM_LEFT_CORNER] + binary_operation[name[HORIZONTAL_BAR] * binary_operation[constant[2] + name[width]]]] + name[BOTTOM_RIGHT_CORNER]] variable[lines] assign[=] <ast.ListComp object at 0x7da1b2347250> return[binary_operation[binary_operation[binary_operation[binary_operation[name[top_bar] + constant[ ]] + call[constant[ ].join, parameter[name[lines]]]] + constant[ ]] + name[bottom_bar]]]
keyword[def] identifier[box] ( identifier[text] ): literal[string] identifier[lines] = identifier[text] . identifier[split] ( literal[string] ) identifier[width] = identifier[max] ( identifier[len] ( identifier[l] ) keyword[for] identifier[l] keyword[in] identifier[lines] ) identifier[top_bar] =( identifier[TOP_LEFT_CORNER] + identifier[HORIZONTAL_BAR] *( literal[int] + identifier[width] )+ identifier[TOP_RIGHT_CORNER] ) identifier[bottom_bar] =( identifier[BOTTOM_LEFT_CORNER] + identifier[HORIZONTAL_BAR] *( literal[int] + identifier[width] )+ identifier[BOTTOM_RIGHT_CORNER] ) identifier[lines] =[ identifier[LINES_FORMAT_STR] . identifier[format] ( identifier[line] = identifier[line] , identifier[width] = identifier[width] ) keyword[for] identifier[line] keyword[in] identifier[lines] ] keyword[return] identifier[top_bar] + literal[string] + literal[string] . identifier[join] ( identifier[lines] )+ literal[string] + identifier[bottom_bar]
def box(text): """Wrap a chunk of text in a box. Example: >>> print(box('line1\\nline2')) ┌───────┐ │ line1 │ │ line2 │ └───────┘ """ lines = text.split('\n') width = max((len(l) for l in lines)) top_bar = TOP_LEFT_CORNER + HORIZONTAL_BAR * (2 + width) + TOP_RIGHT_CORNER bottom_bar = BOTTOM_LEFT_CORNER + HORIZONTAL_BAR * (2 + width) + BOTTOM_RIGHT_CORNER lines = [LINES_FORMAT_STR.format(line=line, width=width) for line in lines] return top_bar + '\n' + '\n'.join(lines) + '\n' + bottom_bar
def rlmb_tiny_sv2p(): """Tiny setting with a tiny sv2p model.""" hparams = rlmb_ppo_tiny() hparams.generative_model = "next_frame_sv2p" hparams.generative_model_params = "next_frame_sv2p_tiny" hparams.grayscale = False return hparams
def function[rlmb_tiny_sv2p, parameter[]]: constant[Tiny setting with a tiny sv2p model.] variable[hparams] assign[=] call[name[rlmb_ppo_tiny], parameter[]] name[hparams].generative_model assign[=] constant[next_frame_sv2p] name[hparams].generative_model_params assign[=] constant[next_frame_sv2p_tiny] name[hparams].grayscale assign[=] constant[False] return[name[hparams]]
keyword[def] identifier[rlmb_tiny_sv2p] (): literal[string] identifier[hparams] = identifier[rlmb_ppo_tiny] () identifier[hparams] . identifier[generative_model] = literal[string] identifier[hparams] . identifier[generative_model_params] = literal[string] identifier[hparams] . identifier[grayscale] = keyword[False] keyword[return] identifier[hparams]
def rlmb_tiny_sv2p(): """Tiny setting with a tiny sv2p model.""" hparams = rlmb_ppo_tiny() hparams.generative_model = 'next_frame_sv2p' hparams.generative_model_params = 'next_frame_sv2p_tiny' hparams.grayscale = False return hparams
def build_row(self, line): """ Line describes an image or images to show Returns a dict with a list of dicts of image names or text items Examples: # A single image to display >>> x.build_row('foo.png') [{'image': 'foo.png'}] # Two images with text in between: >>> x.build_row('foo.png or bar.jpg') [{'image': 'foo.png'}, {'text': 'or'}, {'image': 'bar.png'}] """ items = [] row = dict(items=items) fields = line.split(' ') image_exts = ['.png', '.jpg'] # nothing there, carry on if not fields: return row for field in fields: ext = os.path.splitext(field)[-1] if ext.lower() in image_exts: items.append( dict(image=field)) else: items.append( dict(text=field)) return row
def function[build_row, parameter[self, line]]: constant[ Line describes an image or images to show Returns a dict with a list of dicts of image names or text items Examples: # A single image to display >>> x.build_row('foo.png') [{'image': 'foo.png'}] # Two images with text in between: >>> x.build_row('foo.png or bar.jpg') [{'image': 'foo.png'}, {'text': 'or'}, {'image': 'bar.png'}] ] variable[items] assign[=] list[[]] variable[row] assign[=] call[name[dict], parameter[]] variable[fields] assign[=] call[name[line].split, parameter[constant[ ]]] variable[image_exts] assign[=] list[[<ast.Constant object at 0x7da18f00de40>, <ast.Constant object at 0x7da18f00d240>]] if <ast.UnaryOp object at 0x7da18f00dcc0> begin[:] return[name[row]] for taget[name[field]] in starred[name[fields]] begin[:] variable[ext] assign[=] call[call[name[os].path.splitext, parameter[name[field]]]][<ast.UnaryOp object at 0x7da18f723550>] if compare[call[name[ext].lower, parameter[]] in name[image_exts]] begin[:] call[name[items].append, parameter[call[name[dict], parameter[]]]] return[name[row]]
keyword[def] identifier[build_row] ( identifier[self] , identifier[line] ): literal[string] identifier[items] =[] identifier[row] = identifier[dict] ( identifier[items] = identifier[items] ) identifier[fields] = identifier[line] . identifier[split] ( literal[string] ) identifier[image_exts] =[ literal[string] , literal[string] ] keyword[if] keyword[not] identifier[fields] : keyword[return] identifier[row] keyword[for] identifier[field] keyword[in] identifier[fields] : identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[field] )[- literal[int] ] keyword[if] identifier[ext] . identifier[lower] () keyword[in] identifier[image_exts] : identifier[items] . identifier[append] ( identifier[dict] ( identifier[image] = identifier[field] )) keyword[else] : identifier[items] . identifier[append] ( identifier[dict] ( identifier[text] = identifier[field] )) keyword[return] identifier[row]
def build_row(self, line): """ Line describes an image or images to show Returns a dict with a list of dicts of image names or text items Examples: # A single image to display >>> x.build_row('foo.png') [{'image': 'foo.png'}] # Two images with text in between: >>> x.build_row('foo.png or bar.jpg') [{'image': 'foo.png'}, {'text': 'or'}, {'image': 'bar.png'}] """ items = [] row = dict(items=items) fields = line.split(' ') image_exts = ['.png', '.jpg'] # nothing there, carry on if not fields: return row # depends on [control=['if'], data=[]] for field in fields: ext = os.path.splitext(field)[-1] if ext.lower() in image_exts: items.append(dict(image=field)) # depends on [control=['if'], data=[]] else: items.append(dict(text=field)) # depends on [control=['for'], data=['field']] return row
def run_with_standalone_parser(self): """ Will run the operation as standalone with a new ArgumentParser """ parser = argparse.ArgumentParser(description=self.description()) self.configure_parser(parser) self.run(parser.parse_args())
def function[run_with_standalone_parser, parameter[self]]: constant[ Will run the operation as standalone with a new ArgumentParser ] variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]] call[name[self].configure_parser, parameter[name[parser]]] call[name[self].run, parameter[call[name[parser].parse_args, parameter[]]]]
keyword[def] identifier[run_with_standalone_parser] ( identifier[self] ): literal[string] identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = identifier[self] . identifier[description] ()) identifier[self] . identifier[configure_parser] ( identifier[parser] ) identifier[self] . identifier[run] ( identifier[parser] . identifier[parse_args] ())
def run_with_standalone_parser(self): """ Will run the operation as standalone with a new ArgumentParser """ parser = argparse.ArgumentParser(description=self.description()) self.configure_parser(parser) self.run(parser.parse_args())
def char_in(string, func_name): '''return current char and step if char is in string, where @test: a python function with one argument, which tests on one char and return True or False @test must be registered with register_function''' function = register_function(func_name, lambda char: char in string) return char_on_predicate(function)
def function[char_in, parameter[string, func_name]]: constant[return current char and step if char is in string, where @test: a python function with one argument, which tests on one char and return True or False @test must be registered with register_function] variable[function] assign[=] call[name[register_function], parameter[name[func_name], <ast.Lambda object at 0x7da1b28fdc60>]] return[call[name[char_on_predicate], parameter[name[function]]]]
keyword[def] identifier[char_in] ( identifier[string] , identifier[func_name] ): literal[string] identifier[function] = identifier[register_function] ( identifier[func_name] , keyword[lambda] identifier[char] : identifier[char] keyword[in] identifier[string] ) keyword[return] identifier[char_on_predicate] ( identifier[function] )
def char_in(string, func_name): """return current char and step if char is in string, where @test: a python function with one argument, which tests on one char and return True or False @test must be registered with register_function""" function = register_function(func_name, lambda char: char in string) return char_on_predicate(function)
def interactive_update_profile_vars(self): """ Function to update the `cloudgenix.API` object with profile info. Run after login or client login. **Returns:** Boolean on success/failure, """ profile = self._parent_class.get.profile() if profile.cgx_status: # if successful, save tenant id and email info to cli state. self._parent_class.tenant_id = profile.cgx_content.get('tenant_id') self._parent_class.email = profile.cgx_content.get('email') self._parent_class._user_id = profile.cgx_content.get('id') self._parent_class.roles = profile.cgx_content.get('roles', []) self._parent_class.token_session = profile.cgx_content.get('token_session') return True else: print("Profile retrieval failed.") # clear password out of memory self._parent_class._password = None return False
def function[interactive_update_profile_vars, parameter[self]]: constant[ Function to update the `cloudgenix.API` object with profile info. Run after login or client login. **Returns:** Boolean on success/failure, ] variable[profile] assign[=] call[name[self]._parent_class.get.profile, parameter[]] if name[profile].cgx_status begin[:] name[self]._parent_class.tenant_id assign[=] call[name[profile].cgx_content.get, parameter[constant[tenant_id]]] name[self]._parent_class.email assign[=] call[name[profile].cgx_content.get, parameter[constant[email]]] name[self]._parent_class._user_id assign[=] call[name[profile].cgx_content.get, parameter[constant[id]]] name[self]._parent_class.roles assign[=] call[name[profile].cgx_content.get, parameter[constant[roles], list[[]]]] name[self]._parent_class.token_session assign[=] call[name[profile].cgx_content.get, parameter[constant[token_session]]] return[constant[True]]
keyword[def] identifier[interactive_update_profile_vars] ( identifier[self] ): literal[string] identifier[profile] = identifier[self] . identifier[_parent_class] . identifier[get] . identifier[profile] () keyword[if] identifier[profile] . identifier[cgx_status] : identifier[self] . identifier[_parent_class] . identifier[tenant_id] = identifier[profile] . identifier[cgx_content] . identifier[get] ( literal[string] ) identifier[self] . identifier[_parent_class] . identifier[email] = identifier[profile] . identifier[cgx_content] . identifier[get] ( literal[string] ) identifier[self] . identifier[_parent_class] . identifier[_user_id] = identifier[profile] . identifier[cgx_content] . identifier[get] ( literal[string] ) identifier[self] . identifier[_parent_class] . identifier[roles] = identifier[profile] . identifier[cgx_content] . identifier[get] ( literal[string] ,[]) identifier[self] . identifier[_parent_class] . identifier[token_session] = identifier[profile] . identifier[cgx_content] . identifier[get] ( literal[string] ) keyword[return] keyword[True] keyword[else] : identifier[print] ( literal[string] ) identifier[self] . identifier[_parent_class] . identifier[_password] = keyword[None] keyword[return] keyword[False]
def interactive_update_profile_vars(self): """ Function to update the `cloudgenix.API` object with profile info. Run after login or client login. **Returns:** Boolean on success/failure, """ profile = self._parent_class.get.profile() if profile.cgx_status: # if successful, save tenant id and email info to cli state. self._parent_class.tenant_id = profile.cgx_content.get('tenant_id') self._parent_class.email = profile.cgx_content.get('email') self._parent_class._user_id = profile.cgx_content.get('id') self._parent_class.roles = profile.cgx_content.get('roles', []) self._parent_class.token_session = profile.cgx_content.get('token_session') return True # depends on [control=['if'], data=[]] else: print('Profile retrieval failed.') # clear password out of memory self._parent_class._password = None return False
def join(args): """ %prog join file1.txt(pivotfile) file2.txt .. Join tabular-like files based on common column. --column specifies the column index to pivot on. Use comma to separate multiple values if the pivot column is different in each file. Maintain the order in the first file. --sep specifies the column separators, default to tab. Use comma to separate multiple values if the column separator is different in each file. """ p = OptionParser(join.__doc__) p.add_option("--column", default="0", help="0-based column id, multiple values allowed [default: %default]") p.set_sep(multiple=True) p.add_option("--noheader", default=False, action="store_true", help="Do not print header [default: %default]") p.add_option("--na", default="na", help="Value for unjoined data [default: %default]") p.add_option("--compact", default=False, action="store_true", help="Do not repeat pivotal columns in output") p.add_option("--keysep", default=",", help="specify separator joining multiple elements in the key column" + " of the pivot file [default: %default]") p.set_outfile() opts, args = p.parse_args(args) nargs = len(args) keysep = opts.keysep compact = opts.compact if len(args) < 2: sys.exit(not p.print_help()) na = opts.na c = opts.column if "," in c: cc = [int(x) for x in c.split(",")] else: cc = [int(c)] * nargs assert len(cc) == nargs, "Column index number != File number" s = opts.sep if "," in s: ss = [x for x in s.split(",")] else: ss = [s] * nargs assert len(ss) == nargs, "column separator number != File number" # Maintain the first file line order, and combine other files into it pivotfile = args[0] files = [DictFile(f, keypos=c, valuepos=None, delimiter=s) \ for f, c, s in zip(args, cc, ss)] otherfiles = files[1:] # The header contains filenames headers = [] for i, x in enumerate(files): ncols = x.ncols if i and compact: ncols -= 1 headers += [op.basename(x.filename)] * ncols header = "\t".join(headers) fp = must_open(pivotfile) fw = must_open(opts.outfile, "w") if not 
opts.noheader: print(header, file=fw) for row in fp: row = row.rstrip() atoms = row.split(ss[0]) newrow = atoms key = atoms[cc[0]] keys = key.split(keysep) if keysep in key else [key] for d in otherfiles: drows = list() for key in keys: krow = d.get(key, [na] * d.ncols) if compact: krow.pop(d.keypos) drows.append(krow) drow = [keysep.join(x) for x in list(zip(*drows))] newrow += drow print("\t".join(newrow), file=fw)
def function[join, parameter[args]]: constant[ %prog join file1.txt(pivotfile) file2.txt .. Join tabular-like files based on common column. --column specifies the column index to pivot on. Use comma to separate multiple values if the pivot column is different in each file. Maintain the order in the first file. --sep specifies the column separators, default to tab. Use comma to separate multiple values if the column separator is different in each file. ] variable[p] assign[=] call[name[OptionParser], parameter[name[join].__doc__]] call[name[p].add_option, parameter[constant[--column]]] call[name[p].set_sep, parameter[]] call[name[p].add_option, parameter[constant[--noheader]]] call[name[p].add_option, parameter[constant[--na]]] call[name[p].add_option, parameter[constant[--compact]]] call[name[p].add_option, parameter[constant[--keysep]]] call[name[p].set_outfile, parameter[]] <ast.Tuple object at 0x7da18f721510> assign[=] call[name[p].parse_args, parameter[name[args]]] variable[nargs] assign[=] call[name[len], parameter[name[args]]] variable[keysep] assign[=] name[opts].keysep variable[compact] assign[=] name[opts].compact if compare[call[name[len], parameter[name[args]]] less[<] constant[2]] begin[:] call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da18f7237c0>]] variable[na] assign[=] name[opts].na variable[c] assign[=] name[opts].column if compare[constant[,] in name[c]] begin[:] variable[cc] assign[=] <ast.ListComp object at 0x7da18f720ca0> assert[compare[call[name[len], parameter[name[cc]]] equal[==] name[nargs]]] variable[s] assign[=] name[opts].sep if compare[constant[,] in name[s]] begin[:] variable[ss] assign[=] <ast.ListComp object at 0x7da2054a7190> assert[compare[call[name[len], parameter[name[ss]]] equal[==] name[nargs]]] variable[pivotfile] assign[=] call[name[args]][constant[0]] variable[files] assign[=] <ast.ListComp object at 0x7da2054a7160> variable[otherfiles] assign[=] call[name[files]][<ast.Slice object at 0x7da2054a6500>] 
variable[headers] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da2054a7a60>, <ast.Name object at 0x7da2054a6650>]]] in starred[call[name[enumerate], parameter[name[files]]]] begin[:] variable[ncols] assign[=] name[x].ncols if <ast.BoolOp object at 0x7da2054a69b0> begin[:] <ast.AugAssign object at 0x7da2054a7e50> <ast.AugAssign object at 0x7da2054a76d0> variable[header] assign[=] call[constant[ ].join, parameter[name[headers]]] variable[fp] assign[=] call[name[must_open], parameter[name[pivotfile]]] variable[fw] assign[=] call[name[must_open], parameter[name[opts].outfile, constant[w]]] if <ast.UnaryOp object at 0x7da2054a7820> begin[:] call[name[print], parameter[name[header]]] for taget[name[row]] in starred[name[fp]] begin[:] variable[row] assign[=] call[name[row].rstrip, parameter[]] variable[atoms] assign[=] call[name[row].split, parameter[call[name[ss]][constant[0]]]] variable[newrow] assign[=] name[atoms] variable[key] assign[=] call[name[atoms]][call[name[cc]][constant[0]]] variable[keys] assign[=] <ast.IfExp object at 0x7da2054a6a10> for taget[name[d]] in starred[name[otherfiles]] begin[:] variable[drows] assign[=] call[name[list], parameter[]] for taget[name[key]] in starred[name[keys]] begin[:] variable[krow] assign[=] call[name[d].get, parameter[name[key], binary_operation[list[[<ast.Name object at 0x7da2054a5420>]] * name[d].ncols]]] if name[compact] begin[:] call[name[krow].pop, parameter[name[d].keypos]] call[name[drows].append, parameter[name[krow]]] variable[drow] assign[=] <ast.ListComp object at 0x7da2054a63b0> <ast.AugAssign object at 0x7da2054a69e0> call[name[print], parameter[call[constant[ ].join, parameter[name[newrow]]]]]
keyword[def] identifier[join] ( identifier[args] ): literal[string] identifier[p] = identifier[OptionParser] ( identifier[join] . identifier[__doc__] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[string] , identifier[help] = literal[string] ) identifier[p] . identifier[set_sep] ( identifier[multiple] = keyword[True] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] , identifier[help] = literal[string] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[string] , identifier[help] = literal[string] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] , identifier[help] = literal[string] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[string] , identifier[help] = literal[string] + literal[string] ) identifier[p] . identifier[set_outfile] () identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] ) identifier[nargs] = identifier[len] ( identifier[args] ) identifier[keysep] = identifier[opts] . identifier[keysep] identifier[compact] = identifier[opts] . identifier[compact] keyword[if] identifier[len] ( identifier[args] )< literal[int] : identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ()) identifier[na] = identifier[opts] . identifier[na] identifier[c] = identifier[opts] . identifier[column] keyword[if] literal[string] keyword[in] identifier[c] : identifier[cc] =[ identifier[int] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[c] . identifier[split] ( literal[string] )] keyword[else] : identifier[cc] =[ identifier[int] ( identifier[c] )]* identifier[nargs] keyword[assert] identifier[len] ( identifier[cc] )== identifier[nargs] , literal[string] identifier[s] = identifier[opts] . 
identifier[sep] keyword[if] literal[string] keyword[in] identifier[s] : identifier[ss] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[s] . identifier[split] ( literal[string] )] keyword[else] : identifier[ss] =[ identifier[s] ]* identifier[nargs] keyword[assert] identifier[len] ( identifier[ss] )== identifier[nargs] , literal[string] identifier[pivotfile] = identifier[args] [ literal[int] ] identifier[files] =[ identifier[DictFile] ( identifier[f] , identifier[keypos] = identifier[c] , identifier[valuepos] = keyword[None] , identifier[delimiter] = identifier[s] ) keyword[for] identifier[f] , identifier[c] , identifier[s] keyword[in] identifier[zip] ( identifier[args] , identifier[cc] , identifier[ss] )] identifier[otherfiles] = identifier[files] [ literal[int] :] identifier[headers] =[] keyword[for] identifier[i] , identifier[x] keyword[in] identifier[enumerate] ( identifier[files] ): identifier[ncols] = identifier[x] . identifier[ncols] keyword[if] identifier[i] keyword[and] identifier[compact] : identifier[ncols] -= literal[int] identifier[headers] +=[ identifier[op] . identifier[basename] ( identifier[x] . identifier[filename] )]* identifier[ncols] identifier[header] = literal[string] . identifier[join] ( identifier[headers] ) identifier[fp] = identifier[must_open] ( identifier[pivotfile] ) identifier[fw] = identifier[must_open] ( identifier[opts] . identifier[outfile] , literal[string] ) keyword[if] keyword[not] identifier[opts] . identifier[noheader] : identifier[print] ( identifier[header] , identifier[file] = identifier[fw] ) keyword[for] identifier[row] keyword[in] identifier[fp] : identifier[row] = identifier[row] . identifier[rstrip] () identifier[atoms] = identifier[row] . identifier[split] ( identifier[ss] [ literal[int] ]) identifier[newrow] = identifier[atoms] identifier[key] = identifier[atoms] [ identifier[cc] [ literal[int] ]] identifier[keys] = identifier[key] . 
identifier[split] ( identifier[keysep] ) keyword[if] identifier[keysep] keyword[in] identifier[key] keyword[else] [ identifier[key] ] keyword[for] identifier[d] keyword[in] identifier[otherfiles] : identifier[drows] = identifier[list] () keyword[for] identifier[key] keyword[in] identifier[keys] : identifier[krow] = identifier[d] . identifier[get] ( identifier[key] ,[ identifier[na] ]* identifier[d] . identifier[ncols] ) keyword[if] identifier[compact] : identifier[krow] . identifier[pop] ( identifier[d] . identifier[keypos] ) identifier[drows] . identifier[append] ( identifier[krow] ) identifier[drow] =[ identifier[keysep] . identifier[join] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[list] ( identifier[zip] (* identifier[drows] ))] identifier[newrow] += identifier[drow] identifier[print] ( literal[string] . identifier[join] ( identifier[newrow] ), identifier[file] = identifier[fw] )
def join(args): """ %prog join file1.txt(pivotfile) file2.txt .. Join tabular-like files based on common column. --column specifies the column index to pivot on. Use comma to separate multiple values if the pivot column is different in each file. Maintain the order in the first file. --sep specifies the column separators, default to tab. Use comma to separate multiple values if the column separator is different in each file. """ p = OptionParser(join.__doc__) p.add_option('--column', default='0', help='0-based column id, multiple values allowed [default: %default]') p.set_sep(multiple=True) p.add_option('--noheader', default=False, action='store_true', help='Do not print header [default: %default]') p.add_option('--na', default='na', help='Value for unjoined data [default: %default]') p.add_option('--compact', default=False, action='store_true', help='Do not repeat pivotal columns in output') p.add_option('--keysep', default=',', help='specify separator joining multiple elements in the key column' + ' of the pivot file [default: %default]') p.set_outfile() (opts, args) = p.parse_args(args) nargs = len(args) keysep = opts.keysep compact = opts.compact if len(args) < 2: sys.exit(not p.print_help()) # depends on [control=['if'], data=[]] na = opts.na c = opts.column if ',' in c: cc = [int(x) for x in c.split(',')] # depends on [control=['if'], data=['c']] else: cc = [int(c)] * nargs assert len(cc) == nargs, 'Column index number != File number' s = opts.sep if ',' in s: ss = [x for x in s.split(',')] # depends on [control=['if'], data=['s']] else: ss = [s] * nargs assert len(ss) == nargs, 'column separator number != File number' # Maintain the first file line order, and combine other files into it pivotfile = args[0] files = [DictFile(f, keypos=c, valuepos=None, delimiter=s) for (f, c, s) in zip(args, cc, ss)] otherfiles = files[1:] # The header contains filenames headers = [] for (i, x) in enumerate(files): ncols = x.ncols if i and compact: ncols -= 1 # depends on 
[control=['if'], data=[]] headers += [op.basename(x.filename)] * ncols # depends on [control=['for'], data=[]] header = '\t'.join(headers) fp = must_open(pivotfile) fw = must_open(opts.outfile, 'w') if not opts.noheader: print(header, file=fw) # depends on [control=['if'], data=[]] for row in fp: row = row.rstrip() atoms = row.split(ss[0]) newrow = atoms key = atoms[cc[0]] keys = key.split(keysep) if keysep in key else [key] for d in otherfiles: drows = list() for key in keys: krow = d.get(key, [na] * d.ncols) if compact: krow.pop(d.keypos) # depends on [control=['if'], data=[]] drows.append(krow) # depends on [control=['for'], data=['key']] drow = [keysep.join(x) for x in list(zip(*drows))] newrow += drow # depends on [control=['for'], data=['d']] print('\t'.join(newrow), file=fw) # depends on [control=['for'], data=['row']]
def _writeWordFromSentenceInBlock(word, blockID, sentenceID, wordID): '''writes the word from a sentence in a block to a file with the id''' with open("wordIDs.txt", "a") as fp: fp.write("wordID: " + str(blockID) + "_" + str(sentenceID) + "_" + str(wordID) + "\n") fp.write("wordString: " + word + "\n") fp.write("\n")
def function[_writeWordFromSentenceInBlock, parameter[word, blockID, sentenceID, wordID]]: constant[writes the word from a sentence in a block to a file with the id] with call[name[open], parameter[constant[wordIDs.txt], constant[a]]] begin[:] call[name[fp].write, parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[wordID: ] + call[name[str], parameter[name[blockID]]]] + constant[_]] + call[name[str], parameter[name[sentenceID]]]] + constant[_]] + call[name[str], parameter[name[wordID]]]] + constant[ ]]]] call[name[fp].write, parameter[binary_operation[binary_operation[constant[wordString: ] + name[word]] + constant[ ]]]] call[name[fp].write, parameter[constant[ ]]]
keyword[def] identifier[_writeWordFromSentenceInBlock] ( identifier[word] , identifier[blockID] , identifier[sentenceID] , identifier[wordID] ): literal[string] keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[fp] : identifier[fp] . identifier[write] ( literal[string] + identifier[str] ( identifier[blockID] )+ literal[string] + identifier[str] ( identifier[sentenceID] )+ literal[string] + identifier[str] ( identifier[wordID] )+ literal[string] ) identifier[fp] . identifier[write] ( literal[string] + identifier[word] + literal[string] ) identifier[fp] . identifier[write] ( literal[string] )
def _writeWordFromSentenceInBlock(word, blockID, sentenceID, wordID): """writes the word from a sentence in a block to a file with the id""" with open('wordIDs.txt', 'a') as fp: fp.write('wordID: ' + str(blockID) + '_' + str(sentenceID) + '_' + str(wordID) + '\n') fp.write('wordString: ' + word + '\n') fp.write('\n') # depends on [control=['with'], data=['fp']]
def get_user(name, profile='github', user_details=False): ''' Get a GitHub user by name. name The user for which to obtain information. profile The name of the profile configuration to use. Defaults to ``github``. user_details Prints user information details. Defaults to ``False``. If the user is already in the organization and user_details is set to False, the get_user function returns ``True``. If the user is not already present in the organization, user details will be printed by default. CLI Example: .. code-block:: bash salt myminion github.get_user github-handle salt myminion github.get_user github-handle user_details=true ''' if not user_details and name in list_users(profile): # User is in the org, no need for additional Data return True response = {} client = _get_client(profile) organization = client.get_organization( _get_config_value(profile, 'org_name') ) try: user = client.get_user(name) except UnknownObjectException: log.exception("Resource not found") return False response['company'] = user.company response['created_at'] = user.created_at response['email'] = user.email response['html_url'] = user.html_url response['id'] = user.id response['login'] = user.login response['name'] = user.name response['type'] = user.type response['url'] = user.url try: headers, data = organization._requester.requestJsonAndCheck( "GET", organization.url + "/memberships/" + user._identity ) except UnknownObjectException: response['membership_state'] = 'nonexistent' response['in_org'] = False return response response['in_org'] = organization.has_in_members(user) response['membership_state'] = data.get('state') return response
def function[get_user, parameter[name, profile, user_details]]: constant[ Get a GitHub user by name. name The user for which to obtain information. profile The name of the profile configuration to use. Defaults to ``github``. user_details Prints user information details. Defaults to ``False``. If the user is already in the organization and user_details is set to False, the get_user function returns ``True``. If the user is not already present in the organization, user details will be printed by default. CLI Example: .. code-block:: bash salt myminion github.get_user github-handle salt myminion github.get_user github-handle user_details=true ] if <ast.BoolOp object at 0x7da1b1c5ef20> begin[:] return[constant[True]] variable[response] assign[=] dictionary[[], []] variable[client] assign[=] call[name[_get_client], parameter[name[profile]]] variable[organization] assign[=] call[name[client].get_organization, parameter[call[name[_get_config_value], parameter[name[profile], constant[org_name]]]]] <ast.Try object at 0x7da1b1c5f640> call[name[response]][constant[company]] assign[=] name[user].company call[name[response]][constant[created_at]] assign[=] name[user].created_at call[name[response]][constant[email]] assign[=] name[user].email call[name[response]][constant[html_url]] assign[=] name[user].html_url call[name[response]][constant[id]] assign[=] name[user].id call[name[response]][constant[login]] assign[=] name[user].login call[name[response]][constant[name]] assign[=] name[user].name call[name[response]][constant[type]] assign[=] name[user].type call[name[response]][constant[url]] assign[=] name[user].url <ast.Try object at 0x7da1b1c63640> call[name[response]][constant[in_org]] assign[=] call[name[organization].has_in_members, parameter[name[user]]] call[name[response]][constant[membership_state]] assign[=] call[name[data].get, parameter[constant[state]]] return[name[response]]
keyword[def] identifier[get_user] ( identifier[name] , identifier[profile] = literal[string] , identifier[user_details] = keyword[False] ): literal[string] keyword[if] keyword[not] identifier[user_details] keyword[and] identifier[name] keyword[in] identifier[list_users] ( identifier[profile] ): keyword[return] keyword[True] identifier[response] ={} identifier[client] = identifier[_get_client] ( identifier[profile] ) identifier[organization] = identifier[client] . identifier[get_organization] ( identifier[_get_config_value] ( identifier[profile] , literal[string] ) ) keyword[try] : identifier[user] = identifier[client] . identifier[get_user] ( identifier[name] ) keyword[except] identifier[UnknownObjectException] : identifier[log] . identifier[exception] ( literal[string] ) keyword[return] keyword[False] identifier[response] [ literal[string] ]= identifier[user] . identifier[company] identifier[response] [ literal[string] ]= identifier[user] . identifier[created_at] identifier[response] [ literal[string] ]= identifier[user] . identifier[email] identifier[response] [ literal[string] ]= identifier[user] . identifier[html_url] identifier[response] [ literal[string] ]= identifier[user] . identifier[id] identifier[response] [ literal[string] ]= identifier[user] . identifier[login] identifier[response] [ literal[string] ]= identifier[user] . identifier[name] identifier[response] [ literal[string] ]= identifier[user] . identifier[type] identifier[response] [ literal[string] ]= identifier[user] . identifier[url] keyword[try] : identifier[headers] , identifier[data] = identifier[organization] . identifier[_requester] . identifier[requestJsonAndCheck] ( literal[string] , identifier[organization] . identifier[url] + literal[string] + identifier[user] . 
identifier[_identity] ) keyword[except] identifier[UnknownObjectException] : identifier[response] [ literal[string] ]= literal[string] identifier[response] [ literal[string] ]= keyword[False] keyword[return] identifier[response] identifier[response] [ literal[string] ]= identifier[organization] . identifier[has_in_members] ( identifier[user] ) identifier[response] [ literal[string] ]= identifier[data] . identifier[get] ( literal[string] ) keyword[return] identifier[response]
def get_user(name, profile='github', user_details=False): """ Get a GitHub user by name. name The user for which to obtain information. profile The name of the profile configuration to use. Defaults to ``github``. user_details Prints user information details. Defaults to ``False``. If the user is already in the organization and user_details is set to False, the get_user function returns ``True``. If the user is not already present in the organization, user details will be printed by default. CLI Example: .. code-block:: bash salt myminion github.get_user github-handle salt myminion github.get_user github-handle user_details=true """ if not user_details and name in list_users(profile): # User is in the org, no need for additional Data return True # depends on [control=['if'], data=[]] response = {} client = _get_client(profile) organization = client.get_organization(_get_config_value(profile, 'org_name')) try: user = client.get_user(name) # depends on [control=['try'], data=[]] except UnknownObjectException: log.exception('Resource not found') return False # depends on [control=['except'], data=[]] response['company'] = user.company response['created_at'] = user.created_at response['email'] = user.email response['html_url'] = user.html_url response['id'] = user.id response['login'] = user.login response['name'] = user.name response['type'] = user.type response['url'] = user.url try: (headers, data) = organization._requester.requestJsonAndCheck('GET', organization.url + '/memberships/' + user._identity) # depends on [control=['try'], data=[]] except UnknownObjectException: response['membership_state'] = 'nonexistent' response['in_org'] = False return response # depends on [control=['except'], data=[]] response['in_org'] = organization.has_in_members(user) response['membership_state'] = data.get('state') return response
def remove_repeated_comments(node): """Remove comments that repeat themselves. Multiple statements might be annotated with the same comment. This way if one of the statements is deleted during optimization passes, the comment won't be lost. This pass removes sequences of identical comments, leaving only the first one. Args: node: An AST Returns: An AST where comments are not repeated in sequence. """ last_comment = {'text': None} for _node in gast.walk(node): if anno.hasanno(_node, 'comment'): comment = anno.getanno(_node, 'comment') if comment['text'] == last_comment['text']: anno.delanno(_node, 'comment') last_comment = comment return node
def function[remove_repeated_comments, parameter[node]]: constant[Remove comments that repeat themselves. Multiple statements might be annotated with the same comment. This way if one of the statements is deleted during optimization passes, the comment won't be lost. This pass removes sequences of identical comments, leaving only the first one. Args: node: An AST Returns: An AST where comments are not repeated in sequence. ] variable[last_comment] assign[=] dictionary[[<ast.Constant object at 0x7da18bccb340>], [<ast.Constant object at 0x7da18bccba00>]] for taget[name[_node]] in starred[call[name[gast].walk, parameter[name[node]]]] begin[:] if call[name[anno].hasanno, parameter[name[_node], constant[comment]]] begin[:] variable[comment] assign[=] call[name[anno].getanno, parameter[name[_node], constant[comment]]] if compare[call[name[comment]][constant[text]] equal[==] call[name[last_comment]][constant[text]]] begin[:] call[name[anno].delanno, parameter[name[_node], constant[comment]]] variable[last_comment] assign[=] name[comment] return[name[node]]
keyword[def] identifier[remove_repeated_comments] ( identifier[node] ): literal[string] identifier[last_comment] ={ literal[string] : keyword[None] } keyword[for] identifier[_node] keyword[in] identifier[gast] . identifier[walk] ( identifier[node] ): keyword[if] identifier[anno] . identifier[hasanno] ( identifier[_node] , literal[string] ): identifier[comment] = identifier[anno] . identifier[getanno] ( identifier[_node] , literal[string] ) keyword[if] identifier[comment] [ literal[string] ]== identifier[last_comment] [ literal[string] ]: identifier[anno] . identifier[delanno] ( identifier[_node] , literal[string] ) identifier[last_comment] = identifier[comment] keyword[return] identifier[node]
def remove_repeated_comments(node): """Remove comments that repeat themselves. Multiple statements might be annotated with the same comment. This way if one of the statements is deleted during optimization passes, the comment won't be lost. This pass removes sequences of identical comments, leaving only the first one. Args: node: An AST Returns: An AST where comments are not repeated in sequence. """ last_comment = {'text': None} for _node in gast.walk(node): if anno.hasanno(_node, 'comment'): comment = anno.getanno(_node, 'comment') if comment['text'] == last_comment['text']: anno.delanno(_node, 'comment') # depends on [control=['if'], data=[]] last_comment = comment # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['_node']] return node
def _get_simsuccessors(self, addr, job, current_function_addr=None): """ Create the SimSuccessors instance for a block. :param int addr: Address of the block. :param CFGJob job: The CFG job instance with an input state inside. :param int current_function_addr: Address of the current function. :return: A SimSuccessors instance :rtype: SimSuccessors """ exception_info = None state = job.state saved_state = job.state # We don't have to make a copy here # respect the basic block size from base graph block_size = None if self._base_graph is not None: for n in self._base_graph.nodes(): if n.addr == addr: block_size = n.size break try: sim_successors = None if not self._keep_state: if self.project.is_hooked(addr): old_proc = self.project._sim_procedures[addr] is_continuation = old_proc.is_continuation elif self.project.simos.is_syscall_addr(addr): old_proc = self.project.simos.syscall_from_addr(addr) is_continuation = False # syscalls don't support continuation else: old_proc = None is_continuation = None if old_proc is not None and \ not is_continuation and \ not old_proc.ADDS_EXITS and \ not old_proc.NO_RET: # DON'T CREATE USELESS SIMPROCEDURES if we don't care about the accuracy of states # When generating CFG, a SimProcedure will not be created as it is but be created as a # ReturnUnconstrained stub if it satisfies the following conditions: # - It doesn't add any new exits. # - It returns as normal. # In this way, we can speed up the CFG generation by quite a lot as we avoid simulating # those functions like read() and puts(), which has no impact on the overall control flow at all. # # Special notes about SimProcedure continuation: Any SimProcedure instance that is a continuation # will add new exits, otherwise the original SimProcedure wouldn't have been executed anyway. Hence # it's reasonable for us to always simulate a SimProcedure with continuation. 
old_name = None if old_proc.is_syscall: new_stub = SIM_PROCEDURES["stubs"]["syscall"] ret_to = state.regs.ip_at_syscall else: # normal SimProcedures new_stub = SIM_PROCEDURES["stubs"]["ReturnUnconstrained"] ret_to = None old_name = old_proc.display_name # instantiate the stub new_stub_inst = new_stub(display_name=old_name) sim_successors = self.project.engines.procedure_engine.process( state, new_stub_inst, force_addr=addr, ret_to=ret_to, ) if sim_successors is None: jumpkind = state.history.jumpkind jumpkind = 'Ijk_Boring' if jumpkind is None else jumpkind sim_successors = self.project.factory.successors( state, jumpkind=jumpkind, size=block_size, opt_level=self._iropt_level) except (SimFastPathError, SimSolverModeError) as ex: if saved_state.mode == 'fastpath': # Got a SimFastPathError or SimSolverModeError in FastPath mode. # We wanna switch to symbolic mode for current IRSB. l.debug('Switch to symbolic mode for address %#x', addr) # Make a copy of the current 'fastpath' state l.debug('Symbolic jumps at basic block %#x.', addr) new_state = None if addr != current_function_addr: new_state = self._get_symbolic_function_initial_state(current_function_addr) if new_state is None: new_state = state.copy() new_state.set_mode('symbolic') new_state.options.add(o.DO_RET_EMULATION) # Remove bad constraints # FIXME: This is so hackish... new_state.solver._solver.constraints = [c for c in new_state.solver.constraints if c.op != 'BoolV' or c.args[0] is not False] new_state.solver._solver._result = None # Swap them saved_state, job.state = job.state, new_state sim_successors, exception_info, _ = self._get_simsuccessors(addr, job) else: exception_info = sys.exc_info() # Got a SimSolverModeError in symbolic mode. We are screwed. # Skip this IRSB l.debug("Caught a SimIRSBError %s. 
Don't panic, this is usually expected.", ex) inst = SIM_PROCEDURES["stubs"]["PathTerminator"]() sim_successors = SimEngineProcedure().process(state, inst) except SimIRSBError: exception_info = sys.exc_info() # It's a tragedy that we came across some instructions that VEX # does not support. I'll create a terminating stub there l.debug("Caught a SimIRSBError during CFG recovery. Creating a PathTerminator.", exc_info=True) inst = SIM_PROCEDURES["stubs"]["PathTerminator"]() sim_successors = SimEngineProcedure().process(state, inst) except claripy.ClaripyError: exception_info = sys.exc_info() l.debug("Caught a ClaripyError during CFG recovery. Don't panic, this is usually expected.", exc_info=True) # Generate a PathTerminator to terminate the current path inst = SIM_PROCEDURES["stubs"]["PathTerminator"]() sim_successors = SimEngineProcedure().process(state, inst) except SimError: exception_info = sys.exc_info() l.debug("Caught a SimError during CFG recovery. Don't panic, this is usually expected.", exc_info=True) # Generate a PathTerminator to terminate the current path inst = SIM_PROCEDURES["stubs"]["PathTerminator"]() sim_successors = SimEngineProcedure().process(state, inst) except AngrExitError as ex: exception_info = sys.exc_info() l.debug("Caught a AngrExitError during CFG recovery. 
Don't panic, this is usually expected.", exc_info=True) # Generate a PathTerminator to terminate the current path inst = SIM_PROCEDURES["stubs"]["PathTerminator"]() sim_successors = SimEngineProcedure().process(state, inst) except AngrError: exception_info = sys.exc_info() section = self.project.loader.main_object.find_section_containing(addr) if section is None: sec_name = 'No section' else: sec_name = section.name # AngrError shouldn't really happen though l.debug("Caught an AngrError during CFG recovery at %#x (%s)", addr, sec_name, exc_info=True) # We might be on a wrong branch, and is likely to encounter the # "No bytes in memory xxx" exception # Just ignore it sim_successors = None return sim_successors, exception_info, saved_state
def function[_get_simsuccessors, parameter[self, addr, job, current_function_addr]]: constant[ Create the SimSuccessors instance for a block. :param int addr: Address of the block. :param CFGJob job: The CFG job instance with an input state inside. :param int current_function_addr: Address of the current function. :return: A SimSuccessors instance :rtype: SimSuccessors ] variable[exception_info] assign[=] constant[None] variable[state] assign[=] name[job].state variable[saved_state] assign[=] name[job].state variable[block_size] assign[=] constant[None] if compare[name[self]._base_graph is_not constant[None]] begin[:] for taget[name[n]] in starred[call[name[self]._base_graph.nodes, parameter[]]] begin[:] if compare[name[n].addr equal[==] name[addr]] begin[:] variable[block_size] assign[=] name[n].size break <ast.Try object at 0x7da20c992c50> return[tuple[[<ast.Name object at 0x7da2045655a0>, <ast.Name object at 0x7da204564fa0>, <ast.Name object at 0x7da204567b50>]]]
keyword[def] identifier[_get_simsuccessors] ( identifier[self] , identifier[addr] , identifier[job] , identifier[current_function_addr] = keyword[None] ): literal[string] identifier[exception_info] = keyword[None] identifier[state] = identifier[job] . identifier[state] identifier[saved_state] = identifier[job] . identifier[state] identifier[block_size] = keyword[None] keyword[if] identifier[self] . identifier[_base_graph] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[n] keyword[in] identifier[self] . identifier[_base_graph] . identifier[nodes] (): keyword[if] identifier[n] . identifier[addr] == identifier[addr] : identifier[block_size] = identifier[n] . identifier[size] keyword[break] keyword[try] : identifier[sim_successors] = keyword[None] keyword[if] keyword[not] identifier[self] . identifier[_keep_state] : keyword[if] identifier[self] . identifier[project] . identifier[is_hooked] ( identifier[addr] ): identifier[old_proc] = identifier[self] . identifier[project] . identifier[_sim_procedures] [ identifier[addr] ] identifier[is_continuation] = identifier[old_proc] . identifier[is_continuation] keyword[elif] identifier[self] . identifier[project] . identifier[simos] . identifier[is_syscall_addr] ( identifier[addr] ): identifier[old_proc] = identifier[self] . identifier[project] . identifier[simos] . identifier[syscall_from_addr] ( identifier[addr] ) identifier[is_continuation] = keyword[False] keyword[else] : identifier[old_proc] = keyword[None] identifier[is_continuation] = keyword[None] keyword[if] identifier[old_proc] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[is_continuation] keyword[and] keyword[not] identifier[old_proc] . identifier[ADDS_EXITS] keyword[and] keyword[not] identifier[old_proc] . identifier[NO_RET] : identifier[old_name] = keyword[None] keyword[if] identifier[old_proc] . 
identifier[is_syscall] : identifier[new_stub] = identifier[SIM_PROCEDURES] [ literal[string] ][ literal[string] ] identifier[ret_to] = identifier[state] . identifier[regs] . identifier[ip_at_syscall] keyword[else] : identifier[new_stub] = identifier[SIM_PROCEDURES] [ literal[string] ][ literal[string] ] identifier[ret_to] = keyword[None] identifier[old_name] = identifier[old_proc] . identifier[display_name] identifier[new_stub_inst] = identifier[new_stub] ( identifier[display_name] = identifier[old_name] ) identifier[sim_successors] = identifier[self] . identifier[project] . identifier[engines] . identifier[procedure_engine] . identifier[process] ( identifier[state] , identifier[new_stub_inst] , identifier[force_addr] = identifier[addr] , identifier[ret_to] = identifier[ret_to] , ) keyword[if] identifier[sim_successors] keyword[is] keyword[None] : identifier[jumpkind] = identifier[state] . identifier[history] . identifier[jumpkind] identifier[jumpkind] = literal[string] keyword[if] identifier[jumpkind] keyword[is] keyword[None] keyword[else] identifier[jumpkind] identifier[sim_successors] = identifier[self] . identifier[project] . identifier[factory] . identifier[successors] ( identifier[state] , identifier[jumpkind] = identifier[jumpkind] , identifier[size] = identifier[block_size] , identifier[opt_level] = identifier[self] . identifier[_iropt_level] ) keyword[except] ( identifier[SimFastPathError] , identifier[SimSolverModeError] ) keyword[as] identifier[ex] : keyword[if] identifier[saved_state] . identifier[mode] == literal[string] : identifier[l] . identifier[debug] ( literal[string] , identifier[addr] ) identifier[l] . identifier[debug] ( literal[string] , identifier[addr] ) identifier[new_state] = keyword[None] keyword[if] identifier[addr] != identifier[current_function_addr] : identifier[new_state] = identifier[self] . 
identifier[_get_symbolic_function_initial_state] ( identifier[current_function_addr] ) keyword[if] identifier[new_state] keyword[is] keyword[None] : identifier[new_state] = identifier[state] . identifier[copy] () identifier[new_state] . identifier[set_mode] ( literal[string] ) identifier[new_state] . identifier[options] . identifier[add] ( identifier[o] . identifier[DO_RET_EMULATION] ) identifier[new_state] . identifier[solver] . identifier[_solver] . identifier[constraints] =[ identifier[c] keyword[for] identifier[c] keyword[in] identifier[new_state] . identifier[solver] . identifier[constraints] keyword[if] identifier[c] . identifier[op] != literal[string] keyword[or] identifier[c] . identifier[args] [ literal[int] ] keyword[is] keyword[not] keyword[False] ] identifier[new_state] . identifier[solver] . identifier[_solver] . identifier[_result] = keyword[None] identifier[saved_state] , identifier[job] . identifier[state] = identifier[job] . identifier[state] , identifier[new_state] identifier[sim_successors] , identifier[exception_info] , identifier[_] = identifier[self] . identifier[_get_simsuccessors] ( identifier[addr] , identifier[job] ) keyword[else] : identifier[exception_info] = identifier[sys] . identifier[exc_info] () identifier[l] . identifier[debug] ( literal[string] , identifier[ex] ) identifier[inst] = identifier[SIM_PROCEDURES] [ literal[string] ][ literal[string] ]() identifier[sim_successors] = identifier[SimEngineProcedure] (). identifier[process] ( identifier[state] , identifier[inst] ) keyword[except] identifier[SimIRSBError] : identifier[exception_info] = identifier[sys] . identifier[exc_info] () identifier[l] . identifier[debug] ( literal[string] , identifier[exc_info] = keyword[True] ) identifier[inst] = identifier[SIM_PROCEDURES] [ literal[string] ][ literal[string] ]() identifier[sim_successors] = identifier[SimEngineProcedure] (). identifier[process] ( identifier[state] , identifier[inst] ) keyword[except] identifier[claripy] . 
identifier[ClaripyError] : identifier[exception_info] = identifier[sys] . identifier[exc_info] () identifier[l] . identifier[debug] ( literal[string] , identifier[exc_info] = keyword[True] ) identifier[inst] = identifier[SIM_PROCEDURES] [ literal[string] ][ literal[string] ]() identifier[sim_successors] = identifier[SimEngineProcedure] (). identifier[process] ( identifier[state] , identifier[inst] ) keyword[except] identifier[SimError] : identifier[exception_info] = identifier[sys] . identifier[exc_info] () identifier[l] . identifier[debug] ( literal[string] , identifier[exc_info] = keyword[True] ) identifier[inst] = identifier[SIM_PROCEDURES] [ literal[string] ][ literal[string] ]() identifier[sim_successors] = identifier[SimEngineProcedure] (). identifier[process] ( identifier[state] , identifier[inst] ) keyword[except] identifier[AngrExitError] keyword[as] identifier[ex] : identifier[exception_info] = identifier[sys] . identifier[exc_info] () identifier[l] . identifier[debug] ( literal[string] , identifier[exc_info] = keyword[True] ) identifier[inst] = identifier[SIM_PROCEDURES] [ literal[string] ][ literal[string] ]() identifier[sim_successors] = identifier[SimEngineProcedure] (). identifier[process] ( identifier[state] , identifier[inst] ) keyword[except] identifier[AngrError] : identifier[exception_info] = identifier[sys] . identifier[exc_info] () identifier[section] = identifier[self] . identifier[project] . identifier[loader] . identifier[main_object] . identifier[find_section_containing] ( identifier[addr] ) keyword[if] identifier[section] keyword[is] keyword[None] : identifier[sec_name] = literal[string] keyword[else] : identifier[sec_name] = identifier[section] . identifier[name] identifier[l] . identifier[debug] ( literal[string] , identifier[addr] , identifier[sec_name] , identifier[exc_info] = keyword[True] ) identifier[sim_successors] = keyword[None] keyword[return] identifier[sim_successors] , identifier[exception_info] , identifier[saved_state]
def _get_simsuccessors(self, addr, job, current_function_addr=None): """ Create the SimSuccessors instance for a block. :param int addr: Address of the block. :param CFGJob job: The CFG job instance with an input state inside. :param int current_function_addr: Address of the current function. :return: A SimSuccessors instance :rtype: SimSuccessors """ exception_info = None state = job.state saved_state = job.state # We don't have to make a copy here # respect the basic block size from base graph block_size = None if self._base_graph is not None: for n in self._base_graph.nodes(): if n.addr == addr: block_size = n.size break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['n']] # depends on [control=['if'], data=[]] try: sim_successors = None if not self._keep_state: if self.project.is_hooked(addr): old_proc = self.project._sim_procedures[addr] is_continuation = old_proc.is_continuation # depends on [control=['if'], data=[]] elif self.project.simos.is_syscall_addr(addr): old_proc = self.project.simos.syscall_from_addr(addr) is_continuation = False # syscalls don't support continuation # depends on [control=['if'], data=[]] else: old_proc = None is_continuation = None if old_proc is not None and (not is_continuation) and (not old_proc.ADDS_EXITS) and (not old_proc.NO_RET): # DON'T CREATE USELESS SIMPROCEDURES if we don't care about the accuracy of states # When generating CFG, a SimProcedure will not be created as it is but be created as a # ReturnUnconstrained stub if it satisfies the following conditions: # - It doesn't add any new exits. # - It returns as normal. # In this way, we can speed up the CFG generation by quite a lot as we avoid simulating # those functions like read() and puts(), which has no impact on the overall control flow at all. # # Special notes about SimProcedure continuation: Any SimProcedure instance that is a continuation # will add new exits, otherwise the original SimProcedure wouldn't have been executed anyway. 
Hence # it's reasonable for us to always simulate a SimProcedure with continuation. old_name = None if old_proc.is_syscall: new_stub = SIM_PROCEDURES['stubs']['syscall'] ret_to = state.regs.ip_at_syscall # depends on [control=['if'], data=[]] else: # normal SimProcedures new_stub = SIM_PROCEDURES['stubs']['ReturnUnconstrained'] ret_to = None old_name = old_proc.display_name # instantiate the stub new_stub_inst = new_stub(display_name=old_name) sim_successors = self.project.engines.procedure_engine.process(state, new_stub_inst, force_addr=addr, ret_to=ret_to) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if sim_successors is None: jumpkind = state.history.jumpkind jumpkind = 'Ijk_Boring' if jumpkind is None else jumpkind sim_successors = self.project.factory.successors(state, jumpkind=jumpkind, size=block_size, opt_level=self._iropt_level) # depends on [control=['if'], data=['sim_successors']] # depends on [control=['try'], data=[]] except (SimFastPathError, SimSolverModeError) as ex: if saved_state.mode == 'fastpath': # Got a SimFastPathError or SimSolverModeError in FastPath mode. # We wanna switch to symbolic mode for current IRSB. l.debug('Switch to symbolic mode for address %#x', addr) # Make a copy of the current 'fastpath' state l.debug('Symbolic jumps at basic block %#x.', addr) new_state = None if addr != current_function_addr: new_state = self._get_symbolic_function_initial_state(current_function_addr) # depends on [control=['if'], data=['current_function_addr']] if new_state is None: new_state = state.copy() new_state.set_mode('symbolic') # depends on [control=['if'], data=['new_state']] new_state.options.add(o.DO_RET_EMULATION) # Remove bad constraints # FIXME: This is so hackish... 
new_state.solver._solver.constraints = [c for c in new_state.solver.constraints if c.op != 'BoolV' or c.args[0] is not False] new_state.solver._solver._result = None # Swap them (saved_state, job.state) = (job.state, new_state) (sim_successors, exception_info, _) = self._get_simsuccessors(addr, job) # depends on [control=['if'], data=[]] else: exception_info = sys.exc_info() # Got a SimSolverModeError in symbolic mode. We are screwed. # Skip this IRSB l.debug("Caught a SimIRSBError %s. Don't panic, this is usually expected.", ex) inst = SIM_PROCEDURES['stubs']['PathTerminator']() sim_successors = SimEngineProcedure().process(state, inst) # depends on [control=['except'], data=['ex']] except SimIRSBError: exception_info = sys.exc_info() # It's a tragedy that we came across some instructions that VEX # does not support. I'll create a terminating stub there l.debug('Caught a SimIRSBError during CFG recovery. Creating a PathTerminator.', exc_info=True) inst = SIM_PROCEDURES['stubs']['PathTerminator']() sim_successors = SimEngineProcedure().process(state, inst) # depends on [control=['except'], data=[]] except claripy.ClaripyError: exception_info = sys.exc_info() l.debug("Caught a ClaripyError during CFG recovery. Don't panic, this is usually expected.", exc_info=True) # Generate a PathTerminator to terminate the current path inst = SIM_PROCEDURES['stubs']['PathTerminator']() sim_successors = SimEngineProcedure().process(state, inst) # depends on [control=['except'], data=[]] except SimError: exception_info = sys.exc_info() l.debug("Caught a SimError during CFG recovery. Don't panic, this is usually expected.", exc_info=True) # Generate a PathTerminator to terminate the current path inst = SIM_PROCEDURES['stubs']['PathTerminator']() sim_successors = SimEngineProcedure().process(state, inst) # depends on [control=['except'], data=[]] except AngrExitError as ex: exception_info = sys.exc_info() l.debug("Caught a AngrExitError during CFG recovery. 
Don't panic, this is usually expected.", exc_info=True) # Generate a PathTerminator to terminate the current path inst = SIM_PROCEDURES['stubs']['PathTerminator']() sim_successors = SimEngineProcedure().process(state, inst) # depends on [control=['except'], data=[]] except AngrError: exception_info = sys.exc_info() section = self.project.loader.main_object.find_section_containing(addr) if section is None: sec_name = 'No section' # depends on [control=['if'], data=[]] else: sec_name = section.name # AngrError shouldn't really happen though l.debug('Caught an AngrError during CFG recovery at %#x (%s)', addr, sec_name, exc_info=True) # We might be on a wrong branch, and is likely to encounter the # "No bytes in memory xxx" exception # Just ignore it sim_successors = None # depends on [control=['except'], data=[]] return (sim_successors, exception_info, saved_state)
def __isValidFilename(self, filename): """Determine whether filename is valid""" if filename and isinstance(filename, string_types): if re.match(r'^[\w\d\_\-\.]+$', filename, re.I): if self.__isValidTGZ(filename) or self.__isValidZIP(filename): return True return False
def function[__isValidFilename, parameter[self, filename]]: constant[Determine whether filename is valid] if <ast.BoolOp object at 0x7da1b0cba1d0> begin[:] if call[name[re].match, parameter[constant[^[\w\d\_\-\.]+$], name[filename], name[re].I]] begin[:] if <ast.BoolOp object at 0x7da1b0cb9090> begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[__isValidFilename] ( identifier[self] , identifier[filename] ): literal[string] keyword[if] identifier[filename] keyword[and] identifier[isinstance] ( identifier[filename] , identifier[string_types] ): keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[filename] , identifier[re] . identifier[I] ): keyword[if] identifier[self] . identifier[__isValidTGZ] ( identifier[filename] ) keyword[or] identifier[self] . identifier[__isValidZIP] ( identifier[filename] ): keyword[return] keyword[True] keyword[return] keyword[False]
def __isValidFilename(self, filename): """Determine whether filename is valid""" if filename and isinstance(filename, string_types): if re.match('^[\\w\\d\\_\\-\\.]+$', filename, re.I): if self.__isValidTGZ(filename) or self.__isValidZIP(filename): return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return False
def to_json_data(self): """ Returns ------- A dictionary of serialized data. """ # create data d = collections.OrderedDict((t.get_ref(), t.to_json_data()) for t in self._tables.values()) d["_comment"] = self._comment d.move_to_end("_comment", last=False) d["_external_files"] = self._dev_external_files_manager return d
def function[to_json_data, parameter[self]]: constant[ Returns ------- A dictionary of serialized data. ] variable[d] assign[=] call[name[collections].OrderedDict, parameter[<ast.GeneratorExp object at 0x7da20c795c90>]] call[name[d]][constant[_comment]] assign[=] name[self]._comment call[name[d].move_to_end, parameter[constant[_comment]]] call[name[d]][constant[_external_files]] assign[=] name[self]._dev_external_files_manager return[name[d]]
keyword[def] identifier[to_json_data] ( identifier[self] ): literal[string] identifier[d] = identifier[collections] . identifier[OrderedDict] (( identifier[t] . identifier[get_ref] (), identifier[t] . identifier[to_json_data] ()) keyword[for] identifier[t] keyword[in] identifier[self] . identifier[_tables] . identifier[values] ()) identifier[d] [ literal[string] ]= identifier[self] . identifier[_comment] identifier[d] . identifier[move_to_end] ( literal[string] , identifier[last] = keyword[False] ) identifier[d] [ literal[string] ]= identifier[self] . identifier[_dev_external_files_manager] keyword[return] identifier[d]
def to_json_data(self): """ Returns ------- A dictionary of serialized data. """ # create data d = collections.OrderedDict(((t.get_ref(), t.to_json_data()) for t in self._tables.values())) d['_comment'] = self._comment d.move_to_end('_comment', last=False) d['_external_files'] = self._dev_external_files_manager return d
def activate(self, path, isdirectory): """ Set up a boto connection. """ from .utils import connection_with_anon, connection_with_gs parsed = BotoClient.parse_query(path) scheme = parsed[0] bucket_name = parsed[1] key = parsed[2] if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(self.credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) if isdirectory and (not key.endswith("/")): key += "/" self._scheme = scheme self._conn = conn self._key = key self._bucket = bucket self._active = True
def function[activate, parameter[self, path, isdirectory]]: constant[ Set up a boto connection. ] from relative_module[utils] import module[connection_with_anon], module[connection_with_gs] variable[parsed] assign[=] call[name[BotoClient].parse_query, parameter[name[path]]] variable[scheme] assign[=] call[name[parsed]][constant[0]] variable[bucket_name] assign[=] call[name[parsed]][constant[1]] variable[key] assign[=] call[name[parsed]][constant[2]] if <ast.BoolOp object at 0x7da18dc9a110> begin[:] variable[conn] assign[=] call[name[connection_with_anon], parameter[name[self].credentials]] variable[bucket] assign[=] call[name[conn].get_bucket, parameter[name[bucket_name]]] if <ast.BoolOp object at 0x7da18dc99210> begin[:] <ast.AugAssign object at 0x7da18dc9a800> name[self]._scheme assign[=] name[scheme] name[self]._conn assign[=] name[conn] name[self]._key assign[=] name[key] name[self]._bucket assign[=] name[bucket] name[self]._active assign[=] constant[True]
keyword[def] identifier[activate] ( identifier[self] , identifier[path] , identifier[isdirectory] ): literal[string] keyword[from] . identifier[utils] keyword[import] identifier[connection_with_anon] , identifier[connection_with_gs] identifier[parsed] = identifier[BotoClient] . identifier[parse_query] ( identifier[path] ) identifier[scheme] = identifier[parsed] [ literal[int] ] identifier[bucket_name] = identifier[parsed] [ literal[int] ] identifier[key] = identifier[parsed] [ literal[int] ] keyword[if] identifier[scheme] == literal[string] keyword[or] identifier[scheme] == literal[string] : identifier[conn] = identifier[connection_with_anon] ( identifier[self] . identifier[credentials] ) identifier[bucket] = identifier[conn] . identifier[get_bucket] ( identifier[bucket_name] ) keyword[elif] identifier[scheme] == literal[string] : identifier[conn] = identifier[connection_with_gs] ( identifier[bucket_name] ) identifier[bucket] = identifier[conn] . identifier[get_bucket] () keyword[else] : keyword[raise] identifier[NotImplementedError] ( literal[string] + identifier[scheme] ) keyword[if] identifier[isdirectory] keyword[and] ( keyword[not] identifier[key] . identifier[endswith] ( literal[string] )): identifier[key] += literal[string] identifier[self] . identifier[_scheme] = identifier[scheme] identifier[self] . identifier[_conn] = identifier[conn] identifier[self] . identifier[_key] = identifier[key] identifier[self] . identifier[_bucket] = identifier[bucket] identifier[self] . identifier[_active] = keyword[True]
def activate(self, path, isdirectory): """ Set up a boto connection. """ from .utils import connection_with_anon, connection_with_gs parsed = BotoClient.parse_query(path) scheme = parsed[0] bucket_name = parsed[1] key = parsed[2] if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(self.credentials) bucket = conn.get_bucket(bucket_name) # depends on [control=['if'], data=[]] elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() # depends on [control=['if'], data=[]] else: raise NotImplementedError('No file reader implementation for URL scheme ' + scheme) if isdirectory and (not key.endswith('/')): key += '/' # depends on [control=['if'], data=[]] self._scheme = scheme self._conn = conn self._key = key self._bucket = bucket self._active = True
def Get_rhos(dur, **kwargs): ''' Returns the value of the stellar density for a given transit duration :py:obj:`dur`, given the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`. ''' if ps is None: raise Exception("Unable to import `pysyzygy`.") assert dur >= 0.01 and dur <= 0.5, "Invalid value for the duration." def Dur(rhos, **kwargs): t0 = kwargs.get('t0', 0.) time = np.linspace(t0 - 0.5, t0 + 0.5, 1000) try: t = time[np.where(ps.Transit(rhos=rhos, **kwargs)(time) < 1)] except: return 0. return t[-1] - t[0] def DiffSq(rhos): return (dur - Dur(rhos, **kwargs)) ** 2 return fmin(DiffSq, [0.2], disp=False)
def function[Get_rhos, parameter[dur]]: constant[ Returns the value of the stellar density for a given transit duration :py:obj:`dur`, given the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`. ] if compare[name[ps] is constant[None]] begin[:] <ast.Raise object at 0x7da1b0fe7a60> assert[<ast.BoolOp object at 0x7da1b0fe7010>] def function[Dur, parameter[rhos]]: variable[t0] assign[=] call[name[kwargs].get, parameter[constant[t0], constant[0.0]]] variable[time] assign[=] call[name[np].linspace, parameter[binary_operation[name[t0] - constant[0.5]], binary_operation[name[t0] + constant[0.5]], constant[1000]]] <ast.Try object at 0x7da1b0fe6f80> return[binary_operation[call[name[t]][<ast.UnaryOp object at 0x7da1b0fe7a90>] - call[name[t]][constant[0]]]] def function[DiffSq, parameter[rhos]]: return[binary_operation[binary_operation[name[dur] - call[name[Dur], parameter[name[rhos]]]] ** constant[2]]] return[call[name[fmin], parameter[name[DiffSq], list[[<ast.Constant object at 0x7da1b0fe6740>]]]]]
keyword[def] identifier[Get_rhos] ( identifier[dur] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[ps] keyword[is] keyword[None] : keyword[raise] identifier[Exception] ( literal[string] ) keyword[assert] identifier[dur] >= literal[int] keyword[and] identifier[dur] <= literal[int] , literal[string] keyword[def] identifier[Dur] ( identifier[rhos] ,** identifier[kwargs] ): identifier[t0] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] ) identifier[time] = identifier[np] . identifier[linspace] ( identifier[t0] - literal[int] , identifier[t0] + literal[int] , literal[int] ) keyword[try] : identifier[t] = identifier[time] [ identifier[np] . identifier[where] ( identifier[ps] . identifier[Transit] ( identifier[rhos] = identifier[rhos] ,** identifier[kwargs] )( identifier[time] )< literal[int] )] keyword[except] : keyword[return] literal[int] keyword[return] identifier[t] [- literal[int] ]- identifier[t] [ literal[int] ] keyword[def] identifier[DiffSq] ( identifier[rhos] ): keyword[return] ( identifier[dur] - identifier[Dur] ( identifier[rhos] ,** identifier[kwargs] ))** literal[int] keyword[return] identifier[fmin] ( identifier[DiffSq] ,[ literal[int] ], identifier[disp] = keyword[False] )
def Get_rhos(dur, **kwargs): """ Returns the value of the stellar density for a given transit duration :py:obj:`dur`, given the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`. """ if ps is None: raise Exception('Unable to import `pysyzygy`.') # depends on [control=['if'], data=[]] assert dur >= 0.01 and dur <= 0.5, 'Invalid value for the duration.' def Dur(rhos, **kwargs): t0 = kwargs.get('t0', 0.0) time = np.linspace(t0 - 0.5, t0 + 0.5, 1000) try: t = time[np.where(ps.Transit(rhos=rhos, **kwargs)(time) < 1)] # depends on [control=['try'], data=[]] except: return 0.0 # depends on [control=['except'], data=[]] return t[-1] - t[0] def DiffSq(rhos): return (dur - Dur(rhos, **kwargs)) ** 2 return fmin(DiffSq, [0.2], disp=False)
def plot_profile(ribo_counts, transcript_name, transcript_length, start_stops, read_lengths=None, read_offsets=None, rna_counts=None, color_scheme='default', html_file='index.html', output_path='output'): """Plot read counts (in all 3 frames) and RNA coverage if provided for a single transcript. """ colors = get_color_palette(scheme=color_scheme) gs = gridspec.GridSpec(3, 1, height_ratios=[6, 1.3, 0.5], hspace=0.35) font_axis = {'family': 'sans-serif', 'color': colors['color'], 'weight': 'bold', 'size': 7} # riboseq bar plots gs2 = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs[0]) ax2 = plt.subplot(gs2[0]) label = 'Ribo-Seq count' if read_lengths: if len(read_lengths) > 1: label = 'Ribo-Seq count ({}-mers)'.format(', '.join('{}'.format(item) for item in read_lengths)) else: label = 'Ribo-Seq count ({}-mer)'.format('{}'.format(read_lengths[0])) ax2.set_ylabel(label, fontdict=font_axis, labelpad=10) # rna coverage if available ax_rna = None if rna_counts: ax_rna = ax2.twinx() ax_rna.set_ylabel('RNA-Seq count', fontdict=font_axis, labelpad=10) ax_rna.bar(rna_counts.keys(), rna_counts.values(), facecolor=colors['rna'], edgecolor=colors['rna'], label='RNA') ax_rna.set_zorder(1) frame_counts = {1: {}, 2: {}, 3: {}} for k, v in ribo_counts.iteritems(): for fr in (1, 2, 3): if v[fr] > 0: frame_counts[fr][k] = v[fr] break cnts = [] [cnts.extend(item.values()) for item in frame_counts.values()] y_max = float(max(cnts) * 1.25) ax2.set_ylim(0.0, y_max) ax2.set_zorder(2) ax2.patch.set_facecolor('none') for frame in (1, 2, 3): color = colors['frames'][frame - 1] x_vals = frame_counts[frame].keys() ax2.bar(x_vals, frame_counts[frame].values(), color=color, facecolor=color, edgecolor=color) # ORF architecture gs3 = gridspec.GridSpecFromSubplotSpec(3, 1, subplot_spec=gs[1], hspace=0.1) if color_scheme == 'greyorfs': axisbg = [colors['grey'] for i in range(3)] else: axisbg = colors['frames'] ax4 = plt.subplot(gs3[0], sharex=ax2, axisbg=axisbg[0]) ax5 = plt.subplot(gs3[1], 
sharex=ax2, axisbg=axisbg[1]) ax6 = plt.subplot(gs3[2], sharex=ax2, axisbg=axisbg[2]) ax6.set_xlabel('Transcript length ({} nt)'.format(transcript_length), fontdict=font_axis, labelpad=6) # Legend gs4 = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs[2], hspace=0.1) ax7 = plt.subplot(gs4[0], axisbg=colors['background']) set_axis_color(ax7, colors['background']) ax7.text(0.02, 0.1, "AUG", size=5, ha="center", va="center", color=colors['color'], bbox=dict(boxstyle="square", facecolor=colors['start'], edgecolor=colors['color'], linewidth=0.3)) ax7.text(0.06, 0.1, "STOP", size=5, ha="center", va="center", color='white', bbox=dict(boxstyle="square", color=colors['stop'])) ax7.text(0.13, 0.1, "Frames", size=5, ha='center', va='center', color=colors['color'], fontdict={'weight': 'bold'}) ax7.text(0.17, 0.1, "1", size=5, ha="center", va="center", color='white', bbox=dict(boxstyle="square", color=colors['frames'][0])) ax7.text(0.19, 0.1, "2", size=5, ha="center", va="center", color='white', bbox=dict(boxstyle="square", color=colors['frames'][1])) ax7.text(0.21, 0.1, "3", size=5, ha="center", va="center", color='white', bbox=dict(boxstyle="square", color=colors['frames'][2])) # No ticks or labels for ORF 1, 2 and Legend for axis in (ax4, ax5, ax7): axis.tick_params(top=False, left=False, right=False, bottom=False, labeltop=False, labelleft=False, labelright=False, labelbottom=False) axes = [ax2] if ax_rna: axes.append(ax_rna) fp = FontProperties(size='5') for axis in axes: set_axis_color(axis, colors['axis']) axis.tick_params(colors=colors['ticks']) for item in (axis.get_xticklabels() + axis.get_yticklabels()): item.set_fontproperties(fp) item.set_color(colors['color']) for axis, frame in ((ax4, 1), (ax5, 2), (ax6, 3)): if color_scheme == 'greyorfs': color = colors['grey'] else: color = colors['frames'][frame - 1] set_axis_color(axis, color, alpha=0.05) axis.patch.set_alpha(0.3) # opacity of ORF architecture for item in (axis.get_xticklabels()): 
item.set_fontproperties(fp) item.set_color(colors['color']) axis.set_ylim(0, 0.2) axis.set_xlim(0, transcript_length) starts = [(item, 1) for item in start_stops[frame]['starts']] stops = [(item, 1) for item in start_stops[frame]['stops']] start_colors = [colors['start'] for item in starts] axis.broken_barh(starts, (0.11, 0.2), facecolors=start_colors, edgecolors=start_colors, label='start', zorder=5) stop_colors = [colors['stop'] for item in stops] axis.broken_barh(stops, (0, 0.2), facecolors=stop_colors, edgecolors=stop_colors, label='stop', zorder=5) axis.set_ylabel('{}'.format(frame), fontdict={'family': 'sans-serif', 'color': colors['color'], 'weight': 'normal', 'size': '6'}, rotation='horizontal', labelpad=10, verticalalignment='center') axis.tick_params(top=False, left=False, right=False, labeltop=False, labelleft=False, labelright=False, direction='out', colors=colors['ticks']) plt.title('{}'.format(transcript_name), fontdict={'family': 'sans-serif', 'color': colors['color'], 'weight': 'bold', 'size': 8, 'y': 20}) if not os.path.exists(output_path): os.mkdir(output_path) plt.savefig(os.path.join(output_path, 'riboplot.svg'), facecolor=colors['background']) plt.savefig(os.path.join(output_path, 'riboplot.png'), dpi=600, facecolor=colors['background']) with open(os.path.join(CONFIG.PKG_DATA_DIR, 'riboplot.html')) as g, open(os.path.join(output_path, html_file), 'w') as h: h.write(g.read().format(transcript_name=transcript_name)) css_dir = os.path.join(output_path, 'css') if not os.path.exists(css_dir): os.mkdir(css_dir) css_data_dir = os.path.join(CONFIG.PKG_DATA_DIR, 'css') for fname in os.listdir(css_data_dir): shutil.copy(os.path.join(css_data_dir, fname), os.path.join(output_path, 'css', fname))
def function[plot_profile, parameter[ribo_counts, transcript_name, transcript_length, start_stops, read_lengths, read_offsets, rna_counts, color_scheme, html_file, output_path]]: constant[Plot read counts (in all 3 frames) and RNA coverage if provided for a single transcript. ] variable[colors] assign[=] call[name[get_color_palette], parameter[]] variable[gs] assign[=] call[name[gridspec].GridSpec, parameter[constant[3], constant[1]]] variable[font_axis] assign[=] dictionary[[<ast.Constant object at 0x7da1b092c880>, <ast.Constant object at 0x7da1b092ead0>, <ast.Constant object at 0x7da1b092e860>, <ast.Constant object at 0x7da1b092d6c0>], [<ast.Constant object at 0x7da1b092f4c0>, <ast.Subscript object at 0x7da1b092e380>, <ast.Constant object at 0x7da1b092e500>, <ast.Constant object at 0x7da1b092e9b0>]] variable[gs2] assign[=] call[name[gridspec].GridSpecFromSubplotSpec, parameter[constant[1], constant[1]]] variable[ax2] assign[=] call[name[plt].subplot, parameter[call[name[gs2]][constant[0]]]] variable[label] assign[=] constant[Ribo-Seq count] if name[read_lengths] begin[:] if compare[call[name[len], parameter[name[read_lengths]]] greater[>] constant[1]] begin[:] variable[label] assign[=] call[constant[Ribo-Seq count ({}-mers)].format, parameter[call[constant[, ].join, parameter[<ast.GeneratorExp object at 0x7da1b092f100>]]]] call[name[ax2].set_ylabel, parameter[name[label]]] variable[ax_rna] assign[=] constant[None] if name[rna_counts] begin[:] variable[ax_rna] assign[=] call[name[ax2].twinx, parameter[]] call[name[ax_rna].set_ylabel, parameter[constant[RNA-Seq count]]] call[name[ax_rna].bar, parameter[call[name[rna_counts].keys, parameter[]], call[name[rna_counts].values, parameter[]]]] call[name[ax_rna].set_zorder, parameter[constant[1]]] variable[frame_counts] assign[=] dictionary[[<ast.Constant object at 0x7da1b092cf40>, <ast.Constant object at 0x7da1b092cfd0>, <ast.Constant object at 0x7da1b092ca60>], [<ast.Dict object at 0x7da1b092dbd0>, <ast.Dict object at 
0x7da1b092cf70>, <ast.Dict object at 0x7da1b092cd90>]] for taget[tuple[[<ast.Name object at 0x7da1b092d1b0>, <ast.Name object at 0x7da1b092d210>]]] in starred[call[name[ribo_counts].iteritems, parameter[]]] begin[:] for taget[name[fr]] in starred[tuple[[<ast.Constant object at 0x7da1b092d150>, <ast.Constant object at 0x7da1b092cd60>, <ast.Constant object at 0x7da1b092cdf0>]]] begin[:] if compare[call[name[v]][name[fr]] greater[>] constant[0]] begin[:] call[call[name[frame_counts]][name[fr]]][name[k]] assign[=] call[name[v]][name[fr]] break variable[cnts] assign[=] list[[]] <ast.ListComp object at 0x7da1b092e170> variable[y_max] assign[=] call[name[float], parameter[binary_operation[call[name[max], parameter[name[cnts]]] * constant[1.25]]]] call[name[ax2].set_ylim, parameter[constant[0.0], name[y_max]]] call[name[ax2].set_zorder, parameter[constant[2]]] call[name[ax2].patch.set_facecolor, parameter[constant[none]]] for taget[name[frame]] in starred[tuple[[<ast.Constant object at 0x7da1b092e620>, <ast.Constant object at 0x7da1b092e470>, <ast.Constant object at 0x7da1b092e1d0>]]] begin[:] variable[color] assign[=] call[call[name[colors]][constant[frames]]][binary_operation[name[frame] - constant[1]]] variable[x_vals] assign[=] call[call[name[frame_counts]][name[frame]].keys, parameter[]] call[name[ax2].bar, parameter[name[x_vals], call[call[name[frame_counts]][name[frame]].values, parameter[]]]] variable[gs3] assign[=] call[name[gridspec].GridSpecFromSubplotSpec, parameter[constant[3], constant[1]]] if compare[name[color_scheme] equal[==] constant[greyorfs]] begin[:] variable[axisbg] assign[=] <ast.ListComp object at 0x7da1b092f8b0> variable[ax4] assign[=] call[name[plt].subplot, parameter[call[name[gs3]][constant[0]]]] variable[ax5] assign[=] call[name[plt].subplot, parameter[call[name[gs3]][constant[1]]]] variable[ax6] assign[=] call[name[plt].subplot, parameter[call[name[gs3]][constant[2]]]] call[name[ax6].set_xlabel, parameter[call[constant[Transcript length ({} 
nt)].format, parameter[name[transcript_length]]]]] variable[gs4] assign[=] call[name[gridspec].GridSpecFromSubplotSpec, parameter[constant[1], constant[1]]] variable[ax7] assign[=] call[name[plt].subplot, parameter[call[name[gs4]][constant[0]]]] call[name[set_axis_color], parameter[name[ax7], call[name[colors]][constant[background]]]] call[name[ax7].text, parameter[constant[0.02], constant[0.1], constant[AUG]]] call[name[ax7].text, parameter[constant[0.06], constant[0.1], constant[STOP]]] call[name[ax7].text, parameter[constant[0.13], constant[0.1], constant[Frames]]] call[name[ax7].text, parameter[constant[0.17], constant[0.1], constant[1]]] call[name[ax7].text, parameter[constant[0.19], constant[0.1], constant[2]]] call[name[ax7].text, parameter[constant[0.21], constant[0.1], constant[3]]] for taget[name[axis]] in starred[tuple[[<ast.Name object at 0x7da1b0a23970>, <ast.Name object at 0x7da1b0a20700>, <ast.Name object at 0x7da1b0a23b20>]]] begin[:] call[name[axis].tick_params, parameter[]] variable[axes] assign[=] list[[<ast.Name object at 0x7da1b0a220b0>]] if name[ax_rna] begin[:] call[name[axes].append, parameter[name[ax_rna]]] variable[fp] assign[=] call[name[FontProperties], parameter[]] for taget[name[axis]] in starred[name[axes]] begin[:] call[name[set_axis_color], parameter[name[axis], call[name[colors]][constant[axis]]]] call[name[axis].tick_params, parameter[]] for taget[name[item]] in starred[binary_operation[call[name[axis].get_xticklabels, parameter[]] + call[name[axis].get_yticklabels, parameter[]]]] begin[:] call[name[item].set_fontproperties, parameter[name[fp]]] call[name[item].set_color, parameter[call[name[colors]][constant[color]]]] for taget[tuple[[<ast.Name object at 0x7da1b0a22e30>, <ast.Name object at 0x7da1b0a22320>]]] in starred[tuple[[<ast.Tuple object at 0x7da1b0a21060>, <ast.Tuple object at 0x7da1b0a22a10>, <ast.Tuple object at 0x7da1b0a21120>]]] begin[:] if compare[name[color_scheme] equal[==] constant[greyorfs]] begin[:] 
variable[color] assign[=] call[name[colors]][constant[grey]] call[name[set_axis_color], parameter[name[axis], name[color]]] call[name[axis].patch.set_alpha, parameter[constant[0.3]]] for taget[name[item]] in starred[call[name[axis].get_xticklabels, parameter[]]] begin[:] call[name[item].set_fontproperties, parameter[name[fp]]] call[name[item].set_color, parameter[call[name[colors]][constant[color]]]] call[name[axis].set_ylim, parameter[constant[0], constant[0.2]]] call[name[axis].set_xlim, parameter[constant[0], name[transcript_length]]] variable[starts] assign[=] <ast.ListComp object at 0x7da1b09171c0> variable[stops] assign[=] <ast.ListComp object at 0x7da1b09170d0> variable[start_colors] assign[=] <ast.ListComp object at 0x7da1b0917a90> call[name[axis].broken_barh, parameter[name[starts], tuple[[<ast.Constant object at 0x7da1b0917d00>, <ast.Constant object at 0x7da1b0917d30>]]]] variable[stop_colors] assign[=] <ast.ListComp object at 0x7da1b0914850> call[name[axis].broken_barh, parameter[name[stops], tuple[[<ast.Constant object at 0x7da1b0916e60>, <ast.Constant object at 0x7da1b0914df0>]]]] call[name[axis].set_ylabel, parameter[call[constant[{}].format, parameter[name[frame]]]]] call[name[axis].tick_params, parameter[]] call[name[plt].title, parameter[call[constant[{}].format, parameter[name[transcript_name]]]]] if <ast.UnaryOp object at 0x7da1b0915cc0> begin[:] call[name[os].mkdir, parameter[name[output_path]]] call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[output_path], constant[riboplot.svg]]]]] call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[output_path], constant[riboplot.png]]]]] with call[name[open], parameter[call[name[os].path.join, parameter[name[CONFIG].PKG_DATA_DIR, constant[riboplot.html]]]]] begin[:] call[name[h].write, parameter[call[call[name[g].read, parameter[]].format, parameter[]]]] variable[css_dir] assign[=] call[name[os].path.join, parameter[name[output_path], constant[css]]] if 
<ast.UnaryOp object at 0x7da1b0915840> begin[:] call[name[os].mkdir, parameter[name[css_dir]]] variable[css_data_dir] assign[=] call[name[os].path.join, parameter[name[CONFIG].PKG_DATA_DIR, constant[css]]] for taget[name[fname]] in starred[call[name[os].listdir, parameter[name[css_data_dir]]]] begin[:] call[name[shutil].copy, parameter[call[name[os].path.join, parameter[name[css_data_dir], name[fname]]], call[name[os].path.join, parameter[name[output_path], constant[css], name[fname]]]]]
keyword[def] identifier[plot_profile] ( identifier[ribo_counts] , identifier[transcript_name] , identifier[transcript_length] , identifier[start_stops] , identifier[read_lengths] = keyword[None] , identifier[read_offsets] = keyword[None] , identifier[rna_counts] = keyword[None] , identifier[color_scheme] = literal[string] , identifier[html_file] = literal[string] , identifier[output_path] = literal[string] ): literal[string] identifier[colors] = identifier[get_color_palette] ( identifier[scheme] = identifier[color_scheme] ) identifier[gs] = identifier[gridspec] . identifier[GridSpec] ( literal[int] , literal[int] , identifier[height_ratios] =[ literal[int] , literal[int] , literal[int] ], identifier[hspace] = literal[int] ) identifier[font_axis] ={ literal[string] : literal[string] , literal[string] : identifier[colors] [ literal[string] ], literal[string] : literal[string] , literal[string] : literal[int] } identifier[gs2] = identifier[gridspec] . identifier[GridSpecFromSubplotSpec] ( literal[int] , literal[int] , identifier[subplot_spec] = identifier[gs] [ literal[int] ]) identifier[ax2] = identifier[plt] . identifier[subplot] ( identifier[gs2] [ literal[int] ]) identifier[label] = literal[string] keyword[if] identifier[read_lengths] : keyword[if] identifier[len] ( identifier[read_lengths] )> literal[int] : identifier[label] = literal[string] . identifier[format] ( literal[string] . identifier[join] ( literal[string] . identifier[format] ( identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[read_lengths] )) keyword[else] : identifier[label] = literal[string] . identifier[format] ( literal[string] . identifier[format] ( identifier[read_lengths] [ literal[int] ])) identifier[ax2] . identifier[set_ylabel] ( identifier[label] , identifier[fontdict] = identifier[font_axis] , identifier[labelpad] = literal[int] ) identifier[ax_rna] = keyword[None] keyword[if] identifier[rna_counts] : identifier[ax_rna] = identifier[ax2] . 
identifier[twinx] () identifier[ax_rna] . identifier[set_ylabel] ( literal[string] , identifier[fontdict] = identifier[font_axis] , identifier[labelpad] = literal[int] ) identifier[ax_rna] . identifier[bar] ( identifier[rna_counts] . identifier[keys] (), identifier[rna_counts] . identifier[values] (), identifier[facecolor] = identifier[colors] [ literal[string] ], identifier[edgecolor] = identifier[colors] [ literal[string] ], identifier[label] = literal[string] ) identifier[ax_rna] . identifier[set_zorder] ( literal[int] ) identifier[frame_counts] ={ literal[int] :{}, literal[int] :{}, literal[int] :{}} keyword[for] identifier[k] , identifier[v] keyword[in] identifier[ribo_counts] . identifier[iteritems] (): keyword[for] identifier[fr] keyword[in] ( literal[int] , literal[int] , literal[int] ): keyword[if] identifier[v] [ identifier[fr] ]> literal[int] : identifier[frame_counts] [ identifier[fr] ][ identifier[k] ]= identifier[v] [ identifier[fr] ] keyword[break] identifier[cnts] =[] [ identifier[cnts] . identifier[extend] ( identifier[item] . identifier[values] ()) keyword[for] identifier[item] keyword[in] identifier[frame_counts] . identifier[values] ()] identifier[y_max] = identifier[float] ( identifier[max] ( identifier[cnts] )* literal[int] ) identifier[ax2] . identifier[set_ylim] ( literal[int] , identifier[y_max] ) identifier[ax2] . identifier[set_zorder] ( literal[int] ) identifier[ax2] . identifier[patch] . identifier[set_facecolor] ( literal[string] ) keyword[for] identifier[frame] keyword[in] ( literal[int] , literal[int] , literal[int] ): identifier[color] = identifier[colors] [ literal[string] ][ identifier[frame] - literal[int] ] identifier[x_vals] = identifier[frame_counts] [ identifier[frame] ]. identifier[keys] () identifier[ax2] . identifier[bar] ( identifier[x_vals] , identifier[frame_counts] [ identifier[frame] ]. 
identifier[values] (), identifier[color] = identifier[color] , identifier[facecolor] = identifier[color] , identifier[edgecolor] = identifier[color] ) identifier[gs3] = identifier[gridspec] . identifier[GridSpecFromSubplotSpec] ( literal[int] , literal[int] , identifier[subplot_spec] = identifier[gs] [ literal[int] ], identifier[hspace] = literal[int] ) keyword[if] identifier[color_scheme] == literal[string] : identifier[axisbg] =[ identifier[colors] [ literal[string] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] )] keyword[else] : identifier[axisbg] = identifier[colors] [ literal[string] ] identifier[ax4] = identifier[plt] . identifier[subplot] ( identifier[gs3] [ literal[int] ], identifier[sharex] = identifier[ax2] , identifier[axisbg] = identifier[axisbg] [ literal[int] ]) identifier[ax5] = identifier[plt] . identifier[subplot] ( identifier[gs3] [ literal[int] ], identifier[sharex] = identifier[ax2] , identifier[axisbg] = identifier[axisbg] [ literal[int] ]) identifier[ax6] = identifier[plt] . identifier[subplot] ( identifier[gs3] [ literal[int] ], identifier[sharex] = identifier[ax2] , identifier[axisbg] = identifier[axisbg] [ literal[int] ]) identifier[ax6] . identifier[set_xlabel] ( literal[string] . identifier[format] ( identifier[transcript_length] ), identifier[fontdict] = identifier[font_axis] , identifier[labelpad] = literal[int] ) identifier[gs4] = identifier[gridspec] . identifier[GridSpecFromSubplotSpec] ( literal[int] , literal[int] , identifier[subplot_spec] = identifier[gs] [ literal[int] ], identifier[hspace] = literal[int] ) identifier[ax7] = identifier[plt] . identifier[subplot] ( identifier[gs4] [ literal[int] ], identifier[axisbg] = identifier[colors] [ literal[string] ]) identifier[set_axis_color] ( identifier[ax7] , identifier[colors] [ literal[string] ]) identifier[ax7] . 
identifier[text] ( literal[int] , literal[int] , literal[string] , identifier[size] = literal[int] , identifier[ha] = literal[string] , identifier[va] = literal[string] , identifier[color] = identifier[colors] [ literal[string] ], identifier[bbox] = identifier[dict] ( identifier[boxstyle] = literal[string] , identifier[facecolor] = identifier[colors] [ literal[string] ], identifier[edgecolor] = identifier[colors] [ literal[string] ], identifier[linewidth] = literal[int] )) identifier[ax7] . identifier[text] ( literal[int] , literal[int] , literal[string] , identifier[size] = literal[int] , identifier[ha] = literal[string] , identifier[va] = literal[string] , identifier[color] = literal[string] , identifier[bbox] = identifier[dict] ( identifier[boxstyle] = literal[string] , identifier[color] = identifier[colors] [ literal[string] ])) identifier[ax7] . identifier[text] ( literal[int] , literal[int] , literal[string] , identifier[size] = literal[int] , identifier[ha] = literal[string] , identifier[va] = literal[string] , identifier[color] = identifier[colors] [ literal[string] ], identifier[fontdict] ={ literal[string] : literal[string] }) identifier[ax7] . identifier[text] ( literal[int] , literal[int] , literal[string] , identifier[size] = literal[int] , identifier[ha] = literal[string] , identifier[va] = literal[string] , identifier[color] = literal[string] , identifier[bbox] = identifier[dict] ( identifier[boxstyle] = literal[string] , identifier[color] = identifier[colors] [ literal[string] ][ literal[int] ])) identifier[ax7] . identifier[text] ( literal[int] , literal[int] , literal[string] , identifier[size] = literal[int] , identifier[ha] = literal[string] , identifier[va] = literal[string] , identifier[color] = literal[string] , identifier[bbox] = identifier[dict] ( identifier[boxstyle] = literal[string] , identifier[color] = identifier[colors] [ literal[string] ][ literal[int] ])) identifier[ax7] . 
identifier[text] ( literal[int] , literal[int] , literal[string] , identifier[size] = literal[int] , identifier[ha] = literal[string] , identifier[va] = literal[string] , identifier[color] = literal[string] , identifier[bbox] = identifier[dict] ( identifier[boxstyle] = literal[string] , identifier[color] = identifier[colors] [ literal[string] ][ literal[int] ])) keyword[for] identifier[axis] keyword[in] ( identifier[ax4] , identifier[ax5] , identifier[ax7] ): identifier[axis] . identifier[tick_params] ( identifier[top] = keyword[False] , identifier[left] = keyword[False] , identifier[right] = keyword[False] , identifier[bottom] = keyword[False] , identifier[labeltop] = keyword[False] , identifier[labelleft] = keyword[False] , identifier[labelright] = keyword[False] , identifier[labelbottom] = keyword[False] ) identifier[axes] =[ identifier[ax2] ] keyword[if] identifier[ax_rna] : identifier[axes] . identifier[append] ( identifier[ax_rna] ) identifier[fp] = identifier[FontProperties] ( identifier[size] = literal[string] ) keyword[for] identifier[axis] keyword[in] identifier[axes] : identifier[set_axis_color] ( identifier[axis] , identifier[colors] [ literal[string] ]) identifier[axis] . identifier[tick_params] ( identifier[colors] = identifier[colors] [ literal[string] ]) keyword[for] identifier[item] keyword[in] ( identifier[axis] . identifier[get_xticklabels] ()+ identifier[axis] . identifier[get_yticklabels] ()): identifier[item] . identifier[set_fontproperties] ( identifier[fp] ) identifier[item] . 
identifier[set_color] ( identifier[colors] [ literal[string] ]) keyword[for] identifier[axis] , identifier[frame] keyword[in] (( identifier[ax4] , literal[int] ),( identifier[ax5] , literal[int] ),( identifier[ax6] , literal[int] )): keyword[if] identifier[color_scheme] == literal[string] : identifier[color] = identifier[colors] [ literal[string] ] keyword[else] : identifier[color] = identifier[colors] [ literal[string] ][ identifier[frame] - literal[int] ] identifier[set_axis_color] ( identifier[axis] , identifier[color] , identifier[alpha] = literal[int] ) identifier[axis] . identifier[patch] . identifier[set_alpha] ( literal[int] ) keyword[for] identifier[item] keyword[in] ( identifier[axis] . identifier[get_xticklabels] ()): identifier[item] . identifier[set_fontproperties] ( identifier[fp] ) identifier[item] . identifier[set_color] ( identifier[colors] [ literal[string] ]) identifier[axis] . identifier[set_ylim] ( literal[int] , literal[int] ) identifier[axis] . identifier[set_xlim] ( literal[int] , identifier[transcript_length] ) identifier[starts] =[( identifier[item] , literal[int] ) keyword[for] identifier[item] keyword[in] identifier[start_stops] [ identifier[frame] ][ literal[string] ]] identifier[stops] =[( identifier[item] , literal[int] ) keyword[for] identifier[item] keyword[in] identifier[start_stops] [ identifier[frame] ][ literal[string] ]] identifier[start_colors] =[ identifier[colors] [ literal[string] ] keyword[for] identifier[item] keyword[in] identifier[starts] ] identifier[axis] . identifier[broken_barh] ( identifier[starts] ,( literal[int] , literal[int] ), identifier[facecolors] = identifier[start_colors] , identifier[edgecolors] = identifier[start_colors] , identifier[label] = literal[string] , identifier[zorder] = literal[int] ) identifier[stop_colors] =[ identifier[colors] [ literal[string] ] keyword[for] identifier[item] keyword[in] identifier[stops] ] identifier[axis] . 
identifier[broken_barh] ( identifier[stops] ,( literal[int] , literal[int] ), identifier[facecolors] = identifier[stop_colors] , identifier[edgecolors] = identifier[stop_colors] , identifier[label] = literal[string] , identifier[zorder] = literal[int] ) identifier[axis] . identifier[set_ylabel] ( literal[string] . identifier[format] ( identifier[frame] ), identifier[fontdict] ={ literal[string] : literal[string] , literal[string] : identifier[colors] [ literal[string] ], literal[string] : literal[string] , literal[string] : literal[string] }, identifier[rotation] = literal[string] , identifier[labelpad] = literal[int] , identifier[verticalalignment] = literal[string] ) identifier[axis] . identifier[tick_params] ( identifier[top] = keyword[False] , identifier[left] = keyword[False] , identifier[right] = keyword[False] , identifier[labeltop] = keyword[False] , identifier[labelleft] = keyword[False] , identifier[labelright] = keyword[False] , identifier[direction] = literal[string] , identifier[colors] = identifier[colors] [ literal[string] ]) identifier[plt] . identifier[title] ( literal[string] . identifier[format] ( identifier[transcript_name] ), identifier[fontdict] ={ literal[string] : literal[string] , literal[string] : identifier[colors] [ literal[string] ], literal[string] : literal[string] , literal[string] : literal[int] , literal[string] : literal[int] }) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[output_path] ): identifier[os] . identifier[mkdir] ( identifier[output_path] ) identifier[plt] . identifier[savefig] ( identifier[os] . identifier[path] . identifier[join] ( identifier[output_path] , literal[string] ), identifier[facecolor] = identifier[colors] [ literal[string] ]) identifier[plt] . identifier[savefig] ( identifier[os] . identifier[path] . 
identifier[join] ( identifier[output_path] , literal[string] ), identifier[dpi] = literal[int] , identifier[facecolor] = identifier[colors] [ literal[string] ]) keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[CONFIG] . identifier[PKG_DATA_DIR] , literal[string] )) keyword[as] identifier[g] , identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[output_path] , identifier[html_file] ), literal[string] ) keyword[as] identifier[h] : identifier[h] . identifier[write] ( identifier[g] . identifier[read] (). identifier[format] ( identifier[transcript_name] = identifier[transcript_name] )) identifier[css_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[output_path] , literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[css_dir] ): identifier[os] . identifier[mkdir] ( identifier[css_dir] ) identifier[css_data_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[CONFIG] . identifier[PKG_DATA_DIR] , literal[string] ) keyword[for] identifier[fname] keyword[in] identifier[os] . identifier[listdir] ( identifier[css_data_dir] ): identifier[shutil] . identifier[copy] ( identifier[os] . identifier[path] . identifier[join] ( identifier[css_data_dir] , identifier[fname] ), identifier[os] . identifier[path] . identifier[join] ( identifier[output_path] , literal[string] , identifier[fname] ))
def plot_profile(ribo_counts, transcript_name, transcript_length, start_stops, read_lengths=None, read_offsets=None, rna_counts=None, color_scheme='default', html_file='index.html', output_path='output'): """Plot read counts (in all 3 frames) and RNA coverage if provided for a single transcript. """ colors = get_color_palette(scheme=color_scheme) gs = gridspec.GridSpec(3, 1, height_ratios=[6, 1.3, 0.5], hspace=0.35) font_axis = {'family': 'sans-serif', 'color': colors['color'], 'weight': 'bold', 'size': 7} # riboseq bar plots gs2 = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs[0]) ax2 = plt.subplot(gs2[0]) label = 'Ribo-Seq count' if read_lengths: if len(read_lengths) > 1: label = 'Ribo-Seq count ({}-mers)'.format(', '.join(('{}'.format(item) for item in read_lengths))) # depends on [control=['if'], data=[]] else: label = 'Ribo-Seq count ({}-mer)'.format('{}'.format(read_lengths[0])) # depends on [control=['if'], data=[]] ax2.set_ylabel(label, fontdict=font_axis, labelpad=10) # rna coverage if available ax_rna = None if rna_counts: ax_rna = ax2.twinx() ax_rna.set_ylabel('RNA-Seq count', fontdict=font_axis, labelpad=10) ax_rna.bar(rna_counts.keys(), rna_counts.values(), facecolor=colors['rna'], edgecolor=colors['rna'], label='RNA') ax_rna.set_zorder(1) # depends on [control=['if'], data=[]] frame_counts = {1: {}, 2: {}, 3: {}} for (k, v) in ribo_counts.iteritems(): for fr in (1, 2, 3): if v[fr] > 0: frame_counts[fr][k] = v[fr] break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fr']] # depends on [control=['for'], data=[]] cnts = [] [cnts.extend(item.values()) for item in frame_counts.values()] y_max = float(max(cnts) * 1.25) ax2.set_ylim(0.0, y_max) ax2.set_zorder(2) ax2.patch.set_facecolor('none') for frame in (1, 2, 3): color = colors['frames'][frame - 1] x_vals = frame_counts[frame].keys() ax2.bar(x_vals, frame_counts[frame].values(), color=color, facecolor=color, edgecolor=color) # depends on [control=['for'], 
data=['frame']] # ORF architecture gs3 = gridspec.GridSpecFromSubplotSpec(3, 1, subplot_spec=gs[1], hspace=0.1) if color_scheme == 'greyorfs': axisbg = [colors['grey'] for i in range(3)] # depends on [control=['if'], data=[]] else: axisbg = colors['frames'] ax4 = plt.subplot(gs3[0], sharex=ax2, axisbg=axisbg[0]) ax5 = plt.subplot(gs3[1], sharex=ax2, axisbg=axisbg[1]) ax6 = plt.subplot(gs3[2], sharex=ax2, axisbg=axisbg[2]) ax6.set_xlabel('Transcript length ({} nt)'.format(transcript_length), fontdict=font_axis, labelpad=6) # Legend gs4 = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs[2], hspace=0.1) ax7 = plt.subplot(gs4[0], axisbg=colors['background']) set_axis_color(ax7, colors['background']) ax7.text(0.02, 0.1, 'AUG', size=5, ha='center', va='center', color=colors['color'], bbox=dict(boxstyle='square', facecolor=colors['start'], edgecolor=colors['color'], linewidth=0.3)) ax7.text(0.06, 0.1, 'STOP', size=5, ha='center', va='center', color='white', bbox=dict(boxstyle='square', color=colors['stop'])) ax7.text(0.13, 0.1, 'Frames', size=5, ha='center', va='center', color=colors['color'], fontdict={'weight': 'bold'}) ax7.text(0.17, 0.1, '1', size=5, ha='center', va='center', color='white', bbox=dict(boxstyle='square', color=colors['frames'][0])) ax7.text(0.19, 0.1, '2', size=5, ha='center', va='center', color='white', bbox=dict(boxstyle='square', color=colors['frames'][1])) ax7.text(0.21, 0.1, '3', size=5, ha='center', va='center', color='white', bbox=dict(boxstyle='square', color=colors['frames'][2])) # No ticks or labels for ORF 1, 2 and Legend for axis in (ax4, ax5, ax7): axis.tick_params(top=False, left=False, right=False, bottom=False, labeltop=False, labelleft=False, labelright=False, labelbottom=False) # depends on [control=['for'], data=['axis']] axes = [ax2] if ax_rna: axes.append(ax_rna) # depends on [control=['if'], data=[]] fp = FontProperties(size='5') for axis in axes: set_axis_color(axis, colors['axis']) axis.tick_params(colors=colors['ticks']) 
for item in axis.get_xticklabels() + axis.get_yticklabels(): item.set_fontproperties(fp) item.set_color(colors['color']) # depends on [control=['for'], data=['item']] # depends on [control=['for'], data=['axis']] for (axis, frame) in ((ax4, 1), (ax5, 2), (ax6, 3)): if color_scheme == 'greyorfs': color = colors['grey'] # depends on [control=['if'], data=[]] else: color = colors['frames'][frame - 1] set_axis_color(axis, color, alpha=0.05) axis.patch.set_alpha(0.3) # opacity of ORF architecture for item in axis.get_xticklabels(): item.set_fontproperties(fp) item.set_color(colors['color']) # depends on [control=['for'], data=['item']] axis.set_ylim(0, 0.2) axis.set_xlim(0, transcript_length) starts = [(item, 1) for item in start_stops[frame]['starts']] stops = [(item, 1) for item in start_stops[frame]['stops']] start_colors = [colors['start'] for item in starts] axis.broken_barh(starts, (0.11, 0.2), facecolors=start_colors, edgecolors=start_colors, label='start', zorder=5) stop_colors = [colors['stop'] for item in stops] axis.broken_barh(stops, (0, 0.2), facecolors=stop_colors, edgecolors=stop_colors, label='stop', zorder=5) axis.set_ylabel('{}'.format(frame), fontdict={'family': 'sans-serif', 'color': colors['color'], 'weight': 'normal', 'size': '6'}, rotation='horizontal', labelpad=10, verticalalignment='center') axis.tick_params(top=False, left=False, right=False, labeltop=False, labelleft=False, labelright=False, direction='out', colors=colors['ticks']) # depends on [control=['for'], data=[]] plt.title('{}'.format(transcript_name), fontdict={'family': 'sans-serif', 'color': colors['color'], 'weight': 'bold', 'size': 8, 'y': 20}) if not os.path.exists(output_path): os.mkdir(output_path) # depends on [control=['if'], data=[]] plt.savefig(os.path.join(output_path, 'riboplot.svg'), facecolor=colors['background']) plt.savefig(os.path.join(output_path, 'riboplot.png'), dpi=600, facecolor=colors['background']) with open(os.path.join(CONFIG.PKG_DATA_DIR, 'riboplot.html')) 
as g, open(os.path.join(output_path, html_file), 'w') as h: h.write(g.read().format(transcript_name=transcript_name)) # depends on [control=['with'], data=['g']] css_dir = os.path.join(output_path, 'css') if not os.path.exists(css_dir): os.mkdir(css_dir) # depends on [control=['if'], data=[]] css_data_dir = os.path.join(CONFIG.PKG_DATA_DIR, 'css') for fname in os.listdir(css_data_dir): shutil.copy(os.path.join(css_data_dir, fname), os.path.join(output_path, 'css', fname)) # depends on [control=['for'], data=['fname']]
def _assert_refspec(self): """Turns out we can't deal with remotes if the refspec is missing""" config = self.config_reader unset = 'placeholder' try: if config.get_value('fetch', default=unset) is unset: msg = "Remote '%s' has no refspec set.\n" msg += "You can set it as follows:" msg += " 'git config --add \"remote.%s.fetch +refs/heads/*:refs/heads/*\"'." raise AssertionError(msg % (self.name, self.name)) finally: config.release()
def function[_assert_refspec, parameter[self]]: constant[Turns out we can't deal with remotes if the refspec is missing] variable[config] assign[=] name[self].config_reader variable[unset] assign[=] constant[placeholder] <ast.Try object at 0x7da204346b60>
keyword[def] identifier[_assert_refspec] ( identifier[self] ): literal[string] identifier[config] = identifier[self] . identifier[config_reader] identifier[unset] = literal[string] keyword[try] : keyword[if] identifier[config] . identifier[get_value] ( literal[string] , identifier[default] = identifier[unset] ) keyword[is] identifier[unset] : identifier[msg] = literal[string] identifier[msg] += literal[string] identifier[msg] += literal[string] keyword[raise] identifier[AssertionError] ( identifier[msg] %( identifier[self] . identifier[name] , identifier[self] . identifier[name] )) keyword[finally] : identifier[config] . identifier[release] ()
def _assert_refspec(self): """Turns out we can't deal with remotes if the refspec is missing""" config = self.config_reader unset = 'placeholder' try: if config.get_value('fetch', default=unset) is unset: msg = "Remote '%s' has no refspec set.\n" msg += 'You can set it as follows:' msg += ' \'git config --add "remote.%s.fetch +refs/heads/*:refs/heads/*"\'.' raise AssertionError(msg % (self.name, self.name)) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] finally: config.release()
def tasks_by_tag(self, registry_tag): """ Get tasks from registry by its tag :param registry_tag: any hash-able object :return: Return task (if :attr:`.WTaskRegistryStorage.__multiple_tasks_per_tag__` is not True) or \ list of tasks """ if registry_tag not in self.__registry.keys(): return None tasks = self.__registry[registry_tag] return tasks if self.__multiple_tasks_per_tag__ is True else tasks[0]
def function[tasks_by_tag, parameter[self, registry_tag]]: constant[ Get tasks from registry by its tag :param registry_tag: any hash-able object :return: Return task (if :attr:`.WTaskRegistryStorage.__multiple_tasks_per_tag__` is not True) or list of tasks ] if compare[name[registry_tag] <ast.NotIn object at 0x7da2590d7190> call[name[self].__registry.keys, parameter[]]] begin[:] return[constant[None]] variable[tasks] assign[=] call[name[self].__registry][name[registry_tag]] return[<ast.IfExp object at 0x7da2044c0be0>]
keyword[def] identifier[tasks_by_tag] ( identifier[self] , identifier[registry_tag] ): literal[string] keyword[if] identifier[registry_tag] keyword[not] keyword[in] identifier[self] . identifier[__registry] . identifier[keys] (): keyword[return] keyword[None] identifier[tasks] = identifier[self] . identifier[__registry] [ identifier[registry_tag] ] keyword[return] identifier[tasks] keyword[if] identifier[self] . identifier[__multiple_tasks_per_tag__] keyword[is] keyword[True] keyword[else] identifier[tasks] [ literal[int] ]
def tasks_by_tag(self, registry_tag): """ Get tasks from registry by its tag :param registry_tag: any hash-able object :return: Return task (if :attr:`.WTaskRegistryStorage.__multiple_tasks_per_tag__` is not True) or list of tasks """ if registry_tag not in self.__registry.keys(): return None # depends on [control=['if'], data=[]] tasks = self.__registry[registry_tag] return tasks if self.__multiple_tasks_per_tag__ is True else tasks[0]
def asset_create_task(self, *args, **kwargs):
    """Create a new task for the currently selected asset.

    :returns: None
    :rtype: None
    :raises: None
    """
    # Guard clauses: nothing to do without a selected asset,
    # or when task creation was cancelled / failed.
    if not self.cur_asset:
        return
    new_task = self.create_task(element=self.cur_asset)
    if not new_task:
        return
    item_data = djitemdata.TaskItemData(new_task)
    treemodel.TreeItem(item_data, self.asset_task_model.root)
def function[asset_create_task, parameter[self]]: constant[Create a new task :returns: None :rtype: None :raises: None ] if <ast.UnaryOp object at 0x7da1b141e560> begin[:] return[None] variable[task] assign[=] call[name[self].create_task, parameter[]] if name[task] begin[:] variable[taskdata] assign[=] call[name[djitemdata].TaskItemData, parameter[name[task]]] call[name[treemodel].TreeItem, parameter[name[taskdata], name[self].asset_task_model.root]]
keyword[def] identifier[asset_create_task] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[cur_asset] : keyword[return] identifier[task] = identifier[self] . identifier[create_task] ( identifier[element] = identifier[self] . identifier[cur_asset] ) keyword[if] identifier[task] : identifier[taskdata] = identifier[djitemdata] . identifier[TaskItemData] ( identifier[task] ) identifier[treemodel] . identifier[TreeItem] ( identifier[taskdata] , identifier[self] . identifier[asset_task_model] . identifier[root] )
def asset_create_task(self, *args, **kwargs): """Create a new task :returns: None :rtype: None :raises: None """ if not self.cur_asset: return # depends on [control=['if'], data=[]] task = self.create_task(element=self.cur_asset) if task: taskdata = djitemdata.TaskItemData(task) treemodel.TreeItem(taskdata, self.asset_task_model.root) # depends on [control=['if'], data=[]]
def get_bank_name(clabe: str) -> str:
    """Return the bank name matching the first three digits of a CLABE.

    https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control

    :raises ValueError: when no bank is registered under the code.
    """
    code = clabe[:3]
    try:
        # Either lookup may miss: unknown code in BANKS, or a bank
        # code without a registered display name in BANK_NAMES.
        return BANK_NAMES[BANKS[code]]
    except KeyError:
        raise ValueError(f"Ningún banco tiene código '{code}'")
def function[get_bank_name, parameter[clabe]]: constant[ Regresa el nombre del banco basado en los primeros 3 digitos https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control ] variable[code] assign[=] call[name[clabe]][<ast.Slice object at 0x7da1b2351b40>] <ast.Try object at 0x7da1b23f8a90>
keyword[def] identifier[get_bank_name] ( identifier[clabe] : identifier[str] )-> identifier[str] : literal[string] identifier[code] = identifier[clabe] [: literal[int] ] keyword[try] : identifier[bank_name] = identifier[BANK_NAMES] [ identifier[BANKS] [ identifier[code] ]] keyword[except] identifier[KeyError] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[else] : keyword[return] identifier[bank_name]
def get_bank_name(clabe: str) -> str: """ Regresa el nombre del banco basado en los primeros 3 digitos https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control """ code = clabe[:3] try: bank_name = BANK_NAMES[BANKS[code]] # depends on [control=['try'], data=[]] except KeyError: raise ValueError(f"Ningún banco tiene código '{code}'") # depends on [control=['except'], data=[]] else: return bank_name
def intersect_exposure_and_aggregate_hazard(self):
    """This function intersects the exposure with the aggregate hazard.

    If the exposure is a continuous raster exposure, this function will
    set the aggregate hazard layer.
    However, this function will set the impact layer.
    """
    LOGGER.info('ANALYSIS : Intersect Exposure and Aggregate Hazard')
    if is_raster_layer(self.exposure):
        # Raster exposure: values are aggregated straight onto the
        # aggregate hazard polygons, so no per-feature impact layer is built.
        self.set_state_process(
            'impact function',
            'Zonal stats between exposure and aggregate hazard')

        # Be careful, our own zonal stats will take care of different
        # projections between the two layers. We don't want to reproject
        # rasters.
        # noinspection PyTypeChecker
        self._aggregate_hazard_impacted = zonal_stats(
            self.exposure, self._aggregate_hazard_impacted)
        self.debug_layer(self._aggregate_hazard_impacted)

        self.set_state_process('impact function', 'Add default values')
        self._aggregate_hazard_impacted = add_default_values(
            self._aggregate_hazard_impacted)
        self.debug_layer(self._aggregate_hazard_impacted)

        # I know it's redundant, it's just to be sure that we don't have
        # any impact layer for that IF.
        self._exposure_summary = None

    else:
        # Vector exposure: decide whether features may be geometrically
        # split (divisible) or must stay whole (e.g. structures).
        indivisible_keys = [f['key'] for f in indivisible_exposure]
        geometry = self.exposure.geometryType()
        exposure = self.exposure.keywords.get('exposure')
        is_divisible = exposure not in indivisible_keys

        if geometry in [
                QgsWkbTypes.LineGeometry,
                QgsWkbTypes.PolygonGeometry] and is_divisible:

            self.set_state_process(
                'exposure',
                'Make exposure layer valid')
            self._exposure = clean_layer(self.exposure)
            self.debug_layer(self.exposure)

            self.set_state_process(
                'impact function',
                'Make aggregate hazard layer valid')
            self._aggregate_hazard_impacted = clean_layer(
                self._aggregate_hazard_impacted)
            self.debug_layer(self._aggregate_hazard_impacted)

            self.set_state_process(
                'impact function',
                'Intersect divisible features with the aggregate hazard')
            self._exposure_summary = intersection(
                self._exposure, self._aggregate_hazard_impacted)
            self.debug_layer(self._exposure_summary)

            # If the layer has the size field, it means we need to
            # recompute counts based on the old and new size.
            fields = self._exposure_summary.keywords['inasafe_fields']
            if size_field['key'] in fields:
                self.set_state_process(
                    'impact function',
                    'Recompute counts')
                LOGGER.info(
                    'InaSAFE will not use these counts, as we have ratios '
                    'since the exposure preparation step.')
                self._exposure_summary = recompute_counts(
                    self._exposure_summary)
                self.debug_layer(self._exposure_summary)

        else:
            # Indivisible features are not split: each one is tagged with
            # the highest hazard class it intersects.
            self.set_state_process(
                'impact function',
                'Highest class of hazard is assigned to the exposure')
            self._exposure_summary = assign_highest_value(
                self._exposure, self._aggregate_hazard_impacted)
            self.debug_layer(self._exposure_summary)

        # set title using definition
        # the title will be overwritten anyway by standard title
        # set this as fallback.
        self._exposure_summary.keywords['title'] = (
            layer_purpose_exposure_summary['name'])
        if qgis_version() >= 21800:
            self._exposure_summary.setName(
                self._exposure_summary.keywords['title'])
        else:
            # Pre-QGIS 2.18 API spelled the setter differently.
            self._exposure_summary.setLayerName(
                self._exposure_summary.keywords['title'])
def function[intersect_exposure_and_aggregate_hazard, parameter[self]]: constant[This function intersects the exposure with the aggregate hazard. If the the exposure is a continuous raster exposure, this function will set the aggregate hazard layer. However, this function will set the impact layer. ] call[name[LOGGER].info, parameter[constant[ANALYSIS : Intersect Exposure and Aggregate Hazard]]] if call[name[is_raster_layer], parameter[name[self].exposure]] begin[:] call[name[self].set_state_process, parameter[constant[impact function], constant[Zonal stats between exposure and aggregate hazard]]] name[self]._aggregate_hazard_impacted assign[=] call[name[zonal_stats], parameter[name[self].exposure, name[self]._aggregate_hazard_impacted]] call[name[self].debug_layer, parameter[name[self]._aggregate_hazard_impacted]] call[name[self].set_state_process, parameter[constant[impact function], constant[Add default values]]] name[self]._aggregate_hazard_impacted assign[=] call[name[add_default_values], parameter[name[self]._aggregate_hazard_impacted]] call[name[self].debug_layer, parameter[name[self]._aggregate_hazard_impacted]] name[self]._exposure_summary assign[=] constant[None]
keyword[def] identifier[intersect_exposure_and_aggregate_hazard] ( identifier[self] ): literal[string] identifier[LOGGER] . identifier[info] ( literal[string] ) keyword[if] identifier[is_raster_layer] ( identifier[self] . identifier[exposure] ): identifier[self] . identifier[set_state_process] ( literal[string] , literal[string] ) identifier[self] . identifier[_aggregate_hazard_impacted] = identifier[zonal_stats] ( identifier[self] . identifier[exposure] , identifier[self] . identifier[_aggregate_hazard_impacted] ) identifier[self] . identifier[debug_layer] ( identifier[self] . identifier[_aggregate_hazard_impacted] ) identifier[self] . identifier[set_state_process] ( literal[string] , literal[string] ) identifier[self] . identifier[_aggregate_hazard_impacted] = identifier[add_default_values] ( identifier[self] . identifier[_aggregate_hazard_impacted] ) identifier[self] . identifier[debug_layer] ( identifier[self] . identifier[_aggregate_hazard_impacted] ) identifier[self] . identifier[_exposure_summary] = keyword[None] keyword[else] : identifier[indivisible_keys] =[ identifier[f] [ literal[string] ] keyword[for] identifier[f] keyword[in] identifier[indivisible_exposure] ] identifier[geometry] = identifier[self] . identifier[exposure] . identifier[geometryType] () identifier[exposure] = identifier[self] . identifier[exposure] . identifier[keywords] . identifier[get] ( literal[string] ) identifier[is_divisible] = identifier[exposure] keyword[not] keyword[in] identifier[indivisible_keys] keyword[if] identifier[geometry] keyword[in] [ identifier[QgsWkbTypes] . identifier[LineGeometry] , identifier[QgsWkbTypes] . identifier[PolygonGeometry] ] keyword[and] identifier[is_divisible] : identifier[self] . identifier[set_state_process] ( literal[string] , literal[string] ) identifier[self] . identifier[_exposure] = identifier[clean_layer] ( identifier[self] . identifier[exposure] ) identifier[self] . identifier[debug_layer] ( identifier[self] . 
identifier[exposure] ) identifier[self] . identifier[set_state_process] ( literal[string] , literal[string] ) identifier[self] . identifier[_aggregate_hazard_impacted] = identifier[clean_layer] ( identifier[self] . identifier[_aggregate_hazard_impacted] ) identifier[self] . identifier[debug_layer] ( identifier[self] . identifier[_aggregate_hazard_impacted] ) identifier[self] . identifier[set_state_process] ( literal[string] , literal[string] ) identifier[self] . identifier[_exposure_summary] = identifier[intersection] ( identifier[self] . identifier[_exposure] , identifier[self] . identifier[_aggregate_hazard_impacted] ) identifier[self] . identifier[debug_layer] ( identifier[self] . identifier[_exposure_summary] ) identifier[fields] = identifier[self] . identifier[_exposure_summary] . identifier[keywords] [ literal[string] ] keyword[if] identifier[size_field] [ literal[string] ] keyword[in] identifier[fields] : identifier[self] . identifier[set_state_process] ( literal[string] , literal[string] ) identifier[LOGGER] . identifier[info] ( literal[string] literal[string] ) identifier[self] . identifier[_exposure_summary] = identifier[recompute_counts] ( identifier[self] . identifier[_exposure_summary] ) identifier[self] . identifier[debug_layer] ( identifier[self] . identifier[_exposure_summary] ) keyword[else] : identifier[self] . identifier[set_state_process] ( literal[string] , literal[string] ) identifier[self] . identifier[_exposure_summary] = identifier[assign_highest_value] ( identifier[self] . identifier[_exposure] , identifier[self] . identifier[_aggregate_hazard_impacted] ) identifier[self] . identifier[debug_layer] ( identifier[self] . identifier[_exposure_summary] ) identifier[self] . identifier[_exposure_summary] . identifier[keywords] [ literal[string] ]=( identifier[layer_purpose_exposure_summary] [ literal[string] ]) keyword[if] identifier[qgis_version] ()>= literal[int] : identifier[self] . identifier[_exposure_summary] . 
identifier[setName] ( identifier[self] . identifier[_exposure_summary] . identifier[keywords] [ literal[string] ]) keyword[else] : identifier[self] . identifier[_exposure_summary] . identifier[setLayerName] ( identifier[self] . identifier[_exposure_summary] . identifier[keywords] [ literal[string] ])
def intersect_exposure_and_aggregate_hazard(self): """This function intersects the exposure with the aggregate hazard. If the the exposure is a continuous raster exposure, this function will set the aggregate hazard layer. However, this function will set the impact layer. """ LOGGER.info('ANALYSIS : Intersect Exposure and Aggregate Hazard') if is_raster_layer(self.exposure): self.set_state_process('impact function', 'Zonal stats between exposure and aggregate hazard') # Be careful, our own zonal stats will take care of different # projections between the two layers. We don't want to reproject # rasters. # noinspection PyTypeChecker self._aggregate_hazard_impacted = zonal_stats(self.exposure, self._aggregate_hazard_impacted) self.debug_layer(self._aggregate_hazard_impacted) self.set_state_process('impact function', 'Add default values') self._aggregate_hazard_impacted = add_default_values(self._aggregate_hazard_impacted) self.debug_layer(self._aggregate_hazard_impacted) # I know it's redundant, it's just to be sure that we don't have # any impact layer for that IF. 
self._exposure_summary = None # depends on [control=['if'], data=[]] else: indivisible_keys = [f['key'] for f in indivisible_exposure] geometry = self.exposure.geometryType() exposure = self.exposure.keywords.get('exposure') is_divisible = exposure not in indivisible_keys if geometry in [QgsWkbTypes.LineGeometry, QgsWkbTypes.PolygonGeometry] and is_divisible: self.set_state_process('exposure', 'Make exposure layer valid') self._exposure = clean_layer(self.exposure) self.debug_layer(self.exposure) self.set_state_process('impact function', 'Make aggregate hazard layer valid') self._aggregate_hazard_impacted = clean_layer(self._aggregate_hazard_impacted) self.debug_layer(self._aggregate_hazard_impacted) self.set_state_process('impact function', 'Intersect divisible features with the aggregate hazard') self._exposure_summary = intersection(self._exposure, self._aggregate_hazard_impacted) self.debug_layer(self._exposure_summary) # If the layer has the size field, it means we need to # recompute counts based on the old and new size. fields = self._exposure_summary.keywords['inasafe_fields'] if size_field['key'] in fields: self.set_state_process('impact function', 'Recompute counts') LOGGER.info('InaSAFE will not use these counts, as we have ratios since the exposure preparation step.') self._exposure_summary = recompute_counts(self._exposure_summary) self.debug_layer(self._exposure_summary) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: self.set_state_process('impact function', 'Highest class of hazard is assigned to the exposure') self._exposure_summary = assign_highest_value(self._exposure, self._aggregate_hazard_impacted) self.debug_layer(self._exposure_summary) # set title using definition # the title will be overwritten anyway by standard title # set this as fallback. 
self._exposure_summary.keywords['title'] = layer_purpose_exposure_summary['name'] if qgis_version() >= 21800: self._exposure_summary.setName(self._exposure_summary.keywords['title']) # depends on [control=['if'], data=[]] else: self._exposure_summary.setLayerName(self._exposure_summary.keywords['title'])
def main(self):
    '''
    Responsible for calling self.init, initializing self.config.display,
    and calling self.run.

    Returns the value returned from self.run.
    '''
    self.status = self.parent.status
    self.modules = self.parent.executed_modules

    # A special exception for the extractor module, which should be allowed to
    # override the verbose setting, e.g., if --matryoshka has been
    # specified
    if hasattr(self, "extractor") and self.extractor.config.verbose:
        self.config.verbose = self.config.display.verbose = True

    if not self.config.files:
        binwalk.core.common.debug("No target files specified, module %s terminated" % self.name)
        return False

    self.reset_dependencies()

    try:
        self.init()
    except KeyboardInterrupt:
        # Bare raise re-raises with the original traceback intact;
        # 'raise e' would needlessly append this handler's frame.
        raise
    except Exception as e:
        self.error(exception=e)
        return False

    try:
        self.config.display.format_strings(self.HEADER_FORMAT, self.RESULT_FORMAT)
    except KeyboardInterrupt:
        raise
    except Exception as e:
        self.error(exception=e)
        return False

    self._plugins_pre_scan()

    try:
        retval = self.run()
    except KeyboardInterrupt:
        raise
    except Exception as e:
        self.error(exception=e)
        return False

    self._plugins_post_scan()

    return retval
def function[main, parameter[self]]: constant[ Responsible for calling self.init, initializing self.config.display, and calling self.run. Returns the value returned from self.run. ] name[self].status assign[=] name[self].parent.status name[self].modules assign[=] name[self].parent.executed_modules if <ast.BoolOp object at 0x7da1b21c5bd0> begin[:] name[self].config.verbose assign[=] constant[True] if <ast.UnaryOp object at 0x7da1b21c76a0> begin[:] call[name[binwalk].core.common.debug, parameter[binary_operation[constant[No target files specified, module %s terminated] <ast.Mod object at 0x7da2590d6920> name[self].name]]] return[constant[False]] call[name[self].reset_dependencies, parameter[]] <ast.Try object at 0x7da1b21c48b0> <ast.Try object at 0x7da1b21c4d60> call[name[self]._plugins_pre_scan, parameter[]] <ast.Try object at 0x7da1b21c4f40> call[name[self]._plugins_post_scan, parameter[]] return[name[retval]]
keyword[def] identifier[main] ( identifier[self] ): literal[string] identifier[self] . identifier[status] = identifier[self] . identifier[parent] . identifier[status] identifier[self] . identifier[modules] = identifier[self] . identifier[parent] . identifier[executed_modules] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[extractor] . identifier[config] . identifier[verbose] : identifier[self] . identifier[config] . identifier[verbose] = identifier[self] . identifier[config] . identifier[display] . identifier[verbose] = keyword[True] keyword[if] keyword[not] identifier[self] . identifier[config] . identifier[files] : identifier[binwalk] . identifier[core] . identifier[common] . identifier[debug] ( literal[string] % identifier[self] . identifier[name] ) keyword[return] keyword[False] identifier[self] . identifier[reset_dependencies] () keyword[try] : identifier[self] . identifier[init] () keyword[except] identifier[KeyboardInterrupt] keyword[as] identifier[e] : keyword[raise] identifier[e] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[self] . identifier[error] ( identifier[exception] = identifier[e] ) keyword[return] keyword[False] keyword[try] : identifier[self] . identifier[config] . identifier[display] . identifier[format_strings] ( identifier[self] . identifier[HEADER_FORMAT] , identifier[self] . identifier[RESULT_FORMAT] ) keyword[except] identifier[KeyboardInterrupt] keyword[as] identifier[e] : keyword[raise] identifier[e] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[self] . identifier[error] ( identifier[exception] = identifier[e] ) keyword[return] keyword[False] identifier[self] . identifier[_plugins_pre_scan] () keyword[try] : identifier[retval] = identifier[self] . 
identifier[run] () keyword[except] identifier[KeyboardInterrupt] keyword[as] identifier[e] : keyword[raise] identifier[e] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[self] . identifier[error] ( identifier[exception] = identifier[e] ) keyword[return] keyword[False] identifier[self] . identifier[_plugins_post_scan] () keyword[return] identifier[retval]
def main(self): """ Responsible for calling self.init, initializing self.config.display, and calling self.run. Returns the value returned from self.run. """ self.status = self.parent.status self.modules = self.parent.executed_modules # A special exception for the extractor module, which should be allowed to # override the verbose setting, e.g., if --matryoshka has been # specified if hasattr(self, 'extractor') and self.extractor.config.verbose: self.config.verbose = self.config.display.verbose = True # depends on [control=['if'], data=[]] if not self.config.files: binwalk.core.common.debug('No target files specified, module %s terminated' % self.name) return False # depends on [control=['if'], data=[]] self.reset_dependencies() try: self.init() # depends on [control=['try'], data=[]] except KeyboardInterrupt as e: raise e # depends on [control=['except'], data=['e']] except Exception as e: self.error(exception=e) return False # depends on [control=['except'], data=['e']] try: self.config.display.format_strings(self.HEADER_FORMAT, self.RESULT_FORMAT) # depends on [control=['try'], data=[]] except KeyboardInterrupt as e: raise e # depends on [control=['except'], data=['e']] except Exception as e: self.error(exception=e) return False # depends on [control=['except'], data=['e']] self._plugins_pre_scan() try: retval = self.run() # depends on [control=['try'], data=[]] except KeyboardInterrupt as e: raise e # depends on [control=['except'], data=['e']] except Exception as e: self.error(exception=e) return False # depends on [control=['except'], data=['e']] self._plugins_post_scan() return retval
def _get_binop_flow(
    left, left_type, binary_opnode, right, right_type, context, reverse_context
):
    """Get the flow for binary operations.

    The rules are a bit messy:

        * if left and right have the same type, then only one method will be
          called, left.__op__(right)
        * if left and right are unrelated typewise, then first
          left.__op__(right) is tried and if this does not exist or returns
          NotImplemented, then right.__rop__(left) is tried.
        * if left is a subtype of right, then only left.__op__(right) is tried.
        * if left is a supertype of right, then right.__rop__(left) is first
          tried and then left.__op__(right)
    """
    op = binary_opnode.op
    # Same type and left-is-subtype both resolve to the forward method only;
    # the 'or' keeps the original predicate evaluation order.
    if _same_type(left_type, right_type) or helpers.is_subtype(left_type, right_type):
        return [_bin_op(left, binary_opnode, op, right, context)]
    if helpers.is_supertype(left_type, right_type):
        # Reflected method is preferred when right is the more derived type.
        return [
            _bin_op(right, binary_opnode, op, left, reverse_context, reverse=True),
            _bin_op(left, binary_opnode, op, right, context),
        ]
    # Unrelated types: forward first, reflected as fallback.
    return [
        _bin_op(left, binary_opnode, op, right, context),
        _bin_op(right, binary_opnode, op, left, reverse_context, reverse=True),
    ]
def function[_get_binop_flow, parameter[left, left_type, binary_opnode, right, right_type, context, reverse_context]]: constant[Get the flow for binary operations. The rules are a bit messy: * if left and right have the same type, then only one method will be called, left.__op__(right) * if left and right are unrelated typewise, then first left.__op__(right) is tried and if this does not exist or returns NotImplemented, then right.__rop__(left) is tried. * if left is a subtype of right, then only left.__op__(right) is tried. * if left is a supertype of right, then right.__rop__(left) is first tried and then left.__op__(right) ] variable[op] assign[=] name[binary_opnode].op if call[name[_same_type], parameter[name[left_type], name[right_type]]] begin[:] variable[methods] assign[=] list[[<ast.Call object at 0x7da1b1edaec0>]] return[name[methods]]
keyword[def] identifier[_get_binop_flow] ( identifier[left] , identifier[left_type] , identifier[binary_opnode] , identifier[right] , identifier[right_type] , identifier[context] , identifier[reverse_context] ): literal[string] identifier[op] = identifier[binary_opnode] . identifier[op] keyword[if] identifier[_same_type] ( identifier[left_type] , identifier[right_type] ): identifier[methods] =[ identifier[_bin_op] ( identifier[left] , identifier[binary_opnode] , identifier[op] , identifier[right] , identifier[context] )] keyword[elif] identifier[helpers] . identifier[is_subtype] ( identifier[left_type] , identifier[right_type] ): identifier[methods] =[ identifier[_bin_op] ( identifier[left] , identifier[binary_opnode] , identifier[op] , identifier[right] , identifier[context] )] keyword[elif] identifier[helpers] . identifier[is_supertype] ( identifier[left_type] , identifier[right_type] ): identifier[methods] =[ identifier[_bin_op] ( identifier[right] , identifier[binary_opnode] , identifier[op] , identifier[left] , identifier[reverse_context] , identifier[reverse] = keyword[True] ), identifier[_bin_op] ( identifier[left] , identifier[binary_opnode] , identifier[op] , identifier[right] , identifier[context] ), ] keyword[else] : identifier[methods] =[ identifier[_bin_op] ( identifier[left] , identifier[binary_opnode] , identifier[op] , identifier[right] , identifier[context] ), identifier[_bin_op] ( identifier[right] , identifier[binary_opnode] , identifier[op] , identifier[left] , identifier[reverse_context] , identifier[reverse] = keyword[True] ), ] keyword[return] identifier[methods]
def _get_binop_flow(left, left_type, binary_opnode, right, right_type, context, reverse_context): """Get the flow for binary operations. The rules are a bit messy: * if left and right have the same type, then only one method will be called, left.__op__(right) * if left and right are unrelated typewise, then first left.__op__(right) is tried and if this does not exist or returns NotImplemented, then right.__rop__(left) is tried. * if left is a subtype of right, then only left.__op__(right) is tried. * if left is a supertype of right, then right.__rop__(left) is first tried and then left.__op__(right) """ op = binary_opnode.op if _same_type(left_type, right_type): methods = [_bin_op(left, binary_opnode, op, right, context)] # depends on [control=['if'], data=[]] elif helpers.is_subtype(left_type, right_type): methods = [_bin_op(left, binary_opnode, op, right, context)] # depends on [control=['if'], data=[]] elif helpers.is_supertype(left_type, right_type): methods = [_bin_op(right, binary_opnode, op, left, reverse_context, reverse=True), _bin_op(left, binary_opnode, op, right, context)] # depends on [control=['if'], data=[]] else: methods = [_bin_op(left, binary_opnode, op, right, context), _bin_op(right, binary_opnode, op, left, reverse_context, reverse=True)] return methods
def _ConvertAllTypes(self, allTypes): """ Convert all dynamic types to pyVmomi type definitions """ # Generate lists good for VmomiSupport.CreateXYZType enumTypes = self._Filter(self._ConvertEnumType, allTypes.enumTypeInfo) dataTypes = self._Filter(self._ConvertDataType, allTypes.dataTypeInfo) managedTypes = self._Filter(self._ConvertManagedType, allTypes.managedTypeInfo) retAllTypes = (enumTypes, dataTypes, managedTypes) return retAllTypes
def function[_ConvertAllTypes, parameter[self, allTypes]]: constant[ Convert all dynamic types to pyVmomi type definitions ] variable[enumTypes] assign[=] call[name[self]._Filter, parameter[name[self]._ConvertEnumType, name[allTypes].enumTypeInfo]] variable[dataTypes] assign[=] call[name[self]._Filter, parameter[name[self]._ConvertDataType, name[allTypes].dataTypeInfo]] variable[managedTypes] assign[=] call[name[self]._Filter, parameter[name[self]._ConvertManagedType, name[allTypes].managedTypeInfo]] variable[retAllTypes] assign[=] tuple[[<ast.Name object at 0x7da20c9910f0>, <ast.Name object at 0x7da20c991060>, <ast.Name object at 0x7da20c991cf0>]] return[name[retAllTypes]]
keyword[def] identifier[_ConvertAllTypes] ( identifier[self] , identifier[allTypes] ): literal[string] identifier[enumTypes] = identifier[self] . identifier[_Filter] ( identifier[self] . identifier[_ConvertEnumType] , identifier[allTypes] . identifier[enumTypeInfo] ) identifier[dataTypes] = identifier[self] . identifier[_Filter] ( identifier[self] . identifier[_ConvertDataType] , identifier[allTypes] . identifier[dataTypeInfo] ) identifier[managedTypes] = identifier[self] . identifier[_Filter] ( identifier[self] . identifier[_ConvertManagedType] , identifier[allTypes] . identifier[managedTypeInfo] ) identifier[retAllTypes] =( identifier[enumTypes] , identifier[dataTypes] , identifier[managedTypes] ) keyword[return] identifier[retAllTypes]
def _ConvertAllTypes(self, allTypes): """ Convert all dynamic types to pyVmomi type definitions """ # Generate lists good for VmomiSupport.CreateXYZType enumTypes = self._Filter(self._ConvertEnumType, allTypes.enumTypeInfo) dataTypes = self._Filter(self._ConvertDataType, allTypes.dataTypeInfo) managedTypes = self._Filter(self._ConvertManagedType, allTypes.managedTypeInfo) retAllTypes = (enumTypes, dataTypes, managedTypes) return retAllTypes
def plot_degbandshalffill():
    """Plot of Quasiparticle weight for degenerate half-filled bands,
    showing the Mott transition"""
    # Upper interaction limit per band count N = 1..4.
    interaction_limits = (3.45, 5.15, 6.85, 8.55)
    half_filling = [0.5]
    for n_bands, u_max in enumerate(interaction_limits, start=1):
        run_name = 'Z_half_{}band'.format(n_bands)
        data = ssplt.calc_z(
            n_bands, half_filling, np.arange(0, u_max, 0.1), 0., run_name)
        plt.plot(data['u_int'], data['zeta'][0, :, 0],
                 label='$N={}$'.format(n_bands))
    ssplt.label_saves('Z_half_multiorb.png')
def function[plot_degbandshalffill, parameter[]]: constant[Plot of Quasiparticle weight for degenerate half-filled bands, showing the Mott transition] variable[ulim] assign[=] list[[<ast.Constant object at 0x7da1b27a51e0>, <ast.Constant object at 0x7da1b27a46a0>, <ast.Constant object at 0x7da1b27a6b90>, <ast.Constant object at 0x7da1b27a41c0>]] variable[bands] assign[=] call[name[range], parameter[constant[1], constant[5]]] for taget[tuple[[<ast.Name object at 0x7da1b27a5240>, <ast.Name object at 0x7da1b27a5960>]]] in starred[call[name[zip], parameter[name[bands], name[ulim]]]] begin[:] variable[name] assign[=] binary_operation[binary_operation[constant[Z_half_] + call[name[str], parameter[name[band]]]] + constant[band]] variable[dop] assign[=] list[[<ast.Constant object at 0x7da1b27a6e00>]] variable[data] assign[=] call[name[ssplt].calc_z, parameter[name[band], name[dop], call[name[np].arange, parameter[constant[0], name[u_int], constant[0.1]]], constant[0.0], name[name]]] call[name[plt].plot, parameter[call[name[data]][constant[u_int]], call[call[name[data]][constant[zeta]]][tuple[[<ast.Constant object at 0x7da1b27a4d60>, <ast.Slice object at 0x7da1b27a4160>, <ast.Constant object at 0x7da1b27a5c30>]]]]] call[name[ssplt].label_saves, parameter[constant[Z_half_multiorb.png]]]
keyword[def] identifier[plot_degbandshalffill] (): literal[string] identifier[ulim] =[ literal[int] , literal[int] , literal[int] , literal[int] ] identifier[bands] = identifier[range] ( literal[int] , literal[int] ) keyword[for] identifier[band] , identifier[u_int] keyword[in] identifier[zip] ( identifier[bands] , identifier[ulim] ): identifier[name] = literal[string] + identifier[str] ( identifier[band] )+ literal[string] identifier[dop] =[ literal[int] ] identifier[data] = identifier[ssplt] . identifier[calc_z] ( identifier[band] , identifier[dop] , identifier[np] . identifier[arange] ( literal[int] , identifier[u_int] , literal[int] ), literal[int] , identifier[name] ) identifier[plt] . identifier[plot] ( identifier[data] [ literal[string] ], identifier[data] [ literal[string] ][ literal[int] ,:, literal[int] ], identifier[label] = literal[string] . identifier[format] ( identifier[str] ( identifier[band] ))) identifier[ssplt] . identifier[label_saves] ( literal[string] )
def plot_degbandshalffill(): """Plot of Quasiparticle weight for degenerate half-filled bands, showing the Mott transition""" ulim = [3.45, 5.15, 6.85, 8.55] bands = range(1, 5) for (band, u_int) in zip(bands, ulim): name = 'Z_half_' + str(band) + 'band' dop = [0.5] data = ssplt.calc_z(band, dop, np.arange(0, u_int, 0.1), 0.0, name) plt.plot(data['u_int'], data['zeta'][0, :, 0], label='$N={}$'.format(str(band))) # depends on [control=['for'], data=[]] ssplt.label_saves('Z_half_multiorb.png')
def capture(cls, eval_env=0, reference=0): """Capture an execution environment from the stack. If `eval_env` is already an :class:`EvalEnvironment`, it is returned unchanged. Otherwise, we walk up the stack by ``eval_env + reference`` steps and capture that function's evaluation environment. For ``eval_env=0`` and ``reference=0``, the default, this captures the stack frame of the function that calls :meth:`capture`. If ``eval_env + reference`` is 1, then we capture that function's caller, etc. This somewhat complicated calling convention is designed to be convenient for functions which want to capture their caller's environment by default, but also allow explicit environments to be specified. See the second example. Example:: x = 1 this_env = EvalEnvironment.capture() assert this_env.namespace["x"] == 1 def child_func(): return EvalEnvironment.capture(1) this_env_from_child = child_func() assert this_env_from_child.namespace["x"] == 1 Example:: # This function can be used like: # my_model(formula_like, data) # -> evaluates formula_like in caller's environment # my_model(formula_like, data, eval_env=1) # -> evaluates formula_like in caller's caller's environment # my_model(formula_like, data, eval_env=my_env) # -> evaluates formula_like in environment 'my_env' def my_model(formula_like, data, eval_env=0): eval_env = EvalEnvironment.capture(eval_env, reference=1) return model_setup_helper(formula_like, data, eval_env) This is how :func:`dmatrix` works. .. versionadded: 0.2.0 The ``reference`` argument. 
""" if isinstance(eval_env, cls): return eval_env elif isinstance(eval_env, numbers.Integral): depth = eval_env + reference else: raise TypeError("Parameter 'eval_env' must be either an integer " "or an instance of patsy.EvalEnvironment.") frame = inspect.currentframe() try: for i in range(depth + 1): if frame is None: raise ValueError("call-stack is not that deep!") frame = frame.f_back return cls([frame.f_locals, frame.f_globals], frame.f_code.co_flags & _ALL_FUTURE_FLAGS) # The try/finally is important to avoid a potential reference cycle -- # any exception traceback will carry a reference to *our* frame, which # contains a reference to our local variables, which would otherwise # carry a reference to some parent frame, where the exception was # caught...: finally: del frame
def function[capture, parameter[cls, eval_env, reference]]: constant[Capture an execution environment from the stack. If `eval_env` is already an :class:`EvalEnvironment`, it is returned unchanged. Otherwise, we walk up the stack by ``eval_env + reference`` steps and capture that function's evaluation environment. For ``eval_env=0`` and ``reference=0``, the default, this captures the stack frame of the function that calls :meth:`capture`. If ``eval_env + reference`` is 1, then we capture that function's caller, etc. This somewhat complicated calling convention is designed to be convenient for functions which want to capture their caller's environment by default, but also allow explicit environments to be specified. See the second example. Example:: x = 1 this_env = EvalEnvironment.capture() assert this_env.namespace["x"] == 1 def child_func(): return EvalEnvironment.capture(1) this_env_from_child = child_func() assert this_env_from_child.namespace["x"] == 1 Example:: # This function can be used like: # my_model(formula_like, data) # -> evaluates formula_like in caller's environment # my_model(formula_like, data, eval_env=1) # -> evaluates formula_like in caller's caller's environment # my_model(formula_like, data, eval_env=my_env) # -> evaluates formula_like in environment 'my_env' def my_model(formula_like, data, eval_env=0): eval_env = EvalEnvironment.capture(eval_env, reference=1) return model_setup_helper(formula_like, data, eval_env) This is how :func:`dmatrix` works. .. versionadded: 0.2.0 The ``reference`` argument. ] if call[name[isinstance], parameter[name[eval_env], name[cls]]] begin[:] return[name[eval_env]] variable[frame] assign[=] call[name[inspect].currentframe, parameter[]] <ast.Try object at 0x7da2045643d0>
keyword[def] identifier[capture] ( identifier[cls] , identifier[eval_env] = literal[int] , identifier[reference] = literal[int] ): literal[string] keyword[if] identifier[isinstance] ( identifier[eval_env] , identifier[cls] ): keyword[return] identifier[eval_env] keyword[elif] identifier[isinstance] ( identifier[eval_env] , identifier[numbers] . identifier[Integral] ): identifier[depth] = identifier[eval_env] + identifier[reference] keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] literal[string] ) identifier[frame] = identifier[inspect] . identifier[currentframe] () keyword[try] : keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[depth] + literal[int] ): keyword[if] identifier[frame] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[frame] = identifier[frame] . identifier[f_back] keyword[return] identifier[cls] ([ identifier[frame] . identifier[f_locals] , identifier[frame] . identifier[f_globals] ], identifier[frame] . identifier[f_code] . identifier[co_flags] & identifier[_ALL_FUTURE_FLAGS] ) keyword[finally] : keyword[del] identifier[frame]
def capture(cls, eval_env=0, reference=0): """Capture an execution environment from the stack. If `eval_env` is already an :class:`EvalEnvironment`, it is returned unchanged. Otherwise, we walk up the stack by ``eval_env + reference`` steps and capture that function's evaluation environment. For ``eval_env=0`` and ``reference=0``, the default, this captures the stack frame of the function that calls :meth:`capture`. If ``eval_env + reference`` is 1, then we capture that function's caller, etc. This somewhat complicated calling convention is designed to be convenient for functions which want to capture their caller's environment by default, but also allow explicit environments to be specified. See the second example. Example:: x = 1 this_env = EvalEnvironment.capture() assert this_env.namespace["x"] == 1 def child_func(): return EvalEnvironment.capture(1) this_env_from_child = child_func() assert this_env_from_child.namespace["x"] == 1 Example:: # This function can be used like: # my_model(formula_like, data) # -> evaluates formula_like in caller's environment # my_model(formula_like, data, eval_env=1) # -> evaluates formula_like in caller's caller's environment # my_model(formula_like, data, eval_env=my_env) # -> evaluates formula_like in environment 'my_env' def my_model(formula_like, data, eval_env=0): eval_env = EvalEnvironment.capture(eval_env, reference=1) return model_setup_helper(formula_like, data, eval_env) This is how :func:`dmatrix` works. .. versionadded: 0.2.0 The ``reference`` argument. 
""" if isinstance(eval_env, cls): return eval_env # depends on [control=['if'], data=[]] elif isinstance(eval_env, numbers.Integral): depth = eval_env + reference # depends on [control=['if'], data=[]] else: raise TypeError("Parameter 'eval_env' must be either an integer or an instance of patsy.EvalEnvironment.") frame = inspect.currentframe() try: for i in range(depth + 1): if frame is None: raise ValueError('call-stack is not that deep!') # depends on [control=['if'], data=[]] frame = frame.f_back # depends on [control=['for'], data=[]] return cls([frame.f_locals, frame.f_globals], frame.f_code.co_flags & _ALL_FUTURE_FLAGS) # depends on [control=['try'], data=[]] finally: # The try/finally is important to avoid a potential reference cycle -- # any exception traceback will carry a reference to *our* frame, which # contains a reference to our local variables, which would otherwise # carry a reference to some parent frame, where the exception was # caught...: del frame
def tokenized(self, delimiter=' ', overlap_threshold=0.1): """ Return a ordered list of tokens based on all labels. Joins all token from all labels (``label.tokenized()```). If the overlapping between two labels is greater than ``overlap_threshold``, an Exception is thrown. Args: delimiter (str): The delimiter used to split labels into tokens. (default: space) overlap_threshold (float): Maximum overlap between two consecutive labels. Returns: str: A list containing tokens of all labels ordered according to the label order. Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a d q', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('c a', start=7.0, end=10.2), >>> Label('f g', start=10.3, end=14.0) >>> ]) >>> ll.tokenized(delimiter=' ', overlap_threshold=0.1) ['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g'] """ sorted_by_start = sorted(self.labels) tokens = [] last_label_end = None for label in sorted_by_start: if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0): tokens.extend(label.tokenized(delimiter=delimiter)) last_label_end = label.end else: raise ValueError('Labels overlap, not able to define the correct order') return tokens
def function[tokenized, parameter[self, delimiter, overlap_threshold]]: constant[ Return a ordered list of tokens based on all labels. Joins all token from all labels (``label.tokenized()```). If the overlapping between two labels is greater than ``overlap_threshold``, an Exception is thrown. Args: delimiter (str): The delimiter used to split labels into tokens. (default: space) overlap_threshold (float): Maximum overlap between two consecutive labels. Returns: str: A list containing tokens of all labels ordered according to the label order. Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a d q', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('c a', start=7.0, end=10.2), >>> Label('f g', start=10.3, end=14.0) >>> ]) >>> ll.tokenized(delimiter=' ', overlap_threshold=0.1) ['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g'] ] variable[sorted_by_start] assign[=] call[name[sorted], parameter[name[self].labels]] variable[tokens] assign[=] list[[]] variable[last_label_end] assign[=] constant[None] for taget[name[label]] in starred[name[sorted_by_start]] begin[:] if <ast.BoolOp object at 0x7da1b0b42620> begin[:] call[name[tokens].extend, parameter[call[name[label].tokenized, parameter[]]]] variable[last_label_end] assign[=] name[label].end return[name[tokens]]
keyword[def] identifier[tokenized] ( identifier[self] , identifier[delimiter] = literal[string] , identifier[overlap_threshold] = literal[int] ): literal[string] identifier[sorted_by_start] = identifier[sorted] ( identifier[self] . identifier[labels] ) identifier[tokens] =[] identifier[last_label_end] = keyword[None] keyword[for] identifier[label] keyword[in] identifier[sorted_by_start] : keyword[if] identifier[last_label_end] keyword[is] keyword[None] keyword[or] ( identifier[last_label_end] - identifier[label] . identifier[start] < identifier[overlap_threshold] keyword[and] identifier[last_label_end] > literal[int] ): identifier[tokens] . identifier[extend] ( identifier[label] . identifier[tokenized] ( identifier[delimiter] = identifier[delimiter] )) identifier[last_label_end] = identifier[label] . identifier[end] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[tokens]
def tokenized(self, delimiter=' ', overlap_threshold=0.1): """ Return a ordered list of tokens based on all labels. Joins all token from all labels (``label.tokenized()```). If the overlapping between two labels is greater than ``overlap_threshold``, an Exception is thrown. Args: delimiter (str): The delimiter used to split labels into tokens. (default: space) overlap_threshold (float): Maximum overlap between two consecutive labels. Returns: str: A list containing tokens of all labels ordered according to the label order. Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a d q', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('c a', start=7.0, end=10.2), >>> Label('f g', start=10.3, end=14.0) >>> ]) >>> ll.tokenized(delimiter=' ', overlap_threshold=0.1) ['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g'] """ sorted_by_start = sorted(self.labels) tokens = [] last_label_end = None for label in sorted_by_start: if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0): tokens.extend(label.tokenized(delimiter=delimiter)) last_label_end = label.end # depends on [control=['if'], data=[]] else: raise ValueError('Labels overlap, not able to define the correct order') # depends on [control=['for'], data=['label']] return tokens
def great_circle_dist(lat1, lon1, lat2, lon2): """ Get the distance (in meters) between two lat/lon points via the Haversine formula. Parameters ---------- lat1, lon1, lat2, lon2 : float Latitude and longitude in degrees. Returns ------- dist : float Distance in meters. """ radius = 6372795 # meters lat1 = math.radians(lat1) lon1 = math.radians(lon1) lat2 = math.radians(lat2) lon2 = math.radians(lon2) dlat = lat2 - lat1 dlon = lon2 - lon1 # formula from: # http://en.wikipedia.org/wiki/Haversine_formula#The_haversine_formula a = math.pow(math.sin(dlat / 2), 2) b = math.cos(lat1) * math.cos(lat2) * math.pow(math.sin(dlon / 2), 2) d = 2 * radius * math.asin(math.sqrt(a + b)) return d
def function[great_circle_dist, parameter[lat1, lon1, lat2, lon2]]: constant[ Get the distance (in meters) between two lat/lon points via the Haversine formula. Parameters ---------- lat1, lon1, lat2, lon2 : float Latitude and longitude in degrees. Returns ------- dist : float Distance in meters. ] variable[radius] assign[=] constant[6372795] variable[lat1] assign[=] call[name[math].radians, parameter[name[lat1]]] variable[lon1] assign[=] call[name[math].radians, parameter[name[lon1]]] variable[lat2] assign[=] call[name[math].radians, parameter[name[lat2]]] variable[lon2] assign[=] call[name[math].radians, parameter[name[lon2]]] variable[dlat] assign[=] binary_operation[name[lat2] - name[lat1]] variable[dlon] assign[=] binary_operation[name[lon2] - name[lon1]] variable[a] assign[=] call[name[math].pow, parameter[call[name[math].sin, parameter[binary_operation[name[dlat] / constant[2]]]], constant[2]]] variable[b] assign[=] binary_operation[binary_operation[call[name[math].cos, parameter[name[lat1]]] * call[name[math].cos, parameter[name[lat2]]]] * call[name[math].pow, parameter[call[name[math].sin, parameter[binary_operation[name[dlon] / constant[2]]]], constant[2]]]] variable[d] assign[=] binary_operation[binary_operation[constant[2] * name[radius]] * call[name[math].asin, parameter[call[name[math].sqrt, parameter[binary_operation[name[a] + name[b]]]]]]] return[name[d]]
keyword[def] identifier[great_circle_dist] ( identifier[lat1] , identifier[lon1] , identifier[lat2] , identifier[lon2] ): literal[string] identifier[radius] = literal[int] identifier[lat1] = identifier[math] . identifier[radians] ( identifier[lat1] ) identifier[lon1] = identifier[math] . identifier[radians] ( identifier[lon1] ) identifier[lat2] = identifier[math] . identifier[radians] ( identifier[lat2] ) identifier[lon2] = identifier[math] . identifier[radians] ( identifier[lon2] ) identifier[dlat] = identifier[lat2] - identifier[lat1] identifier[dlon] = identifier[lon2] - identifier[lon1] identifier[a] = identifier[math] . identifier[pow] ( identifier[math] . identifier[sin] ( identifier[dlat] / literal[int] ), literal[int] ) identifier[b] = identifier[math] . identifier[cos] ( identifier[lat1] )* identifier[math] . identifier[cos] ( identifier[lat2] )* identifier[math] . identifier[pow] ( identifier[math] . identifier[sin] ( identifier[dlon] / literal[int] ), literal[int] ) identifier[d] = literal[int] * identifier[radius] * identifier[math] . identifier[asin] ( identifier[math] . identifier[sqrt] ( identifier[a] + identifier[b] )) keyword[return] identifier[d]
def great_circle_dist(lat1, lon1, lat2, lon2): """ Get the distance (in meters) between two lat/lon points via the Haversine formula. Parameters ---------- lat1, lon1, lat2, lon2 : float Latitude and longitude in degrees. Returns ------- dist : float Distance in meters. """ radius = 6372795 # meters lat1 = math.radians(lat1) lon1 = math.radians(lon1) lat2 = math.radians(lat2) lon2 = math.radians(lon2) dlat = lat2 - lat1 dlon = lon2 - lon1 # formula from: # http://en.wikipedia.org/wiki/Haversine_formula#The_haversine_formula a = math.pow(math.sin(dlat / 2), 2) b = math.cos(lat1) * math.cos(lat2) * math.pow(math.sin(dlon / 2), 2) d = 2 * radius * math.asin(math.sqrt(a + b)) return d
def get_user_input(env): """Ask for username, secret (api_key or password) and endpoint_url.""" defaults = config.get_settings_from_client(env.client) # Ask for username username = env.input('Username', default=defaults['username']) # Ask for 'secret' which can be api_key or their password secret = env.getpass('API Key or Password', default=defaults['api_key']) # Ask for which endpoint they want to use endpoint = defaults.get('endpoint_url', 'public') endpoint_type = env.input( 'Endpoint (public|private|custom)', default=endpoint) endpoint_type = endpoint_type.lower() if endpoint_type == 'public': endpoint_url = SoftLayer.API_PUBLIC_ENDPOINT elif endpoint_type == 'private': endpoint_url = SoftLayer.API_PRIVATE_ENDPOINT else: if endpoint_type == 'custom': endpoint_url = env.input('Endpoint URL', default=endpoint) else: endpoint_url = endpoint_type # Ask for timeout timeout = env.input('Timeout', default=defaults['timeout'] or 0) return username, secret, endpoint_url, timeout
def function[get_user_input, parameter[env]]: constant[Ask for username, secret (api_key or password) and endpoint_url.] variable[defaults] assign[=] call[name[config].get_settings_from_client, parameter[name[env].client]] variable[username] assign[=] call[name[env].input, parameter[constant[Username]]] variable[secret] assign[=] call[name[env].getpass, parameter[constant[API Key or Password]]] variable[endpoint] assign[=] call[name[defaults].get, parameter[constant[endpoint_url], constant[public]]] variable[endpoint_type] assign[=] call[name[env].input, parameter[constant[Endpoint (public|private|custom)]]] variable[endpoint_type] assign[=] call[name[endpoint_type].lower, parameter[]] if compare[name[endpoint_type] equal[==] constant[public]] begin[:] variable[endpoint_url] assign[=] name[SoftLayer].API_PUBLIC_ENDPOINT variable[timeout] assign[=] call[name[env].input, parameter[constant[Timeout]]] return[tuple[[<ast.Name object at 0x7da20e956440>, <ast.Name object at 0x7da20e955a80>, <ast.Name object at 0x7da20e9548b0>, <ast.Name object at 0x7da20e955ae0>]]]
keyword[def] identifier[get_user_input] ( identifier[env] ): literal[string] identifier[defaults] = identifier[config] . identifier[get_settings_from_client] ( identifier[env] . identifier[client] ) identifier[username] = identifier[env] . identifier[input] ( literal[string] , identifier[default] = identifier[defaults] [ literal[string] ]) identifier[secret] = identifier[env] . identifier[getpass] ( literal[string] , identifier[default] = identifier[defaults] [ literal[string] ]) identifier[endpoint] = identifier[defaults] . identifier[get] ( literal[string] , literal[string] ) identifier[endpoint_type] = identifier[env] . identifier[input] ( literal[string] , identifier[default] = identifier[endpoint] ) identifier[endpoint_type] = identifier[endpoint_type] . identifier[lower] () keyword[if] identifier[endpoint_type] == literal[string] : identifier[endpoint_url] = identifier[SoftLayer] . identifier[API_PUBLIC_ENDPOINT] keyword[elif] identifier[endpoint_type] == literal[string] : identifier[endpoint_url] = identifier[SoftLayer] . identifier[API_PRIVATE_ENDPOINT] keyword[else] : keyword[if] identifier[endpoint_type] == literal[string] : identifier[endpoint_url] = identifier[env] . identifier[input] ( literal[string] , identifier[default] = identifier[endpoint] ) keyword[else] : identifier[endpoint_url] = identifier[endpoint_type] identifier[timeout] = identifier[env] . identifier[input] ( literal[string] , identifier[default] = identifier[defaults] [ literal[string] ] keyword[or] literal[int] ) keyword[return] identifier[username] , identifier[secret] , identifier[endpoint_url] , identifier[timeout]
def get_user_input(env): """Ask for username, secret (api_key or password) and endpoint_url.""" defaults = config.get_settings_from_client(env.client) # Ask for username username = env.input('Username', default=defaults['username']) # Ask for 'secret' which can be api_key or their password secret = env.getpass('API Key or Password', default=defaults['api_key']) # Ask for which endpoint they want to use endpoint = defaults.get('endpoint_url', 'public') endpoint_type = env.input('Endpoint (public|private|custom)', default=endpoint) endpoint_type = endpoint_type.lower() if endpoint_type == 'public': endpoint_url = SoftLayer.API_PUBLIC_ENDPOINT # depends on [control=['if'], data=[]] elif endpoint_type == 'private': endpoint_url = SoftLayer.API_PRIVATE_ENDPOINT # depends on [control=['if'], data=[]] elif endpoint_type == 'custom': endpoint_url = env.input('Endpoint URL', default=endpoint) # depends on [control=['if'], data=[]] else: endpoint_url = endpoint_type # Ask for timeout timeout = env.input('Timeout', default=defaults['timeout'] or 0) return (username, secret, endpoint_url, timeout)
def SETPO(cpu, dest): """ Sets byte if parity odd. :param cpu: current CPU. :param dest: destination operand. """ dest.write(Operators.ITEBV(dest.size, cpu.PF == False, 1, 0))
def function[SETPO, parameter[cpu, dest]]: constant[ Sets byte if parity odd. :param cpu: current CPU. :param dest: destination operand. ] call[name[dest].write, parameter[call[name[Operators].ITEBV, parameter[name[dest].size, compare[name[cpu].PF equal[==] constant[False]], constant[1], constant[0]]]]]
keyword[def] identifier[SETPO] ( identifier[cpu] , identifier[dest] ): literal[string] identifier[dest] . identifier[write] ( identifier[Operators] . identifier[ITEBV] ( identifier[dest] . identifier[size] , identifier[cpu] . identifier[PF] == keyword[False] , literal[int] , literal[int] ))
def SETPO(cpu, dest): """ Sets byte if parity odd. :param cpu: current CPU. :param dest: destination operand. """ dest.write(Operators.ITEBV(dest.size, cpu.PF == False, 1, 0))
def searche_top_category(self): """doc: http://open.youku.com/docs/doc?id=95 """ url = 'https://openapi.youku.com/v2/schemas/searche/top/category.json' r = requests.get(url) check_error(r) return r.json()
def function[searche_top_category, parameter[self]]: constant[doc: http://open.youku.com/docs/doc?id=95 ] variable[url] assign[=] constant[https://openapi.youku.com/v2/schemas/searche/top/category.json] variable[r] assign[=] call[name[requests].get, parameter[name[url]]] call[name[check_error], parameter[name[r]]] return[call[name[r].json, parameter[]]]
keyword[def] identifier[searche_top_category] ( identifier[self] ): literal[string] identifier[url] = literal[string] identifier[r] = identifier[requests] . identifier[get] ( identifier[url] ) identifier[check_error] ( identifier[r] ) keyword[return] identifier[r] . identifier[json] ()
def searche_top_category(self): """doc: http://open.youku.com/docs/doc?id=95 """ url = 'https://openapi.youku.com/v2/schemas/searche/top/category.json' r = requests.get(url) check_error(r) return r.json()
def convert_symbol_to_entrezid(self, symbol): """Convert Symbol to Entrez Gene Id""" entrezdict = {} server = "http://rest.genenames.org/fetch/symbol/{0}".format(symbol) r = requests.get(server, headers={"Content-Type": "application/json"}) if not r.ok: r.raise_for_status() sys.exit() response = r.text info = xmltodict.parse(response) for data in info['response']['result']['doc']['str']: if data['@name'] == 'entrez_id': entrezdict[data['@name']] = data['#text'] if data['@name'] == 'symbol': entrezdict[data['@name']] = data['#text'] return entrezdict
def function[convert_symbol_to_entrezid, parameter[self, symbol]]: constant[Convert Symbol to Entrez Gene Id] variable[entrezdict] assign[=] dictionary[[], []] variable[server] assign[=] call[constant[http://rest.genenames.org/fetch/symbol/{0}].format, parameter[name[symbol]]] variable[r] assign[=] call[name[requests].get, parameter[name[server]]] if <ast.UnaryOp object at 0x7da18c4cd330> begin[:] call[name[r].raise_for_status, parameter[]] call[name[sys].exit, parameter[]] variable[response] assign[=] name[r].text variable[info] assign[=] call[name[xmltodict].parse, parameter[name[response]]] for taget[name[data]] in starred[call[call[call[call[name[info]][constant[response]]][constant[result]]][constant[doc]]][constant[str]]] begin[:] if compare[call[name[data]][constant[@name]] equal[==] constant[entrez_id]] begin[:] call[name[entrezdict]][call[name[data]][constant[@name]]] assign[=] call[name[data]][constant[#text]] if compare[call[name[data]][constant[@name]] equal[==] constant[symbol]] begin[:] call[name[entrezdict]][call[name[data]][constant[@name]]] assign[=] call[name[data]][constant[#text]] return[name[entrezdict]]
keyword[def] identifier[convert_symbol_to_entrezid] ( identifier[self] , identifier[symbol] ): literal[string] identifier[entrezdict] ={} identifier[server] = literal[string] . identifier[format] ( identifier[symbol] ) identifier[r] = identifier[requests] . identifier[get] ( identifier[server] , identifier[headers] ={ literal[string] : literal[string] }) keyword[if] keyword[not] identifier[r] . identifier[ok] : identifier[r] . identifier[raise_for_status] () identifier[sys] . identifier[exit] () identifier[response] = identifier[r] . identifier[text] identifier[info] = identifier[xmltodict] . identifier[parse] ( identifier[response] ) keyword[for] identifier[data] keyword[in] identifier[info] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]: keyword[if] identifier[data] [ literal[string] ]== literal[string] : identifier[entrezdict] [ identifier[data] [ literal[string] ]]= identifier[data] [ literal[string] ] keyword[if] identifier[data] [ literal[string] ]== literal[string] : identifier[entrezdict] [ identifier[data] [ literal[string] ]]= identifier[data] [ literal[string] ] keyword[return] identifier[entrezdict]
def convert_symbol_to_entrezid(self, symbol): """Convert Symbol to Entrez Gene Id""" entrezdict = {} server = 'http://rest.genenames.org/fetch/symbol/{0}'.format(symbol) r = requests.get(server, headers={'Content-Type': 'application/json'}) if not r.ok: r.raise_for_status() sys.exit() # depends on [control=['if'], data=[]] response = r.text info = xmltodict.parse(response) for data in info['response']['result']['doc']['str']: if data['@name'] == 'entrez_id': entrezdict[data['@name']] = data['#text'] # depends on [control=['if'], data=[]] if data['@name'] == 'symbol': entrezdict[data['@name']] = data['#text'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['data']] return entrezdict
def _add_file(self, key, path): """Copy a file into the reference package.""" filename = os.path.basename(path) base, ext = os.path.splitext(filename) if os.path.exists(self.file_path(filename)): with tempfile.NamedTemporaryFile( dir=self.path, prefix=base, suffix=ext) as tf: filename = os.path.basename(tf.name) shutil.copyfile(path, self.file_path(filename)) self.contents['files'][key] = filename
def function[_add_file, parameter[self, key, path]]: constant[Copy a file into the reference package.] variable[filename] assign[=] call[name[os].path.basename, parameter[name[path]]] <ast.Tuple object at 0x7da1b1b9e530> assign[=] call[name[os].path.splitext, parameter[name[filename]]] if call[name[os].path.exists, parameter[call[name[self].file_path, parameter[name[filename]]]]] begin[:] with call[name[tempfile].NamedTemporaryFile, parameter[]] begin[:] variable[filename] assign[=] call[name[os].path.basename, parameter[name[tf].name]] call[name[shutil].copyfile, parameter[name[path], call[name[self].file_path, parameter[name[filename]]]]] call[call[name[self].contents][constant[files]]][name[key]] assign[=] name[filename]
keyword[def] identifier[_add_file] ( identifier[self] , identifier[key] , identifier[path] ): literal[string] identifier[filename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[path] ) identifier[base] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] ) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[file_path] ( identifier[filename] )): keyword[with] identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[dir] = identifier[self] . identifier[path] , identifier[prefix] = identifier[base] , identifier[suffix] = identifier[ext] ) keyword[as] identifier[tf] : identifier[filename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[tf] . identifier[name] ) identifier[shutil] . identifier[copyfile] ( identifier[path] , identifier[self] . identifier[file_path] ( identifier[filename] )) identifier[self] . identifier[contents] [ literal[string] ][ identifier[key] ]= identifier[filename]
def _add_file(self, key, path): """Copy a file into the reference package.""" filename = os.path.basename(path) (base, ext) = os.path.splitext(filename) if os.path.exists(self.file_path(filename)): with tempfile.NamedTemporaryFile(dir=self.path, prefix=base, suffix=ext) as tf: filename = os.path.basename(tf.name) # depends on [control=['with'], data=['tf']] # depends on [control=['if'], data=[]] shutil.copyfile(path, self.file_path(filename)) self.contents['files'][key] = filename
def to_bed12(f, db, child_type='exon', name_field='ID'): """ Given a top-level feature (e.g., transcript), construct a BED12 entry Parameters ---------- f : Feature object or string This is the top-level feature represented by one BED12 line. For a canonical GFF or GTF, this will generally be a transcript. db : a FeatureDB object This is need to get the children for the feature child_type : str Featuretypes that will be represented by the BED12 "blocks". Typically "exon". name_field : str Attribute to be used in the "name" field of the BED12 entry. Usually "ID" for GFF; "transcript_id" for GTF. """ if isinstance(f, six.string_types): f = db[f] children = list(db.children(f, featuretype=child_type, order_by='start')) sizes = [len(i) for i in children] starts = [i.start - f.start for i in children] fields = [ f.chrom, f.start - 1, # GTF -> BED coord system f.stop, f.attributes.get(name_field, ['.'])[0], f.score, f.strand, f.start, f.stop, '0,0,0', len(children), ','.join(map(str, sizes)), ','.join(map(str, starts)) ] return '\t'.join(map(str, fields)) + '\n'
def function[to_bed12, parameter[f, db, child_type, name_field]]: constant[ Given a top-level feature (e.g., transcript), construct a BED12 entry Parameters ---------- f : Feature object or string This is the top-level feature represented by one BED12 line. For a canonical GFF or GTF, this will generally be a transcript. db : a FeatureDB object This is need to get the children for the feature child_type : str Featuretypes that will be represented by the BED12 "blocks". Typically "exon". name_field : str Attribute to be used in the "name" field of the BED12 entry. Usually "ID" for GFF; "transcript_id" for GTF. ] if call[name[isinstance], parameter[name[f], name[six].string_types]] begin[:] variable[f] assign[=] call[name[db]][name[f]] variable[children] assign[=] call[name[list], parameter[call[name[db].children, parameter[name[f]]]]] variable[sizes] assign[=] <ast.ListComp object at 0x7da20c9925f0> variable[starts] assign[=] <ast.ListComp object at 0x7da20c993a60> variable[fields] assign[=] list[[<ast.Attribute object at 0x7da20c990970>, <ast.BinOp object at 0x7da20c993f40>, <ast.Attribute object at 0x7da18f00e710>, <ast.Subscript object at 0x7da18f00f670>, <ast.Attribute object at 0x7da18f00f370>, <ast.Attribute object at 0x7da18f00d300>, <ast.Attribute object at 0x7da18f00d960>, <ast.Attribute object at 0x7da18f00cf10>, <ast.Constant object at 0x7da18f00eb30>, <ast.Call object at 0x7da18f00dc30>, <ast.Call object at 0x7da18f00d3f0>, <ast.Call object at 0x7da18f00fac0>]] return[binary_operation[call[constant[ ].join, parameter[call[name[map], parameter[name[str], name[fields]]]]] + constant[ ]]]
keyword[def] identifier[to_bed12] ( identifier[f] , identifier[db] , identifier[child_type] = literal[string] , identifier[name_field] = literal[string] ): literal[string] keyword[if] identifier[isinstance] ( identifier[f] , identifier[six] . identifier[string_types] ): identifier[f] = identifier[db] [ identifier[f] ] identifier[children] = identifier[list] ( identifier[db] . identifier[children] ( identifier[f] , identifier[featuretype] = identifier[child_type] , identifier[order_by] = literal[string] )) identifier[sizes] =[ identifier[len] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[children] ] identifier[starts] =[ identifier[i] . identifier[start] - identifier[f] . identifier[start] keyword[for] identifier[i] keyword[in] identifier[children] ] identifier[fields] =[ identifier[f] . identifier[chrom] , identifier[f] . identifier[start] - literal[int] , identifier[f] . identifier[stop] , identifier[f] . identifier[attributes] . identifier[get] ( identifier[name_field] ,[ literal[string] ])[ literal[int] ], identifier[f] . identifier[score] , identifier[f] . identifier[strand] , identifier[f] . identifier[start] , identifier[f] . identifier[stop] , literal[string] , identifier[len] ( identifier[children] ), literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[sizes] )), literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[starts] )) ] keyword[return] literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[fields] ))+ literal[string]
def to_bed12(f, db, child_type='exon', name_field='ID'): """ Given a top-level feature (e.g., transcript), construct a BED12 entry Parameters ---------- f : Feature object or string This is the top-level feature represented by one BED12 line. For a canonical GFF or GTF, this will generally be a transcript. db : a FeatureDB object This is need to get the children for the feature child_type : str Featuretypes that will be represented by the BED12 "blocks". Typically "exon". name_field : str Attribute to be used in the "name" field of the BED12 entry. Usually "ID" for GFF; "transcript_id" for GTF. """ if isinstance(f, six.string_types): f = db[f] # depends on [control=['if'], data=[]] children = list(db.children(f, featuretype=child_type, order_by='start')) sizes = [len(i) for i in children] starts = [i.start - f.start for i in children] # GTF -> BED coord system fields = [f.chrom, f.start - 1, f.stop, f.attributes.get(name_field, ['.'])[0], f.score, f.strand, f.start, f.stop, '0,0,0', len(children), ','.join(map(str, sizes)), ','.join(map(str, starts))] return '\t'.join(map(str, fields)) + '\n'
def exec_command(attr, cmd):
    """Runs a subproc to calculate a package attribute.

    Raises `InvalidPackageError` (with the captured stderr) when the
    subprocess exits with a nonzero return code; otherwise returns the
    stripped (stdout, stderr) pair.
    """
    import subprocess

    proc = popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    if proc.returncode:
        # Imported lazily so the error path stays off the hot path.
        from rez.exceptions import InvalidPackageError
        message = "Error determining package attribute '%s':\n%s" % (attr, stderr)
        raise InvalidPackageError(message)

    return stdout.strip(), stderr.strip()
def function[exec_command, parameter[attr, cmd]]: constant[Runs a subproc to calculate a package attribute. ] import module[subprocess] variable[p] assign[=] call[name[popen], parameter[name[cmd]]] <ast.Tuple object at 0x7da1b170ca90> assign[=] call[name[p].communicate, parameter[]] if name[p].returncode begin[:] from relative_module[rez.exceptions] import module[InvalidPackageError] <ast.Raise object at 0x7da1b170ded0> return[tuple[[<ast.Call object at 0x7da1b170ee30>, <ast.Call object at 0x7da1b170ef50>]]]
keyword[def] identifier[exec_command] ( identifier[attr] , identifier[cmd] ): literal[string] keyword[import] identifier[subprocess] identifier[p] = identifier[popen] ( identifier[cmd] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[PIPE] ) identifier[out] , identifier[err] = identifier[p] . identifier[communicate] () keyword[if] identifier[p] . identifier[returncode] : keyword[from] identifier[rez] . identifier[exceptions] keyword[import] identifier[InvalidPackageError] keyword[raise] identifier[InvalidPackageError] ( literal[string] %( identifier[attr] , identifier[err] )) keyword[return] identifier[out] . identifier[strip] (), identifier[err] . identifier[strip] ()
def exec_command(attr, cmd): """Runs a subproc to calculate a package attribute. """ import subprocess p = popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = p.communicate() if p.returncode: from rez.exceptions import InvalidPackageError raise InvalidPackageError("Error determining package attribute '%s':\n%s" % (attr, err)) # depends on [control=['if'], data=[]] return (out.strip(), err.strip())
def get_prepared_include_exclude(attributes):
    """Return tuple with prepared __include__ and __exclude__ attributes.

    Each entry of ``attributes['__include__']`` / ``attributes['__exclude__']``
    is expected to expose a ``.name`` attribute; missing keys yield empty
    tuples.

    :type attributes: dict
    :rtype: tuple
    """
    prepared = {
        key: tuple(item.name for item in attributes.get(key, ()))
        for key in ('__include__', '__exclude__')
    }
    return prepared['__include__'], prepared['__exclude__']
def function[get_prepared_include_exclude, parameter[attributes]]: constant[Return tuple with prepared __include__ and __exclude__ attributes. :type attributes: dict :rtype: tuple ] variable[attrs] assign[=] call[name[dict], parameter[]] for taget[name[attr]] in starred[tuple[[<ast.Constant object at 0x7da1b25975e0>, <ast.Constant object at 0x7da1b2595900>]]] begin[:] call[name[attrs]][name[attr]] assign[=] call[name[tuple], parameter[<ast.ListComp object at 0x7da1b2595180>]] return[tuple[[<ast.Subscript object at 0x7da1b2595d80>, <ast.Subscript object at 0x7da1b2597bb0>]]]
keyword[def] identifier[get_prepared_include_exclude] ( identifier[attributes] ): literal[string] identifier[attrs] = identifier[dict] () keyword[for] identifier[attr] keyword[in] ( literal[string] , literal[string] ): identifier[attrs] [ identifier[attr] ]= identifier[tuple] ([ identifier[item] . identifier[name] keyword[for] identifier[item] keyword[in] identifier[attributes] . identifier[get] ( identifier[attr] , identifier[tuple] ())]) keyword[return] identifier[attrs] [ literal[string] ], identifier[attrs] [ literal[string] ]
def get_prepared_include_exclude(attributes): """Return tuple with prepared __include__ and __exclude__ attributes. :type attributes: dict :rtype: tuple """ attrs = dict() for attr in ('__include__', '__exclude__'): attrs[attr] = tuple([item.name for item in attributes.get(attr, tuple())]) # depends on [control=['for'], data=['attr']] return (attrs['__include__'], attrs['__exclude__'])
def create(self, alias=None, cache=None, **kwargs):
    """
    Create a new cache.  Either alias or cache params are required.
    Extra keyword arguments are forwarded to configure the cache.

    .. deprecated:: 0.11.0
        Only creating a cache passing an alias is supported.  To create
        a cache from an explicit cache class plus kwargs, use
        ``aiocache.Cache`` instead.

    :param alias: str alias to pull configuration from
    :param cache: str or class cache class to use for creating the new
        cache (when no alias is used)
    :return: New cache instance
    """
    if alias:
        config = self.get_alias_config(alias)
    elif cache:
        warnings.warn(
            "Creating a cache with an explicit config is deprecated, use 'aiocache.Cache'",
            DeprecationWarning,
        )
        config = {"cache": cache}
    else:
        raise TypeError("create call needs to receive an alias or a cache")

    # kwargs take precedence over the stored/derived config.
    merged = dict(config)
    merged.update(kwargs)
    return _create_cache(**merged)
def function[create, parameter[self, alias, cache]]: constant[ Create a new cache. Either alias or cache params are required. You can use kwargs to pass extra parameters to configure the cache. .. deprecated:: 0.11.0 Only creating a cache passing an alias is supported. If you want to create a cache passing explicit cache and kwargs use ``aiocache.Cache``. :param alias: str alias to pull configuration from :param cache: str or class cache class to use for creating the new cache (when no alias is used) :return: New cache instance ] if name[alias] begin[:] variable[config] assign[=] call[name[self].get_alias_config, parameter[name[alias]]] variable[cache] assign[=] call[name[_create_cache], parameter[]] return[name[cache]]
keyword[def] identifier[create] ( identifier[self] , identifier[alias] = keyword[None] , identifier[cache] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[alias] : identifier[config] = identifier[self] . identifier[get_alias_config] ( identifier[alias] ) keyword[elif] identifier[cache] : identifier[warnings] . identifier[warn] ( literal[string] , identifier[DeprecationWarning] , ) identifier[config] ={ literal[string] : identifier[cache] } keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] ) identifier[cache] = identifier[_create_cache] (**{** identifier[config] ,** identifier[kwargs] }) keyword[return] identifier[cache]
def create(self, alias=None, cache=None, **kwargs): """ Create a new cache. Either alias or cache params are required. You can use kwargs to pass extra parameters to configure the cache. .. deprecated:: 0.11.0 Only creating a cache passing an alias is supported. If you want to create a cache passing explicit cache and kwargs use ``aiocache.Cache``. :param alias: str alias to pull configuration from :param cache: str or class cache class to use for creating the new cache (when no alias is used) :return: New cache instance """ if alias: config = self.get_alias_config(alias) # depends on [control=['if'], data=[]] elif cache: warnings.warn("Creating a cache with an explicit config is deprecated, use 'aiocache.Cache'", DeprecationWarning) config = {'cache': cache} # depends on [control=['if'], data=[]] else: raise TypeError('create call needs to receive an alias or a cache') cache = _create_cache(**{**config, **kwargs}) return cache
def create_file(project: str, environment: str, feature: str, state: str) -> None:
    """
    Create file to replay.

    Create file with ``rc`` command that will be called against the
    LaunchDarkly API when ``rc playback`` is called from the main CLI.

    :param project: LaunchDarkly Project
    :param environment: LaunchDarkly Environment
    :param feature: LaunchDarkly Feature
    :param state: State to update feature flag
    """
    check_local()

    todo_dir = './replay/toDo/'
    # uuid1-based name keeps queued replay files unique.
    replay_name = '{0}.txt'.format(str(uuid.uuid1()))
    full_path = os.path.join(todo_dir, replay_name)

    command = 'rc update-ld-api -p {0} -e {1} -f {2} -s {3}'.format(
        project, environment, feature, state
    )
    with open(full_path, 'w') as handle:
        handle.write(command)
def function[create_file, parameter[project, environment, feature, state]]: constant[ Create file to replay. Create file with ``rc`` command that will be called against the LaunchDarkly API when ``rc playback`` is called from the main CLI. :param project: LaunchDarkly Project :param environment: LaunchDarkly Environment :param feature: LaunchDarkly Feature :param state: State to update feature flag ] call[name[check_local], parameter[]] variable[save_path] assign[=] constant[./replay/toDo/] variable[filename] assign[=] call[constant[{0}.txt].format, parameter[call[name[str], parameter[call[name[uuid].uuid1, parameter[]]]]]] variable[complete_name] assign[=] call[name[os].path.join, parameter[name[save_path], name[filename]]] with call[name[open], parameter[name[complete_name], constant[w]]] begin[:] call[name[filename].write, parameter[call[constant[rc update-ld-api -p {0} -e {1} -f {2} -s {3}].format, parameter[name[project], name[environment], name[feature], name[state]]]]]
keyword[def] identifier[create_file] ( identifier[project] : identifier[str] , identifier[environment] : identifier[str] , identifier[feature] : identifier[str] , identifier[state] : identifier[str] )-> keyword[None] : literal[string] identifier[check_local] () identifier[save_path] = literal[string] identifier[filename] = literal[string] . identifier[format] ( identifier[str] ( identifier[uuid] . identifier[uuid1] ())) identifier[complete_name] = identifier[os] . identifier[path] . identifier[join] ( identifier[save_path] , identifier[filename] ) keyword[with] identifier[open] ( identifier[complete_name] , literal[string] ) keyword[as] identifier[filename] : identifier[filename] . identifier[write] ( literal[string] . identifier[format] ( identifier[project] , identifier[environment] , identifier[feature] , identifier[state] ))
def create_file(project: str, environment: str, feature: str, state: str) -> None: """ Create file to replay. Create file with ``rc`` command that will be called against the LaunchDarkly API when ``rc playback`` is called from the main CLI. :param project: LaunchDarkly Project :param environment: LaunchDarkly Environment :param feature: LaunchDarkly Feature :param state: State to update feature flag """ check_local() save_path = './replay/toDo/' filename = '{0}.txt'.format(str(uuid.uuid1())) complete_name = os.path.join(save_path, filename) with open(complete_name, 'w') as filename: filename.write('rc update-ld-api -p {0} -e {1} -f {2} -s {3}'.format(project, environment, feature, state)) # depends on [control=['with'], data=['filename']]
def docx_to_md(input_name, output_name):
    """
    Converts an input docx file to a MarkDown file of the given output name.

    Parameters
    ==========
    input_name : String
        Relative file location of the input file to where this function is
        being called.
    output_name : String
        Relative file location of the output file to where this function is
        being called.  Note that .md can be omitted.

    Examples
    ========
    Suppose we have a directory as follows:

    data/
        doc.docx

    To convert the document:

    >>> from aide_document import convert
    >>> convert.docx_to_md('data/doc.docx', 'data/doc.md')

    .md can also be omitted from the second argument.
    """
    # BUG FIX: this previously checked for and appended '.docx' (copied from
    # the inverse converter), so omitting the extension produced a .docx
    # output instead of Markdown.  Ensure the target ends with '.md'.
    if not output_name.endswith('.md'):
        output_name = output_name + '.md'
    # NOTE(review): command is built by string concatenation and run through
    # the shell; paths containing spaces or shell metacharacters will break.
    os.system("pandoc " + input_name + " -o " + output_name)
def function[docx_to_md, parameter[input_name, output_name]]: constant[ Converts an input docx file to MarkDown file of the given output name. Parameters ========== input_name : String Relative file location of the input file to where this function is being called. output_name : String Relative file location of the output file to where this function is being called. Note that .md can be omitted. Examples ======== Suppose we have a directory as follows: data/ doc.docx To convert the document: >>> from aide_document import convert >>> convert.docx_to_md(data/doc.docx, data/doc.md) .md can also be omitted from the second argument. ] if compare[call[name[output_name]][<ast.Slice object at 0x7da20c6aae00>] equal[==] constant[.docx]] begin[:] call[name[os].system, parameter[binary_operation[binary_operation[binary_operation[constant[pandoc ] + name[input_name]] + constant[ -o ]] + name[output_name]]]]
keyword[def] identifier[docx_to_md] ( identifier[input_name] , identifier[output_name] ): literal[string] keyword[if] identifier[output_name] [- literal[int] :]== literal[string] : identifier[os] . identifier[system] ( literal[string] + identifier[input_name] + literal[string] + identifier[output_name] ) keyword[else] : identifier[os] . identifier[system] ( literal[string] + identifier[input_name] + literal[string] + identifier[output_name] + literal[string] )
def docx_to_md(input_name, output_name): """ Converts an input docx file to MarkDown file of the given output name. Parameters ========== input_name : String Relative file location of the input file to where this function is being called. output_name : String Relative file location of the output file to where this function is being called. Note that .md can be omitted. Examples ======== Suppose we have a directory as follows: data/ doc.docx To convert the document: >>> from aide_document import convert >>> convert.docx_to_md(data/doc.docx, data/doc.md) .md can also be omitted from the second argument. """ if output_name[-5:] == '.docx': os.system('pandoc ' + input_name + ' -o ' + output_name) # depends on [control=['if'], data=[]] else: os.system('pandoc ' + input_name + ' -o ' + output_name + '.docx')
def salvar(self, destino=None, prefix='tmp', suffix='-sat.log'):
    """Save the decoded log file.

    :param str destino: (Optional) Full path of the file where the log
        data should be written.  When omitted, a temporary file is
        created via :func:`tempfile.mkstemp`.

    :param str prefix: (Optional) Prefix for the file name.  Defaults
        to ``"tmp"``.

    :param str suffix: (Optional) Suffix for the file name.  Defaults
        to ``"-sat.log"``.

    :return: The absolute path of the saved file.
    :rtype: str

    :raises IOError: If ``destino`` is given and the file already exists.
    """
    if not destino:
        # No destination given: let mkstemp pick a unique name and
        # hand back an already-open descriptor.
        fd, destino = tempfile.mkstemp(prefix=prefix, suffix=suffix)
    else:
        if os.path.exists(destino):
            raise IOError((errno.EEXIST, 'File exists', destino,))
        destino = os.path.abspath(destino)
        # O_EXCL guards against a race between the exists() check above
        # and the actual creation of the file.
        fd = os.open(destino, os.O_EXCL | os.O_CREAT | os.O_WRONLY)

    # conteudo() is expected to return bytes suitable for os.write.
    os.write(fd, self.conteudo())
    os.fsync(fd)
    os.close(fd)
    return os.path.abspath(destino)
def function[salvar, parameter[self, destino, prefix, suffix]]: constant[Salva o arquivo de log decodificado. :param str destino: (Opcional) Caminho completo para o arquivo onde os dados dos logs deverão ser salvos. Se não informado, será criado um arquivo temporário via :func:`tempfile.mkstemp`. :param str prefix: (Opcional) Prefixo para o nome do arquivo. Se não informado será usado ``"tmp"``. :param str suffix: (Opcional) Sufixo para o nome do arquivo. Se não informado será usado ``"-sat.log"``. :return: Retorna o caminho completo para o arquivo salvo. :rtype: str :raises IOError: Se o destino for informado e o arquivo já existir. ] if name[destino] begin[:] if call[name[os].path.exists, parameter[name[destino]]] begin[:] <ast.Raise object at 0x7da1b0e74520> variable[destino] assign[=] call[name[os].path.abspath, parameter[name[destino]]] variable[fd] assign[=] call[name[os].open, parameter[name[destino], binary_operation[binary_operation[name[os].O_EXCL <ast.BitOr object at 0x7da2590d6aa0> name[os].O_CREAT] <ast.BitOr object at 0x7da2590d6aa0> name[os].O_WRONLY]]] call[name[os].write, parameter[name[fd], call[name[self].conteudo, parameter[]]]] call[name[os].fsync, parameter[name[fd]]] call[name[os].close, parameter[name[fd]]] return[call[name[os].path.abspath, parameter[name[destino]]]]
keyword[def] identifier[salvar] ( identifier[self] , identifier[destino] = keyword[None] , identifier[prefix] = literal[string] , identifier[suffix] = literal[string] ): literal[string] keyword[if] identifier[destino] : keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[destino] ): keyword[raise] identifier[IOError] (( identifier[errno] . identifier[EEXIST] , literal[string] , identifier[destino] ,)) identifier[destino] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[destino] ) identifier[fd] = identifier[os] . identifier[open] ( identifier[destino] , identifier[os] . identifier[O_EXCL] | identifier[os] . identifier[O_CREAT] | identifier[os] . identifier[O_WRONLY] ) keyword[else] : identifier[fd] , identifier[destino] = identifier[tempfile] . identifier[mkstemp] ( identifier[prefix] = identifier[prefix] , identifier[suffix] = identifier[suffix] ) identifier[os] . identifier[write] ( identifier[fd] , identifier[self] . identifier[conteudo] ()) identifier[os] . identifier[fsync] ( identifier[fd] ) identifier[os] . identifier[close] ( identifier[fd] ) keyword[return] identifier[os] . identifier[path] . identifier[abspath] ( identifier[destino] )
def salvar(self, destino=None, prefix='tmp', suffix='-sat.log'): """Salva o arquivo de log decodificado. :param str destino: (Opcional) Caminho completo para o arquivo onde os dados dos logs deverão ser salvos. Se não informado, será criado um arquivo temporário via :func:`tempfile.mkstemp`. :param str prefix: (Opcional) Prefixo para o nome do arquivo. Se não informado será usado ``"tmp"``. :param str suffix: (Opcional) Sufixo para o nome do arquivo. Se não informado será usado ``"-sat.log"``. :return: Retorna o caminho completo para o arquivo salvo. :rtype: str :raises IOError: Se o destino for informado e o arquivo já existir. """ if destino: if os.path.exists(destino): raise IOError((errno.EEXIST, 'File exists', destino)) # depends on [control=['if'], data=[]] destino = os.path.abspath(destino) fd = os.open(destino, os.O_EXCL | os.O_CREAT | os.O_WRONLY) # depends on [control=['if'], data=[]] else: (fd, destino) = tempfile.mkstemp(prefix=prefix, suffix=suffix) os.write(fd, self.conteudo()) os.fsync(fd) os.close(fd) return os.path.abspath(destino)
def get_scalar_arg_dtypes(self):
    """Get the location and types of the input scalars.

    Returns:
        list: for every kernel input element either None if the data is a
            buffer or the numpy data type if if is a scalar.
    """
    result = []
    # Only the data objects matter here; the keys are not used.
    for data in self._kernel_data.values():
        result.extend(data.get_scalar_arg_dtypes())
    return result
def function[get_scalar_arg_dtypes, parameter[self]]: constant[Get the location and types of the input scalars. Returns: list: for every kernel input element either None if the data is a buffer or the numpy data type if if is a scalar. ] variable[dtypes] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da204346ef0>, <ast.Name object at 0x7da204346a10>]]] in starred[call[name[self]._kernel_data.items, parameter[]]] begin[:] call[name[dtypes].extend, parameter[call[name[data].get_scalar_arg_dtypes, parameter[]]]] return[name[dtypes]]
keyword[def] identifier[get_scalar_arg_dtypes] ( identifier[self] ): literal[string] identifier[dtypes] =[] keyword[for] identifier[name] , identifier[data] keyword[in] identifier[self] . identifier[_kernel_data] . identifier[items] (): identifier[dtypes] . identifier[extend] ( identifier[data] . identifier[get_scalar_arg_dtypes] ()) keyword[return] identifier[dtypes]
def get_scalar_arg_dtypes(self): """Get the location and types of the input scalars. Returns: list: for every kernel input element either None if the data is a buffer or the numpy data type if if is a scalar. """ dtypes = [] for (name, data) in self._kernel_data.items(): dtypes.extend(data.get_scalar_arg_dtypes()) # depends on [control=['for'], data=[]] return dtypes
def wrap_function(self, func):
    """ Wrap a function to profile it.

    The returned wrapper enables profiling before calling *func* and
    always disables it afterwards, even if *func* raises.
    """
    def profiled(*args, **kwargs):
        self.enable_by_count()
        try:
            return func(*args, **kwargs)
        finally:
            self.disable_by_count()
    return profiled
def function[wrap_function, parameter[self, func]]: constant[ Wrap a function to profile it. ] def function[f, parameter[]]: call[name[self].enable_by_count, parameter[]] <ast.Try object at 0x7da18f00c640> return[name[result]] return[name[f]]
keyword[def] identifier[wrap_function] ( identifier[self] , identifier[func] ): literal[string] keyword[def] identifier[f] (* identifier[args] ,** identifier[kwds] ): identifier[self] . identifier[enable_by_count] () keyword[try] : identifier[result] = identifier[func] (* identifier[args] ,** identifier[kwds] ) keyword[finally] : identifier[self] . identifier[disable_by_count] () keyword[return] identifier[result] keyword[return] identifier[f]
def wrap_function(self, func): """ Wrap a function to profile it. """ def f(*args, **kwds): self.enable_by_count() try: result = func(*args, **kwds) # depends on [control=['try'], data=[]] finally: self.disable_by_count() return result return f
def readsegment(d, segment):
    """ Prepare to read segment of data.

    Reads one time segment of visibility data from the measurement set
    named in ``d['filename']`` and returns ``(data, u, v, w)`` arrays
    (complex64 visibilities and float32 uvw coordinates in wavelengths).

    NOTE(review): relies on module-level ``qa``, ``ms``, ``n`` (numpy alias)
    and ``logger`` objects -- presumably CASA quanta/ms tools; confirm they
    are initialized before this is called.
    """

    # set requested time range based on given parameters
    # Segment boundaries are stored in days; convert to seconds for ms.select.
    starttime = qa.getvalue(qa.convert(qa.time(qa.quantity(d['segmenttimes'][segment,0],'d'),form=['ymd'], prec=9)[0], 's'))[0]
    stoptime = qa.getvalue(qa.convert(qa.time(qa.quantity(d['segmenttimes'][segment,1],'d'),form=['ymd'], prec=9)[0], 's'))[0]
    logger.info('Reading segment %d/%d, times %s to %s' % (segment, len(d['segmenttimes'])-1, qa.time(qa.quantity(starttime/(24*3600),'d'),form=['hms'], prec=9)[0], qa.time(qa.quantity(stoptime/(24*3600), 'd'), form=['hms'], prec=9)[0]))

    # read data into data structure
    ms.open(d['filename'])
    if len(d['spwlist']) == 1:
        ms.selectinit(datadescid=d['spwlist'][0])
    else:
        ms.selectinit(datadescid=0, reset=True) # reset includes spw in iteration over time

    selection = {'time': [starttime, stoptime], 'uvdist': [1., 1e10]}
#    selection = {'time': [starttime, stoptime], 'uvdist': [1., 1e10], 'antenna1': d['ants'], 'antenna2': d['ants']}  # **this misses ants for some reason!**
    ms.select(items = selection)
    ms.selectpolarization(d['pols'])
    da = ms.getdata([d['datacol'],'axis_info','u','v','w','flag','data_desc_id'], ifraxis=True)
    good = n.where((da['data_desc_id']) == d['spwlist'][0])[0]  # take first spw
    time0 = da['axis_info']['time_axis']['MJDseconds'][good]
    # Reverse the axis order of the getdata arrays, then keep only the rows
    # belonging to the first spectral window.
    data0 = n.transpose(da[d['datacol']], axes=[3,2,1,0])[good]
    flag0 = n.transpose(da['flag'], axes=[3,2,1,0])[good]
    u0 = da['u'].transpose()[good] * d['freq_orig'][0] * (1e9/3e8) # uvw are in m, so divide by wavelength of first chan to set in lambda
    v0 = da['v'].transpose()[good] * d['freq_orig'][0] * (1e9/3e8)
    w0 = da['w'].transpose()[good] * d['freq_orig'][0] * (1e9/3e8)

    # Remaining spectral windows are appended along the channel axis (axis 2).
    if len(d['spwlist']) > 1:
        for spw in d['spwlist'][1:]:
            good = n.where((da['data_desc_id']) == spw)[0]
            data1 = n.transpose(da[d['datacol']], axes=[3,2,1,0])[good]
            data0 = n.concatenate( (data0, data1), axis=2 )
            flag0 = n.concatenate( (flag0, n.transpose(da['flag'], axes=[3,2,1,0])[good]), axis=2 )

    # Free the raw getdata buffers before building the (large) output arrays.
    del da
    # Keep only the requested channels and zero out flagged samples.
    data0 = data0[:,:,d['chans'],:] * n.invert(flag0[:,:,d['chans'],:]) # flag==1 means bad data (for vla)
    return data0.astype('complex64'), u0.astype('float32'), v0.astype('float32'), w0.astype('float32')
def function[readsegment, parameter[d, segment]]: constant[ Prepare to read segment of data ] variable[starttime] assign[=] call[call[name[qa].getvalue, parameter[call[name[qa].convert, parameter[call[call[name[qa].time, parameter[call[name[qa].quantity, parameter[call[call[name[d]][constant[segmenttimes]]][tuple[[<ast.Name object at 0x7da1b2527c10>, <ast.Constant object at 0x7da1b2526980>]]], constant[d]]]]]][constant[0]], constant[s]]]]]][constant[0]] variable[stoptime] assign[=] call[call[name[qa].getvalue, parameter[call[name[qa].convert, parameter[call[call[name[qa].time, parameter[call[name[qa].quantity, parameter[call[call[name[d]][constant[segmenttimes]]][tuple[[<ast.Name object at 0x7da1b2524820>, <ast.Constant object at 0x7da1b2527e20>]]], constant[d]]]]]][constant[0]], constant[s]]]]]][constant[0]] call[name[logger].info, parameter[binary_operation[constant[Reading segment %d/%d, times %s to %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2524670>, <ast.BinOp object at 0x7da1b2524580>, <ast.Subscript object at 0x7da1b2526500>, <ast.Subscript object at 0x7da1b2525a80>]]]]] call[name[ms].open, parameter[call[name[d]][constant[filename]]]] if compare[call[name[len], parameter[call[name[d]][constant[spwlist]]]] equal[==] constant[1]] begin[:] call[name[ms].selectinit, parameter[]] variable[selection] assign[=] dictionary[[<ast.Constant object at 0x7da1b2524850>, <ast.Constant object at 0x7da1b25246a0>], [<ast.List object at 0x7da1b25249d0>, <ast.List object at 0x7da1b2524640>]] call[name[ms].select, parameter[]] call[name[ms].selectpolarization, parameter[call[name[d]][constant[pols]]]] variable[da] assign[=] call[name[ms].getdata, parameter[list[[<ast.Subscript object at 0x7da1b2525390>, <ast.Constant object at 0x7da1b2525870>, <ast.Constant object at 0x7da1b2525c30>, <ast.Constant object at 0x7da1b25279a0>, <ast.Constant object at 0x7da1b2526050>, <ast.Constant object at 0x7da1b2527430>, <ast.Constant object at 0x7da1b2524880>]]]] 
variable[good] assign[=] call[call[name[n].where, parameter[compare[call[name[da]][constant[data_desc_id]] equal[==] call[call[name[d]][constant[spwlist]]][constant[0]]]]]][constant[0]] variable[time0] assign[=] call[call[call[call[name[da]][constant[axis_info]]][constant[time_axis]]][constant[MJDseconds]]][name[good]] variable[data0] assign[=] call[call[name[n].transpose, parameter[call[name[da]][call[name[d]][constant[datacol]]]]]][name[good]] variable[flag0] assign[=] call[call[name[n].transpose, parameter[call[name[da]][constant[flag]]]]][name[good]] variable[u0] assign[=] binary_operation[binary_operation[call[call[call[name[da]][constant[u]].transpose, parameter[]]][name[good]] * call[call[name[d]][constant[freq_orig]]][constant[0]]] * binary_operation[constant[1000000000.0] / constant[300000000.0]]] variable[v0] assign[=] binary_operation[binary_operation[call[call[call[name[da]][constant[v]].transpose, parameter[]]][name[good]] * call[call[name[d]][constant[freq_orig]]][constant[0]]] * binary_operation[constant[1000000000.0] / constant[300000000.0]]] variable[w0] assign[=] binary_operation[binary_operation[call[call[call[name[da]][constant[w]].transpose, parameter[]]][name[good]] * call[call[name[d]][constant[freq_orig]]][constant[0]]] * binary_operation[constant[1000000000.0] / constant[300000000.0]]] if compare[call[name[len], parameter[call[name[d]][constant[spwlist]]]] greater[>] constant[1]] begin[:] for taget[name[spw]] in starred[call[call[name[d]][constant[spwlist]]][<ast.Slice object at 0x7da1b253f3d0>]] begin[:] variable[good] assign[=] call[call[name[n].where, parameter[compare[call[name[da]][constant[data_desc_id]] equal[==] name[spw]]]]][constant[0]] variable[data1] assign[=] call[call[name[n].transpose, parameter[call[name[da]][call[name[d]][constant[datacol]]]]]][name[good]] variable[data0] assign[=] call[name[n].concatenate, parameter[tuple[[<ast.Name object at 0x7da1b253fb50>, <ast.Name object at 0x7da1b253f880>]]]] variable[flag0] 
assign[=] call[name[n].concatenate, parameter[tuple[[<ast.Name object at 0x7da1b253fe20>, <ast.Subscript object at 0x7da1b253c6a0>]]]] <ast.Delete object at 0x7da1b253d990> variable[data0] assign[=] binary_operation[call[name[data0]][tuple[[<ast.Slice object at 0x7da18dc06470>, <ast.Slice object at 0x7da18dc07670>, <ast.Subscript object at 0x7da18dc06f50>, <ast.Slice object at 0x7da18dc07370>]]] * call[name[n].invert, parameter[call[name[flag0]][tuple[[<ast.Slice object at 0x7da18dc04ee0>, <ast.Slice object at 0x7da18dc07580>, <ast.Subscript object at 0x7da18dc06890>, <ast.Slice object at 0x7da18dc05f00>]]]]]] return[tuple[[<ast.Call object at 0x7da18dc07e20>, <ast.Call object at 0x7da18dc07e50>, <ast.Call object at 0x7da18dc06920>, <ast.Call object at 0x7da18dc07a60>]]]
keyword[def] identifier[readsegment] ( identifier[d] , identifier[segment] ): literal[string] identifier[starttime] = identifier[qa] . identifier[getvalue] ( identifier[qa] . identifier[convert] ( identifier[qa] . identifier[time] ( identifier[qa] . identifier[quantity] ( identifier[d] [ literal[string] ][ identifier[segment] , literal[int] ], literal[string] ), identifier[form] =[ literal[string] ], identifier[prec] = literal[int] )[ literal[int] ], literal[string] ))[ literal[int] ] identifier[stoptime] = identifier[qa] . identifier[getvalue] ( identifier[qa] . identifier[convert] ( identifier[qa] . identifier[time] ( identifier[qa] . identifier[quantity] ( identifier[d] [ literal[string] ][ identifier[segment] , literal[int] ], literal[string] ), identifier[form] =[ literal[string] ], identifier[prec] = literal[int] )[ literal[int] ], literal[string] ))[ literal[int] ] identifier[logger] . identifier[info] ( literal[string] %( identifier[segment] , identifier[len] ( identifier[d] [ literal[string] ])- literal[int] , identifier[qa] . identifier[time] ( identifier[qa] . identifier[quantity] ( identifier[starttime] /( literal[int] * literal[int] ), literal[string] ), identifier[form] =[ literal[string] ], identifier[prec] = literal[int] )[ literal[int] ], identifier[qa] . identifier[time] ( identifier[qa] . identifier[quantity] ( identifier[stoptime] /( literal[int] * literal[int] ), literal[string] ), identifier[form] =[ literal[string] ], identifier[prec] = literal[int] )[ literal[int] ])) identifier[ms] . identifier[open] ( identifier[d] [ literal[string] ]) keyword[if] identifier[len] ( identifier[d] [ literal[string] ])== literal[int] : identifier[ms] . identifier[selectinit] ( identifier[datadescid] = identifier[d] [ literal[string] ][ literal[int] ]) keyword[else] : identifier[ms] . 
identifier[selectinit] ( identifier[datadescid] = literal[int] , identifier[reset] = keyword[True] ) identifier[selection] ={ literal[string] :[ identifier[starttime] , identifier[stoptime] ], literal[string] :[ literal[int] , literal[int] ]} identifier[ms] . identifier[select] ( identifier[items] = identifier[selection] ) identifier[ms] . identifier[selectpolarization] ( identifier[d] [ literal[string] ]) identifier[da] = identifier[ms] . identifier[getdata] ([ identifier[d] [ literal[string] ], literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ], identifier[ifraxis] = keyword[True] ) identifier[good] = identifier[n] . identifier[where] (( identifier[da] [ literal[string] ])== identifier[d] [ literal[string] ][ literal[int] ])[ literal[int] ] identifier[time0] = identifier[da] [ literal[string] ][ literal[string] ][ literal[string] ][ identifier[good] ] identifier[data0] = identifier[n] . identifier[transpose] ( identifier[da] [ identifier[d] [ literal[string] ]], identifier[axes] =[ literal[int] , literal[int] , literal[int] , literal[int] ])[ identifier[good] ] identifier[flag0] = identifier[n] . identifier[transpose] ( identifier[da] [ literal[string] ], identifier[axes] =[ literal[int] , literal[int] , literal[int] , literal[int] ])[ identifier[good] ] identifier[u0] = identifier[da] [ literal[string] ]. identifier[transpose] ()[ identifier[good] ]* identifier[d] [ literal[string] ][ literal[int] ]*( literal[int] / literal[int] ) identifier[v0] = identifier[da] [ literal[string] ]. identifier[transpose] ()[ identifier[good] ]* identifier[d] [ literal[string] ][ literal[int] ]*( literal[int] / literal[int] ) identifier[w0] = identifier[da] [ literal[string] ]. 
identifier[transpose] ()[ identifier[good] ]* identifier[d] [ literal[string] ][ literal[int] ]*( literal[int] / literal[int] ) keyword[if] identifier[len] ( identifier[d] [ literal[string] ])> literal[int] : keyword[for] identifier[spw] keyword[in] identifier[d] [ literal[string] ][ literal[int] :]: identifier[good] = identifier[n] . identifier[where] (( identifier[da] [ literal[string] ])== identifier[spw] )[ literal[int] ] identifier[data1] = identifier[n] . identifier[transpose] ( identifier[da] [ identifier[d] [ literal[string] ]], identifier[axes] =[ literal[int] , literal[int] , literal[int] , literal[int] ])[ identifier[good] ] identifier[data0] = identifier[n] . identifier[concatenate] (( identifier[data0] , identifier[data1] ), identifier[axis] = literal[int] ) identifier[flag0] = identifier[n] . identifier[concatenate] (( identifier[flag0] , identifier[n] . identifier[transpose] ( identifier[da] [ literal[string] ], identifier[axes] =[ literal[int] , literal[int] , literal[int] , literal[int] ])[ identifier[good] ]), identifier[axis] = literal[int] ) keyword[del] identifier[da] identifier[data0] = identifier[data0] [:,:, identifier[d] [ literal[string] ],:]* identifier[n] . identifier[invert] ( identifier[flag0] [:,:, identifier[d] [ literal[string] ],:]) keyword[return] identifier[data0] . identifier[astype] ( literal[string] ), identifier[u0] . identifier[astype] ( literal[string] ), identifier[v0] . identifier[astype] ( literal[string] ), identifier[w0] . identifier[astype] ( literal[string] )
def readsegment(d, segment): """ Prepare to read segment of data """ # set requested time range based on given parameters starttime = qa.getvalue(qa.convert(qa.time(qa.quantity(d['segmenttimes'][segment, 0], 'd'), form=['ymd'], prec=9)[0], 's'))[0] stoptime = qa.getvalue(qa.convert(qa.time(qa.quantity(d['segmenttimes'][segment, 1], 'd'), form=['ymd'], prec=9)[0], 's'))[0] logger.info('Reading segment %d/%d, times %s to %s' % (segment, len(d['segmenttimes']) - 1, qa.time(qa.quantity(starttime / (24 * 3600), 'd'), form=['hms'], prec=9)[0], qa.time(qa.quantity(stoptime / (24 * 3600), 'd'), form=['hms'], prec=9)[0])) # read data into data structure ms.open(d['filename']) if len(d['spwlist']) == 1: ms.selectinit(datadescid=d['spwlist'][0]) # depends on [control=['if'], data=[]] else: ms.selectinit(datadescid=0, reset=True) # reset includes spw in iteration over time selection = {'time': [starttime, stoptime], 'uvdist': [1.0, 10000000000.0]} # selection = {'time': [starttime, stoptime], 'uvdist': [1., 1e10], 'antenna1': d['ants'], 'antenna2': d['ants']} # **this misses ants for some reason!** ms.select(items=selection) ms.selectpolarization(d['pols']) da = ms.getdata([d['datacol'], 'axis_info', 'u', 'v', 'w', 'flag', 'data_desc_id'], ifraxis=True) good = n.where(da['data_desc_id'] == d['spwlist'][0])[0] # take first spw time0 = da['axis_info']['time_axis']['MJDseconds'][good] data0 = n.transpose(da[d['datacol']], axes=[3, 2, 1, 0])[good] flag0 = n.transpose(da['flag'], axes=[3, 2, 1, 0])[good] u0 = da['u'].transpose()[good] * d['freq_orig'][0] * (1000000000.0 / 300000000.0) # uvw are in m, so divide by wavelength of first chan to set in lambda v0 = da['v'].transpose()[good] * d['freq_orig'][0] * (1000000000.0 / 300000000.0) w0 = da['w'].transpose()[good] * d['freq_orig'][0] * (1000000000.0 / 300000000.0) if len(d['spwlist']) > 1: for spw in d['spwlist'][1:]: good = n.where(da['data_desc_id'] == spw)[0] data1 = n.transpose(da[d['datacol']], axes=[3, 2, 1, 0])[good] data0 
= n.concatenate((data0, data1), axis=2) flag0 = n.concatenate((flag0, n.transpose(da['flag'], axes=[3, 2, 1, 0])[good]), axis=2) # depends on [control=['for'], data=['spw']] # depends on [control=['if'], data=[]] del da data0 = data0[:, :, d['chans'], :] * n.invert(flag0[:, :, d['chans'], :]) # flag==1 means bad data (for vla) return (data0.astype('complex64'), u0.astype('float32'), v0.astype('float32'), w0.astype('float32'))
def inspect_config(self, id):
    """Return the metadata of a single config.

    Args:
        id (string): Full ID of the config to inspect

    Returns (dict): A dictionary of metadata

    Raises:
        :py:class:`docker.errors.NotFound`
            if no config with that ID exists
    """
    endpoint = self._url('/configs/{0}', id)
    response = self._get(endpoint)
    return self._result(response, True)
def function[inspect_config, parameter[self, id]]: constant[ Retrieve config metadata Args: id (string): Full ID of the config to inspect Returns (dict): A dictionary of metadata Raises: :py:class:`docker.errors.NotFound` if no config with that ID exists ] variable[url] assign[=] call[name[self]._url, parameter[constant[/configs/{0}], name[id]]] return[call[name[self]._result, parameter[call[name[self]._get, parameter[name[url]]], constant[True]]]]
keyword[def] identifier[inspect_config] ( identifier[self] , identifier[id] ): literal[string] identifier[url] = identifier[self] . identifier[_url] ( literal[string] , identifier[id] ) keyword[return] identifier[self] . identifier[_result] ( identifier[self] . identifier[_get] ( identifier[url] ), keyword[True] )
def inspect_config(self, id): """ Retrieve config metadata Args: id (string): Full ID of the config to inspect Returns (dict): A dictionary of metadata Raises: :py:class:`docker.errors.NotFound` if no config with that ID exists """ url = self._url('/configs/{0}', id) return self._result(self._get(url), True)
def get_load_balancer(self, id):
    """Fetch a single Load Balancer object by its ID.

    Args:
        id (str): Load Balancer ID
    """
    return LoadBalancer.get_object(
        api_token=self.token,
        id=id,
    )
def function[get_load_balancer, parameter[self, id]]: constant[ Returns a Load Balancer object by its ID. Args: id (str): Load Balancer ID ] return[call[name[LoadBalancer].get_object, parameter[]]]
keyword[def] identifier[get_load_balancer] ( identifier[self] , identifier[id] ): literal[string] keyword[return] identifier[LoadBalancer] . identifier[get_object] ( identifier[api_token] = identifier[self] . identifier[token] , identifier[id] = identifier[id] )
def get_load_balancer(self, id): """ Returns a Load Balancer object by its ID. Args: id (str): Load Balancer ID """ return LoadBalancer.get_object(api_token=self.token, id=id)
def do_serve(self, repo_name):
    """Serve a local directory over http as a package index (like pypi).

    Intended for quick package exchanges.
    """
    # Bail out early (with a user-facing abort) if the repo does not exist.
    self.abort_on_nonexisting_effective_repo(repo_name, 'serve')
    self.network.get_repo(repo_name).serve()
def function[do_serve, parameter[self, repo_name]]: constant[ Serve a local directory over http as a package index (like pypi). Intended for quick package exchanges. ] call[name[self].abort_on_nonexisting_effective_repo, parameter[name[repo_name], constant[serve]]] variable[repo] assign[=] call[name[self].network.get_repo, parameter[name[repo_name]]] call[name[repo].serve, parameter[]]
keyword[def] identifier[do_serve] ( identifier[self] , identifier[repo_name] ): literal[string] identifier[self] . identifier[abort_on_nonexisting_effective_repo] ( identifier[repo_name] , literal[string] ) identifier[repo] = identifier[self] . identifier[network] . identifier[get_repo] ( identifier[repo_name] ) identifier[repo] . identifier[serve] ()
def do_serve(self, repo_name): """ Serve a local directory over http as a package index (like pypi). Intended for quick package exchanges. """ self.abort_on_nonexisting_effective_repo(repo_name, 'serve') repo = self.network.get_repo(repo_name) repo.serve()
def enhance(self):
    """Load metadata from a data service to improve naming.

    Looks up the series by its parsed name, then resolves the episode
    name(s) for the parsed season/episode numbers. Failures are recorded
    in ``self.messages`` and logged before raising.

    :raises tvrenamer.exceptions.ShowNotFound: when unable to find
        show/series name based on parsed name
    :raises tvrenamer.exceptions.EpisodeNotFound: when unable to find
        episode name(s) based on parsed data
    """
    series, error = self.api.get_series_by_name(self.series_name)
    if series is None:
        message = str(error)
        self.messages.append(message)
        LOG.info(message)
        raise exc.ShowNotFound(message)

    # Replace the parsed name with the service's canonical series name.
    self.series_name = self.api.get_series_name(series)

    self.episode_names, error = self.api.get_episode_name(
        series, self.episode_numbers, self.season_number)
    if self.episode_names is None:
        message = str(error)
        self.messages.append(message)
        LOG.info(message)
        raise exc.EpisodeNotFound(message)
def function[enhance, parameter[self]]: constant[Load metadata from a data service to improve naming. :raises tvrenamer.exceptions.ShowNotFound: when unable to find show/series name based on parsed name :raises tvrenamer.exceptions.EpisodeNotFound: when unable to find episode name(s) based on parsed data ] <ast.Tuple object at 0x7da1b083b010> assign[=] call[name[self].api.get_series_by_name, parameter[name[self].series_name]] if compare[name[series] is constant[None]] begin[:] call[name[self].messages.append, parameter[call[name[str], parameter[name[error]]]]] call[name[LOG].info, parameter[call[name[self].messages][<ast.UnaryOp object at 0x7da1b08125f0>]]] <ast.Raise object at 0x7da1b0810ee0> name[self].series_name assign[=] call[name[self].api.get_series_name, parameter[name[series]]] <ast.Tuple object at 0x7da1b0811ed0> assign[=] call[name[self].api.get_episode_name, parameter[name[series], name[self].episode_numbers, name[self].season_number]] if compare[name[self].episode_names is constant[None]] begin[:] call[name[self].messages.append, parameter[call[name[str], parameter[name[error]]]]] call[name[LOG].info, parameter[call[name[self].messages][<ast.UnaryOp object at 0x7da1b0812f20>]]] <ast.Raise object at 0x7da1b08128c0>
keyword[def] identifier[enhance] ( identifier[self] ): literal[string] identifier[series] , identifier[error] = identifier[self] . identifier[api] . identifier[get_series_by_name] ( identifier[self] . identifier[series_name] ) keyword[if] identifier[series] keyword[is] keyword[None] : identifier[self] . identifier[messages] . identifier[append] ( identifier[str] ( identifier[error] )) identifier[LOG] . identifier[info] ( identifier[self] . identifier[messages] [- literal[int] ]) keyword[raise] identifier[exc] . identifier[ShowNotFound] ( identifier[str] ( identifier[error] )) identifier[self] . identifier[series_name] = identifier[self] . identifier[api] . identifier[get_series_name] ( identifier[series] ) identifier[self] . identifier[episode_names] , identifier[error] = identifier[self] . identifier[api] . identifier[get_episode_name] ( identifier[series] , identifier[self] . identifier[episode_numbers] , identifier[self] . identifier[season_number] ) keyword[if] identifier[self] . identifier[episode_names] keyword[is] keyword[None] : identifier[self] . identifier[messages] . identifier[append] ( identifier[str] ( identifier[error] )) identifier[LOG] . identifier[info] ( identifier[self] . identifier[messages] [- literal[int] ]) keyword[raise] identifier[exc] . identifier[EpisodeNotFound] ( identifier[str] ( identifier[error] ))
def enhance(self): """Load metadata from a data service to improve naming. :raises tvrenamer.exceptions.ShowNotFound: when unable to find show/series name based on parsed name :raises tvrenamer.exceptions.EpisodeNotFound: when unable to find episode name(s) based on parsed data """ (series, error) = self.api.get_series_by_name(self.series_name) if series is None: self.messages.append(str(error)) LOG.info(self.messages[-1]) raise exc.ShowNotFound(str(error)) # depends on [control=['if'], data=[]] self.series_name = self.api.get_series_name(series) (self.episode_names, error) = self.api.get_episode_name(series, self.episode_numbers, self.season_number) if self.episode_names is None: self.messages.append(str(error)) LOG.info(self.messages[-1]) raise exc.EpisodeNotFound(str(error)) # depends on [control=['if'], data=[]]
def solve_value(self, value, resource):
    """Apply a resource's filters to ``value`` and return the raw result.

    No coercion is done: the first filter of ``resource`` is applied to
    ``value``, and each following filter is applied to the previous
    filter's result. A ``None`` value (initial or produced by any filter)
    short-circuits the chain and is returned as-is.

    Arguments
    ---------
    value : ?
        A value to solve in combination with the given resource.
    resource : dataql.resources.Resource
        An instance of a subclass of ``Resource`` whose filters are
        applied to the value.

    Returns
    -------
    The result of all filters applied in sequence, or ``None``.

    Example
    -------

    >>> from dataql.solvers.registry import Registry
    >>> registry = Registry()
    >>> from datetime import date
    >>> registry.register(date, allow_class=True)
    >>> registry.register(str)
    >>> class MySolver(Solver):
    ...     def coerce(self, value, resource): return value
    >>> solver = MySolver(registry)
    >>> from dataql.resources import Filter, NamedArg, PosArg, SliceFilter
    >>> field = Field(None,
    ...     filters=[
    ...         Filter(name='fromtimestamp', args=[PosArg(1433109600)]),
    ...         Filter(name='replace', args=[NamedArg('year', '=', 2014)]),
    ...         Filter(name='strftime', args=[PosArg('%F')]),
    ...         Filter(name='replace', args=[PosArg('2014'), PosArg('2015')]),
    ...     ]
    ... )
    >>> solver.solve_value(date, field)
    '2015-06-01'
    >>> solver.solve_value(None, field)

    >>> d = {'foo': {'date': date(2015, 6, 1)}, 'bar': {'date': None}, 'baz': [{'date': None}]}
    >>> registry.register(dict)
    >>> solver.solve_value(d, Field(None, filters=[
    ...     Filter(name='foo'),
    ...     Filter(name='date'),
    ...     Filter(name='strftime', args=[PosArg('%F')]),
    ... ]))
    '2015-06-01'
    >>> solver.solve_value(d, Field(None, filters=[
    ...     Filter(name='bar'),
    ...     Filter(name='date'),
    ...     Filter(name='strftime', args=[PosArg('%F')]),
    ... ]))
    >>> solver.solve_value(d, Field(None, filters=[
    ...     Filter(name='baz'),
    ...     SliceFilter(0),
    ...     Filter(name='date'),
    ...     Filter(name='strftime', args=[PosArg('%F')]),
    ... ]))

    # Example of how to raise a ``CannotSolve`` exception.
    >>> from dataql.solvers.exceptions import CannotSolve
    >>> raise CannotSolve(solver, Field('fromtimestamp'), date)  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    dataql...CannotSolve: Solver `<MySolver>` was not able to solve...`<Field[fromtimestamp]>`.

    """
    # The given value is the starting point; None short-circuits the chain.
    result = value
    if result is None:
        return result

    # Feed each filter the previous filter's output.
    for current_filter in resource.filters:
        result = self.registry.solve_filter(result, current_filter)
        if result is None:
            break

    return result
def function[solve_value, parameter[self, value, resource]]: constant[Solve a resource with a value, without coercing. Arguments --------- value : ? A value to solve in combination with the given resource. The first filter of the resource will be applied on this value (next filters on the result of the previous filter). resource : dataql.resources.Resource An instance of a subclass of ``Resource`` to solve with the given value. Returns ------- The result of all filters applied on the value for the first filter, and result of the previous filter for next filters. Example ------- >>> from dataql.solvers.registry import Registry >>> registry = Registry() >>> from datetime import date >>> registry.register(date, allow_class=True) >>> registry.register(str) >>> class MySolver(Solver): ... def coerce(self, value, resource): return value >>> solver = MySolver(registry) >>> from dataql.resources import Filter, NamedArg, PosArg, SliceFilter >>> field = Field(None, ... filters=[ ... Filter(name='fromtimestamp', args=[PosArg(1433109600)]), ... Filter(name='replace', args=[NamedArg('year', '=', 2014)]), ... Filter(name='strftime', args=[PosArg('%F')]), ... Filter(name='replace', args=[PosArg('2014'), PosArg('2015')]), ... ] ... ) >>> solver.solve_value(date, field) '2015-06-01' >>> solver.solve_value(None, field) >>> d = {'foo': {'date': date(2015, 6, 1)}, 'bar': {'date': None}, 'baz': [{'date': None}]} >>> registry.register(dict) >>> solver.solve_value(d, Field(None, filters=[ ... Filter(name='foo'), ... Filter(name='date'), ... Filter(name='strftime', args=[PosArg('%F')]), ... ])) '2015-06-01' >>> solver.solve_value(d, Field(None, filters=[ ... Filter(name='bar'), ... Filter(name='date'), ... Filter(name='strftime', args=[PosArg('%F')]), ... ])) >>> solver.solve_value(d, Field(None, filters=[ ... Filter(name='baz'), ... SliceFilter(0), ... Filter(name='date'), ... Filter(name='strftime', args=[PosArg('%F')]), ... ])) # Example of how to raise a ``CannotSolve`` exception. 
>>> from dataql.solvers.exceptions import CannotSolve >>> raise CannotSolve(solver, Field('fromtimestamp'), date) # doctest: +ELLIPSIS Traceback (most recent call last): dataql...CannotSolve: Solver `<MySolver>` was not able to solve...`<Field[fromtimestamp]>`. ] variable[result] assign[=] name[value] if compare[name[result] is_not constant[None]] begin[:] for taget[name[filter_]] in starred[name[resource].filters] begin[:] variable[result] assign[=] call[name[self].registry.solve_filter, parameter[name[result], name[filter_]]] if compare[name[result] is constant[None]] begin[:] break return[name[result]]
keyword[def] identifier[solve_value] ( identifier[self] , identifier[value] , identifier[resource] ): literal[string] identifier[result] = identifier[value] keyword[if] identifier[result] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[filter_] keyword[in] identifier[resource] . identifier[filters] : identifier[result] = identifier[self] . identifier[registry] . identifier[solve_filter] ( identifier[result] , identifier[filter_] ) keyword[if] identifier[result] keyword[is] keyword[None] : keyword[break] keyword[return] identifier[result]
def solve_value(self, value, resource): """Solve a resource with a value, without coercing. Arguments --------- value : ? A value to solve in combination with the given resource. The first filter of the resource will be applied on this value (next filters on the result of the previous filter). resource : dataql.resources.Resource An instance of a subclass of ``Resource`` to solve with the given value. Returns ------- The result of all filters applied on the value for the first filter, and result of the previous filter for next filters. Example ------- >>> from dataql.solvers.registry import Registry >>> registry = Registry() >>> from datetime import date >>> registry.register(date, allow_class=True) >>> registry.register(str) >>> class MySolver(Solver): ... def coerce(self, value, resource): return value >>> solver = MySolver(registry) >>> from dataql.resources import Filter, NamedArg, PosArg, SliceFilter >>> field = Field(None, ... filters=[ ... Filter(name='fromtimestamp', args=[PosArg(1433109600)]), ... Filter(name='replace', args=[NamedArg('year', '=', 2014)]), ... Filter(name='strftime', args=[PosArg('%F')]), ... Filter(name='replace', args=[PosArg('2014'), PosArg('2015')]), ... ] ... ) >>> solver.solve_value(date, field) '2015-06-01' >>> solver.solve_value(None, field) >>> d = {'foo': {'date': date(2015, 6, 1)}, 'bar': {'date': None}, 'baz': [{'date': None}]} >>> registry.register(dict) >>> solver.solve_value(d, Field(None, filters=[ ... Filter(name='foo'), ... Filter(name='date'), ... Filter(name='strftime', args=[PosArg('%F')]), ... ])) '2015-06-01' >>> solver.solve_value(d, Field(None, filters=[ ... Filter(name='bar'), ... Filter(name='date'), ... Filter(name='strftime', args=[PosArg('%F')]), ... ])) >>> solver.solve_value(d, Field(None, filters=[ ... Filter(name='baz'), ... SliceFilter(0), ... Filter(name='date'), ... Filter(name='strftime', args=[PosArg('%F')]), ... ])) # Example of how to raise a ``CannotSolve`` exception. 
>>> from dataql.solvers.exceptions import CannotSolve >>> raise CannotSolve(solver, Field('fromtimestamp'), date) # doctest: +ELLIPSIS Traceback (most recent call last): dataql...CannotSolve: Solver `<MySolver>` was not able to solve...`<Field[fromtimestamp]>`. """ # The given value is the starting point on which we apply the first filter. result = value # Apply filters one by one on the previous result. if result is not None: for filter_ in resource.filters: result = self.registry.solve_filter(result, filter_) if result is None: break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filter_']] # depends on [control=['if'], data=['result']] return result
def fetchUser(self, username, rawResults=False):
    """Return a single user.

    If rawResults is true, the result will be a list of python dicts
    instead of User objects.
    """
    url = "%s/%s" % (self.URL, username)
    response = self.connection.session.get(url)
    # Anything other than a 200 is treated as "user not found".
    if response.status_code != 200:
        raise KeyError("Unable to get user: %s" % username)

    data = response.json()
    if rawResults:
        return data["result"]
    return User(self, data)
def function[fetchUser, parameter[self, username, rawResults]]: constant[Returns a single user. if rawResults, the result will be a list of python dicts instead of User objects] variable[url] assign[=] binary_operation[constant[%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0f0f730>, <ast.Name object at 0x7da1b0f0e2c0>]]] variable[r] assign[=] call[name[self].connection.session.get, parameter[name[url]]] if compare[name[r].status_code equal[==] constant[200]] begin[:] variable[data] assign[=] call[name[r].json, parameter[]] if name[rawResults] begin[:] return[call[name[data]][constant[result]]]
keyword[def] identifier[fetchUser] ( identifier[self] , identifier[username] , identifier[rawResults] = keyword[False] ): literal[string] identifier[url] = literal[string] %( identifier[self] . identifier[URL] , identifier[username] ) identifier[r] = identifier[self] . identifier[connection] . identifier[session] . identifier[get] ( identifier[url] ) keyword[if] identifier[r] . identifier[status_code] == literal[int] : identifier[data] = identifier[r] . identifier[json] () keyword[if] identifier[rawResults] : keyword[return] identifier[data] [ literal[string] ] keyword[else] : identifier[u] = identifier[User] ( identifier[self] , identifier[data] ) keyword[return] identifier[u] keyword[else] : keyword[raise] identifier[KeyError] ( literal[string] % identifier[username] )
def fetchUser(self, username, rawResults=False): """Returns a single user. if rawResults, the result will be a list of python dicts instead of User objects""" url = '%s/%s' % (self.URL, username) r = self.connection.session.get(url) if r.status_code == 200: data = r.json() if rawResults: return data['result'] # depends on [control=['if'], data=[]] else: u = User(self, data) return u # depends on [control=['if'], data=[]] else: raise KeyError('Unable to get user: %s' % username)
def set_data(self, data):
    """Push *data* into the editor, skipping the update if it is unchanged."""
    current = self.editor.model.get_data()
    if data != current:
        self.editor.set_data(data)
        self.editor.adjust_columns()
def function[set_data, parameter[self, data]]: constant[Set data.] if compare[name[data] not_equal[!=] call[name[self].editor.model.get_data, parameter[]]] begin[:] call[name[self].editor.set_data, parameter[name[data]]] call[name[self].editor.adjust_columns, parameter[]]
keyword[def] identifier[set_data] ( identifier[self] , identifier[data] ): literal[string] keyword[if] identifier[data] != identifier[self] . identifier[editor] . identifier[model] . identifier[get_data] (): identifier[self] . identifier[editor] . identifier[set_data] ( identifier[data] ) identifier[self] . identifier[editor] . identifier[adjust_columns] ()
def set_data(self, data): """Set data.""" if data != self.editor.model.get_data(): self.editor.set_data(data) self.editor.adjust_columns() # depends on [control=['if'], data=['data']]
async def handle_action(self, action: str, request_id: str, **kwargs):
    """Run the requested action and reply with its result.

    Permission checks and handler lookup happen first; any exception
    raised along the way (including by the handler itself) is routed to
    ``handle_exception`` rather than propagating.
    """
    try:
        await self.check_permissions(action, **kwargs)

        if action not in self.available_actions:
            raise MethodNotAllowed(method=action)

        handler = getattr(self, self.available_actions[action])
        reply = partial(self.reply, action=action, request_id=request_id)

        # the @action decorator will wrap non-async action into async ones.
        response = await handler(
            request_id=request_id,
            action=action,
            **kwargs
        )

        # Handlers that return (data, status) get an automatic reply;
        # other return shapes are assumed to have replied themselves.
        if isinstance(response, tuple):
            data, status = response
            await reply(data=data, status=status)
    except Exception as exc:
        await self.handle_exception(
            exc,
            action=action,
            request_id=request_id
        )
<ast.AsyncFunctionDef object at 0x7da204621ae0>
keyword[async] keyword[def] identifier[handle_action] ( identifier[self] , identifier[action] : identifier[str] , identifier[request_id] : identifier[str] ,** identifier[kwargs] ): literal[string] keyword[try] : keyword[await] identifier[self] . identifier[check_permissions] ( identifier[action] ,** identifier[kwargs] ) keyword[if] identifier[action] keyword[not] keyword[in] identifier[self] . identifier[available_actions] : keyword[raise] identifier[MethodNotAllowed] ( identifier[method] = identifier[action] ) identifier[method_name] = identifier[self] . identifier[available_actions] [ identifier[action] ] identifier[method] = identifier[getattr] ( identifier[self] , identifier[method_name] ) identifier[reply] = identifier[partial] ( identifier[self] . identifier[reply] , identifier[action] = identifier[action] , identifier[request_id] = identifier[request_id] ) identifier[response] = keyword[await] identifier[method] ( identifier[request_id] = identifier[request_id] , identifier[action] = identifier[action] , ** identifier[kwargs] ) keyword[if] identifier[isinstance] ( identifier[response] , identifier[tuple] ): identifier[data] , identifier[status] = identifier[response] keyword[await] identifier[reply] ( identifier[data] = identifier[data] , identifier[status] = identifier[status] ) keyword[except] identifier[Exception] keyword[as] identifier[exc] : keyword[await] identifier[self] . identifier[handle_exception] ( identifier[exc] , identifier[action] = identifier[action] , identifier[request_id] = identifier[request_id] )
async def handle_action(self, action: str, request_id: str, **kwargs): """ run the action. """ try: await self.check_permissions(action, **kwargs) if action not in self.available_actions: raise MethodNotAllowed(method=action) # depends on [control=['if'], data=['action']] method_name = self.available_actions[action] method = getattr(self, method_name) reply = partial(self.reply, action=action, request_id=request_id) # the @action decorator will wrap non-async action into async ones. response = await method(request_id=request_id, action=action, **kwargs) if isinstance(response, tuple): (data, status) = response await reply(data=data, status=status) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except Exception as exc: await self.handle_exception(exc, action=action, request_id=request_id) # depends on [control=['except'], data=['exc']]
def call_once(func):
    """Decorate a function to only allow it to be called once.

    Note that it doesn't make sense to only call a function once if it takes
    arguments (use @functools.lru_cache for that sort of thing), so this only
    works on callables that take no args.

    Args:
        func: A callable that declares no parameters.

    Returns:
        A wrapper that invokes ``func`` on the first call, caches the result
        as ``wrapper.return_value``, and returns that cached value on every
        subsequent call.

    Raises:
        ValueError: If ``func`` declares any parameters.
    """
    # inspect.getargspec() was deprecated and removed in Python 3.11;
    # getfullargspec() is the drop-in replacement (``keywords`` became
    # ``varkw``, and keyword-only parameters are reported separately in
    # ``kwonlyargs`` instead of raising).
    argspec = inspect.getfullargspec(func)
    if (argspec.args or argspec.varargs or argspec.varkw
            or argspec.kwonlyargs):
        raise ValueError('Can only decorate functions with no args', func, argspec)

    @functools.wraps(func)
    def _wrapper():
        # If we haven't been called yet, actually invoke func and save the result.
        if not _wrapper.HasRun():
            _wrapper.MarkAsRun()
            _wrapper.return_value = func()
        return _wrapper.return_value

    _wrapper.has_run = False
    _wrapper.HasRun = lambda: _wrapper.has_run
    _wrapper.MarkAsRun = lambda: setattr(_wrapper, 'has_run', True)
    return _wrapper
def function[call_once, parameter[func]]: constant[Decorate a function to only allow it to be called once. Note that it doesn't make sense to only call a function once if it takes arguments (use @functools.lru_cache for that sort of thing), so this only works on callables that take no args. ] variable[argspec] assign[=] call[name[inspect].getargspec, parameter[name[func]]] if <ast.BoolOp object at 0x7da1b18a0c10> begin[:] <ast.Raise object at 0x7da1b18a06a0> def function[_wrapper, parameter[]]: if <ast.UnaryOp object at 0x7da1b18a2380> begin[:] call[name[_wrapper].MarkAsRun, parameter[]] name[_wrapper].return_value assign[=] call[name[func], parameter[]] return[name[_wrapper].return_value] name[_wrapper].has_run assign[=] constant[False] name[_wrapper].HasRun assign[=] <ast.Lambda object at 0x7da1b18a3040> name[_wrapper].MarkAsRun assign[=] <ast.Lambda object at 0x7da1b18c2a70> return[name[_wrapper]]
keyword[def] identifier[call_once] ( identifier[func] ): literal[string] identifier[argspec] = identifier[inspect] . identifier[getargspec] ( identifier[func] ) keyword[if] identifier[argspec] . identifier[args] keyword[or] identifier[argspec] . identifier[varargs] keyword[or] identifier[argspec] . identifier[keywords] : keyword[raise] identifier[ValueError] ( literal[string] , identifier[func] , identifier[argspec] ) @ identifier[functools] . identifier[wraps] ( identifier[func] ) keyword[def] identifier[_wrapper] (): keyword[if] keyword[not] identifier[_wrapper] . identifier[HasRun] (): identifier[_wrapper] . identifier[MarkAsRun] () identifier[_wrapper] . identifier[return_value] = identifier[func] () keyword[return] identifier[_wrapper] . identifier[return_value] identifier[_wrapper] . identifier[has_run] = keyword[False] identifier[_wrapper] . identifier[HasRun] = keyword[lambda] : identifier[_wrapper] . identifier[has_run] identifier[_wrapper] . identifier[MarkAsRun] = keyword[lambda] : identifier[setattr] ( identifier[_wrapper] , literal[string] , keyword[True] ) keyword[return] identifier[_wrapper]
def call_once(func): """Decorate a function to only allow it to be called once. Note that it doesn't make sense to only call a function once if it takes arguments (use @functools.lru_cache for that sort of thing), so this only works on callables that take no args. """ argspec = inspect.getargspec(func) if argspec.args or argspec.varargs or argspec.keywords: raise ValueError('Can only decorate functions with no args', func, argspec) # depends on [control=['if'], data=[]] @functools.wraps(func) def _wrapper(): # If we haven't been called yet, actually invoke func and save the result. if not _wrapper.HasRun(): _wrapper.MarkAsRun() _wrapper.return_value = func() # depends on [control=['if'], data=[]] return _wrapper.return_value _wrapper.has_run = False _wrapper.HasRun = lambda : _wrapper.has_run _wrapper.MarkAsRun = lambda : setattr(_wrapper, 'has_run', True) return _wrapper
def _ParseTriggerEndTime(self, parser_mediator, trigger):
    """Parses the end time from a trigger.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        trigger (job_trigger): a trigger.

    Returns:
        dfdatetime.DateTimeValues: last run date and time or None if not
            available.
    """
    end_date = trigger.end_date
    time_elements_tuple = (
        end_date.year, end_date.month, end_date.day_of_month, 0, 0, 0)

    # An all-zero tuple means the trigger carries no end date.
    if time_elements_tuple == (0, 0, 0, 0, 0, 0):
        return None

    try:
        date_time = dfdatetime_time_elements.TimeElements(
            time_elements_tuple=time_elements_tuple)
    except ValueError:
        parser_mediator.ProduceExtractionWarning(
            'invalid trigger end time: {0!s}'.format(time_elements_tuple))
        return None

    date_time.is_local_time = True
    # TODO: add functionality to dfdatetime to control precision.
    date_time._precision = dfdatetime_definitions.PRECISION_1_DAY  # pylint: disable=protected-access

    return date_time
def function[_ParseTriggerEndTime, parameter[self, parser_mediator, trigger]]: constant[Parses the end time from a trigger. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. trigger (job_trigger): a trigger. Returns: dfdatetime.DateTimeValues: last run date and time or None if not available. ] variable[time_elements_tuple] assign[=] tuple[[<ast.Attribute object at 0x7da20c7c83d0>, <ast.Attribute object at 0x7da20c7c8460>, <ast.Attribute object at 0x7da20c7ca230>, <ast.Constant object at 0x7da20c7c91b0>, <ast.Constant object at 0x7da20c7caad0>, <ast.Constant object at 0x7da20c7cac20>]] variable[date_time] assign[=] constant[None] if compare[name[time_elements_tuple] not_equal[!=] tuple[[<ast.Constant object at 0x7da20c7cb070>, <ast.Constant object at 0x7da20c7cb730>, <ast.Constant object at 0x7da20c7c85b0>, <ast.Constant object at 0x7da20c7c9db0>, <ast.Constant object at 0x7da20c7c98a0>, <ast.Constant object at 0x7da20c7ca170>]]] begin[:] <ast.Try object at 0x7da20c7c8b20> return[name[date_time]]
keyword[def] identifier[_ParseTriggerEndTime] ( identifier[self] , identifier[parser_mediator] , identifier[trigger] ): literal[string] identifier[time_elements_tuple] =( identifier[trigger] . identifier[end_date] . identifier[year] , identifier[trigger] . identifier[end_date] . identifier[month] , identifier[trigger] . identifier[end_date] . identifier[day_of_month] , literal[int] , literal[int] , literal[int] ) identifier[date_time] = keyword[None] keyword[if] identifier[time_elements_tuple] !=( literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ): keyword[try] : identifier[date_time] = identifier[dfdatetime_time_elements] . identifier[TimeElements] ( identifier[time_elements_tuple] = identifier[time_elements_tuple] ) identifier[date_time] . identifier[is_local_time] = keyword[True] identifier[date_time] . identifier[_precision] = identifier[dfdatetime_definitions] . identifier[PRECISION_1_DAY] keyword[except] identifier[ValueError] : identifier[parser_mediator] . identifier[ProduceExtractionWarning] ( literal[string] . identifier[format] ( identifier[time_elements_tuple] )) keyword[return] identifier[date_time]
def _ParseTriggerEndTime(self, parser_mediator, trigger): """Parses the end time from a trigger. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. trigger (job_trigger): a trigger. Returns: dfdatetime.DateTimeValues: last run date and time or None if not available. """ time_elements_tuple = (trigger.end_date.year, trigger.end_date.month, trigger.end_date.day_of_month, 0, 0, 0) date_time = None if time_elements_tuple != (0, 0, 0, 0, 0, 0): try: date_time = dfdatetime_time_elements.TimeElements(time_elements_tuple=time_elements_tuple) date_time.is_local_time = True # TODO: add functionality to dfdatetime to control precision. date_time._precision = dfdatetime_definitions.PRECISION_1_DAY # pylint: disable=protected-access # depends on [control=['try'], data=[]] except ValueError: parser_mediator.ProduceExtractionWarning('invalid trigger end time: {0!s}'.format(time_elements_tuple)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['time_elements_tuple']] return date_time
def dict_to_source(dict): ''' Transform a dict with key 'citation' into a :class:`Source`. If the argument passed is already a :class:`Source`, this method just returns the argument. ''' if isinstance(dict, Source): return dict return Source( dict['citation'], dict.get('markup') )
def function[dict_to_source, parameter[dict]]: constant[ Transform a dict with key 'citation' into a :class:`Source`. If the argument passed is already a :class:`Source`, this method just returns the argument. ] if call[name[isinstance], parameter[name[dict], name[Source]]] begin[:] return[name[dict]] return[call[name[Source], parameter[call[name[dict]][constant[citation]], call[name[dict].get, parameter[constant[markup]]]]]]
keyword[def] identifier[dict_to_source] ( identifier[dict] ): literal[string] keyword[if] identifier[isinstance] ( identifier[dict] , identifier[Source] ): keyword[return] identifier[dict] keyword[return] identifier[Source] ( identifier[dict] [ literal[string] ], identifier[dict] . identifier[get] ( literal[string] ) )
def dict_to_source(dict): """ Transform a dict with key 'citation' into a :class:`Source`. If the argument passed is already a :class:`Source`, this method just returns the argument. """ if isinstance(dict, Source): return dict # depends on [control=['if'], data=[]] return Source(dict['citation'], dict.get('markup'))
def groupby(df, *, group_cols: Union[str, List[str]], aggregations: Dict[str, Union[str, List[str]]]): """ Aggregate values by groups. --- ### Parameters *mandatory :* - `group_cols` (*list*): list of columns used to group data - `aggregations` (*dict*): dictionnary of values columns to group as keys and aggregation function to use as values (See the [list of aggregation functions]( https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#aggregation)) --- ### Example **Input** | ENTITY | YEAR | VALUE_1 | VALUE_2 | |:------:|:----:|:-------:|:-------:| | A | 2017 | 10 | 3 | | A | 2017 | 20 | 1 | | A | 2018 | 10 | 5 | | A | 2018 | 30 | 4 | | B | 2017 | 60 | 4 | | B | 2017 | 40 | 3 | | B | 2018 | 50 | 7 | | B | 2018 | 60 | 6 | ```cson groupby: group_cols: ['ENTITY', 'YEAR'] aggregations: 'VALUE_1': 'sum', 'VALUE_2': 'mean' ``` **Output** | ENTITY | YEAR | VALUE_1 | VALUE_2 | |:------:|:----:|:-------:|:-------:| | A | 2017 | 30 | 2.0 | | A | 2018 | 40 | 4.5 | | B | 2017 | 100 | 3.5 | | B | 2018 | 110 | 6.5 | """ df = df.groupby(group_cols, as_index=False).agg(aggregations) # When several aggregations are performed on the same column, pandas return # a multi-indexed dataframe, so we need to flatten the columns index to get # back to a unique level header if df.columns.nlevels == 2: level_0 = df.columns.get_level_values(0) level_1 = df.columns.get_level_values(1) new_columns = [(f'{x}_{y}' if x else y) for (x, y) in zip(level_1, level_0)] df.columns = new_columns return df
def function[groupby, parameter[df]]: constant[ Aggregate values by groups. --- ### Parameters *mandatory :* - `group_cols` (*list*): list of columns used to group data - `aggregations` (*dict*): dictionnary of values columns to group as keys and aggregation function to use as values (See the [list of aggregation functions]( https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#aggregation)) --- ### Example **Input** | ENTITY | YEAR | VALUE_1 | VALUE_2 | |:------:|:----:|:-------:|:-------:| | A | 2017 | 10 | 3 | | A | 2017 | 20 | 1 | | A | 2018 | 10 | 5 | | A | 2018 | 30 | 4 | | B | 2017 | 60 | 4 | | B | 2017 | 40 | 3 | | B | 2018 | 50 | 7 | | B | 2018 | 60 | 6 | ```cson groupby: group_cols: ['ENTITY', 'YEAR'] aggregations: 'VALUE_1': 'sum', 'VALUE_2': 'mean' ``` **Output** | ENTITY | YEAR | VALUE_1 | VALUE_2 | |:------:|:----:|:-------:|:-------:| | A | 2017 | 30 | 2.0 | | A | 2018 | 40 | 4.5 | | B | 2017 | 100 | 3.5 | | B | 2018 | 110 | 6.5 | ] variable[df] assign[=] call[call[name[df].groupby, parameter[name[group_cols]]].agg, parameter[name[aggregations]]] if compare[name[df].columns.nlevels equal[==] constant[2]] begin[:] variable[level_0] assign[=] call[name[df].columns.get_level_values, parameter[constant[0]]] variable[level_1] assign[=] call[name[df].columns.get_level_values, parameter[constant[1]]] variable[new_columns] assign[=] <ast.ListComp object at 0x7da1b0399930> name[df].columns assign[=] name[new_columns] return[name[df]]
keyword[def] identifier[groupby] ( identifier[df] ,*, identifier[group_cols] : identifier[Union] [ identifier[str] , identifier[List] [ identifier[str] ]], identifier[aggregations] : identifier[Dict] [ identifier[str] , identifier[Union] [ identifier[str] , identifier[List] [ identifier[str] ]]]): literal[string] identifier[df] = identifier[df] . identifier[groupby] ( identifier[group_cols] , identifier[as_index] = keyword[False] ). identifier[agg] ( identifier[aggregations] ) keyword[if] identifier[df] . identifier[columns] . identifier[nlevels] == literal[int] : identifier[level_0] = identifier[df] . identifier[columns] . identifier[get_level_values] ( literal[int] ) identifier[level_1] = identifier[df] . identifier[columns] . identifier[get_level_values] ( literal[int] ) identifier[new_columns] =[( literal[string] keyword[if] identifier[x] keyword[else] identifier[y] ) keyword[for] ( identifier[x] , identifier[y] ) keyword[in] identifier[zip] ( identifier[level_1] , identifier[level_0] )] identifier[df] . identifier[columns] = identifier[new_columns] keyword[return] identifier[df]
def groupby(df, *, group_cols: Union[str, List[str]], aggregations: Dict[str, Union[str, List[str]]]): """ Aggregate values by groups. --- ### Parameters *mandatory :* - `group_cols` (*list*): list of columns used to group data - `aggregations` (*dict*): dictionnary of values columns to group as keys and aggregation function to use as values (See the [list of aggregation functions]( https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#aggregation)) --- ### Example **Input** | ENTITY | YEAR | VALUE_1 | VALUE_2 | |:------:|:----:|:-------:|:-------:| | A | 2017 | 10 | 3 | | A | 2017 | 20 | 1 | | A | 2018 | 10 | 5 | | A | 2018 | 30 | 4 | | B | 2017 | 60 | 4 | | B | 2017 | 40 | 3 | | B | 2018 | 50 | 7 | | B | 2018 | 60 | 6 | ```cson groupby: group_cols: ['ENTITY', 'YEAR'] aggregations: 'VALUE_1': 'sum', 'VALUE_2': 'mean' ``` **Output** | ENTITY | YEAR | VALUE_1 | VALUE_2 | |:------:|:----:|:-------:|:-------:| | A | 2017 | 30 | 2.0 | | A | 2018 | 40 | 4.5 | | B | 2017 | 100 | 3.5 | | B | 2018 | 110 | 6.5 | """ df = df.groupby(group_cols, as_index=False).agg(aggregations) # When several aggregations are performed on the same column, pandas return # a multi-indexed dataframe, so we need to flatten the columns index to get # back to a unique level header if df.columns.nlevels == 2: level_0 = df.columns.get_level_values(0) level_1 = df.columns.get_level_values(1) new_columns = [f'{x}_{y}' if x else y for (x, y) in zip(level_1, level_0)] df.columns = new_columns # depends on [control=['if'], data=[]] return df
def colorize(arr, colors, values): """Colorize a monochromatic array *arr*, based *colors* given for *values*. Interpolation is used. *values* must be in ascending order. """ hcolors = np.array([rgb2hcl(*i[:3]) for i in colors]) # unwrap colormap in hcl space hcolors[:, 0] = np.rad2deg(np.unwrap(np.deg2rad(np.array(hcolors)[:, 0]))) channels = [np.interp(arr, np.array(values), np.array(hcolors)[:, i]) for i in range(3)] channels = list(hcl2rgb(*channels)) rest = [np.interp(arr, np.array(values), np.array(colors)[:, i + 3]) for i in range(np.array(colors).shape[1] - 3)] channels.extend(rest) try: return [np.ma.array(channel, mask=arr.mask) for channel in channels] except AttributeError: return channels
def function[colorize, parameter[arr, colors, values]]: constant[Colorize a monochromatic array *arr*, based *colors* given for *values*. Interpolation is used. *values* must be in ascending order. ] variable[hcolors] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b052a950>]] call[name[hcolors]][tuple[[<ast.Slice object at 0x7da1b052a980>, <ast.Constant object at 0x7da1b052b250>]]] assign[=] call[name[np].rad2deg, parameter[call[name[np].unwrap, parameter[call[name[np].deg2rad, parameter[call[call[name[np].array, parameter[name[hcolors]]]][tuple[[<ast.Slice object at 0x7da1b052b700>, <ast.Constant object at 0x7da1b0528e20>]]]]]]]]] variable[channels] assign[=] <ast.ListComp object at 0x7da1b052a0b0> variable[channels] assign[=] call[name[list], parameter[call[name[hcl2rgb], parameter[<ast.Starred object at 0x7da1b0529f90>]]]] variable[rest] assign[=] <ast.ListComp object at 0x7da1b052b040> call[name[channels].extend, parameter[name[rest]]] <ast.Try object at 0x7da1b0529600>
keyword[def] identifier[colorize] ( identifier[arr] , identifier[colors] , identifier[values] ): literal[string] identifier[hcolors] = identifier[np] . identifier[array] ([ identifier[rgb2hcl] (* identifier[i] [: literal[int] ]) keyword[for] identifier[i] keyword[in] identifier[colors] ]) identifier[hcolors] [:, literal[int] ]= identifier[np] . identifier[rad2deg] ( identifier[np] . identifier[unwrap] ( identifier[np] . identifier[deg2rad] ( identifier[np] . identifier[array] ( identifier[hcolors] )[:, literal[int] ]))) identifier[channels] =[ identifier[np] . identifier[interp] ( identifier[arr] , identifier[np] . identifier[array] ( identifier[values] ), identifier[np] . identifier[array] ( identifier[hcolors] )[:, identifier[i] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] )] identifier[channels] = identifier[list] ( identifier[hcl2rgb] (* identifier[channels] )) identifier[rest] =[ identifier[np] . identifier[interp] ( identifier[arr] , identifier[np] . identifier[array] ( identifier[values] ), identifier[np] . identifier[array] ( identifier[colors] )[:, identifier[i] + literal[int] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[np] . identifier[array] ( identifier[colors] ). identifier[shape] [ literal[int] ]- literal[int] )] identifier[channels] . identifier[extend] ( identifier[rest] ) keyword[try] : keyword[return] [ identifier[np] . identifier[ma] . identifier[array] ( identifier[channel] , identifier[mask] = identifier[arr] . identifier[mask] ) keyword[for] identifier[channel] keyword[in] identifier[channels] ] keyword[except] identifier[AttributeError] : keyword[return] identifier[channels]
def colorize(arr, colors, values): """Colorize a monochromatic array *arr*, based *colors* given for *values*. Interpolation is used. *values* must be in ascending order. """ hcolors = np.array([rgb2hcl(*i[:3]) for i in colors]) # unwrap colormap in hcl space hcolors[:, 0] = np.rad2deg(np.unwrap(np.deg2rad(np.array(hcolors)[:, 0]))) channels = [np.interp(arr, np.array(values), np.array(hcolors)[:, i]) for i in range(3)] channels = list(hcl2rgb(*channels)) rest = [np.interp(arr, np.array(values), np.array(colors)[:, i + 3]) for i in range(np.array(colors).shape[1] - 3)] channels.extend(rest) try: return [np.ma.array(channel, mask=arr.mask) for channel in channels] # depends on [control=['try'], data=[]] except AttributeError: return channels # depends on [control=['except'], data=[]]
def convert_sequence_to_motor_units(cycles, unit_converter): """ Converts a move sequence to motor units. Converts a move sequence to motor units using the provied converter. Parameters ---------- cycles : iterable of dicts The iterable of cycles of motion to do one after another. See ``compile_sequence`` for format. unit_converter : UnitConverter, optional ``GeminiMotorDrive.utilities.UnitConverter`` to use to convert the units in `cycles` to motor units. Returns ------- motor_cycles : list of dicts A deep copy of `cycles` with all units converted to motor units. See Also -------- compile_sequence GeminiMotorDrive.utilities.UnitConverter """ # Make a deep copy of cycles so that the conversions don't damage # the original one. cv_cycles = copy.deepcopy(cycles) # Go through each cycle and do the conversions. for cycle in cv_cycles: # Go through each of the moves and do the conversions. for move in cycle['moves']: move['A'] = unit_converter.to_motor_velocity_acceleration( \ move['A']) move['AD'] = \ unit_converter.to_motor_velocity_acceleration( \ move['AD']) move['V'] = unit_converter.to_motor_velocity_acceleration( \ move['V']) move['D'] = int(unit_converter.to_motor_distance(move['D'])) # Now return the converted move sequence. return cv_cycles
def function[convert_sequence_to_motor_units, parameter[cycles, unit_converter]]: constant[ Converts a move sequence to motor units. Converts a move sequence to motor units using the provied converter. Parameters ---------- cycles : iterable of dicts The iterable of cycles of motion to do one after another. See ``compile_sequence`` for format. unit_converter : UnitConverter, optional ``GeminiMotorDrive.utilities.UnitConverter`` to use to convert the units in `cycles` to motor units. Returns ------- motor_cycles : list of dicts A deep copy of `cycles` with all units converted to motor units. See Also -------- compile_sequence GeminiMotorDrive.utilities.UnitConverter ] variable[cv_cycles] assign[=] call[name[copy].deepcopy, parameter[name[cycles]]] for taget[name[cycle]] in starred[name[cv_cycles]] begin[:] for taget[name[move]] in starred[call[name[cycle]][constant[moves]]] begin[:] call[name[move]][constant[A]] assign[=] call[name[unit_converter].to_motor_velocity_acceleration, parameter[call[name[move]][constant[A]]]] call[name[move]][constant[AD]] assign[=] call[name[unit_converter].to_motor_velocity_acceleration, parameter[call[name[move]][constant[AD]]]] call[name[move]][constant[V]] assign[=] call[name[unit_converter].to_motor_velocity_acceleration, parameter[call[name[move]][constant[V]]]] call[name[move]][constant[D]] assign[=] call[name[int], parameter[call[name[unit_converter].to_motor_distance, parameter[call[name[move]][constant[D]]]]]] return[name[cv_cycles]]
keyword[def] identifier[convert_sequence_to_motor_units] ( identifier[cycles] , identifier[unit_converter] ): literal[string] identifier[cv_cycles] = identifier[copy] . identifier[deepcopy] ( identifier[cycles] ) keyword[for] identifier[cycle] keyword[in] identifier[cv_cycles] : keyword[for] identifier[move] keyword[in] identifier[cycle] [ literal[string] ]: identifier[move] [ literal[string] ]= identifier[unit_converter] . identifier[to_motor_velocity_acceleration] ( identifier[move] [ literal[string] ]) identifier[move] [ literal[string] ]= identifier[unit_converter] . identifier[to_motor_velocity_acceleration] ( identifier[move] [ literal[string] ]) identifier[move] [ literal[string] ]= identifier[unit_converter] . identifier[to_motor_velocity_acceleration] ( identifier[move] [ literal[string] ]) identifier[move] [ literal[string] ]= identifier[int] ( identifier[unit_converter] . identifier[to_motor_distance] ( identifier[move] [ literal[string] ])) keyword[return] identifier[cv_cycles]
def convert_sequence_to_motor_units(cycles, unit_converter): """ Converts a move sequence to motor units. Converts a move sequence to motor units using the provied converter. Parameters ---------- cycles : iterable of dicts The iterable of cycles of motion to do one after another. See ``compile_sequence`` for format. unit_converter : UnitConverter, optional ``GeminiMotorDrive.utilities.UnitConverter`` to use to convert the units in `cycles` to motor units. Returns ------- motor_cycles : list of dicts A deep copy of `cycles` with all units converted to motor units. See Also -------- compile_sequence GeminiMotorDrive.utilities.UnitConverter """ # Make a deep copy of cycles so that the conversions don't damage # the original one. cv_cycles = copy.deepcopy(cycles) # Go through each cycle and do the conversions. for cycle in cv_cycles: # Go through each of the moves and do the conversions. for move in cycle['moves']: move['A'] = unit_converter.to_motor_velocity_acceleration(move['A']) move['AD'] = unit_converter.to_motor_velocity_acceleration(move['AD']) move['V'] = unit_converter.to_motor_velocity_acceleration(move['V']) move['D'] = int(unit_converter.to_motor_distance(move['D'])) # depends on [control=['for'], data=['move']] # depends on [control=['for'], data=['cycle']] # Now return the converted move sequence. return cv_cycles
def normalize_contract_type( contract_type_data: Dict[str, Any] ) -> Iterable[Tuple[str, Any]]: """ Serialize contract_data found in compiler output to the defined fields. """ yield "abi", contract_type_data["abi"] if "evm" in contract_type_data: if "bytecode" in contract_type_data["evm"]: yield "deployment_bytecode", normalize_bytecode_object( contract_type_data["evm"]["bytecode"] ) if "deployedBytecode" in contract_type_data["evm"]: yield "runtime_bytecode", normalize_bytecode_object( contract_type_data["evm"]["deployedBytecode"] ) if any(key in contract_type_data for key in NATSPEC_FIELDS): natspec = deep_merge_dicts( contract_type_data.get("userdoc", {}), contract_type_data.get("devdoc", {}) ) yield "natspec", natspec # make sure metadata isn't an empty string in solc output if "metadata" in contract_type_data and contract_type_data["metadata"]: yield "compiler", normalize_compiler_object( json.loads(contract_type_data["metadata"]) )
def function[normalize_contract_type, parameter[contract_type_data]]: constant[ Serialize contract_data found in compiler output to the defined fields. ] <ast.Yield object at 0x7da20e960130> if compare[constant[evm] in name[contract_type_data]] begin[:] if compare[constant[bytecode] in call[name[contract_type_data]][constant[evm]]] begin[:] <ast.Yield object at 0x7da20e962320> if compare[constant[deployedBytecode] in call[name[contract_type_data]][constant[evm]]] begin[:] <ast.Yield object at 0x7da2044c3a60> if call[name[any], parameter[<ast.GeneratorExp object at 0x7da2044c1570>]] begin[:] variable[natspec] assign[=] call[name[deep_merge_dicts], parameter[call[name[contract_type_data].get, parameter[constant[userdoc], dictionary[[], []]]], call[name[contract_type_data].get, parameter[constant[devdoc], dictionary[[], []]]]]] <ast.Yield object at 0x7da2044c36d0> if <ast.BoolOp object at 0x7da2044c1e70> begin[:] <ast.Yield object at 0x7da2044c07f0>
keyword[def] identifier[normalize_contract_type] ( identifier[contract_type_data] : identifier[Dict] [ identifier[str] , identifier[Any] ] )-> identifier[Iterable] [ identifier[Tuple] [ identifier[str] , identifier[Any] ]]: literal[string] keyword[yield] literal[string] , identifier[contract_type_data] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[contract_type_data] : keyword[if] literal[string] keyword[in] identifier[contract_type_data] [ literal[string] ]: keyword[yield] literal[string] , identifier[normalize_bytecode_object] ( identifier[contract_type_data] [ literal[string] ][ literal[string] ] ) keyword[if] literal[string] keyword[in] identifier[contract_type_data] [ literal[string] ]: keyword[yield] literal[string] , identifier[normalize_bytecode_object] ( identifier[contract_type_data] [ literal[string] ][ literal[string] ] ) keyword[if] identifier[any] ( identifier[key] keyword[in] identifier[contract_type_data] keyword[for] identifier[key] keyword[in] identifier[NATSPEC_FIELDS] ): identifier[natspec] = identifier[deep_merge_dicts] ( identifier[contract_type_data] . identifier[get] ( literal[string] ,{}), identifier[contract_type_data] . identifier[get] ( literal[string] ,{}) ) keyword[yield] literal[string] , identifier[natspec] keyword[if] literal[string] keyword[in] identifier[contract_type_data] keyword[and] identifier[contract_type_data] [ literal[string] ]: keyword[yield] literal[string] , identifier[normalize_compiler_object] ( identifier[json] . identifier[loads] ( identifier[contract_type_data] [ literal[string] ]) )
def normalize_contract_type(contract_type_data: Dict[str, Any]) -> Iterable[Tuple[str, Any]]: """ Serialize contract_data found in compiler output to the defined fields. """ yield ('abi', contract_type_data['abi']) if 'evm' in contract_type_data: if 'bytecode' in contract_type_data['evm']: yield ('deployment_bytecode', normalize_bytecode_object(contract_type_data['evm']['bytecode'])) # depends on [control=['if'], data=[]] if 'deployedBytecode' in contract_type_data['evm']: yield ('runtime_bytecode', normalize_bytecode_object(contract_type_data['evm']['deployedBytecode'])) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['contract_type_data']] if any((key in contract_type_data for key in NATSPEC_FIELDS)): natspec = deep_merge_dicts(contract_type_data.get('userdoc', {}), contract_type_data.get('devdoc', {})) yield ('natspec', natspec) # depends on [control=['if'], data=[]] # make sure metadata isn't an empty string in solc output if 'metadata' in contract_type_data and contract_type_data['metadata']: yield ('compiler', normalize_compiler_object(json.loads(contract_type_data['metadata']))) # depends on [control=['if'], data=[]]
def det_refpoint(self, angle): """Return the detector reference point position at ``angle``. For an angle ``phi``, the detector position is given by :: det_ref(phi) = translation + rot_matrix(phi) * (det_rad * src_to_det_init) + (offset_along_axis + pitch * phi) * axis where ``src_to_det_init`` is the initial unit vector pointing from source to detector. Parameters ---------- angle : float or `array-like` Angle(s) in radians describing the counter-clockwise rotation of the detector. Returns ------- refpt : `numpy.ndarray` Vector(s) pointing from the origin to the detector reference point. If ``angle`` is a single parameter, the returned array has shape ``(3,)``, otherwise ``angle.shape + (3,)``. See Also -------- src_position Examples -------- With default arguments, the detector starts at ``det_rad * e_y`` and rotates to ``det_rad * (-e_x) + pitch/4 * e_z`` at 90 degrees: >>> apart = odl.uniform_partition(0, 4 * np.pi, 10) >>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20)) >>> geom = ConeFlatGeometry( ... apart, dpart, src_radius=5, det_radius=10, pitch=2) >>> geom.det_refpoint(0) array([ 0., 10., 0.]) >>> np.allclose(geom.det_refpoint(np.pi / 2), [-10, 0, 0.5]) True The method is vectorized, i.e., it can be called with multiple angles at once (or an n-dimensional array of angles): >>> points = geom.det_refpoint([0, np.pi / 2]) >>> np.allclose(points[0], [0, 10, 0]) True >>> np.allclose(points[1], [-10, 0, 0.5]) True >>> geom.det_refpoint(np.zeros((4, 5))).shape (4, 5, 3) """ squeeze_out = (np.shape(angle) == ()) angle = np.array(angle, dtype=float, copy=False, ndmin=1) rot_matrix = self.rotation_matrix(angle) extra_dims = angle.ndim # Initial vector from center of rotation to detector. # It can be computed this way since source and detector are at # maximum distance, i.e. the connecting line passes the center. 
center_to_det_init = self.det_radius * self.src_to_det_init # `circle_component` has shape (a, ndim) circle_component = rot_matrix.dot(center_to_det_init) # Increment along the rotation axis according to pitch and # offset_along_axis # `shift_along_axis` has shape angles.shape shift_along_axis = (self.offset_along_axis + self.pitch * angle / (2 * np.pi)) # Create outer product of `shift_along_axis` and `axis`, resulting # in shape (a, ndim) pitch_component = np.multiply.outer(shift_along_axis, self.axis) # Broadcast translation along extra dimensions transl_slc = (None,) * extra_dims + (slice(None),) refpt = (self.translation[transl_slc] + circle_component + pitch_component) if squeeze_out: refpt = refpt.squeeze() return refpt
def function[det_refpoint, parameter[self, angle]]: constant[Return the detector reference point position at ``angle``. For an angle ``phi``, the detector position is given by :: det_ref(phi) = translation + rot_matrix(phi) * (det_rad * src_to_det_init) + (offset_along_axis + pitch * phi) * axis where ``src_to_det_init`` is the initial unit vector pointing from source to detector. Parameters ---------- angle : float or `array-like` Angle(s) in radians describing the counter-clockwise rotation of the detector. Returns ------- refpt : `numpy.ndarray` Vector(s) pointing from the origin to the detector reference point. If ``angle`` is a single parameter, the returned array has shape ``(3,)``, otherwise ``angle.shape + (3,)``. See Also -------- src_position Examples -------- With default arguments, the detector starts at ``det_rad * e_y`` and rotates to ``det_rad * (-e_x) + pitch/4 * e_z`` at 90 degrees: >>> apart = odl.uniform_partition(0, 4 * np.pi, 10) >>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20)) >>> geom = ConeFlatGeometry( ... 
apart, dpart, src_radius=5, det_radius=10, pitch=2) >>> geom.det_refpoint(0) array([ 0., 10., 0.]) >>> np.allclose(geom.det_refpoint(np.pi / 2), [-10, 0, 0.5]) True The method is vectorized, i.e., it can be called with multiple angles at once (or an n-dimensional array of angles): >>> points = geom.det_refpoint([0, np.pi / 2]) >>> np.allclose(points[0], [0, 10, 0]) True >>> np.allclose(points[1], [-10, 0, 0.5]) True >>> geom.det_refpoint(np.zeros((4, 5))).shape (4, 5, 3) ] variable[squeeze_out] assign[=] compare[call[name[np].shape, parameter[name[angle]]] equal[==] tuple[[]]] variable[angle] assign[=] call[name[np].array, parameter[name[angle]]] variable[rot_matrix] assign[=] call[name[self].rotation_matrix, parameter[name[angle]]] variable[extra_dims] assign[=] name[angle].ndim variable[center_to_det_init] assign[=] binary_operation[name[self].det_radius * name[self].src_to_det_init] variable[circle_component] assign[=] call[name[rot_matrix].dot, parameter[name[center_to_det_init]]] variable[shift_along_axis] assign[=] binary_operation[name[self].offset_along_axis + binary_operation[binary_operation[name[self].pitch * name[angle]] / binary_operation[constant[2] * name[np].pi]]] variable[pitch_component] assign[=] call[name[np].multiply.outer, parameter[name[shift_along_axis], name[self].axis]] variable[transl_slc] assign[=] binary_operation[binary_operation[tuple[[<ast.Constant object at 0x7da1b1e941c0>]] * name[extra_dims]] + tuple[[<ast.Call object at 0x7da1b1e97790>]]] variable[refpt] assign[=] binary_operation[binary_operation[call[name[self].translation][name[transl_slc]] + name[circle_component]] + name[pitch_component]] if name[squeeze_out] begin[:] variable[refpt] assign[=] call[name[refpt].squeeze, parameter[]] return[name[refpt]]
keyword[def] identifier[det_refpoint] ( identifier[self] , identifier[angle] ): literal[string] identifier[squeeze_out] =( identifier[np] . identifier[shape] ( identifier[angle] )==()) identifier[angle] = identifier[np] . identifier[array] ( identifier[angle] , identifier[dtype] = identifier[float] , identifier[copy] = keyword[False] , identifier[ndmin] = literal[int] ) identifier[rot_matrix] = identifier[self] . identifier[rotation_matrix] ( identifier[angle] ) identifier[extra_dims] = identifier[angle] . identifier[ndim] identifier[center_to_det_init] = identifier[self] . identifier[det_radius] * identifier[self] . identifier[src_to_det_init] identifier[circle_component] = identifier[rot_matrix] . identifier[dot] ( identifier[center_to_det_init] ) identifier[shift_along_axis] =( identifier[self] . identifier[offset_along_axis] + identifier[self] . identifier[pitch] * identifier[angle] /( literal[int] * identifier[np] . identifier[pi] )) identifier[pitch_component] = identifier[np] . identifier[multiply] . identifier[outer] ( identifier[shift_along_axis] , identifier[self] . identifier[axis] ) identifier[transl_slc] =( keyword[None] ,)* identifier[extra_dims] +( identifier[slice] ( keyword[None] ),) identifier[refpt] =( identifier[self] . identifier[translation] [ identifier[transl_slc] ] + identifier[circle_component] + identifier[pitch_component] ) keyword[if] identifier[squeeze_out] : identifier[refpt] = identifier[refpt] . identifier[squeeze] () keyword[return] identifier[refpt]
def det_refpoint(self, angle): """Return the detector reference point position at ``angle``. For an angle ``phi``, the detector position is given by :: det_ref(phi) = translation + rot_matrix(phi) * (det_rad * src_to_det_init) + (offset_along_axis + pitch * phi) * axis where ``src_to_det_init`` is the initial unit vector pointing from source to detector. Parameters ---------- angle : float or `array-like` Angle(s) in radians describing the counter-clockwise rotation of the detector. Returns ------- refpt : `numpy.ndarray` Vector(s) pointing from the origin to the detector reference point. If ``angle`` is a single parameter, the returned array has shape ``(3,)``, otherwise ``angle.shape + (3,)``. See Also -------- src_position Examples -------- With default arguments, the detector starts at ``det_rad * e_y`` and rotates to ``det_rad * (-e_x) + pitch/4 * e_z`` at 90 degrees: >>> apart = odl.uniform_partition(0, 4 * np.pi, 10) >>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20)) >>> geom = ConeFlatGeometry( ... apart, dpart, src_radius=5, det_radius=10, pitch=2) >>> geom.det_refpoint(0) array([ 0., 10., 0.]) >>> np.allclose(geom.det_refpoint(np.pi / 2), [-10, 0, 0.5]) True The method is vectorized, i.e., it can be called with multiple angles at once (or an n-dimensional array of angles): >>> points = geom.det_refpoint([0, np.pi / 2]) >>> np.allclose(points[0], [0, 10, 0]) True >>> np.allclose(points[1], [-10, 0, 0.5]) True >>> geom.det_refpoint(np.zeros((4, 5))).shape (4, 5, 3) """ squeeze_out = np.shape(angle) == () angle = np.array(angle, dtype=float, copy=False, ndmin=1) rot_matrix = self.rotation_matrix(angle) extra_dims = angle.ndim # Initial vector from center of rotation to detector. # It can be computed this way since source and detector are at # maximum distance, i.e. the connecting line passes the center. 
center_to_det_init = self.det_radius * self.src_to_det_init # `circle_component` has shape (a, ndim) circle_component = rot_matrix.dot(center_to_det_init) # Increment along the rotation axis according to pitch and # offset_along_axis # `shift_along_axis` has shape angles.shape shift_along_axis = self.offset_along_axis + self.pitch * angle / (2 * np.pi) # Create outer product of `shift_along_axis` and `axis`, resulting # in shape (a, ndim) pitch_component = np.multiply.outer(shift_along_axis, self.axis) # Broadcast translation along extra dimensions transl_slc = (None,) * extra_dims + (slice(None),) refpt = self.translation[transl_slc] + circle_component + pitch_component if squeeze_out: refpt = refpt.squeeze() # depends on [control=['if'], data=[]] return refpt
def normalizeKerningKey(value): """ Normalizes kerning key. * **value** must be a ``tuple`` or ``list``. * **value** must contain only two members. * **value** items must be :ref:`type-string`. * **value** items must be at least one character long. * Returned value will be a two member ``tuple`` of unencoded ``unicode`` strings. """ if not isinstance(value, (tuple, list)): raise TypeError("Kerning key must be a tuple instance, not %s." % type(value).__name__) if len(value) != 2: raise ValueError("Kerning key must be a tuple containing two items, " "not %d." % len(value)) for v in value: if not isinstance(v, basestring): raise TypeError("Kerning key items must be strings, not %s." % type(v).__name__) if len(v) < 1: raise ValueError("Kerning key items must be one character long") if value[0].startswith("public.") and not value[0].startswith( "public.kern1."): raise ValueError("Left Kerning key group must start with " "public.kern1.") if value[1].startswith("public.") and not value[1].startswith( "public.kern2."): raise ValueError("Right Kerning key group must start with " "public.kern2.") return tuple([unicode(v) for v in value])
def function[normalizeKerningKey, parameter[value]]: constant[ Normalizes kerning key. * **value** must be a ``tuple`` or ``list``. * **value** must contain only two members. * **value** items must be :ref:`type-string`. * **value** items must be at least one character long. * Returned value will be a two member ``tuple`` of unencoded ``unicode`` strings. ] if <ast.UnaryOp object at 0x7da20c990b50> begin[:] <ast.Raise object at 0x7da20c9938e0> if compare[call[name[len], parameter[name[value]]] not_equal[!=] constant[2]] begin[:] <ast.Raise object at 0x7da20c990b80> for taget[name[v]] in starred[name[value]] begin[:] if <ast.UnaryOp object at 0x7da20c992020> begin[:] <ast.Raise object at 0x7da20c993370> if compare[call[name[len], parameter[name[v]]] less[<] constant[1]] begin[:] <ast.Raise object at 0x7da20c9908b0> if <ast.BoolOp object at 0x7da20c990fa0> begin[:] <ast.Raise object at 0x7da20c993640> if <ast.BoolOp object at 0x7da20c990880> begin[:] <ast.Raise object at 0x7da20c991780> return[call[name[tuple], parameter[<ast.ListComp object at 0x7da20c992320>]]]
keyword[def] identifier[normalizeKerningKey] ( identifier[value] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[value] ,( identifier[tuple] , identifier[list] )): keyword[raise] identifier[TypeError] ( literal[string] % identifier[type] ( identifier[value] ). identifier[__name__] ) keyword[if] identifier[len] ( identifier[value] )!= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] % identifier[len] ( identifier[value] )) keyword[for] identifier[v] keyword[in] identifier[value] : keyword[if] keyword[not] identifier[isinstance] ( identifier[v] , identifier[basestring] ): keyword[raise] identifier[TypeError] ( literal[string] % identifier[type] ( identifier[v] ). identifier[__name__] ) keyword[if] identifier[len] ( identifier[v] )< literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[value] [ literal[int] ]. identifier[startswith] ( literal[string] ) keyword[and] keyword[not] identifier[value] [ literal[int] ]. identifier[startswith] ( literal[string] ): keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) keyword[if] identifier[value] [ literal[int] ]. identifier[startswith] ( literal[string] ) keyword[and] keyword[not] identifier[value] [ literal[int] ]. identifier[startswith] ( literal[string] ): keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) keyword[return] identifier[tuple] ([ identifier[unicode] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[value] ])
def normalizeKerningKey(value): """ Normalizes kerning key. * **value** must be a ``tuple`` or ``list``. * **value** must contain only two members. * **value** items must be :ref:`type-string`. * **value** items must be at least one character long. * Returned value will be a two member ``tuple`` of unencoded ``unicode`` strings. """ if not isinstance(value, (tuple, list)): raise TypeError('Kerning key must be a tuple instance, not %s.' % type(value).__name__) # depends on [control=['if'], data=[]] if len(value) != 2: raise ValueError('Kerning key must be a tuple containing two items, not %d.' % len(value)) # depends on [control=['if'], data=[]] for v in value: if not isinstance(v, basestring): raise TypeError('Kerning key items must be strings, not %s.' % type(v).__name__) # depends on [control=['if'], data=[]] if len(v) < 1: raise ValueError('Kerning key items must be one character long') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['v']] if value[0].startswith('public.') and (not value[0].startswith('public.kern1.')): raise ValueError('Left Kerning key group must start with public.kern1.') # depends on [control=['if'], data=[]] if value[1].startswith('public.') and (not value[1].startswith('public.kern2.')): raise ValueError('Right Kerning key group must start with public.kern2.') # depends on [control=['if'], data=[]] return tuple([unicode(v) for v in value])
def remove_files(filename): # type: (AnyStr) -> None """ Delete all files with same root as fileName, i.e. regardless of suffix, such as ESRI shapefile """ pattern = os.path.splitext(filename)[0] + '.*' for f in glob.iglob(pattern): os.remove(f)
def function[remove_files, parameter[filename]]: constant[ Delete all files with same root as fileName, i.e. regardless of suffix, such as ESRI shapefile ] variable[pattern] assign[=] binary_operation[call[call[name[os].path.splitext, parameter[name[filename]]]][constant[0]] + constant[.*]] for taget[name[f]] in starred[call[name[glob].iglob, parameter[name[pattern]]]] begin[:] call[name[os].remove, parameter[name[f]]]
keyword[def] identifier[remove_files] ( identifier[filename] ): literal[string] identifier[pattern] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )[ literal[int] ]+ literal[string] keyword[for] identifier[f] keyword[in] identifier[glob] . identifier[iglob] ( identifier[pattern] ): identifier[os] . identifier[remove] ( identifier[f] )
def remove_files(filename): # type: (AnyStr) -> None '\n Delete all files with same root as fileName,\n i.e. regardless of suffix, such as ESRI shapefile\n ' pattern = os.path.splitext(filename)[0] + '.*' for f in glob.iglob(pattern): os.remove(f) # depends on [control=['for'], data=['f']]
def fuzzy_histogram(a, bins=10, range=None, normed=False, membership='triangular', smoothness=None, guarantee=False): r"""Compute a fuzzy histogram. The percentage of a value's membership in a bin is computed using the selected membership function. This functions stays as near as possible to the `numpy.histogram` behaviour. Parameters ---------- a : array_like Input data; The histogram is computed over the flattened array (with ravel()). bins : int The number of equal-width bins in the given range (10, by default). range : (float, float) The lower and upper range of the bins; If not provided, range is simply (a.min(), a.max()); Values outside the range are ignored. normed : bool If False, the result will contain the number of samples in each bin; If True, the result is the value of the probability density function at the bin, normalized such that the integral over the range is 1. membership : string Select the type of the fuzzy membership function; See package description for available options. smoothness : float The smoothness of the fuzzy function; See package description and the membership functions for more details. guarantee : bool Guarantee that all values contribute equally to the histogram; when this value is set, the range term is ignored; see package descriptions for details. Returns ------- hist : array The values of the histogram. See normed and weights for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges (length(hist)+1). Notes ----- See package description for more details on the usage. Examples -------- >>> import numpy as np >>> from medpy.features import fuzzy_histogram >>> a = np.asarray([1,2,3,3.2,3.4,3.5,7.5,7.6,7.8,8,9,10]) >>> np.histogram(a, bins=4) (array([4, 2, 2, 4]), array([ 1. , 3.25, 5.5 , 7.75, 10. ])) >>> fuzzy_histogram(a, bins=4) (array([ 3.4 , 2.04444444, 2.04444444, 3.4 ]), array([ 1. , 3.25, 5.5 , 7.75, 10. 
])) >>> fuzzy_histogram(a, bins=4, membership='sigmoid') (array([ 3.34304743, 2.15613626, 2.15613626, 3.34304743]), array([ 1. , 3.25, 5.5 , 7.75, 10. ])) """ # check and prepare parameters a = scipy.asarray(a).ravel() if None == range: range = (a.min(), a.max()) if range[1] <= range[0]: raise AttributeError('max must be larger than min in range parameter.') if not int == type(bins): raise AttributeError('bins must an integer.') if bins <= 0: raise AttributeError('bins must greater than zero.') if membership not in __MBS: raise AttributeError('Unknown type: {}. Must be one of {}.'.format(membership, __MBS)) if not None == smoothness and smoothness <= 0.0: raise AttributeError('smoothness must be greater than zero.') # set default smoothness values if None == smoothness: smoothness = 0.25 if 'trapezoid' == membership else 0.5 if not guarantee: # compute bin distribution in no guarantee case binw = (range[1] - range[0]) / float(bins) bins = scipy.asarray([i * binw + range[0] for i in scipy.arange(bins + 1)]) else: # compute bin distribution for guarantee case bins_core = bins - 2 * int(math.ceil(smoothness)) if bins_core <= 0: raise AttributeError('bins to few to guarantee removing boundary effect.') binw = (range[1] - range[0]) / float(bins_core) range = (range[0] - int(math.ceil(smoothness)) * binw, range[1] + int(math.ceil(smoothness)) * binw) bins = scipy.asarray([i * binw + range[0] for i in scipy.arange(bins + 1)]) # create membership function (centered at 0) if 'triangular' == membership: membership = triangular_membership(0, binw, smoothness) elif 'trapezoid' == membership: membership = trapezoid_membership(0, binw, smoothness) elif 'gaussian' == membership: membership = gaussian_membership(0, binw, smoothness) elif 'sigmoid' == membership: membership = sigmoidal_difference_membership(0, binw, smoothness) # compute histogram i.e. 
memberships of values across neighbourhood (determined by smoothness) neighbourhood = int(math.ceil(smoothness)) l = len(bins) - 2 histogram = scipy.zeros(l + 1) m = range[0] for v in a: # for each value idx = min(l, int((v - m) / binw)) for i in scipy.arange(max(0, idx - neighbourhood), min(l + 1, idx + neighbourhood + 1)): # for crips bin neighbourhood start = bins[i] histogram[i] += membership(v - start - 0.5 * binw) # adjust v for evaluation on zero-centered membership function # normalize if normed: histogram /= float(sum(histogram)) return histogram, bins
def function[fuzzy_histogram, parameter[a, bins, range, normed, membership, smoothness, guarantee]]: constant[Compute a fuzzy histogram. The percentage of a value's membership in a bin is computed using the selected membership function. This functions stays as near as possible to the `numpy.histogram` behaviour. Parameters ---------- a : array_like Input data; The histogram is computed over the flattened array (with ravel()). bins : int The number of equal-width bins in the given range (10, by default). range : (float, float) The lower and upper range of the bins; If not provided, range is simply (a.min(), a.max()); Values outside the range are ignored. normed : bool If False, the result will contain the number of samples in each bin; If True, the result is the value of the probability density function at the bin, normalized such that the integral over the range is 1. membership : string Select the type of the fuzzy membership function; See package description for available options. smoothness : float The smoothness of the fuzzy function; See package description and the membership functions for more details. guarantee : bool Guarantee that all values contribute equally to the histogram; when this value is set, the range term is ignored; see package descriptions for details. Returns ------- hist : array The values of the histogram. See normed and weights for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges (length(hist)+1). Notes ----- See package description for more details on the usage. Examples -------- >>> import numpy as np >>> from medpy.features import fuzzy_histogram >>> a = np.asarray([1,2,3,3.2,3.4,3.5,7.5,7.6,7.8,8,9,10]) >>> np.histogram(a, bins=4) (array([4, 2, 2, 4]), array([ 1. , 3.25, 5.5 , 7.75, 10. ])) >>> fuzzy_histogram(a, bins=4) (array([ 3.4 , 2.04444444, 2.04444444, 3.4 ]), array([ 1. , 3.25, 5.5 , 7.75, 10. 
])) >>> fuzzy_histogram(a, bins=4, membership='sigmoid') (array([ 3.34304743, 2.15613626, 2.15613626, 3.34304743]), array([ 1. , 3.25, 5.5 , 7.75, 10. ])) ] variable[a] assign[=] call[call[name[scipy].asarray, parameter[name[a]]].ravel, parameter[]] if compare[constant[None] equal[==] name[range]] begin[:] variable[range] assign[=] tuple[[<ast.Call object at 0x7da204962ce0>, <ast.Call object at 0x7da2049632e0>]] if compare[call[name[range]][constant[1]] less_or_equal[<=] call[name[range]][constant[0]]] begin[:] <ast.Raise object at 0x7da204963070> if <ast.UnaryOp object at 0x7da2049623b0> begin[:] <ast.Raise object at 0x7da204963f40> if compare[name[bins] less_or_equal[<=] constant[0]] begin[:] <ast.Raise object at 0x7da204960460> if compare[name[membership] <ast.NotIn object at 0x7da2590d7190> name[__MBS]] begin[:] <ast.Raise object at 0x7da204960610> if <ast.BoolOp object at 0x7da204961030> begin[:] <ast.Raise object at 0x7da2049624d0> if compare[constant[None] equal[==] name[smoothness]] begin[:] variable[smoothness] assign[=] <ast.IfExp object at 0x7da204961d20> if <ast.UnaryOp object at 0x7da204962950> begin[:] variable[binw] assign[=] binary_operation[binary_operation[call[name[range]][constant[1]] - call[name[range]][constant[0]]] / call[name[float], parameter[name[bins]]]] variable[bins] assign[=] call[name[scipy].asarray, parameter[<ast.ListComp object at 0x7da18bc72440>]] if compare[constant[triangular] equal[==] name[membership]] begin[:] variable[membership] assign[=] call[name[triangular_membership], parameter[constant[0], name[binw], name[smoothness]]] variable[neighbourhood] assign[=] call[name[int], parameter[call[name[math].ceil, parameter[name[smoothness]]]]] variable[l] assign[=] binary_operation[call[name[len], parameter[name[bins]]] - constant[2]] variable[histogram] assign[=] call[name[scipy].zeros, parameter[binary_operation[name[l] + constant[1]]]] variable[m] assign[=] call[name[range]][constant[0]] for taget[name[v]] in starred[name[a]] 
begin[:] variable[idx] assign[=] call[name[min], parameter[name[l], call[name[int], parameter[binary_operation[binary_operation[name[v] - name[m]] / name[binw]]]]]] for taget[name[i]] in starred[call[name[scipy].arange, parameter[call[name[max], parameter[constant[0], binary_operation[name[idx] - name[neighbourhood]]]], call[name[min], parameter[binary_operation[name[l] + constant[1]], binary_operation[binary_operation[name[idx] + name[neighbourhood]] + constant[1]]]]]]] begin[:] variable[start] assign[=] call[name[bins]][name[i]] <ast.AugAssign object at 0x7da18bc70970> if name[normed] begin[:] <ast.AugAssign object at 0x7da18bc70730> return[tuple[[<ast.Name object at 0x7da18bc705e0>, <ast.Name object at 0x7da18bc73f40>]]]
keyword[def] identifier[fuzzy_histogram] ( identifier[a] , identifier[bins] = literal[int] , identifier[range] = keyword[None] , identifier[normed] = keyword[False] , identifier[membership] = literal[string] , identifier[smoothness] = keyword[None] , identifier[guarantee] = keyword[False] ): literal[string] identifier[a] = identifier[scipy] . identifier[asarray] ( identifier[a] ). identifier[ravel] () keyword[if] keyword[None] == identifier[range] : identifier[range] =( identifier[a] . identifier[min] (), identifier[a] . identifier[max] ()) keyword[if] identifier[range] [ literal[int] ]<= identifier[range] [ literal[int] ]: keyword[raise] identifier[AttributeError] ( literal[string] ) keyword[if] keyword[not] identifier[int] == identifier[type] ( identifier[bins] ): keyword[raise] identifier[AttributeError] ( literal[string] ) keyword[if] identifier[bins] <= literal[int] : keyword[raise] identifier[AttributeError] ( literal[string] ) keyword[if] identifier[membership] keyword[not] keyword[in] identifier[__MBS] : keyword[raise] identifier[AttributeError] ( literal[string] . identifier[format] ( identifier[membership] , identifier[__MBS] )) keyword[if] keyword[not] keyword[None] == identifier[smoothness] keyword[and] identifier[smoothness] <= literal[int] : keyword[raise] identifier[AttributeError] ( literal[string] ) keyword[if] keyword[None] == identifier[smoothness] : identifier[smoothness] = literal[int] keyword[if] literal[string] == identifier[membership] keyword[else] literal[int] keyword[if] keyword[not] identifier[guarantee] : identifier[binw] =( identifier[range] [ literal[int] ]- identifier[range] [ literal[int] ])/ identifier[float] ( identifier[bins] ) identifier[bins] = identifier[scipy] . identifier[asarray] ([ identifier[i] * identifier[binw] + identifier[range] [ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[scipy] . 
identifier[arange] ( identifier[bins] + literal[int] )]) keyword[else] : identifier[bins_core] = identifier[bins] - literal[int] * identifier[int] ( identifier[math] . identifier[ceil] ( identifier[smoothness] )) keyword[if] identifier[bins_core] <= literal[int] : keyword[raise] identifier[AttributeError] ( literal[string] ) identifier[binw] =( identifier[range] [ literal[int] ]- identifier[range] [ literal[int] ])/ identifier[float] ( identifier[bins_core] ) identifier[range] =( identifier[range] [ literal[int] ]- identifier[int] ( identifier[math] . identifier[ceil] ( identifier[smoothness] ))* identifier[binw] , identifier[range] [ literal[int] ]+ identifier[int] ( identifier[math] . identifier[ceil] ( identifier[smoothness] ))* identifier[binw] ) identifier[bins] = identifier[scipy] . identifier[asarray] ([ identifier[i] * identifier[binw] + identifier[range] [ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[scipy] . identifier[arange] ( identifier[bins] + literal[int] )]) keyword[if] literal[string] == identifier[membership] : identifier[membership] = identifier[triangular_membership] ( literal[int] , identifier[binw] , identifier[smoothness] ) keyword[elif] literal[string] == identifier[membership] : identifier[membership] = identifier[trapezoid_membership] ( literal[int] , identifier[binw] , identifier[smoothness] ) keyword[elif] literal[string] == identifier[membership] : identifier[membership] = identifier[gaussian_membership] ( literal[int] , identifier[binw] , identifier[smoothness] ) keyword[elif] literal[string] == identifier[membership] : identifier[membership] = identifier[sigmoidal_difference_membership] ( literal[int] , identifier[binw] , identifier[smoothness] ) identifier[neighbourhood] = identifier[int] ( identifier[math] . identifier[ceil] ( identifier[smoothness] )) identifier[l] = identifier[len] ( identifier[bins] )- literal[int] identifier[histogram] = identifier[scipy] . 
identifier[zeros] ( identifier[l] + literal[int] ) identifier[m] = identifier[range] [ literal[int] ] keyword[for] identifier[v] keyword[in] identifier[a] : identifier[idx] = identifier[min] ( identifier[l] , identifier[int] (( identifier[v] - identifier[m] )/ identifier[binw] )) keyword[for] identifier[i] keyword[in] identifier[scipy] . identifier[arange] ( identifier[max] ( literal[int] , identifier[idx] - identifier[neighbourhood] ), identifier[min] ( identifier[l] + literal[int] , identifier[idx] + identifier[neighbourhood] + literal[int] )): identifier[start] = identifier[bins] [ identifier[i] ] identifier[histogram] [ identifier[i] ]+= identifier[membership] ( identifier[v] - identifier[start] - literal[int] * identifier[binw] ) keyword[if] identifier[normed] : identifier[histogram] /= identifier[float] ( identifier[sum] ( identifier[histogram] )) keyword[return] identifier[histogram] , identifier[bins]
def fuzzy_histogram(a, bins=10, range=None, normed=False, membership='triangular', smoothness=None, guarantee=False): """Compute a fuzzy histogram. The percentage of a value's membership in a bin is computed using the selected membership function. This functions stays as near as possible to the `numpy.histogram` behaviour. Parameters ---------- a : array_like Input data; The histogram is computed over the flattened array (with ravel()). bins : int The number of equal-width bins in the given range (10, by default). range : (float, float) The lower and upper range of the bins; If not provided, range is simply (a.min(), a.max()); Values outside the range are ignored. normed : bool If False, the result will contain the number of samples in each bin; If True, the result is the value of the probability density function at the bin, normalized such that the integral over the range is 1. membership : string Select the type of the fuzzy membership function; See package description for available options. smoothness : float The smoothness of the fuzzy function; See package description and the membership functions for more details. guarantee : bool Guarantee that all values contribute equally to the histogram; when this value is set, the range term is ignored; see package descriptions for details. Returns ------- hist : array The values of the histogram. See normed and weights for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges (length(hist)+1). Notes ----- See package description for more details on the usage. Examples -------- >>> import numpy as np >>> from medpy.features import fuzzy_histogram >>> a = np.asarray([1,2,3,3.2,3.4,3.5,7.5,7.6,7.8,8,9,10]) >>> np.histogram(a, bins=4) (array([4, 2, 2, 4]), array([ 1. , 3.25, 5.5 , 7.75, 10. ])) >>> fuzzy_histogram(a, bins=4) (array([ 3.4 , 2.04444444, 2.04444444, 3.4 ]), array([ 1. , 3.25, 5.5 , 7.75, 10. 
])) >>> fuzzy_histogram(a, bins=4, membership='sigmoid') (array([ 3.34304743, 2.15613626, 2.15613626, 3.34304743]), array([ 1. , 3.25, 5.5 , 7.75, 10. ])) """ # check and prepare parameters a = scipy.asarray(a).ravel() if None == range: range = (a.min(), a.max()) # depends on [control=['if'], data=['range']] if range[1] <= range[0]: raise AttributeError('max must be larger than min in range parameter.') # depends on [control=['if'], data=[]] if not int == type(bins): raise AttributeError('bins must an integer.') # depends on [control=['if'], data=[]] if bins <= 0: raise AttributeError('bins must greater than zero.') # depends on [control=['if'], data=[]] if membership not in __MBS: raise AttributeError('Unknown type: {}. Must be one of {}.'.format(membership, __MBS)) # depends on [control=['if'], data=['membership', '__MBS']] if not None == smoothness and smoothness <= 0.0: raise AttributeError('smoothness must be greater than zero.') # depends on [control=['if'], data=[]] # set default smoothness values if None == smoothness: smoothness = 0.25 if 'trapezoid' == membership else 0.5 # depends on [control=['if'], data=['smoothness']] if not guarantee: # compute bin distribution in no guarantee case binw = (range[1] - range[0]) / float(bins) bins = scipy.asarray([i * binw + range[0] for i in scipy.arange(bins + 1)]) # depends on [control=['if'], data=[]] else: # compute bin distribution for guarantee case bins_core = bins - 2 * int(math.ceil(smoothness)) if bins_core <= 0: raise AttributeError('bins to few to guarantee removing boundary effect.') # depends on [control=['if'], data=[]] binw = (range[1] - range[0]) / float(bins_core) range = (range[0] - int(math.ceil(smoothness)) * binw, range[1] + int(math.ceil(smoothness)) * binw) bins = scipy.asarray([i * binw + range[0] for i in scipy.arange(bins + 1)]) # create membership function (centered at 0) if 'triangular' == membership: membership = triangular_membership(0, binw, smoothness) # depends on [control=['if'], 
data=['membership']] elif 'trapezoid' == membership: membership = trapezoid_membership(0, binw, smoothness) # depends on [control=['if'], data=['membership']] elif 'gaussian' == membership: membership = gaussian_membership(0, binw, smoothness) # depends on [control=['if'], data=['membership']] elif 'sigmoid' == membership: membership = sigmoidal_difference_membership(0, binw, smoothness) # depends on [control=['if'], data=['membership']] # compute histogram i.e. memberships of values across neighbourhood (determined by smoothness) neighbourhood = int(math.ceil(smoothness)) l = len(bins) - 2 histogram = scipy.zeros(l + 1) m = range[0] for v in a: # for each value idx = min(l, int((v - m) / binw)) for i in scipy.arange(max(0, idx - neighbourhood), min(l + 1, idx + neighbourhood + 1)): # for crips bin neighbourhood start = bins[i] histogram[i] += membership(v - start - 0.5 * binw) # adjust v for evaluation on zero-centered membership function # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['v']] # normalize if normed: histogram /= float(sum(histogram)) # depends on [control=['if'], data=[]] return (histogram, bins)
def reset_annotations(self): """Resets the builder's state to allow building new annotations.""" # FIXME: this state does not make sense self.annotation_date_set = False self.annotation_comment_set = False self.annotation_type_set = False self.annotation_spdx_id_set = False
def function[reset_annotations, parameter[self]]: constant[Resets the builder's state to allow building new annotations.] name[self].annotation_date_set assign[=] constant[False] name[self].annotation_comment_set assign[=] constant[False] name[self].annotation_type_set assign[=] constant[False] name[self].annotation_spdx_id_set assign[=] constant[False]
keyword[def] identifier[reset_annotations] ( identifier[self] ): literal[string] identifier[self] . identifier[annotation_date_set] = keyword[False] identifier[self] . identifier[annotation_comment_set] = keyword[False] identifier[self] . identifier[annotation_type_set] = keyword[False] identifier[self] . identifier[annotation_spdx_id_set] = keyword[False]
def reset_annotations(self): """Resets the builder's state to allow building new annotations.""" # FIXME: this state does not make sense self.annotation_date_set = False self.annotation_comment_set = False self.annotation_type_set = False self.annotation_spdx_id_set = False
def __feed_arthur(self): """ Feed Ocean with backend data collected from arthur redis queue""" with self.ARTHUR_FEED_LOCK: # This is a expensive operation so don't do it always if (time.time() - self.ARTHUR_LAST_MEMORY_CHECK) > 5 * self.ARTHUR_LAST_MEMORY_CHECK_TIME: self.ARTHUR_LAST_MEMORY_CHECK = time.time() logger.debug("Measuring the memory used by the raw items dict ...") try: memory_size = self.measure_memory(self.arthur_items) / (1024 * 1024) except RuntimeError as ex: # During memory usage measure, other thread could change the dict logger.warning("Can't get the memory used by the raw items dict: %s", ex) memory_size = self.ARTHUR_LAST_MEMORY_SIZE self.ARTHUR_LAST_MEMORY_CHECK_TIME = time.time() - self.ARTHUR_LAST_MEMORY_CHECK logger.debug("Arthur items memory size: %0.2f MB (%is to check)", memory_size, self.ARTHUR_LAST_MEMORY_CHECK_TIME) self.ARTHUR_LAST_MEMORY_SIZE = memory_size # Don't feed items from redis if the current python dict is # larger than ARTHUR_MAX_MEMORY_SIZE if self.ARTHUR_LAST_MEMORY_SIZE > self.ARTHUR_MAX_MEMORY_SIZE: logger.debug("Items queue full. Not collecting items from redis queue.") return logger.info("Collecting items from redis queue") db_url = self.config.get_conf()['es_collection']['redis_url'] conn = redis.StrictRedis.from_url(db_url) logger.debug("Redis connection stablished with %s.", db_url) # Get and remove queued items in an atomic transaction pipe = conn.pipeline() # pipe.lrange(Q_STORAGE_ITEMS, 0, -1) pipe.lrange(Q_STORAGE_ITEMS, 0, self.ARTHUR_REDIS_ITEMS - 1) pipe.ltrim(Q_STORAGE_ITEMS, self.ARTHUR_REDIS_ITEMS, -1) items = pipe.execute()[0] for item in items: arthur_item = pickle.loads(item) if arthur_item['tag'] not in self.arthur_items: self.arthur_items[arthur_item['tag']] = [] self.arthur_items[arthur_item['tag']].append(arthur_item) for tag in self.arthur_items: if self.arthur_items[tag]: logger.debug("Arthur items for %s: %i", tag, len(self.arthur_items[tag]))
def function[__feed_arthur, parameter[self]]: constant[ Feed Ocean with backend data collected from arthur redis queue] with name[self].ARTHUR_FEED_LOCK begin[:] if compare[binary_operation[call[name[time].time, parameter[]] - name[self].ARTHUR_LAST_MEMORY_CHECK] greater[>] binary_operation[constant[5] * name[self].ARTHUR_LAST_MEMORY_CHECK_TIME]] begin[:] name[self].ARTHUR_LAST_MEMORY_CHECK assign[=] call[name[time].time, parameter[]] call[name[logger].debug, parameter[constant[Measuring the memory used by the raw items dict ...]]] <ast.Try object at 0x7da1b012ef20> name[self].ARTHUR_LAST_MEMORY_CHECK_TIME assign[=] binary_operation[call[name[time].time, parameter[]] - name[self].ARTHUR_LAST_MEMORY_CHECK] call[name[logger].debug, parameter[constant[Arthur items memory size: %0.2f MB (%is to check)], name[memory_size], name[self].ARTHUR_LAST_MEMORY_CHECK_TIME]] name[self].ARTHUR_LAST_MEMORY_SIZE assign[=] name[memory_size] if compare[name[self].ARTHUR_LAST_MEMORY_SIZE greater[>] name[self].ARTHUR_MAX_MEMORY_SIZE] begin[:] call[name[logger].debug, parameter[constant[Items queue full. 
Not collecting items from redis queue.]]] return[None] call[name[logger].info, parameter[constant[Collecting items from redis queue]]] variable[db_url] assign[=] call[call[call[name[self].config.get_conf, parameter[]]][constant[es_collection]]][constant[redis_url]] variable[conn] assign[=] call[name[redis].StrictRedis.from_url, parameter[name[db_url]]] call[name[logger].debug, parameter[constant[Redis connection stablished with %s.], name[db_url]]] variable[pipe] assign[=] call[name[conn].pipeline, parameter[]] call[name[pipe].lrange, parameter[name[Q_STORAGE_ITEMS], constant[0], binary_operation[name[self].ARTHUR_REDIS_ITEMS - constant[1]]]] call[name[pipe].ltrim, parameter[name[Q_STORAGE_ITEMS], name[self].ARTHUR_REDIS_ITEMS, <ast.UnaryOp object at 0x7da1b012c9d0>]] variable[items] assign[=] call[call[name[pipe].execute, parameter[]]][constant[0]] for taget[name[item]] in starred[name[items]] begin[:] variable[arthur_item] assign[=] call[name[pickle].loads, parameter[name[item]]] if compare[call[name[arthur_item]][constant[tag]] <ast.NotIn object at 0x7da2590d7190> name[self].arthur_items] begin[:] call[name[self].arthur_items][call[name[arthur_item]][constant[tag]]] assign[=] list[[]] call[call[name[self].arthur_items][call[name[arthur_item]][constant[tag]]].append, parameter[name[arthur_item]]] for taget[name[tag]] in starred[name[self].arthur_items] begin[:] if call[name[self].arthur_items][name[tag]] begin[:] call[name[logger].debug, parameter[constant[Arthur items for %s: %i], name[tag], call[name[len], parameter[call[name[self].arthur_items][name[tag]]]]]]
keyword[def] identifier[__feed_arthur] ( identifier[self] ): literal[string] keyword[with] identifier[self] . identifier[ARTHUR_FEED_LOCK] : keyword[if] ( identifier[time] . identifier[time] ()- identifier[self] . identifier[ARTHUR_LAST_MEMORY_CHECK] )> literal[int] * identifier[self] . identifier[ARTHUR_LAST_MEMORY_CHECK_TIME] : identifier[self] . identifier[ARTHUR_LAST_MEMORY_CHECK] = identifier[time] . identifier[time] () identifier[logger] . identifier[debug] ( literal[string] ) keyword[try] : identifier[memory_size] = identifier[self] . identifier[measure_memory] ( identifier[self] . identifier[arthur_items] )/( literal[int] * literal[int] ) keyword[except] identifier[RuntimeError] keyword[as] identifier[ex] : identifier[logger] . identifier[warning] ( literal[string] , identifier[ex] ) identifier[memory_size] = identifier[self] . identifier[ARTHUR_LAST_MEMORY_SIZE] identifier[self] . identifier[ARTHUR_LAST_MEMORY_CHECK_TIME] = identifier[time] . identifier[time] ()- identifier[self] . identifier[ARTHUR_LAST_MEMORY_CHECK] identifier[logger] . identifier[debug] ( literal[string] , identifier[memory_size] , identifier[self] . identifier[ARTHUR_LAST_MEMORY_CHECK_TIME] ) identifier[self] . identifier[ARTHUR_LAST_MEMORY_SIZE] = identifier[memory_size] keyword[if] identifier[self] . identifier[ARTHUR_LAST_MEMORY_SIZE] > identifier[self] . identifier[ARTHUR_MAX_MEMORY_SIZE] : identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] identifier[logger] . identifier[info] ( literal[string] ) identifier[db_url] = identifier[self] . identifier[config] . identifier[get_conf] ()[ literal[string] ][ literal[string] ] identifier[conn] = identifier[redis] . identifier[StrictRedis] . identifier[from_url] ( identifier[db_url] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[db_url] ) identifier[pipe] = identifier[conn] . identifier[pipeline] () identifier[pipe] . 
identifier[lrange] ( identifier[Q_STORAGE_ITEMS] , literal[int] , identifier[self] . identifier[ARTHUR_REDIS_ITEMS] - literal[int] ) identifier[pipe] . identifier[ltrim] ( identifier[Q_STORAGE_ITEMS] , identifier[self] . identifier[ARTHUR_REDIS_ITEMS] ,- literal[int] ) identifier[items] = identifier[pipe] . identifier[execute] ()[ literal[int] ] keyword[for] identifier[item] keyword[in] identifier[items] : identifier[arthur_item] = identifier[pickle] . identifier[loads] ( identifier[item] ) keyword[if] identifier[arthur_item] [ literal[string] ] keyword[not] keyword[in] identifier[self] . identifier[arthur_items] : identifier[self] . identifier[arthur_items] [ identifier[arthur_item] [ literal[string] ]]=[] identifier[self] . identifier[arthur_items] [ identifier[arthur_item] [ literal[string] ]]. identifier[append] ( identifier[arthur_item] ) keyword[for] identifier[tag] keyword[in] identifier[self] . identifier[arthur_items] : keyword[if] identifier[self] . identifier[arthur_items] [ identifier[tag] ]: identifier[logger] . identifier[debug] ( literal[string] , identifier[tag] , identifier[len] ( identifier[self] . identifier[arthur_items] [ identifier[tag] ]))
def __feed_arthur(self): """ Feed Ocean with backend data collected from arthur redis queue""" with self.ARTHUR_FEED_LOCK: # This is a expensive operation so don't do it always if time.time() - self.ARTHUR_LAST_MEMORY_CHECK > 5 * self.ARTHUR_LAST_MEMORY_CHECK_TIME: self.ARTHUR_LAST_MEMORY_CHECK = time.time() logger.debug('Measuring the memory used by the raw items dict ...') try: memory_size = self.measure_memory(self.arthur_items) / (1024 * 1024) # depends on [control=['try'], data=[]] except RuntimeError as ex: # During memory usage measure, other thread could change the dict logger.warning("Can't get the memory used by the raw items dict: %s", ex) memory_size = self.ARTHUR_LAST_MEMORY_SIZE # depends on [control=['except'], data=['ex']] self.ARTHUR_LAST_MEMORY_CHECK_TIME = time.time() - self.ARTHUR_LAST_MEMORY_CHECK logger.debug('Arthur items memory size: %0.2f MB (%is to check)', memory_size, self.ARTHUR_LAST_MEMORY_CHECK_TIME) self.ARTHUR_LAST_MEMORY_SIZE = memory_size # depends on [control=['if'], data=[]] # Don't feed items from redis if the current python dict is # larger than ARTHUR_MAX_MEMORY_SIZE if self.ARTHUR_LAST_MEMORY_SIZE > self.ARTHUR_MAX_MEMORY_SIZE: logger.debug('Items queue full. 
Not collecting items from redis queue.') return # depends on [control=['if'], data=[]] logger.info('Collecting items from redis queue') db_url = self.config.get_conf()['es_collection']['redis_url'] conn = redis.StrictRedis.from_url(db_url) logger.debug('Redis connection stablished with %s.', db_url) # Get and remove queued items in an atomic transaction pipe = conn.pipeline() # pipe.lrange(Q_STORAGE_ITEMS, 0, -1) pipe.lrange(Q_STORAGE_ITEMS, 0, self.ARTHUR_REDIS_ITEMS - 1) pipe.ltrim(Q_STORAGE_ITEMS, self.ARTHUR_REDIS_ITEMS, -1) items = pipe.execute()[0] for item in items: arthur_item = pickle.loads(item) if arthur_item['tag'] not in self.arthur_items: self.arthur_items[arthur_item['tag']] = [] # depends on [control=['if'], data=[]] self.arthur_items[arthur_item['tag']].append(arthur_item) # depends on [control=['for'], data=['item']] for tag in self.arthur_items: if self.arthur_items[tag]: logger.debug('Arthur items for %s: %i', tag, len(self.arthur_items[tag])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tag']] # depends on [control=['with'], data=[]]
def process(self, blob=None):
    """Pump the next blob to the modules

    Fetches the blob at ``self.index`` from the current file.  When the
    current file is exhausted (IndexError), rolls over to the next file
    (if any remain below ``index_stop``) and retries once; raises
    ``StopIteration`` when no files are left.
    """
    try:
        blob = self.get_blob(self.index)
    except IndexError:
        # Current file exhausted: try to advance to the next input file.
        self.log.info("Got an IndexError, trying the next file")
        if (self.basename or self.filenames) and self.file_index < self.index_stop:
            self.file_index += 1
            self.log.info("Now at file_index={}".format(self.file_index))
            self._reset()
            self.blob_file.close()
            self.log.info("Resetting blob index to 0")
            self.index = 0
            file_index = self._get_file_index_str()
            # Next filename comes either from an explicit list or is
            # synthesized from basename + index + suffix.
            if self.filenames:
                self.filename = self.filenames[self.file_index - 1]
            elif self.basename:
                self.filename = "{}{}{}.evt" \
                    .format(self.basename, file_index, self.suffix)
            self.log.info("Next filename: {}".format(self.filename))
            self.print("Opening {0}".format(self.filename))
            self.open_file(self.filename)
            self.prepare_blobs()
            try:
                blob = self.get_blob(self.index)
            except IndexError:
                # Newly opened file is empty; fall through and terminate.
                self.log.warning(
                    "No blob found in file {}".format(self.filename)
                )
            else:
                # Successful rollover: hand the first blob of the new file on.
                return blob
        self.log.info("No files left, terminating the pipeline")
        raise StopIteration
    # Normal path: advance the cursor for the next call.
    self.index += 1
    return blob
def function[process, parameter[self, blob]]: constant[Pump the next blob to the modules] <ast.Try object at 0x7da18f811000> <ast.AugAssign object at 0x7da18f811390> return[name[blob]]
keyword[def] identifier[process] ( identifier[self] , identifier[blob] = keyword[None] ): literal[string] keyword[try] : identifier[blob] = identifier[self] . identifier[get_blob] ( identifier[self] . identifier[index] ) keyword[except] identifier[IndexError] : identifier[self] . identifier[log] . identifier[info] ( literal[string] ) keyword[if] ( identifier[self] . identifier[basename] keyword[or] identifier[self] . identifier[filenames] ) keyword[and] identifier[self] . identifier[file_index] < identifier[self] . identifier[index_stop] : identifier[self] . identifier[file_index] += literal[int] identifier[self] . identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[file_index] )) identifier[self] . identifier[_reset] () identifier[self] . identifier[blob_file] . identifier[close] () identifier[self] . identifier[log] . identifier[info] ( literal[string] ) identifier[self] . identifier[index] = literal[int] identifier[file_index] = identifier[self] . identifier[_get_file_index_str] () keyword[if] identifier[self] . identifier[filenames] : identifier[self] . identifier[filename] = identifier[self] . identifier[filenames] [ identifier[self] . identifier[file_index] - literal[int] ] keyword[elif] identifier[self] . identifier[basename] : identifier[self] . identifier[filename] = literal[string] . identifier[format] ( identifier[self] . identifier[basename] , identifier[file_index] , identifier[self] . identifier[suffix] ) identifier[self] . identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[filename] )) identifier[self] . identifier[print] ( literal[string] . identifier[format] ( identifier[self] . identifier[filename] )) identifier[self] . identifier[open_file] ( identifier[self] . identifier[filename] ) identifier[self] . identifier[prepare_blobs] () keyword[try] : identifier[blob] = identifier[self] . identifier[get_blob] ( identifier[self] . 
identifier[index] ) keyword[except] identifier[IndexError] : identifier[self] . identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[self] . identifier[filename] ) ) keyword[else] : keyword[return] identifier[blob] identifier[self] . identifier[log] . identifier[info] ( literal[string] ) keyword[raise] identifier[StopIteration] identifier[self] . identifier[index] += literal[int] keyword[return] identifier[blob]
def process(self, blob=None): """Pump the next blob to the modules""" try: blob = self.get_blob(self.index) # depends on [control=['try'], data=[]] except IndexError: self.log.info('Got an IndexError, trying the next file') if (self.basename or self.filenames) and self.file_index < self.index_stop: self.file_index += 1 self.log.info('Now at file_index={}'.format(self.file_index)) self._reset() self.blob_file.close() self.log.info('Resetting blob index to 0') self.index = 0 file_index = self._get_file_index_str() if self.filenames: self.filename = self.filenames[self.file_index - 1] # depends on [control=['if'], data=[]] elif self.basename: self.filename = '{}{}{}.evt'.format(self.basename, file_index, self.suffix) # depends on [control=['if'], data=[]] self.log.info('Next filename: {}'.format(self.filename)) self.print('Opening {0}'.format(self.filename)) self.open_file(self.filename) self.prepare_blobs() try: blob = self.get_blob(self.index) # depends on [control=['try'], data=[]] except IndexError: self.log.warning('No blob found in file {}'.format(self.filename)) # depends on [control=['except'], data=[]] else: return blob # depends on [control=['if'], data=[]] self.log.info('No files left, terminating the pipeline') raise StopIteration # depends on [control=['except'], data=[]] self.index += 1 return blob
def global_variables(self):
    """
    Return an iterator over this module's global variables.

    The iterator yields a ValueRef for each global variable.

    Note that global variables don't include functions: in LLVM
    parlance a function is a "global value" but not a "global
    variable".
    """
    parents = dict(module=self)
    return _GlobalsIterator(ffi.lib.LLVMPY_ModuleGlobalsIter(self), parents)
def function[global_variables, parameter[self]]: constant[ Return an iterator over this module's global variables. The iterator will yield a ValueRef for each global variable. Note that global variables don't include functions (a function is a "global value" but not a "global variable" in LLVM parlance) ] variable[it] assign[=] call[name[ffi].lib.LLVMPY_ModuleGlobalsIter, parameter[name[self]]] return[call[name[_GlobalsIterator], parameter[name[it], call[name[dict], parameter[]]]]]
keyword[def] identifier[global_variables] ( identifier[self] ): literal[string] identifier[it] = identifier[ffi] . identifier[lib] . identifier[LLVMPY_ModuleGlobalsIter] ( identifier[self] ) keyword[return] identifier[_GlobalsIterator] ( identifier[it] , identifier[dict] ( identifier[module] = identifier[self] ))
def global_variables(self): """ Return an iterator over this module's global variables. The iterator will yield a ValueRef for each global variable. Note that global variables don't include functions (a function is a "global value" but not a "global variable" in LLVM parlance) """ it = ffi.lib.LLVMPY_ModuleGlobalsIter(self) return _GlobalsIterator(it, dict(module=self))
def invalidate_config_var_entry(self, index):
    """Mark a config variable as invalid.

    Returns ``[Error.INVALID_ARRAY_KEY, b'']`` for an out-of-range
    index, ``[ConfigDatabaseError.OBSOLETE_ENTRY, b'']`` if the entry
    was already invalidated, and ``[Error.NO_ERROR]`` on success.
    """
    entries = self.config_database.entries
    # index is 1-based; 0 and anything past the end are rejected
    if index == 0 or index > len(entries):
        return [Error.INVALID_ARRAY_KEY, b'']

    target = entries[index - 1]
    if target.valid:
        target.valid = False
        return [Error.NO_ERROR]

    return [ConfigDatabaseError.OBSOLETE_ENTRY, b'']
def function[invalidate_config_var_entry, parameter[self, index]]: constant[Mark a config variable as invalid.] if <ast.BoolOp object at 0x7da20c6a8d60> begin[:] return[list[[<ast.Attribute object at 0x7da20c6a8a30>, <ast.Constant object at 0x7da20c6a9ab0>]]] variable[entry] assign[=] call[name[self].config_database.entries][binary_operation[name[index] - constant[1]]] if <ast.UnaryOp object at 0x7da20c6a99f0> begin[:] return[list[[<ast.Attribute object at 0x7da20c6a8c10>, <ast.Constant object at 0x7da20c6a8fa0>]]] name[entry].valid assign[=] constant[False] return[list[[<ast.Attribute object at 0x7da20c76efe0>]]]
keyword[def] identifier[invalidate_config_var_entry] ( identifier[self] , identifier[index] ): literal[string] keyword[if] identifier[index] == literal[int] keyword[or] identifier[index] > identifier[len] ( identifier[self] . identifier[config_database] . identifier[entries] ): keyword[return] [ identifier[Error] . identifier[INVALID_ARRAY_KEY] , literal[string] ] identifier[entry] = identifier[self] . identifier[config_database] . identifier[entries] [ identifier[index] - literal[int] ] keyword[if] keyword[not] identifier[entry] . identifier[valid] : keyword[return] [ identifier[ConfigDatabaseError] . identifier[OBSOLETE_ENTRY] , literal[string] ] identifier[entry] . identifier[valid] = keyword[False] keyword[return] [ identifier[Error] . identifier[NO_ERROR] ]
def invalidate_config_var_entry(self, index): """Mark a config variable as invalid.""" if index == 0 or index > len(self.config_database.entries): return [Error.INVALID_ARRAY_KEY, b''] # depends on [control=['if'], data=[]] entry = self.config_database.entries[index - 1] if not entry.valid: return [ConfigDatabaseError.OBSOLETE_ENTRY, b''] # depends on [control=['if'], data=[]] entry.valid = False return [Error.NO_ERROR]
def from_row_and_group(row: int, group: int):
    """
    Return the element that sits at the given row and group.

    Args:
        row (int): Row number
        group (int): Group number

    Raises:
        ValueError: if no element matches the given row/group pair.

    .. note::

        The 18 group number system is used, i.e., Noble gases
        are group 18.
    """
    for symbol in _pt_data:
        candidate = Element(symbol)
        if (candidate.row, candidate.group) == (row, group):
            return candidate
    raise ValueError("No element with this row and group!")
def function[from_row_and_group, parameter[row, group]]: constant[ Returns an element from a row and group number. Args: row (int): Row number group (int): Group number .. note:: The 18 group number system is used, i.e., Noble gases are group 18. ] for taget[name[sym]] in starred[call[name[_pt_data].keys, parameter[]]] begin[:] variable[el] assign[=] call[name[Element], parameter[name[sym]]] if <ast.BoolOp object at 0x7da204564a00> begin[:] return[name[el]] <ast.Raise object at 0x7da2041db550>
keyword[def] identifier[from_row_and_group] ( identifier[row] : identifier[int] , identifier[group] : identifier[int] ): literal[string] keyword[for] identifier[sym] keyword[in] identifier[_pt_data] . identifier[keys] (): identifier[el] = identifier[Element] ( identifier[sym] ) keyword[if] identifier[el] . identifier[row] == identifier[row] keyword[and] identifier[el] . identifier[group] == identifier[group] : keyword[return] identifier[el] keyword[raise] identifier[ValueError] ( literal[string] )
def from_row_and_group(row: int, group: int): """ Returns an element from a row and group number. Args: row (int): Row number group (int): Group number .. note:: The 18 group number system is used, i.e., Noble gases are group 18. """ for sym in _pt_data.keys(): el = Element(sym) if el.row == row and el.group == group: return el # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sym']] raise ValueError('No element with this row and group!')
def virt_conf_from_stream(
    self,
    conf_fd,
    template_repo=None,
    template_store=None,
    do_bootstrap=True,
    do_build=True,
):
    """
    Initializes all the virt infrastructure of the prefix, creating the
    domains disks, doing any network leases and creating all the virt
    related files and dirs inside this prefix.

    Args:
        conf_fd (File): File like object to read the config from
        template_repo (TemplateRepository): template repository intance
        template_store (TemplateStore): template store instance

    Returns:
        None
    """
    # Parse the stream into a dict, then delegate to virt_conf.
    parsed_conf = utils.load_virt_stream(conf_fd)
    LOGGER.debug('Loaded virt config:\n%s', parsed_conf)
    return self.virt_conf(
        conf=parsed_conf,
        template_repo=template_repo,
        template_store=template_store,
        do_bootstrap=do_bootstrap,
        do_build=do_build,
    )
def function[virt_conf_from_stream, parameter[self, conf_fd, template_repo, template_store, do_bootstrap, do_build]]: constant[ Initializes all the virt infrastructure of the prefix, creating the domains disks, doing any network leases and creating all the virt related files and dirs inside this prefix. Args: conf_fd (File): File like object to read the config from template_repo (TemplateRepository): template repository intance template_store (TemplateStore): template store instance Returns: None ] variable[virt_conf] assign[=] call[name[utils].load_virt_stream, parameter[name[conf_fd]]] call[name[LOGGER].debug, parameter[constant[Loaded virt config: %s], name[virt_conf]]] return[call[name[self].virt_conf, parameter[]]]
keyword[def] identifier[virt_conf_from_stream] ( identifier[self] , identifier[conf_fd] , identifier[template_repo] = keyword[None] , identifier[template_store] = keyword[None] , identifier[do_bootstrap] = keyword[True] , identifier[do_build] = keyword[True] , ): literal[string] identifier[virt_conf] = identifier[utils] . identifier[load_virt_stream] ( identifier[conf_fd] ) identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[virt_conf] ) keyword[return] identifier[self] . identifier[virt_conf] ( identifier[conf] = identifier[virt_conf] , identifier[template_repo] = identifier[template_repo] , identifier[template_store] = identifier[template_store] , identifier[do_bootstrap] = identifier[do_bootstrap] , identifier[do_build] = identifier[do_build] )
def virt_conf_from_stream(self, conf_fd, template_repo=None, template_store=None, do_bootstrap=True, do_build=True): """ Initializes all the virt infrastructure of the prefix, creating the domains disks, doing any network leases and creating all the virt related files and dirs inside this prefix. Args: conf_fd (File): File like object to read the config from template_repo (TemplateRepository): template repository intance template_store (TemplateStore): template store instance Returns: None """ virt_conf = utils.load_virt_stream(conf_fd) LOGGER.debug('Loaded virt config:\n%s', virt_conf) return self.virt_conf(conf=virt_conf, template_repo=template_repo, template_store=template_store, do_bootstrap=do_bootstrap, do_build=do_build)
def data(self, index, role=Qt.DisplayRole):
    """Override Qt method

    Column 0 carries the name (display/edit) and its check state;
    column 1 exposes the raw state value for the check-state role.
    """
    valid = index.isValid() and 0 <= index.row() < len(self._rows)
    if not valid:
        return to_qvariant()

    name, state = self.row(index.row())
    column = index.column()

    if role in (Qt.DisplayRole, Qt.EditRole):
        if column == 0:
            return to_qvariant(name)
    elif role == Qt.CheckStateRole:
        if column == 0:
            return Qt.Checked if state else Qt.Unchecked
        if column == 1:
            return to_qvariant(state)

    # Unhandled role/column combinations yield an empty variant.
    return to_qvariant()
def function[data, parameter[self, index, role]]: constant[Override Qt method] if <ast.BoolOp object at 0x7da1b26aebc0> begin[:] return[call[name[to_qvariant], parameter[]]] variable[row] assign[=] call[name[index].row, parameter[]] variable[column] assign[=] call[name[index].column, parameter[]] <ast.Tuple object at 0x7da1b26af100> assign[=] call[name[self].row, parameter[name[row]]] if <ast.BoolOp object at 0x7da1b26acbe0> begin[:] if compare[name[column] equal[==] constant[0]] begin[:] return[call[name[to_qvariant], parameter[name[name]]]] return[call[name[to_qvariant], parameter[]]]
keyword[def] identifier[data] ( identifier[self] , identifier[index] , identifier[role] = identifier[Qt] . identifier[DisplayRole] ): literal[string] keyword[if] keyword[not] identifier[index] . identifier[isValid] () keyword[or] keyword[not] literal[int] <= identifier[index] . identifier[row] ()< identifier[len] ( identifier[self] . identifier[_rows] ): keyword[return] identifier[to_qvariant] () identifier[row] = identifier[index] . identifier[row] () identifier[column] = identifier[index] . identifier[column] () identifier[name] , identifier[state] = identifier[self] . identifier[row] ( identifier[row] ) keyword[if] identifier[role] == identifier[Qt] . identifier[DisplayRole] keyword[or] identifier[role] == identifier[Qt] . identifier[EditRole] : keyword[if] identifier[column] == literal[int] : keyword[return] identifier[to_qvariant] ( identifier[name] ) keyword[elif] identifier[role] == identifier[Qt] . identifier[CheckStateRole] : keyword[if] identifier[column] == literal[int] : keyword[if] identifier[state] : keyword[return] identifier[Qt] . identifier[Checked] keyword[else] : keyword[return] identifier[Qt] . identifier[Unchecked] keyword[if] identifier[column] == literal[int] : keyword[return] identifier[to_qvariant] ( identifier[state] ) keyword[return] identifier[to_qvariant] ()
def data(self, index, role=Qt.DisplayRole): """Override Qt method""" if not index.isValid() or not 0 <= index.row() < len(self._rows): return to_qvariant() # depends on [control=['if'], data=[]] row = index.row() column = index.column() (name, state) = self.row(row) if role == Qt.DisplayRole or role == Qt.EditRole: if column == 0: return to_qvariant(name) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif role == Qt.CheckStateRole: if column == 0: if state: return Qt.Checked # depends on [control=['if'], data=[]] else: return Qt.Unchecked # depends on [control=['if'], data=[]] if column == 1: return to_qvariant(state) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return to_qvariant()
def GMSK_bb(N_bits, Ns, MSK=0, BT=0.35):
    """
    MSK/GMSK Complex Baseband Modulation
    x,data = gmsk(N_bits, Ns, BT = 0.35, MSK = 0)

    Parameters
    ----------
    N_bits : number of symbols processed
    Ns : the number of samples per bit
    MSK : 0 for no shaping which is standard MSK, MSK <> 0 --> GMSK is
          generated.
    BT : premodulation Bb*T product which sets the bandwidth of the
         Gaussian lowpass filter

    Mark Wickert Python version November 2014
    """
    x, b, data = NRZ_bits(N_bits, Ns)
    # Gaussian premodulation pulse, 2*M*Ns + 1 samples long
    M = 4
    n = np.arange(-M * Ns, M * Ns + 1)
    p = np.exp(-2 * np.pi ** 2 * BT ** 2 / np.log(2) * (n / float(Ns)) ** 2)
    p /= np.sum(p)
    # Apply the Gaussian pulse shaping only in the GMSK case
    if MSK != 0:
        x = signal.lfilter(p, 1, x)
    # Integrate the (shaped) NRZ stream into phase and modulate
    phase = np.pi / 2 * np.cumsum(x) / Ns
    y = np.exp(1j * phase)
    return y, data
def function[GMSK_bb, parameter[N_bits, Ns, MSK, BT]]: constant[ MSK/GMSK Complex Baseband Modulation x,data = gmsk(N_bits, Ns, BT = 0.35, MSK = 0) Parameters ---------- N_bits : number of symbols processed Ns : the number of samples per bit MSK : 0 for no shaping which is standard MSK, MSK <> 0 --> GMSK is generated. BT : premodulation Bb*T product which sets the bandwidth of the Gaussian lowpass filter Mark Wickert Python version November 2014 ] <ast.Tuple object at 0x7da20c6e62f0> assign[=] call[name[NRZ_bits], parameter[name[N_bits], name[Ns]]] variable[M] assign[=] constant[4] variable[n] assign[=] call[name[np].arange, parameter[binary_operation[<ast.UnaryOp object at 0x7da2054a7850> * name[Ns]], binary_operation[binary_operation[name[M] * name[Ns]] + constant[1]]]] variable[p] assign[=] call[name[np].exp, parameter[binary_operation[binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da2054a4ac0> * binary_operation[name[np].pi ** constant[2]]] * binary_operation[name[BT] ** constant[2]]] / call[name[np].log, parameter[constant[2]]]] * binary_operation[binary_operation[name[n] / call[name[float], parameter[name[Ns]]]] ** constant[2]]]]] variable[p] assign[=] binary_operation[name[p] / call[name[np].sum, parameter[name[p]]]] if compare[name[MSK] not_equal[!=] constant[0]] begin[:] variable[x] assign[=] call[name[signal].lfilter, parameter[name[p], constant[1], name[x]]] variable[y] assign[=] call[name[np].exp, parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[1j] * name[np].pi] / constant[2]] * call[name[np].cumsum, parameter[name[x]]]] / name[Ns]]]] return[tuple[[<ast.Name object at 0x7da204620400>, <ast.Name object at 0x7da204623010>]]]
keyword[def] identifier[GMSK_bb] ( identifier[N_bits] , identifier[Ns] , identifier[MSK] = literal[int] , identifier[BT] = literal[int] ): literal[string] identifier[x] , identifier[b] , identifier[data] = identifier[NRZ_bits] ( identifier[N_bits] , identifier[Ns] ) identifier[M] = literal[int] identifier[n] = identifier[np] . identifier[arange] (- identifier[M] * identifier[Ns] , identifier[M] * identifier[Ns] + literal[int] ) identifier[p] = identifier[np] . identifier[exp] (- literal[int] * identifier[np] . identifier[pi] ** literal[int] * identifier[BT] ** literal[int] / identifier[np] . identifier[log] ( literal[int] )*( identifier[n] / identifier[float] ( identifier[Ns] ))** literal[int] ); identifier[p] = identifier[p] / identifier[np] . identifier[sum] ( identifier[p] ); keyword[if] identifier[MSK] != literal[int] : identifier[x] = identifier[signal] . identifier[lfilter] ( identifier[p] , literal[int] , identifier[x] ) identifier[y] = identifier[np] . identifier[exp] ( literal[int] * identifier[np] . identifier[pi] / literal[int] * identifier[np] . identifier[cumsum] ( identifier[x] )/ identifier[Ns] ) keyword[return] identifier[y] , identifier[data]
def GMSK_bb(N_bits, Ns, MSK=0, BT=0.35): """ MSK/GMSK Complex Baseband Modulation x,data = gmsk(N_bits, Ns, BT = 0.35, MSK = 0) Parameters ---------- N_bits : number of symbols processed Ns : the number of samples per bit MSK : 0 for no shaping which is standard MSK, MSK <> 0 --> GMSK is generated. BT : premodulation Bb*T product which sets the bandwidth of the Gaussian lowpass filter Mark Wickert Python version November 2014 """ (x, b, data) = NRZ_bits(N_bits, Ns) # pulse length 2*M*Ns M = 4 n = np.arange(-M * Ns, M * Ns + 1) p = np.exp(-2 * np.pi ** 2 * BT ** 2 / np.log(2) * (n / float(Ns)) ** 2) p = p / np.sum(p) # Gaussian pulse shape if MSK not zero if MSK != 0: x = signal.lfilter(p, 1, x) # depends on [control=['if'], data=[]] y = np.exp(1j * np.pi / 2 * np.cumsum(x) / Ns) return (y, data)
def OnLinkVLCVideo(self, event):
    """VLC video code event handler

    With a video file, links a VLC panel factory call to the cursor
    cell; without one, tears down any existing panel for that cell.
    """
    key = self.grid.actions.cursor

    if not event.videofile:
        # No video file given: tear down any existing panel for this cell.
        try:
            video_panel = self.grid.grid_renderer.video_cells.pop(key)
        except KeyError:
            pass
        else:
            video_panel.player.stop()
            video_panel.player.release()
            video_panel.Destroy()
        self.grid.actions.set_code(key, u"")
        return

    # Reuse a previously stored volume for this cell, if any.
    try:
        video_volume = \
            self.grid.code_array.cell_attributes[key]["video_volume"]
    except KeyError:
        video_volume = None

    self.grid.actions.set_attr("panel_cell", True)

    if video_volume is None:
        code = 'vlcpanel_factory("{}")'.format(event.videofile)
    else:
        code = 'vlcpanel_factory("{}", {})'.format(
            event.videofile, video_volume)

    self.grid.actions.set_code(key, code)
def function[OnLinkVLCVideo, parameter[self, event]]: constant[VLC video code event handler] variable[key] assign[=] name[self].grid.actions.cursor if name[event].videofile begin[:] <ast.Try object at 0x7da1b16be0b0> call[name[self].grid.actions.set_attr, parameter[constant[panel_cell], constant[True]]] if compare[name[video_volume] is_not constant[None]] begin[:] variable[code] assign[=] call[constant[vlcpanel_factory("{}", {})].format, parameter[name[event].videofile, name[video_volume]]] call[name[self].grid.actions.set_code, parameter[name[key], name[code]]]
keyword[def] identifier[OnLinkVLCVideo] ( identifier[self] , identifier[event] ): literal[string] identifier[key] = identifier[self] . identifier[grid] . identifier[actions] . identifier[cursor] keyword[if] identifier[event] . identifier[videofile] : keyword[try] : identifier[video_volume] = identifier[self] . identifier[grid] . identifier[code_array] . identifier[cell_attributes] [ identifier[key] ][ literal[string] ] keyword[except] identifier[KeyError] : identifier[video_volume] = keyword[None] identifier[self] . identifier[grid] . identifier[actions] . identifier[set_attr] ( literal[string] , keyword[True] ) keyword[if] identifier[video_volume] keyword[is] keyword[not] keyword[None] : identifier[code] = literal[string] . identifier[format] ( identifier[event] . identifier[videofile] , identifier[video_volume] ) keyword[else] : identifier[code] = literal[string] . identifier[format] ( identifier[event] . identifier[videofile] ) identifier[self] . identifier[grid] . identifier[actions] . identifier[set_code] ( identifier[key] , identifier[code] ) keyword[else] : keyword[try] : identifier[video_panel] = identifier[self] . identifier[grid] . identifier[grid_renderer] . identifier[video_cells] . identifier[pop] ( identifier[key] ) identifier[video_panel] . identifier[player] . identifier[stop] () identifier[video_panel] . identifier[player] . identifier[release] () identifier[video_panel] . identifier[Destroy] () keyword[except] identifier[KeyError] : keyword[pass] identifier[self] . identifier[grid] . identifier[actions] . identifier[set_code] ( identifier[key] , literal[string] )
def OnLinkVLCVideo(self, event): """VLC video code event handler""" key = self.grid.actions.cursor if event.videofile: try: video_volume = self.grid.code_array.cell_attributes[key]['video_volume'] # depends on [control=['try'], data=[]] except KeyError: video_volume = None # depends on [control=['except'], data=[]] self.grid.actions.set_attr('panel_cell', True) if video_volume is not None: code = 'vlcpanel_factory("{}", {})'.format(event.videofile, video_volume) # depends on [control=['if'], data=['video_volume']] else: code = 'vlcpanel_factory("{}")'.format(event.videofile) self.grid.actions.set_code(key, code) # depends on [control=['if'], data=[]] else: try: video_panel = self.grid.grid_renderer.video_cells.pop(key) video_panel.player.stop() video_panel.player.release() video_panel.Destroy() # depends on [control=['try'], data=[]] except KeyError: pass # depends on [control=['except'], data=[]] self.grid.actions.set_code(key, u'')
def publish_delayed_metric(self, name, value, timestamp, raw_value=None,
                           precision=0, metric_type='GAUGE', instance=None):
    """
    Publish a metric carrying an explicit (possibly past) timestamp.

    CloudWatch data points may lag behind the query that fetches them, so
    callers can hand in the timestamp the sample actually belongs to
    instead of "now".
    """
    # Resolve the full metric path for this name/instance pair.
    metric_path = self.get_metric_path(name, instance)

    # TTL is the collection interval scaled by the configured multiplier.
    interval = float(self.config['interval'])
    multiplier = float(self.config['ttl_multiplier'])

    # Build the metric with the caller-supplied timestamp and publish it.
    delayed_metric = Metric(metric_path, value,
                            raw_value=raw_value,
                            timestamp=timestamp,
                            precision=precision,
                            host=self.get_hostname(),
                            metric_type=metric_type,
                            ttl=interval * multiplier)
    self.publish_metric(delayed_metric)
def function[publish_delayed_metric, parameter[self, name, value, timestamp, raw_value, precision, metric_type, instance]]: constant[ Metrics may not be immediately available when querying cloudwatch. Hence, allow the ability to publish a metric from some the past given its timestamp. ] variable[path] assign[=] call[name[self].get_metric_path, parameter[name[name], name[instance]]] variable[ttl] assign[=] binary_operation[call[name[float], parameter[call[name[self].config][constant[interval]]]] * call[name[float], parameter[call[name[self].config][constant[ttl_multiplier]]]]] variable[metric] assign[=] call[name[Metric], parameter[name[path], name[value]]] call[name[self].publish_metric, parameter[name[metric]]]
keyword[def] identifier[publish_delayed_metric] ( identifier[self] , identifier[name] , identifier[value] , identifier[timestamp] , identifier[raw_value] = keyword[None] , identifier[precision] = literal[int] , identifier[metric_type] = literal[string] , identifier[instance] = keyword[None] ): literal[string] identifier[path] = identifier[self] . identifier[get_metric_path] ( identifier[name] , identifier[instance] ) identifier[ttl] = identifier[float] ( identifier[self] . identifier[config] [ literal[string] ])* identifier[float] ( identifier[self] . identifier[config] [ literal[string] ]) identifier[metric] = identifier[Metric] ( identifier[path] , identifier[value] , identifier[raw_value] = identifier[raw_value] , identifier[timestamp] = identifier[timestamp] , identifier[precision] = identifier[precision] , identifier[host] = identifier[self] . identifier[get_hostname] (), identifier[metric_type] = identifier[metric_type] , identifier[ttl] = identifier[ttl] ) identifier[self] . identifier[publish_metric] ( identifier[metric] )
def publish_delayed_metric(self, name, value, timestamp, raw_value=None, precision=0, metric_type='GAUGE', instance=None): """ Metrics may not be immediately available when querying cloudwatch. Hence, allow the ability to publish a metric from some the past given its timestamp. """ # Get metric Path path = self.get_metric_path(name, instance) # Get metric TTL ttl = float(self.config['interval']) * float(self.config['ttl_multiplier']) # Create Metric metric = Metric(path, value, raw_value=raw_value, timestamp=timestamp, precision=precision, host=self.get_hostname(), metric_type=metric_type, ttl=ttl) # Publish Metric self.publish_metric(metric)
def worker():
    """Initialize the distributed environment and benchmark point-to-point sends.

    Rank 0 repeatedly sends a tensor of ``args.size_mb`` megabytes to rank 1,
    timing each transfer and logging the achieved bandwidth; at the end the
    min/median/mean transfer times (in ms) are logged.
    """
    import torch
    import torch.distributed as dist
    from torch.multiprocessing import Process
    import numpy as np

    print("Initializing distributed pytorch")
    os.environ['MASTER_ADDR'] = str(args.master_addr)
    os.environ['MASTER_PORT'] = str(args.master_port)
    # Use TCP backend. Gloo needs nightly, where it currently fails with
    # dist.init_process_group('gloo', rank=args.rank,
    # AttributeError: module 'torch.distributed' has no attribute 'init_process_group'
    dist.init_process_group('tcp', rank=args.rank, world_size=args.size)

    # size_mb * 250,000 elements ~= size_mb MB of payload (assuming the
    # default float32 dtype, 4 bytes/element — TODO confirm).
    tensor = torch.ones(args.size_mb * 250 * 1000) * (args.rank + 1)

    time_list = []
    # Only rank 0 keeps a real log file; other ranks discard their output.
    outfile = 'out' if args.rank == 0 else '/dev/null'
    log = util.FileLogger(outfile)
    for i in range(args.iters):
        start_time = time.perf_counter()
        if args.rank == 0:
            dist.send(tensor=tensor, dst=1)
        else:
            dist.recv(tensor=tensor, src=0)
        elapsed_time_ms = (time.perf_counter() - start_time) * 1000
        time_list.append(elapsed_time_ms)
        rate = args.size_mb / (elapsed_time_ms / 1000)  # MB/s for this iteration
        log('%03d/%d added %d MBs in %.1f ms: %.2f MB/second' % (i, args.iters, args.size_mb, elapsed_time_ms, rate))

    # Renamed from `min`/`median` so the builtin `min` is not shadowed.
    min_ms = np.min(time_list)
    median_ms = np.median(time_list)
    log(f"min: {min_ms:8.2f}, median: {median_ms:8.2f}, mean: {np.mean(time_list):8.2f}")
def function[worker, parameter[]]: constant[ Initialize the distributed environment. ] import module[torch] import module[torch.distributed] as alias[dist] from relative_module[torch.multiprocessing] import module[Process] import module[numpy] as alias[np] call[name[print], parameter[constant[Initializing distributed pytorch]]] call[name[os].environ][constant[MASTER_ADDR]] assign[=] call[name[str], parameter[name[args].master_addr]] call[name[os].environ][constant[MASTER_PORT]] assign[=] call[name[str], parameter[name[args].master_port]] call[name[dist].init_process_group, parameter[constant[tcp]]] variable[tensor] assign[=] binary_operation[call[name[torch].ones, parameter[binary_operation[binary_operation[name[args].size_mb * constant[250]] * constant[1000]]]] * binary_operation[name[args].rank + constant[1]]] variable[time_list] assign[=] list[[]] variable[outfile] assign[=] <ast.IfExp object at 0x7da2054a4ee0> variable[log] assign[=] call[name[util].FileLogger, parameter[name[outfile]]] for taget[name[i]] in starred[call[name[range], parameter[name[args].iters]]] begin[:] variable[start_time] assign[=] call[name[time].perf_counter, parameter[]] if compare[name[args].rank equal[==] constant[0]] begin[:] call[name[dist].send, parameter[]] variable[elapsed_time_ms] assign[=] binary_operation[binary_operation[call[name[time].perf_counter, parameter[]] - name[start_time]] * constant[1000]] call[name[time_list].append, parameter[name[elapsed_time_ms]]] variable[rate] assign[=] binary_operation[name[args].size_mb / binary_operation[name[elapsed_time_ms] / constant[1000]]] call[name[log], parameter[binary_operation[constant[%03d/%d added %d MBs in %.1f ms: %.2f MB/second] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2054a6f20>, <ast.Attribute object at 0x7da2054a7cd0>, <ast.Attribute object at 0x7da2054a4760>, <ast.Name object at 0x7da2054a55a0>, <ast.Name object at 0x7da2054a7790>]]]]] variable[min] assign[=] call[name[np].min, 
parameter[name[time_list]]] variable[median] assign[=] call[name[np].median, parameter[name[time_list]]] call[name[log], parameter[<ast.JoinedStr object at 0x7da2054a5ae0>]]
keyword[def] identifier[worker] (): literal[string] keyword[import] identifier[torch] keyword[import] identifier[torch] . identifier[distributed] keyword[as] identifier[dist] keyword[from] identifier[torch] . identifier[multiprocessing] keyword[import] identifier[Process] keyword[import] identifier[numpy] keyword[as] identifier[np] identifier[print] ( literal[string] ) identifier[os] . identifier[environ] [ literal[string] ]= identifier[str] ( identifier[args] . identifier[master_addr] ) identifier[os] . identifier[environ] [ literal[string] ]= identifier[str] ( identifier[args] . identifier[master_port] ) identifier[dist] . identifier[init_process_group] ( literal[string] , identifier[rank] = identifier[args] . identifier[rank] , identifier[world_size] = identifier[args] . identifier[size] ) identifier[tensor] = identifier[torch] . identifier[ones] ( identifier[args] . identifier[size_mb] * literal[int] * literal[int] )*( identifier[args] . identifier[rank] + literal[int] ) identifier[time_list] =[] identifier[outfile] = literal[string] keyword[if] identifier[args] . identifier[rank] == literal[int] keyword[else] literal[string] identifier[log] = identifier[util] . identifier[FileLogger] ( identifier[outfile] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[args] . identifier[iters] ): identifier[start_time] = identifier[time] . identifier[perf_counter] () keyword[if] identifier[args] . identifier[rank] == literal[int] : identifier[dist] . identifier[send] ( identifier[tensor] = identifier[tensor] , identifier[dst] = literal[int] ) keyword[else] : identifier[dist] . identifier[recv] ( identifier[tensor] = identifier[tensor] , identifier[src] = literal[int] ) identifier[elapsed_time_ms] =( identifier[time] . identifier[perf_counter] ()- identifier[start_time] )* literal[int] identifier[time_list] . identifier[append] ( identifier[elapsed_time_ms] ) identifier[rate] = identifier[args] . 
identifier[size_mb] /( identifier[elapsed_time_ms] / literal[int] ) identifier[log] ( literal[string] %( identifier[i] , identifier[args] . identifier[iters] , identifier[args] . identifier[size_mb] , identifier[elapsed_time_ms] , identifier[rate] )) identifier[min] = identifier[np] . identifier[min] ( identifier[time_list] ) identifier[median] = identifier[np] . identifier[median] ( identifier[time_list] ) identifier[log] ( literal[string] )
def worker(): """ Initialize the distributed environment. """ import torch import torch.distributed as dist from torch.multiprocessing import Process import numpy as np print('Initializing distributed pytorch') os.environ['MASTER_ADDR'] = str(args.master_addr) os.environ['MASTER_PORT'] = str(args.master_port) # Use TCP backend. Gloo needs nightly, where it currently fails with # dist.init_process_group('gloo', rank=args.rank, # AttributeError: module 'torch.distributed' has no attribute 'init_process_group' dist.init_process_group('tcp', rank=args.rank, world_size=args.size) tensor = torch.ones(args.size_mb * 250 * 1000) * (args.rank + 1) time_list = [] outfile = 'out' if args.rank == 0 else '/dev/null' log = util.FileLogger(outfile) for i in range(args.iters): # print('before: rank ', args.rank, ' has data ', tensor[0]) start_time = time.perf_counter() if args.rank == 0: dist.send(tensor=tensor, dst=1) # depends on [control=['if'], data=[]] else: dist.recv(tensor=tensor, src=0) elapsed_time_ms = (time.perf_counter() - start_time) * 1000 time_list.append(elapsed_time_ms) # print('after: rank ', args.rank, ' has data ', tensor[0]) rate = args.size_mb / (elapsed_time_ms / 1000) log('%03d/%d added %d MBs in %.1f ms: %.2f MB/second' % (i, args.iters, args.size_mb, elapsed_time_ms, rate)) # depends on [control=['for'], data=['i']] min = np.min(time_list) median = np.median(time_list) log(f'min: {min:8.2f}, median: {median:8.2f}, mean: {np.mean(time_list):8.2f}')
def analyse(self, traj, network, current_subrun, subrun_list, network_dict):
    """Calculates average Fano Factor of a network.

    :param traj: Trajectory container

        Expects:

        `results.monitors.spikes_e`: Data from SpikeMonitor for excitatory neurons

        Adds:

        `results.statistics.mean_fano_factor`: Average Fano Factor

    :param network: The BRIAN network

    :param current_subrun: BrianParameter

    :param subrun_list: Upcoming subruns; analysis only runs once this list
        is empty, i.e. the final subrun has finished.

    :param network_dict: Dictionary of items shared among componetns

    """
    # Bail out early while subruns are still pending.
    if len(subrun_list) != 0:
        return

    spike_data = traj.results.monitors.spikes_e
    stats_params = traj.parameters.analysis.statistics
    durations = traj.parameters.simulation.durations

    # Measurement window starts after the initial settling run.
    measure_start = durations.initial_run
    measure_end = measure_start + durations.measurement_run

    mean_ff = self._compute_mean_fano_factor(
        stats_params.neuron_ids, spike_data, stats_params.time_window,
        measure_start, measure_end)

    traj.f_add_result('statistics.mean_fano_factor', mean_ff,
                      comment='Average Fano '
                              'Factor over all '
                              'exc neurons')

    print('R_ee: %f, Mean FF: %f' % (traj.R_ee, mean_ff))
def function[analyse, parameter[self, traj, network, current_subrun, subrun_list, network_dict]]: constant[Calculates average Fano Factor of a network. :param traj: Trajectory container Expects: `results.monitors.spikes_e`: Data from SpikeMonitor for excitatory neurons Adds: `results.statistics.mean_fano_factor`: Average Fano Factor :param network: The BRIAN network :param current_subrun: BrianParameter :param subrun_list: Upcoming subruns, analysis is only performed if subruns is empty, aka the final subrun has finished. :param network_dict: Dictionary of items shared among componetns ] if compare[call[name[len], parameter[name[subrun_list]]] equal[==] constant[0]] begin[:] variable[spikes_e] assign[=] name[traj].results.monitors.spikes_e variable[time_window] assign[=] name[traj].parameters.analysis.statistics.time_window variable[start_time] assign[=] name[traj].parameters.simulation.durations.initial_run variable[end_time] assign[=] binary_operation[name[start_time] + name[traj].parameters.simulation.durations.measurement_run] variable[neuron_ids] assign[=] name[traj].parameters.analysis.statistics.neuron_ids variable[mean_ff] assign[=] call[name[self]._compute_mean_fano_factor, parameter[name[neuron_ids], name[spikes_e], name[time_window], name[start_time], name[end_time]]] call[name[traj].f_add_result, parameter[constant[statistics.mean_fano_factor], name[mean_ff]]] call[name[print], parameter[binary_operation[constant[R_ee: %f, Mean FF: %f] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c6ab970>, <ast.Name object at 0x7da20c6ab730>]]]]]
keyword[def] identifier[analyse] ( identifier[self] , identifier[traj] , identifier[network] , identifier[current_subrun] , identifier[subrun_list] , identifier[network_dict] ): literal[string] keyword[if] identifier[len] ( identifier[subrun_list] )== literal[int] : identifier[spikes_e] = identifier[traj] . identifier[results] . identifier[monitors] . identifier[spikes_e] identifier[time_window] = identifier[traj] . identifier[parameters] . identifier[analysis] . identifier[statistics] . identifier[time_window] identifier[start_time] = identifier[traj] . identifier[parameters] . identifier[simulation] . identifier[durations] . identifier[initial_run] identifier[end_time] = identifier[start_time] + identifier[traj] . identifier[parameters] . identifier[simulation] . identifier[durations] . identifier[measurement_run] identifier[neuron_ids] = identifier[traj] . identifier[parameters] . identifier[analysis] . identifier[statistics] . identifier[neuron_ids] identifier[mean_ff] = identifier[self] . identifier[_compute_mean_fano_factor] ( identifier[neuron_ids] , identifier[spikes_e] , identifier[time_window] , identifier[start_time] , identifier[end_time] ) identifier[traj] . identifier[f_add_result] ( literal[string] , identifier[mean_ff] , identifier[comment] = literal[string] literal[string] literal[string] ) identifier[print] ( literal[string] %( identifier[traj] . identifier[R_ee] , identifier[mean_ff] ))
def analyse(self, traj, network, current_subrun, subrun_list, network_dict): """Calculates average Fano Factor of a network. :param traj: Trajectory container Expects: `results.monitors.spikes_e`: Data from SpikeMonitor for excitatory neurons Adds: `results.statistics.mean_fano_factor`: Average Fano Factor :param network: The BRIAN network :param current_subrun: BrianParameter :param subrun_list: Upcoming subruns, analysis is only performed if subruns is empty, aka the final subrun has finished. :param network_dict: Dictionary of items shared among componetns """ #Check if we finished all subruns if len(subrun_list) == 0: spikes_e = traj.results.monitors.spikes_e time_window = traj.parameters.analysis.statistics.time_window start_time = traj.parameters.simulation.durations.initial_run end_time = start_time + traj.parameters.simulation.durations.measurement_run neuron_ids = traj.parameters.analysis.statistics.neuron_ids mean_ff = self._compute_mean_fano_factor(neuron_ids, spikes_e, time_window, start_time, end_time) traj.f_add_result('statistics.mean_fano_factor', mean_ff, comment='Average Fano Factor over all exc neurons') print('R_ee: %f, Mean FF: %f' % (traj.R_ee, mean_ff)) # depends on [control=['if'], data=[]]
def branch_lengths(self, terminal=True, internal=True):
    """Yield the lengths of this ``Tree``'s selected branches.

    Edges whose length is ``None`` are reported as 0-length.

    Args:
        ``terminal`` (``bool``): include terminal (leaf) branches when ``True``

        ``internal`` (``bool``): include internal branches when ``True``
    """
    # Validate the flags first (note: being a generator, these checks fire
    # when iteration starts, not at call time).
    if not isinstance(terminal, bool):
        raise TypeError("terminal must be a bool")
    if not isinstance(internal, bool):
        raise TypeError("internal must be a bool")
    for nd in self.traverse_preorder():
        leaf = nd.is_leaf()
        if (leaf and terminal) or (not leaf and internal):
            yield nd.edge_length if nd.edge_length is not None else 0
def function[branch_lengths, parameter[self, terminal, internal]]: constant[Generator over the lengths of the selected branches of this ``Tree``. Edges with length ``None`` will be output as 0-length Args: ``terminal`` (``bool``): ``True`` to include terminal branches, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal branches, otherwise ``False`` ] if <ast.UnaryOp object at 0x7da1b0b5c070> begin[:] <ast.Raise object at 0x7da1b0b5faf0> if <ast.UnaryOp object at 0x7da1b0b5f970> begin[:] <ast.Raise object at 0x7da1b0b5c8b0> for taget[name[node]] in starred[call[name[self].traverse_preorder, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da1b0b5efe0> begin[:] if compare[name[node].edge_length is constant[None]] begin[:] <ast.Yield object at 0x7da1b0b5d630>
keyword[def] identifier[branch_lengths] ( identifier[self] , identifier[terminal] = keyword[True] , identifier[internal] = keyword[True] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[terminal] , identifier[bool] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[internal] , identifier[bool] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[for] identifier[node] keyword[in] identifier[self] . identifier[traverse_preorder] (): keyword[if] ( identifier[internal] keyword[and] keyword[not] identifier[node] . identifier[is_leaf] ()) keyword[or] ( identifier[terminal] keyword[and] identifier[node] . identifier[is_leaf] ()): keyword[if] identifier[node] . identifier[edge_length] keyword[is] keyword[None] : keyword[yield] literal[int] keyword[else] : keyword[yield] identifier[node] . identifier[edge_length]
def branch_lengths(self, terminal=True, internal=True): """Generator over the lengths of the selected branches of this ``Tree``. Edges with length ``None`` will be output as 0-length Args: ``terminal`` (``bool``): ``True`` to include terminal branches, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal branches, otherwise ``False`` """ if not isinstance(terminal, bool): raise TypeError('terminal must be a bool') # depends on [control=['if'], data=[]] if not isinstance(internal, bool): raise TypeError('internal must be a bool') # depends on [control=['if'], data=[]] for node in self.traverse_preorder(): if internal and (not node.is_leaf()) or (terminal and node.is_leaf()): if node.edge_length is None: yield 0 # depends on [control=['if'], data=[]] else: yield node.edge_length # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
def hide_url_password(url):
    """Return *url* with any password component replaced by ``*****``.

    Useful for scrubbing credentials out of URLs before logging them.
    Malformed URLs are returned unchanged.
    """
    try:
        password = parse.urlsplit(url).password
    except Exception:  # pylint: disable=W0703
        # Best-effort: an unparseable value is passed through untouched.
        return url
    if not password:
        return url
    return url.replace(':%s@' % password, ':*****@')
def function[hide_url_password, parameter[url]]: constant[Replace a password part of a URL with *****. This can be used to scrub URLs before logging them. ] <ast.Try object at 0x7da1b09e80d0> return[name[url]]
keyword[def] identifier[hide_url_password] ( identifier[url] ): literal[string] keyword[try] : identifier[parsed] = identifier[parse] . identifier[urlsplit] ( identifier[url] ) keyword[if] identifier[parsed] . identifier[password] : keyword[return] identifier[url] . identifier[replace] ( literal[string] % identifier[parsed] . identifier[password] , literal[string] ) keyword[except] identifier[Exception] : keyword[pass] keyword[return] identifier[url]
def hide_url_password(url): """Replace a password part of a URL with *****. This can be used to scrub URLs before logging them. """ try: parsed = parse.urlsplit(url) if parsed.password: return url.replace(':%s@' % parsed.password, ':*****@') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except Exception: # pylint: disable=W0703 pass # depends on [control=['except'], data=[]] return url
def list_subnets(self, identifier=None, datacenter=None, version=0,
                 subnet_type=None, network_space=None, **kwargs):
    """Display a list of all subnets on the account.

    Gives a quick overview of every subnet, including its data center
    residence and the number of attached devices.

    :param string identifier: restrict results to this network identifier.
    :param string datacenter: restrict results to this data center.
    :param int version: restrict results to this IP version (4 or 6).
    :param string subnet_type: restrict results to subnets of this type.
    :param string network_space: restrict results to this address space label.
    :param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
    """
    if 'mask' not in kwargs:
        kwargs['mask'] = DEFAULT_SUBNET_MASK

    query = utils.NestedDict(kwargs.get('filter') or {})

    if identifier:
        query['subnets']['networkIdentifier'] = utils.query_filter(identifier)
    if datacenter:
        query['subnets']['datacenter']['name'] = utils.query_filter(datacenter)
    if version:
        query['subnets']['version'] = utils.query_filter(version)

    if subnet_type:
        query['subnets']['subnetType'] = utils.query_filter(subnet_type)
    else:
        # No explicit type requested: keep global IPs out of the listing.
        query['subnets']['subnetType'] = {'operation': '!= GLOBAL_IP'}

    if network_space:
        query['subnets']['networkVlan']['networkSpace'] = utils.query_filter(network_space)

    kwargs['filter'] = query.to_dict()
    kwargs['iter'] = True
    return self.client.call('Account', 'getSubnets', **kwargs)
def function[list_subnets, parameter[self, identifier, datacenter, version, subnet_type, network_space]]: constant[Display a list of all subnets on the account. This provides a quick overview of all subnets including information about data center residence and the number of devices attached. :param string identifier: If specified, the list will only contain the subnet matching this network identifier. :param string datacenter: If specified, the list will only contain subnets in the specified data center. :param int version: Only returns subnets of this version (4 or 6). :param string subnet_type: If specified, it will only returns subnets of this type. :param string network_space: If specified, it will only returns subnets with the given address space label. :param dict \*\*kwargs: response-level options (mask, limit, etc.) ] if compare[constant[mask] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:] call[name[kwargs]][constant[mask]] assign[=] name[DEFAULT_SUBNET_MASK] variable[_filter] assign[=] call[name[utils].NestedDict, parameter[<ast.BoolOp object at 0x7da18dc07eb0>]] if name[identifier] begin[:] call[call[name[_filter]][constant[subnets]]][constant[networkIdentifier]] assign[=] call[name[utils].query_filter, parameter[name[identifier]]] if name[datacenter] begin[:] call[call[call[name[_filter]][constant[subnets]]][constant[datacenter]]][constant[name]] assign[=] call[name[utils].query_filter, parameter[name[datacenter]]] if name[version] begin[:] call[call[name[_filter]][constant[subnets]]][constant[version]] assign[=] call[name[utils].query_filter, parameter[name[version]]] if name[subnet_type] begin[:] call[call[name[_filter]][constant[subnets]]][constant[subnetType]] assign[=] call[name[utils].query_filter, parameter[name[subnet_type]]] if name[network_space] begin[:] call[call[call[name[_filter]][constant[subnets]]][constant[networkVlan]]][constant[networkSpace]] assign[=] call[name[utils].query_filter, parameter[name[network_space]]] 
call[name[kwargs]][constant[filter]] assign[=] call[name[_filter].to_dict, parameter[]] call[name[kwargs]][constant[iter]] assign[=] constant[True] return[call[name[self].client.call, parameter[constant[Account], constant[getSubnets]]]]
keyword[def] identifier[list_subnets] ( identifier[self] , identifier[identifier] = keyword[None] , identifier[datacenter] = keyword[None] , identifier[version] = literal[int] , identifier[subnet_type] = keyword[None] , identifier[network_space] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] : identifier[kwargs] [ literal[string] ]= identifier[DEFAULT_SUBNET_MASK] identifier[_filter] = identifier[utils] . identifier[NestedDict] ( identifier[kwargs] . identifier[get] ( literal[string] ) keyword[or] {}) keyword[if] identifier[identifier] : identifier[_filter] [ literal[string] ][ literal[string] ]=( identifier[utils] . identifier[query_filter] ( identifier[identifier] )) keyword[if] identifier[datacenter] : identifier[_filter] [ literal[string] ][ literal[string] ][ literal[string] ]=( identifier[utils] . identifier[query_filter] ( identifier[datacenter] )) keyword[if] identifier[version] : identifier[_filter] [ literal[string] ][ literal[string] ]= identifier[utils] . identifier[query_filter] ( identifier[version] ) keyword[if] identifier[subnet_type] : identifier[_filter] [ literal[string] ][ literal[string] ]= identifier[utils] . identifier[query_filter] ( identifier[subnet_type] ) keyword[else] : identifier[_filter] [ literal[string] ][ literal[string] ]={ literal[string] : literal[string] } keyword[if] identifier[network_space] : identifier[_filter] [ literal[string] ][ literal[string] ][ literal[string] ]=( identifier[utils] . identifier[query_filter] ( identifier[network_space] )) identifier[kwargs] [ literal[string] ]= identifier[_filter] . identifier[to_dict] () identifier[kwargs] [ literal[string] ]= keyword[True] keyword[return] identifier[self] . identifier[client] . identifier[call] ( literal[string] , literal[string] ,** identifier[kwargs] )
def list_subnets(self, identifier=None, datacenter=None, version=0, subnet_type=None, network_space=None, **kwargs): """Display a list of all subnets on the account. This provides a quick overview of all subnets including information about data center residence and the number of devices attached. :param string identifier: If specified, the list will only contain the subnet matching this network identifier. :param string datacenter: If specified, the list will only contain subnets in the specified data center. :param int version: Only returns subnets of this version (4 or 6). :param string subnet_type: If specified, it will only returns subnets of this type. :param string network_space: If specified, it will only returns subnets with the given address space label. :param dict \\*\\*kwargs: response-level options (mask, limit, etc.) """ if 'mask' not in kwargs: kwargs['mask'] = DEFAULT_SUBNET_MASK # depends on [control=['if'], data=['kwargs']] _filter = utils.NestedDict(kwargs.get('filter') or {}) if identifier: _filter['subnets']['networkIdentifier'] = utils.query_filter(identifier) # depends on [control=['if'], data=[]] if datacenter: _filter['subnets']['datacenter']['name'] = utils.query_filter(datacenter) # depends on [control=['if'], data=[]] if version: _filter['subnets']['version'] = utils.query_filter(version) # depends on [control=['if'], data=[]] if subnet_type: _filter['subnets']['subnetType'] = utils.query_filter(subnet_type) # depends on [control=['if'], data=[]] else: # This filters out global IPs from the subnet listing. _filter['subnets']['subnetType'] = {'operation': '!= GLOBAL_IP'} if network_space: _filter['subnets']['networkVlan']['networkSpace'] = utils.query_filter(network_space) # depends on [control=['if'], data=[]] kwargs['filter'] = _filter.to_dict() kwargs['iter'] = True return self.client.call('Account', 'getSubnets', **kwargs)
def data_item(data):
    """
    When trying to return a meaningful error about an unexpected data item
    we cannot just `repr(data)` as that could show a gigantic data struture.
    This utility should try to get the key of the first item or the single
    item in the data structure.
    """
    if isinstance(data, ndict):
        # OK, we have something that looks like {0: ('a', 'b')}
        # or something that is a regular dictionary
        # so try to return 'a' regardless of the length
        for item in data:
            return repr(data[item][0])
    elif isinstance(data, dict):
        for item in data:
            return repr(data[item])
    elif isinstance(data, list):
        # Guard the empty list: data[0] would raise IndexError and turn
        # this error-reporting helper into a new crash. Empty containers
        # fall through to the generic repr below.
        if data:
            return repr(data[0])
    return repr(data)
def function[data_item, parameter[data]]: constant[ When trying to return a meaningful error about an unexpected data item we cannot just `repr(data)` as that could show a gigantic data struture. This utility should try to get the key of the first item or the single item in the data structure. ] if call[name[isinstance], parameter[name[data], name[ndict]]] begin[:] for taget[name[item]] in starred[name[data]] begin[:] return[call[name[repr], parameter[call[call[name[data]][name[item]]][constant[0]]]]] return[call[name[repr], parameter[name[data]]]]
keyword[def] identifier[data_item] ( identifier[data] ): literal[string] keyword[if] identifier[isinstance] ( identifier[data] , identifier[ndict] ): keyword[for] identifier[item] keyword[in] identifier[data] : keyword[return] identifier[repr] ( identifier[data] [ identifier[item] ][ literal[int] ]) keyword[elif] identifier[isinstance] ( identifier[data] , identifier[dict] ): keyword[for] identifier[item] keyword[in] identifier[data] : keyword[return] identifier[repr] ( identifier[data] [ identifier[item] ]) keyword[elif] identifier[isinstance] ( identifier[data] , identifier[list] ): keyword[return] identifier[repr] ( identifier[data] [ literal[int] ]) keyword[return] identifier[repr] ( identifier[data] )
def data_item(data): """ When trying to return a meaningful error about an unexpected data item we cannot just `repr(data)` as that could show a gigantic data struture. This utility should try to get the key of the first item or the single item in the data structure. """ if isinstance(data, ndict): # OK, we have something that looks like {0: ('a', 'b')} # or something that is a regular dictionary # so try to return 'a' regardless of the length for item in data: return repr(data[item][0]) # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]] elif isinstance(data, dict): for item in data: return repr(data[item]) # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]] elif isinstance(data, list): return repr(data[0]) # depends on [control=['if'], data=[]] return repr(data)
def haslayer(self, cls):
    """Specific: NTPHeader().haslayer(NTP) should return True."""
    # Decide the NTP-specific match first; the branch order mirrors the
    # original dispatch (string name check before subtype check).
    if cls == "NTP":
        matched = isinstance(self, NTP)
    elif issubtype(cls, NTP):
        matched = isinstance(self, cls)
    else:
        matched = False
    if matched:
        return True
    # Otherwise defer to the normal layer lookup.
    return super(NTP, self).haslayer(cls)
def function[haslayer, parameter[self, cls]]: constant[Specific: NTPHeader().haslayer(NTP) should return True.] if compare[name[cls] equal[==] constant[NTP]] begin[:] if call[name[isinstance], parameter[name[self], name[NTP]]] begin[:] return[constant[True]] return[call[call[name[super], parameter[name[NTP], name[self]]].haslayer, parameter[name[cls]]]]
keyword[def] identifier[haslayer] ( identifier[self] , identifier[cls] ): literal[string] keyword[if] identifier[cls] == literal[string] : keyword[if] identifier[isinstance] ( identifier[self] , identifier[NTP] ): keyword[return] keyword[True] keyword[elif] identifier[issubtype] ( identifier[cls] , identifier[NTP] ): keyword[if] identifier[isinstance] ( identifier[self] , identifier[cls] ): keyword[return] keyword[True] keyword[return] identifier[super] ( identifier[NTP] , identifier[self] ). identifier[haslayer] ( identifier[cls] )
def haslayer(self, cls): """Specific: NTPHeader().haslayer(NTP) should return True.""" if cls == 'NTP': if isinstance(self, NTP): return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif issubtype(cls, NTP): if isinstance(self, cls): return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return super(NTP, self).haslayer(cls)
def _maybe_handle_help(self):
    """Handle requests for `help` information."""
    opts = self._options
    if not opts.help_request:
        return
    # Print the requested help text, then exit with its result code.
    self._exiter(HelpPrinter(opts).print_help())
def function[_maybe_handle_help, parameter[self]]: constant[Handle requests for `help` information.] if name[self]._options.help_request begin[:] variable[help_printer] assign[=] call[name[HelpPrinter], parameter[name[self]._options]] variable[result] assign[=] call[name[help_printer].print_help, parameter[]] call[name[self]._exiter, parameter[name[result]]]
keyword[def] identifier[_maybe_handle_help] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_options] . identifier[help_request] : identifier[help_printer] = identifier[HelpPrinter] ( identifier[self] . identifier[_options] ) identifier[result] = identifier[help_printer] . identifier[print_help] () identifier[self] . identifier[_exiter] ( identifier[result] )
def _maybe_handle_help(self): """Handle requests for `help` information.""" if self._options.help_request: help_printer = HelpPrinter(self._options) result = help_printer.print_help() self._exiter(result) # depends on [control=['if'], data=[]]
def getcols(sheetMatch=None,colMatch="Decay"):
    """find every column in every sheet and put it in a new sheet or book."""
    book = BOOK()
    # Select sheets: all of them, or only those whose name contains sheetMatch.
    if sheetMatch is None:
        matchingSheets = book.sheetNames
        print('all %d sheets selected '%(len(matchingSheets)))
    else:
        matchingSheets = [name for name in book.sheetNames if sheetMatch in name]
        print('%d of %d sheets selected matching "%s"'%(len(matchingSheets),len(book.sheetNames),sheetMatch))
    # Collect (sheet name, column index) for every selected sheet that
    # has a column whose description contains colMatch.
    matchingSheetsWithCol = []
    for sheetName in matchingSheets:
        sheetIndex = book.sheetNames.index(sheetName)
        hit = next((col for col, desc in enumerate(book.sheets[sheetIndex].colDesc)
                    if colMatch in desc), None)
        if hit is None:
            print("  no match in [%s]%s"%(book.bookName,sheetName))
        else:
            matchingSheetsWithCol.append((sheetName, hit))
    print("%d of %d of those have your column"%(len(matchingSheetsWithCol),len(matchingSheets)))
    for item in matchingSheetsWithCol:
        print(item,item[0],item[1])
def function[getcols, parameter[sheetMatch, colMatch]]: constant[find every column in every sheet and put it in a new sheet or book.] variable[book] assign[=] call[name[BOOK], parameter[]] if compare[name[sheetMatch] is constant[None]] begin[:] variable[matchingSheets] assign[=] name[book].sheetNames call[name[print], parameter[binary_operation[constant[all %d sheets selected ] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[matchingSheets]]]]]] variable[matchingSheetsWithCol] assign[=] list[[]] for taget[name[sheetName]] in starred[name[matchingSheets]] begin[:] variable[i] assign[=] call[name[book].sheetNames.index, parameter[name[sheetName]]] for taget[tuple[[<ast.Name object at 0x7da1b0d1a3e0>, <ast.Name object at 0x7da1b0d18f40>]]] in starred[call[name[enumerate], parameter[call[name[book].sheets][name[i]].colDesc]]] begin[:] if compare[name[colMatch] in name[colName]] begin[:] call[name[matchingSheetsWithCol].append, parameter[tuple[[<ast.Name object at 0x7da1b0d1b0d0>, <ast.Name object at 0x7da1b0d1baf0>]]]] break call[name[print], parameter[binary_operation[constant[%d of %d of those have your column] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b0d5bd30>, <ast.Call object at 0x7da1b0d5b730>]]]]] for taget[name[item]] in starred[name[matchingSheetsWithCol]] begin[:] call[name[print], parameter[name[item], call[name[item]][constant[0]], call[name[item]][constant[1]]]]
keyword[def] identifier[getcols] ( identifier[sheetMatch] = keyword[None] , identifier[colMatch] = literal[string] ): literal[string] identifier[book] = identifier[BOOK] () keyword[if] identifier[sheetMatch] keyword[is] keyword[None] : identifier[matchingSheets] = identifier[book] . identifier[sheetNames] identifier[print] ( literal[string] %( identifier[len] ( identifier[matchingSheets] ))) keyword[else] : identifier[matchingSheets] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[book] . identifier[sheetNames] keyword[if] identifier[sheetMatch] keyword[in] identifier[x] ] identifier[print] ( literal[string] %( identifier[len] ( identifier[matchingSheets] ), identifier[len] ( identifier[book] . identifier[sheetNames] ), identifier[sheetMatch] )) identifier[matchingSheetsWithCol] =[] keyword[for] identifier[sheetName] keyword[in] identifier[matchingSheets] : identifier[i] = identifier[book] . identifier[sheetNames] . identifier[index] ( identifier[sheetName] ) keyword[for] identifier[j] , identifier[colName] keyword[in] identifier[enumerate] ( identifier[book] . identifier[sheets] [ identifier[i] ]. identifier[colDesc] ): keyword[if] identifier[colMatch] keyword[in] identifier[colName] : identifier[matchingSheetsWithCol] . identifier[append] (( identifier[sheetName] , identifier[j] )) keyword[break] keyword[else] : identifier[print] ( literal[string] %( identifier[book] . identifier[bookName] , identifier[sheetName] )) identifier[print] ( literal[string] %( identifier[len] ( identifier[matchingSheetsWithCol] ), identifier[len] ( identifier[matchingSheets] ))) keyword[for] identifier[item] keyword[in] identifier[matchingSheetsWithCol] : identifier[print] ( identifier[item] , identifier[item] [ literal[int] ], identifier[item] [ literal[int] ])
def getcols(sheetMatch=None, colMatch='Decay'): """find every column in every sheet and put it in a new sheet or book.""" book = BOOK() if sheetMatch is None: matchingSheets = book.sheetNames print('all %d sheets selected ' % len(matchingSheets)) # depends on [control=['if'], data=[]] else: matchingSheets = [x for x in book.sheetNames if sheetMatch in x] print('%d of %d sheets selected matching "%s"' % (len(matchingSheets), len(book.sheetNames), sheetMatch)) matchingSheetsWithCol = [] for sheetName in matchingSheets: i = book.sheetNames.index(sheetName) # index of that sheet for (j, colName) in enumerate(book.sheets[i].colDesc): if colMatch in colName: matchingSheetsWithCol.append((sheetName, j)) break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] else: print(' no match in [%s]%s' % (book.bookName, sheetName)) # depends on [control=['for'], data=['sheetName']] print('%d of %d of those have your column' % (len(matchingSheetsWithCol), len(matchingSheets))) for item in matchingSheetsWithCol: print(item, item[0], item[1]) # depends on [control=['for'], data=['item']]
def set_pkg_license_comment(self, doc, text):
    """Sets the package's license comment.
    Raises OrderError if no package previously defined.
    Raises CardinalityError if already set.
    Raises SPDXValueError if text is not free form text.
    """
    self.assert_package_exists()
    # The comment may only be supplied once per package.
    if self.package_license_comment_set:
        raise CardinalityError('Package::LicenseComment')
    self.package_license_comment_set = True
    if not validations.validate_pkg_lics_comment(text):
        raise SPDXValueError('Package::LicenseComment')
    doc.package.license_comment = str_from_text(text)
    return True
def function[set_pkg_license_comment, parameter[self, doc, text]]: constant[Sets the package's license comment. Raises OrderError if no package previously defined. Raises CardinalityError if already set. Raises SPDXValueError if text is not free form text. ] call[name[self].assert_package_exists, parameter[]] if <ast.UnaryOp object at 0x7da18dc9b160> begin[:] name[self].package_license_comment_set assign[=] constant[True] if call[name[validations].validate_pkg_lics_comment, parameter[name[text]]] begin[:] name[doc].package.license_comment assign[=] call[name[str_from_text], parameter[name[text]]] return[constant[True]]
keyword[def] identifier[set_pkg_license_comment] ( identifier[self] , identifier[doc] , identifier[text] ): literal[string] identifier[self] . identifier[assert_package_exists] () keyword[if] keyword[not] identifier[self] . identifier[package_license_comment_set] : identifier[self] . identifier[package_license_comment_set] = keyword[True] keyword[if] identifier[validations] . identifier[validate_pkg_lics_comment] ( identifier[text] ): identifier[doc] . identifier[package] . identifier[license_comment] = identifier[str_from_text] ( identifier[text] ) keyword[return] keyword[True] keyword[else] : keyword[raise] identifier[SPDXValueError] ( literal[string] ) keyword[else] : keyword[raise] identifier[CardinalityError] ( literal[string] )
def set_pkg_license_comment(self, doc, text): """Sets the package's license comment. Raises OrderError if no package previously defined. Raises CardinalityError if already set. Raises SPDXValueError if text is not free form text. """ self.assert_package_exists() if not self.package_license_comment_set: self.package_license_comment_set = True if validations.validate_pkg_lics_comment(text): doc.package.license_comment = str_from_text(text) return True # depends on [control=['if'], data=[]] else: raise SPDXValueError('Package::LicenseComment') # depends on [control=['if'], data=[]] else: raise CardinalityError('Package::LicenseComment')
def mass(self,R,z=None,t=0.,forceint=False):
    """
    NAME:

       mass

    PURPOSE:

       evaluate the mass enclosed

    INPUT:

       R - Cylindrical Galactocentric radius (can be Quantity)

       z= (None) vertical height (can be Quantity)

       t - time (optional; can be Quantity)

    KEYWORDS:

       forceint= if True, calculate the mass through integration of the density, even if an explicit expression for the mass exists

    OUTPUT:

       1) for spherical potentials: M(<R) [or if z is None], when the mass is implemented explicitly, the mass enclosed within r = sqrt(R^2+z^2) is returned when not z is None; forceint will integrate between -z and z, so the two are inconsistent (If you care to have this changed, raise an issue on github)

       2) for axisymmetric potentials: M(<R,<fabs(Z))

    HISTORY:

       2014-01-29 - Written - Bovy (IAS)

    """
    if self.isNonAxi:
        raise NotImplementedError('mass for non-axisymmetric potentials is not currently supported')
    if not forceint:
        # Prefer the subclass's explicit mass expression when available;
        # fall back to numerical integration if _mass is not implemented.
        # (Previously this was forced via `raise AttributeError` inside the
        # try block -- an exception-as-control-flow hack.)
        try:
            return self._amp*self._mass(R,z=z,t=t)
        except AttributeError:
            pass
    # Numerical integration of the density.
    if z is None:
        # Spherical enclosed mass: M(<R) = 4 pi int_0^R r^2 rho(r) dr
        return 4.*nu.pi\
            *integrate.quad(lambda x: x**2.\
                                *self.dens(x,0.,
                                           use_physical=False),
                            0.,R)[0]
    # Axisymmetric: M(<R,<|z|) = 4 pi int_0^R int_0^z R' rho(R',z') dz' dR'
    return 4.*nu.pi\
        *integrate.dblquad(lambda y,x: x\
                               *self.dens(x,y,use_physical=False),
                           0.,R,lambda x: 0., lambda x: z)[0]
def function[mass, parameter[self, R, z, t, forceint]]: constant[ NAME: mass PURPOSE: evaluate the mass enclosed INPUT: R - Cylindrical Galactocentric radius (can be Quantity) z= (None) vertical height (can be Quantity) t - time (optional; can be Quantity) KEYWORDS: forceint= if True, calculate the mass through integration of the density, even if an explicit expression for the mass exists OUTPUT: 1) for spherical potentials: M(<R) [or if z is None], when the mass is implemented explicitly, the mass enclosed within r = sqrt(R^2+z^2) is returned when not z is None; forceint will integrate between -z and z, so the two are inconsistent (If you care to have this changed, raise an issue on github) 2) for axisymmetric potentials: M(<R,<fabs(Z)) HISTORY: 2014-01-29 - Written - Bovy (IAS) ] if name[self].isNonAxi begin[:] <ast.Raise object at 0x7da1b0c96cb0> <ast.Try object at 0x7da1b0c97940>
keyword[def] identifier[mass] ( identifier[self] , identifier[R] , identifier[z] = keyword[None] , identifier[t] = literal[int] , identifier[forceint] = keyword[False] ): literal[string] keyword[if] identifier[self] . identifier[isNonAxi] : keyword[raise] identifier[NotImplementedError] ( literal[string] ) keyword[try] : keyword[if] identifier[forceint] : keyword[raise] identifier[AttributeError] keyword[return] identifier[self] . identifier[_amp] * identifier[self] . identifier[_mass] ( identifier[R] , identifier[z] = identifier[z] , identifier[t] = identifier[t] ) keyword[except] identifier[AttributeError] : keyword[if] identifier[z] keyword[is] keyword[None] : keyword[return] literal[int] * identifier[nu] . identifier[pi] * identifier[integrate] . identifier[quad] ( keyword[lambda] identifier[x] : identifier[x] ** literal[int] * identifier[self] . identifier[dens] ( identifier[x] , literal[int] , identifier[use_physical] = keyword[False] ), literal[int] , identifier[R] )[ literal[int] ] keyword[else] : keyword[return] literal[int] * identifier[nu] . identifier[pi] * identifier[integrate] . identifier[dblquad] ( keyword[lambda] identifier[y] , identifier[x] : identifier[x] * identifier[self] . identifier[dens] ( identifier[x] , identifier[y] , identifier[use_physical] = keyword[False] ), literal[int] , identifier[R] , keyword[lambda] identifier[x] : literal[int] , keyword[lambda] identifier[x] : identifier[z] )[ literal[int] ]
def mass(self, R, z=None, t=0.0, forceint=False): """ NAME: mass PURPOSE: evaluate the mass enclosed INPUT: R - Cylindrical Galactocentric radius (can be Quantity) z= (None) vertical height (can be Quantity) t - time (optional; can be Quantity) KEYWORDS: forceint= if True, calculate the mass through integration of the density, even if an explicit expression for the mass exists OUTPUT: 1) for spherical potentials: M(<R) [or if z is None], when the mass is implemented explicitly, the mass enclosed within r = sqrt(R^2+z^2) is returned when not z is None; forceint will integrate between -z and z, so the two are inconsistent (If you care to have this changed, raise an issue on github) 2) for axisymmetric potentials: M(<R,<fabs(Z)) HISTORY: 2014-01-29 - Written - Bovy (IAS) """ if self.isNonAxi: raise NotImplementedError('mass for non-axisymmetric potentials is not currently supported') # depends on [control=['if'], data=[]] try: if forceint: raise AttributeError #Hack! # depends on [control=['if'], data=[]] return self._amp * self._mass(R, z=z, t=t) # depends on [control=['try'], data=[]] except AttributeError: #Use numerical integration to get the mass if z is None: return 4.0 * nu.pi * integrate.quad(lambda x: x ** 2.0 * self.dens(x, 0.0, use_physical=False), 0.0, R)[0] # depends on [control=['if'], data=[]] else: return 4.0 * nu.pi * integrate.dblquad(lambda y, x: x * self.dens(x, y, use_physical=False), 0.0, R, lambda x: 0.0, lambda x: z)[0] # depends on [control=['except'], data=[]]
def transform(geom, to_sref):
    """Returns a transformed Geometry.

    Arguments:
    geom -- any coercible Geometry value or Envelope
    to_sref -- SpatialReference or EPSG ID as int
    """
    # An Envelope (or anything exposing .polygon) is assumed to already be
    # expressed in the target spatial reference.
    try:
        geom = getattr(geom, 'polygon', Envelope(geom).polygon)
    except (TypeError, ValueError):
        pass
    else:
        geom.AssignSpatialReference(to_sref)
    try:
        source_sref = geom.GetSpatialReference()
    except AttributeError:
        # Not a Geometry yet -- coerce it and retry.
        return transform(Geometry(geom), to_sref)
    if source_sref is None:
        raise Exception('Cannot transform from unknown spatial reference')
    # Nothing to do when the geometry is already in the target reference.
    if source_sref.IsSame(to_sref):
        return geom
    # Reproject a copy so the caller's geometry is left untouched.
    reprojected = geom.Clone()
    reprojected.TransformTo(to_sref)
    return reprojected
def function[transform, parameter[geom, to_sref]]: constant[Returns a transformed Geometry. Arguments: geom -- any coercible Geometry value or Envelope to_sref -- SpatialReference or EPSG ID as int ] <ast.Try object at 0x7da18f00d420> <ast.Try object at 0x7da18f00cbe0> if compare[name[geom_sref] is constant[None]] begin[:] <ast.Raise object at 0x7da18f00d8d0> if <ast.UnaryOp object at 0x7da18f00caf0> begin[:] variable[geom] assign[=] call[name[geom].Clone, parameter[]] call[name[geom].TransformTo, parameter[name[to_sref]]] return[name[geom]]
keyword[def] identifier[transform] ( identifier[geom] , identifier[to_sref] ): literal[string] keyword[try] : identifier[geom] = identifier[getattr] ( identifier[geom] , literal[string] , identifier[Envelope] ( identifier[geom] ). identifier[polygon] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[pass] keyword[else] : identifier[geom] . identifier[AssignSpatialReference] ( identifier[to_sref] ) keyword[try] : identifier[geom_sref] = identifier[geom] . identifier[GetSpatialReference] () keyword[except] identifier[AttributeError] : keyword[return] identifier[transform] ( identifier[Geometry] ( identifier[geom] ), identifier[to_sref] ) keyword[if] identifier[geom_sref] keyword[is] keyword[None] : keyword[raise] identifier[Exception] ( literal[string] ) keyword[if] keyword[not] identifier[geom_sref] . identifier[IsSame] ( identifier[to_sref] ): identifier[geom] = identifier[geom] . identifier[Clone] () identifier[geom] . identifier[TransformTo] ( identifier[to_sref] ) keyword[return] identifier[geom]
def transform(geom, to_sref): """Returns a transformed Geometry. Arguments: geom -- any coercible Geometry value or Envelope to_sref -- SpatialReference or EPSG ID as int """ # If we have an envelope, assume it's in the target sref. try: geom = getattr(geom, 'polygon', Envelope(geom).polygon) # depends on [control=['try'], data=[]] except (TypeError, ValueError): pass # depends on [control=['except'], data=[]] else: geom.AssignSpatialReference(to_sref) try: geom_sref = geom.GetSpatialReference() # depends on [control=['try'], data=[]] except AttributeError: return transform(Geometry(geom), to_sref) # depends on [control=['except'], data=[]] if geom_sref is None: raise Exception('Cannot transform from unknown spatial reference') # depends on [control=['if'], data=[]] # Reproject geom if necessary if not geom_sref.IsSame(to_sref): geom = geom.Clone() geom.TransformTo(to_sref) # depends on [control=['if'], data=[]] return geom
def upgrade(yes, dry_run, patches):
    """
    Upgrade the datamodel by applying recusively the patches available
    """
    patcher = _get_mongopatcher()
    if dry_run:
        patcher.discover_and_apply(directory=patches, dry_run=dry_run)
        return
    # Destructive path: require either --yes or interactive confirmation.
    confirmed = yes or prompt_bool("Are you sure you want to alter %s"
                                   % green(patcher.db))
    if not confirmed:
        raise SystemExit('You changed your mind, exiting...')
    patcher.discover_and_apply(patches)
def function[upgrade, parameter[yes, dry_run, patches]]: constant[ Upgrade the datamodel by applying recusively the patches available ] variable[patcher] assign[=] call[name[_get_mongopatcher], parameter[]] if name[dry_run] begin[:] call[name[patcher].discover_and_apply, parameter[]]
keyword[def] identifier[upgrade] ( identifier[yes] , identifier[dry_run] , identifier[patches] ): literal[string] identifier[patcher] = identifier[_get_mongopatcher] () keyword[if] identifier[dry_run] : identifier[patcher] . identifier[discover_and_apply] ( identifier[directory] = identifier[patches] , identifier[dry_run] = identifier[dry_run] ) keyword[else] : keyword[if] ( identifier[yes] keyword[or] identifier[prompt_bool] ( literal[string] % identifier[green] ( identifier[patcher] . identifier[db] ))): identifier[patcher] . identifier[discover_and_apply] ( identifier[patches] ) keyword[else] : keyword[raise] identifier[SystemExit] ( literal[string] )
def upgrade(yes, dry_run, patches): """ Upgrade the datamodel by applying recusively the patches available """ patcher = _get_mongopatcher() if dry_run: patcher.discover_and_apply(directory=patches, dry_run=dry_run) # depends on [control=['if'], data=[]] elif yes or prompt_bool('Are you sure you want to alter %s' % green(patcher.db)): patcher.discover_and_apply(patches) # depends on [control=['if'], data=[]] else: raise SystemExit('You changed your mind, exiting...')
def order_value(id_or_ins, cash_amount, price=None, style=None):
    """
    Place an order sized by the amount of cash to spend or receive,
    rather than by a share count. A positive ``cash_amount`` buys, a
    negative one sells. The share quantity is always rounded down to a
    whole number of lots (1 lot = 100 shares in the Chinese A-share
    market). If available cash is insufficient, no order is created.

    Note:
    For a buy order, ``cash_amount`` is the total money you are willing
    to spend (transaction costs included); the number of shares finally
    bought depends on the order price as well as the strategy's tax/fee
    configuration.
    For a sell order, ``cash_amount`` is the total value of shares you
    wish to sell. If it exceeds the value of your current position, the
    entire position is sold.

    :param id_or_ins: instrument to trade
    :type id_or_ins: :class:`~Instrument` object | `str`
    :param float cash_amount: cash to spend (positive, buy) or to
        realise (negative, sell)
    :param float price: order price; default None means a
        :class:`~MarketOrder`. This parameter is mainly a shorthand for
        the `style` parameter.
    :param style: order type, market order by default. Supported types
        are :class:`~LimitOrder` and :class:`~MarketOrder`.
    :type style: `OrderStyle` object
    :return: :class:`~Order` object | None

    :example:

    ..  code-block:: python

        # Spend at most ¥10000 buying Ping An Bank at market price; the
        # exact quantity depends on the strategy's fee configuration.
        order_value('000001.XSHE', 10000)
        # Sell ¥10000 worth of the currently held Ping An Bank position:
        order_value('000001.XSHE', -10000)
    """
    style = cal_style(price, style)
    if isinstance(style, LimitOrder):
        if style.get_limit_price() <= 0:
            raise RQInvalidArgument(_(u"Limit order price should be positive"))

    order_book_id = assure_stock_order_book_id(id_or_ins)
    env = Environment.get_instance()

    # Size the order off the last traded price; bail out when there is
    # no usable market data for the instrument.
    price = env.get_last_price(order_book_id)
    if not is_valid_price(price):
        user_system_log.warn(
            _(u"Order Creation Failed: [{order_book_id}] No market data").format(order_book_id=order_book_id))
        return

    account = env.portfolio.accounts[DEFAULT_ACCOUNT_TYPE.STOCK.name]

    if cash_amount > 0:
        # Never plan to spend more than the account actually holds.
        cash_amount = min(cash_amount, account.cash)

    price = price if isinstance(style, MarketOrder) else style.get_limit_price()
    amount = int(Decimal(cash_amount) / Decimal(price))

    if cash_amount > 0:
        round_lot = int(env.get_instrument(order_book_id).round_lot)

        # FIXME: logic duplicate with order_shares
        amount = int(Decimal(amount) / Decimal(round_lot)) * round_lot
        # Shrink the quantity lot by lot until price + estimated
        # transaction costs fit inside the cash budget.
        while amount > 0:
            dummy_order = Order.__from_create__(order_book_id, amount, SIDE.BUY, style, POSITION_EFFECT.OPEN)
            expected_transaction_cost = env.get_order_transaction_cost(DEFAULT_ACCOUNT_TYPE.STOCK, dummy_order)
            if amount * price + expected_transaction_cost <= cash_amount:
                break
            amount -= round_lot
        else:
            user_system_log.warn(_(u"Order Creation Failed: 0 order quantity"))
            return

    # if the cash_amount is larger than you current security’s position,
    # then it will sell all shares of this security.
    position = account.positions[order_book_id]
    amount = downsize_amount(amount, position)

    return order_shares(order_book_id, amount, style=style)
def function[order_value, parameter[id_or_ins, cash_amount, price, style]]: constant[ 使用想要花费的金钱买入/卖出股票,而不是买入/卖出想要的股数,正数代表买入,负数代表卖出。股票的股数总是会被调整成对应的100的倍数(在A中国A股市场1手是100股)。如果资金不足,该API将不会创建发送订单。 需要注意: 当您提交一个买单时,cash_amount 代表的含义是您希望买入股票消耗的金额(包含税费),最终买入的股数不仅和发单的价格有关,还和税费相关的参数设置有关。 当您提交一个卖单时,cash_amount 代表的意义是您希望卖出股票的总价值。如果金额超出了您所持有股票的价值,那么您将卖出所有股票。 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` :param float cash_amount: 需要花费现金购买/卖出证券的数目。正数代表买入,负数代表卖出。 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :return: :class:`~Order` object | None :example: .. code-block:: python #花费最多¥10000买入平安银行股票,并以市价单发送。具体下单的数量与您策略税费相关的配置有关。 order_value('000001.XSHE', 10000) #卖出价值¥10000的现在持有的平安银行: order_value('000001.XSHE', -10000) ] variable[style] assign[=] call[name[cal_style], parameter[name[price], name[style]]] if call[name[isinstance], parameter[name[style], name[LimitOrder]]] begin[:] if compare[call[name[style].get_limit_price, parameter[]] less_or_equal[<=] constant[0]] begin[:] <ast.Raise object at 0x7da1b211c850> variable[order_book_id] assign[=] call[name[assure_stock_order_book_id], parameter[name[id_or_ins]]] variable[env] assign[=] call[name[Environment].get_instance, parameter[]] variable[price] assign[=] call[name[env].get_last_price, parameter[name[order_book_id]]] if <ast.UnaryOp object at 0x7da1b211d3f0> begin[:] call[name[user_system_log].warn, parameter[call[call[name[_], parameter[constant[Order Creation Failed: [{order_book_id}] No market data]]].format, parameter[]]]] return[None] variable[account] assign[=] call[name[env].portfolio.accounts][name[DEFAULT_ACCOUNT_TYPE].STOCK.name] if compare[name[cash_amount] greater[>] constant[0]] begin[:] variable[cash_amount] assign[=] call[name[min], parameter[name[cash_amount], name[account].cash]] variable[price] assign[=] <ast.IfExp object at 
0x7da1b211c3d0> variable[amount] assign[=] call[name[int], parameter[binary_operation[call[name[Decimal], parameter[name[cash_amount]]] / call[name[Decimal], parameter[name[price]]]]]] if compare[name[cash_amount] greater[>] constant[0]] begin[:] variable[round_lot] assign[=] call[name[int], parameter[call[name[env].get_instrument, parameter[name[order_book_id]]].round_lot]] variable[amount] assign[=] binary_operation[call[name[int], parameter[binary_operation[call[name[Decimal], parameter[name[amount]]] / call[name[Decimal], parameter[name[round_lot]]]]]] * name[round_lot]] while compare[name[amount] greater[>] constant[0]] begin[:] variable[dummy_order] assign[=] call[name[Order].__from_create__, parameter[name[order_book_id], name[amount], name[SIDE].BUY, name[style], name[POSITION_EFFECT].OPEN]] variable[expected_transaction_cost] assign[=] call[name[env].get_order_transaction_cost, parameter[name[DEFAULT_ACCOUNT_TYPE].STOCK, name[dummy_order]]] if compare[binary_operation[binary_operation[name[amount] * name[price]] + name[expected_transaction_cost]] less_or_equal[<=] name[cash_amount]] begin[:] break <ast.AugAssign object at 0x7da18c4cc580> variable[position] assign[=] call[name[account].positions][name[order_book_id]] variable[amount] assign[=] call[name[downsize_amount], parameter[name[amount], name[position]]] return[call[name[order_shares], parameter[name[order_book_id], name[amount]]]]
keyword[def] identifier[order_value] ( identifier[id_or_ins] , identifier[cash_amount] , identifier[price] = keyword[None] , identifier[style] = keyword[None] ): literal[string] identifier[style] = identifier[cal_style] ( identifier[price] , identifier[style] ) keyword[if] identifier[isinstance] ( identifier[style] , identifier[LimitOrder] ): keyword[if] identifier[style] . identifier[get_limit_price] ()<= literal[int] : keyword[raise] identifier[RQInvalidArgument] ( identifier[_] ( literal[string] )) identifier[order_book_id] = identifier[assure_stock_order_book_id] ( identifier[id_or_ins] ) identifier[env] = identifier[Environment] . identifier[get_instance] () identifier[price] = identifier[env] . identifier[get_last_price] ( identifier[order_book_id] ) keyword[if] keyword[not] identifier[is_valid_price] ( identifier[price] ): identifier[user_system_log] . identifier[warn] ( identifier[_] ( literal[string] ). identifier[format] ( identifier[order_book_id] = identifier[order_book_id] )) keyword[return] identifier[account] = identifier[env] . identifier[portfolio] . identifier[accounts] [ identifier[DEFAULT_ACCOUNT_TYPE] . identifier[STOCK] . identifier[name] ] keyword[if] identifier[cash_amount] > literal[int] : identifier[cash_amount] = identifier[min] ( identifier[cash_amount] , identifier[account] . identifier[cash] ) identifier[price] = identifier[price] keyword[if] identifier[isinstance] ( identifier[style] , identifier[MarketOrder] ) keyword[else] identifier[style] . identifier[get_limit_price] () identifier[amount] = identifier[int] ( identifier[Decimal] ( identifier[cash_amount] )/ identifier[Decimal] ( identifier[price] )) keyword[if] identifier[cash_amount] > literal[int] : identifier[round_lot] = identifier[int] ( identifier[env] . identifier[get_instrument] ( identifier[order_book_id] ). 
identifier[round_lot] ) identifier[amount] = identifier[int] ( identifier[Decimal] ( identifier[amount] )/ identifier[Decimal] ( identifier[round_lot] ))* identifier[round_lot] keyword[while] identifier[amount] > literal[int] : identifier[dummy_order] = identifier[Order] . identifier[__from_create__] ( identifier[order_book_id] , identifier[amount] , identifier[SIDE] . identifier[BUY] , identifier[style] , identifier[POSITION_EFFECT] . identifier[OPEN] ) identifier[expected_transaction_cost] = identifier[env] . identifier[get_order_transaction_cost] ( identifier[DEFAULT_ACCOUNT_TYPE] . identifier[STOCK] , identifier[dummy_order] ) keyword[if] identifier[amount] * identifier[price] + identifier[expected_transaction_cost] <= identifier[cash_amount] : keyword[break] identifier[amount] -= identifier[round_lot] keyword[else] : identifier[user_system_log] . identifier[warn] ( identifier[_] ( literal[string] )) keyword[return] identifier[position] = identifier[account] . identifier[positions] [ identifier[order_book_id] ] identifier[amount] = identifier[downsize_amount] ( identifier[amount] , identifier[position] ) keyword[return] identifier[order_shares] ( identifier[order_book_id] , identifier[amount] , identifier[style] = identifier[style] )
def order_value(id_or_ins, cash_amount, price=None, style=None): """ 使用想要花费的金钱买入/卖出股票,而不是买入/卖出想要的股数,正数代表买入,负数代表卖出。股票的股数总是会被调整成对应的100的倍数(在A中国A股市场1手是100股)。如果资金不足,该API将不会创建发送订单。 需要注意: 当您提交一个买单时,cash_amount 代表的含义是您希望买入股票消耗的金额(包含税费),最终买入的股数不仅和发单的价格有关,还和税费相关的参数设置有关。 当您提交一个卖单时,cash_amount 代表的意义是您希望卖出股票的总价值。如果金额超出了您所持有股票的价值,那么您将卖出所有股票。 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` :param float cash_amount: 需要花费现金购买/卖出证券的数目。正数代表买入,负数代表卖出。 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :return: :class:`~Order` object | None :example: .. code-block:: python #花费最多¥10000买入平安银行股票,并以市价单发送。具体下单的数量与您策略税费相关的配置有关。 order_value('000001.XSHE', 10000) #卖出价值¥10000的现在持有的平安银行: order_value('000001.XSHE', -10000) """ style = cal_style(price, style) if isinstance(style, LimitOrder): if style.get_limit_price() <= 0: raise RQInvalidArgument(_(u'Limit order price should be positive')) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] order_book_id = assure_stock_order_book_id(id_or_ins) env = Environment.get_instance() price = env.get_last_price(order_book_id) if not is_valid_price(price): user_system_log.warn(_(u'Order Creation Failed: [{order_book_id}] No market data').format(order_book_id=order_book_id)) return # depends on [control=['if'], data=[]] account = env.portfolio.accounts[DEFAULT_ACCOUNT_TYPE.STOCK.name] if cash_amount > 0: cash_amount = min(cash_amount, account.cash) # depends on [control=['if'], data=['cash_amount']] price = price if isinstance(style, MarketOrder) else style.get_limit_price() amount = int(Decimal(cash_amount) / Decimal(price)) if cash_amount > 0: round_lot = int(env.get_instrument(order_book_id).round_lot) # FIXME: logic duplicate with order_shares amount = int(Decimal(amount) / Decimal(round_lot)) * round_lot while amount > 0: dummy_order = 
Order.__from_create__(order_book_id, amount, SIDE.BUY, style, POSITION_EFFECT.OPEN) expected_transaction_cost = env.get_order_transaction_cost(DEFAULT_ACCOUNT_TYPE.STOCK, dummy_order) if amount * price + expected_transaction_cost <= cash_amount: break # depends on [control=['if'], data=[]] amount -= round_lot # depends on [control=['while'], data=['amount']] else: user_system_log.warn(_(u'Order Creation Failed: 0 order quantity')) return # depends on [control=['if'], data=['cash_amount']] # if the cash_amount is larger than you current security’s position, # then it will sell all shares of this security. position = account.positions[order_book_id] amount = downsize_amount(amount, position) return order_shares(order_book_id, amount, style=style)
def find_path_BFS(Graph, n, m):
    """Breadth-first search for a shortest path from ``n`` to ``m``.

    :param Graph: adjacency mapping ``{node: iterable of neighbor nodes}``
    :param n: start node
    :param m: target node
    :return: list of nodes from ``n`` to ``m`` inclusive, or ``None`` if no
        path exists (also when ``m`` is not a key of ``Graph``)

    Fixes over the previous version: terminates with ``None`` instead of
    looping forever when ``m`` is unreachable, marks the start node as
    visited, and expands level-by-level instead of ``list.pop(0)`` (O(n^2)).
    """
    if m not in Graph:
        return None
    if n == m:
        return [m]
    # Each frontier entry is a full partial path; its last element is the
    # node to expand next.
    frontier = [[n]]
    # Visited set prevents re-expanding a node (the start node included).
    searched = {n}
    while frontier:
        next_frontier = []
        for current in frontier:
            node = current[-1]
            # .get() tolerates nodes with no adjacency entry instead of
            # raising KeyError as the old version did.
            for neighbor in Graph.get(node, ()):
                if neighbor not in searched:
                    searched.add(neighbor)
                    extended = current + [neighbor]
                    if neighbor == m:
                        return extended
                    next_frontier.append(extended)
        frontier = next_frontier
    # Frontier exhausted without reaching m: no path exists.
    return None
def function[find_path_BFS, parameter[Graph, n, m]]: constant[ Breadth first search ] if compare[name[m] <ast.NotIn object at 0x7da2590d7190> name[Graph]] begin[:] return[constant[None]] if compare[name[n] equal[==] name[m]] begin[:] return[list[[<ast.Name object at 0x7da20e9b04f0>]]] variable[path] assign[=] list[[<ast.List object at 0x7da20e9b0dc0>]] variable[searched] assign[=] list[[]] while constant[True] begin[:] variable[j] assign[=] call[name[len], parameter[name[path]]] for taget[name[i]] in starred[call[name[range], parameter[name[j]]]] begin[:] variable[node] assign[=] call[call[name[path]][name[i]]][<ast.UnaryOp object at 0x7da20e9b3460>] for taget[name[neighbor]] in starred[call[name[Graph]][name[node]]] begin[:] if compare[name[neighbor] <ast.NotIn object at 0x7da2590d7190> name[searched]] begin[:] call[name[path].append, parameter[binary_operation[call[name[path]][name[i]] + list[[<ast.Name object at 0x7da20e9b0700>]]]]] call[name[searched].append, parameter[name[neighbor]]] if compare[name[neighbor] equal[==] name[m]] begin[:] return[call[name[path]][<ast.UnaryOp object at 0x7da20e9b0040>]] for taget[name[i]] in starred[call[name[range], parameter[name[j]]]] begin[:] call[name[path].pop, parameter[constant[0]]] return[name[path]]
keyword[def] identifier[find_path_BFS] ( identifier[Graph] , identifier[n] , identifier[m] ): literal[string] keyword[if] identifier[m] keyword[not] keyword[in] identifier[Graph] : keyword[return] keyword[None] keyword[if] identifier[n] == identifier[m] : keyword[return] [ identifier[m] ] identifier[path] =[[ identifier[n] ]] identifier[searched] =[] keyword[while] keyword[True] : identifier[j] = identifier[len] ( identifier[path] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[j] ): identifier[node] = identifier[path] [ identifier[i] ][- literal[int] ] keyword[for] identifier[neighbor] keyword[in] identifier[Graph] [ identifier[node] ]: keyword[if] identifier[neighbor] keyword[not] keyword[in] identifier[searched] : identifier[path] . identifier[append] ( identifier[path] [ identifier[i] ]+[ identifier[neighbor] ]) identifier[searched] . identifier[append] ( identifier[neighbor] ) keyword[if] identifier[neighbor] == identifier[m] : keyword[return] identifier[path] [- literal[int] ] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[j] ): identifier[path] . identifier[pop] ( literal[int] ) keyword[return] identifier[path]
def find_path_BFS(Graph, n, m): """ Breadth first search """ if m not in Graph: return None # depends on [control=['if'], data=[]] if n == m: return [m] # depends on [control=['if'], data=['m']] path = [[n]] searched = [] while True: j = len(path) #k = len(Graph[n]) for i in range(j): node = path[i][-1] for neighbor in Graph[node]: if neighbor not in searched: path.append(path[i] + [neighbor]) searched.append(neighbor) if neighbor == m: return path[-1] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['neighbor', 'searched']] # depends on [control=['for'], data=['neighbor']] # depends on [control=['for'], data=['i']] for i in range(j): path.pop(0) # depends on [control=['for'], data=[]] # depends on [control=['while'], data=[]] return path
def remove_state(self, state_id, recursive=True, force=False, destroy=True):
    """Remove a child state, refusing to delete the decider state.

    Guards the parent-class removal: the barrier concurrency state's decider
    state may only be removed when ``force`` is set.

    :param state_id: the id of the state to remove
    :param recursive: a flag to indicate a recursive disassembling of all substates
    :param force: a flag to indicate forcefully deletion of all states (important of the decider state in the
        barrier concurrency state)
    :param destroy: a flag which indicates if the state should not only be disconnected from the state but also
        destroyed, including all its state elements
    :raises exceptions.AttributeError: if the state_id parameter is the decider state
    """
    # Guard clause: block decider-state deletion unless explicitly forced.
    if state_id == UNIQUE_DECIDER_STATE_ID and force is False:
        raise AttributeError("You are not allowed to delete the decider state.")
    # Delegate the actual removal to the parent class implementation.
    return ContainerState.remove_state(
        self, state_id, recursive=recursive, force=force, destroy=destroy
    )
def function[remove_state, parameter[self, state_id, recursive, force, destroy]]: constant[ Overwrite the parent class remove state method by checking if the user tries to delete the decider state :param state_id: the id of the state to remove :param recursive: a flag to indicate a recursive disassembling of all substates :param force: a flag to indicate forcefully deletion of all states (important of the decider state in the barrier concurrency state) :param destroy: a flag which indicates if the state should not only be disconnected from the state but also destroyed, including all its state elements :raises exceptions.AttributeError: if the state_id parameter is the decider state ] if <ast.BoolOp object at 0x7da1b1b9c280> begin[:] <ast.Raise object at 0x7da1b1b9d2a0>
keyword[def] identifier[remove_state] ( identifier[self] , identifier[state_id] , identifier[recursive] = keyword[True] , identifier[force] = keyword[False] , identifier[destroy] = keyword[True] ): literal[string] keyword[if] identifier[state_id] == identifier[UNIQUE_DECIDER_STATE_ID] keyword[and] identifier[force] keyword[is] keyword[False] : keyword[raise] identifier[AttributeError] ( literal[string] ) keyword[else] : keyword[return] identifier[ContainerState] . identifier[remove_state] ( identifier[self] , identifier[state_id] , identifier[recursive] = identifier[recursive] , identifier[force] = identifier[force] , identifier[destroy] = identifier[destroy] )
def remove_state(self, state_id, recursive=True, force=False, destroy=True): """ Overwrite the parent class remove state method by checking if the user tries to delete the decider state :param state_id: the id of the state to remove :param recursive: a flag to indicate a recursive disassembling of all substates :param force: a flag to indicate forcefully deletion of all states (important of the decider state in the barrier concurrency state) :param destroy: a flag which indicates if the state should not only be disconnected from the state but also destroyed, including all its state elements :raises exceptions.AttributeError: if the state_id parameter is the decider state """ if state_id == UNIQUE_DECIDER_STATE_ID and force is False: raise AttributeError('You are not allowed to delete the decider state.') # depends on [control=['if'], data=[]] else: return ContainerState.remove_state(self, state_id, recursive=recursive, force=force, destroy=destroy)