query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Retrieve parts belonging to this activity .
def parts(self, *args, **kwargs):
    """Retrieve the parts attached to this activity.

    Thin wrapper around the client's ``parts`` lookup that pins the
    ``activity`` filter to this activity's id; all other arguments are
    forwarded unchanged.
    """
    client = self._client
    return client.parts(*args, activity=self.id, **kwargs)
4,400
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/activity.py#L120-L139
[ "def", "_fetch_secrets", "(", "vault_url", ",", "path", ",", "token", ")", ":", "url", "=", "_url_joiner", "(", "vault_url", ",", "'v1'", ",", "path", ")", "resp", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "VaultLoader", ".", "_get_headers", "(", "token", ")", ")", "resp", ".", "raise_for_status", "(", ")", "data", "=", "resp", ".", "json", "(", ")", "if", "data", ".", "get", "(", "'errors'", ")", ":", "raise", "VaultException", "(", "u'Error fetching Vault secrets from path {}: {}'", ".", "format", "(", "path", ",", "data", "[", "'errors'", "]", ")", ")", "return", "data", "[", "'data'", "]" ]
Retrieve models and instances belonging to this activity .
def associated_parts(self, *args, **kwargs):
    """Retrieve both the part models and the part instances of this activity.

    :return: a two-tuple ``(models, instances)`` produced by two
        :meth:`parts` calls filtered on ``Category``.
    """
    models = self.parts(category=Category.MODEL, *args, **kwargs)
    instances = self.parts(category=Category.INSTANCE, *args, **kwargs)
    return models, instances
4,401
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/activity.py#L141-L166
[ "def", "external_metadata", "(", "self", ",", "datasource_type", "=", "None", ",", "datasource_id", "=", "None", ")", ":", "if", "datasource_type", "==", "'druid'", ":", "datasource", "=", "ConnectorRegistry", ".", "get_datasource", "(", "datasource_type", ",", "datasource_id", ",", "db", ".", "session", ")", "elif", "datasource_type", "==", "'table'", ":", "database", "=", "(", "db", ".", "session", ".", "query", "(", "Database", ")", ".", "filter_by", "(", "id", "=", "request", ".", "args", ".", "get", "(", "'db_id'", ")", ")", ".", "one", "(", ")", ")", "Table", "=", "ConnectorRegistry", ".", "sources", "[", "'table'", "]", "datasource", "=", "Table", "(", "database", "=", "database", ",", "table_name", "=", "request", ".", "args", ".", "get", "(", "'table_name'", ")", ",", "schema", "=", "request", ".", "args", ".", "get", "(", "'schema'", ")", "or", "None", ",", ")", "external_metadata", "=", "datasource", ".", "external_metadata", "(", ")", "return", "self", ".", "json_response", "(", "external_metadata", ")" ]
Retrieve the subprocess in which this activity is defined .
def subprocess(self):
    """Retrieve the subprocess (parent container) in which this activity is defined.

    :return: the activity object for the containing subprocess
    :raises NotFoundError: when this activity sits at the top level of its
        scope, i.e. its container is the root container.
    """
    container = self._json_data.get('container')
    root = self._json_data.get('root_container')
    if container == root:
        raise NotFoundError(
            "Cannot find subprocess for this task '{}', "
            "as this task exist on top level.".format(self.name))
    return self._client.activity(pk=container, scope=self.scope_id)
4,402
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/activity.py#L199-L218
[ "def", "_retrieve_offsets", "(", "self", ",", "timestamps", ",", "timeout_ms", "=", "float", "(", "\"inf\"", ")", ")", ":", "if", "not", "timestamps", ":", "return", "{", "}", "start_time", "=", "time", ".", "time", "(", ")", "remaining_ms", "=", "timeout_ms", "while", "remaining_ms", ">", "0", ":", "future", "=", "self", ".", "_send_offset_requests", "(", "timestamps", ")", "self", ".", "_client", ".", "poll", "(", "future", "=", "future", ",", "timeout_ms", "=", "remaining_ms", ")", "if", "future", ".", "succeeded", "(", ")", ":", "return", "future", ".", "value", "if", "not", "future", ".", "retriable", "(", ")", ":", "raise", "future", ".", "exception", "# pylint: disable-msg=raising-bad-type", "elapsed_ms", "=", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "*", "1000", "remaining_ms", "=", "timeout_ms", "-", "elapsed_ms", "if", "remaining_ms", "<", "0", ":", "break", "if", "future", ".", "exception", ".", "invalid_metadata", ":", "refresh_future", "=", "self", ".", "_client", ".", "cluster", ".", "request_update", "(", ")", "self", ".", "_client", ".", "poll", "(", "future", "=", "refresh_future", ",", "timeout_ms", "=", "remaining_ms", ")", "else", ":", "time", ".", "sleep", "(", "self", ".", "config", "[", "'retry_backoff_ms'", "]", "/", "1000.0", ")", "elapsed_ms", "=", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "*", "1000", "remaining_ms", "=", "timeout_ms", "-", "elapsed_ms", "raise", "Errors", ".", "KafkaTimeoutError", "(", "\"Failed to get offsets by timestamps in %s ms\"", "%", "(", "timeout_ms", ",", ")", ")" ]
Retrieve the other activities that also belong to the subprocess .
def siblings(self, **kwargs):
    """Retrieve the other activities that share this activity's container.

    Extra keyword arguments are forwarded to the client's ``activities``
    search as additional filters.
    """
    return self._client.activities(
        container=self._json_data.get('container'),
        scope=self.scope_id,
        **kwargs)
4,403
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/activity.py#L248-L269
[ "def", "_retrieve_offsets", "(", "self", ",", "timestamps", ",", "timeout_ms", "=", "float", "(", "\"inf\"", ")", ")", ":", "if", "not", "timestamps", ":", "return", "{", "}", "start_time", "=", "time", ".", "time", "(", ")", "remaining_ms", "=", "timeout_ms", "while", "remaining_ms", ">", "0", ":", "future", "=", "self", ".", "_send_offset_requests", "(", "timestamps", ")", "self", ".", "_client", ".", "poll", "(", "future", "=", "future", ",", "timeout_ms", "=", "remaining_ms", ")", "if", "future", ".", "succeeded", "(", ")", ":", "return", "future", ".", "value", "if", "not", "future", ".", "retriable", "(", ")", ":", "raise", "future", ".", "exception", "# pylint: disable-msg=raising-bad-type", "elapsed_ms", "=", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "*", "1000", "remaining_ms", "=", "timeout_ms", "-", "elapsed_ms", "if", "remaining_ms", "<", "0", ":", "break", "if", "future", ".", "exception", ".", "invalid_metadata", ":", "refresh_future", "=", "self", ".", "_client", ".", "cluster", ".", "request_update", "(", ")", "self", ".", "_client", ".", "poll", "(", "future", "=", "refresh_future", ",", "timeout_ms", "=", "remaining_ms", ")", "else", ":", "time", ".", "sleep", "(", "self", ".", "config", "[", "'retry_backoff_ms'", "]", "/", "1000.0", ")", "elapsed_ms", "=", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "*", "1000", "remaining_ms", "=", "timeout_ms", "-", "elapsed_ms", "raise", "Errors", ".", "KafkaTimeoutError", "(", "\"Failed to get offsets by timestamps in %s ms\"", "%", "(", "timeout_ms", ",", ")", ")" ]
Create a new activity belonging to this subprocess .
def create(self, *args, **kwargs):
    """Create a new activity nested under this subprocess.

    :return: the newly created activity
    :raises IllegalArgumentError: when this activity is not a subprocess.
    """
    if self.activity_type == ActivityType.SUBPROCESS:
        return self._client.create_activity(self.id, *args, **kwargs)
    raise IllegalArgumentError("One can only create a task under a subprocess.")
4,404
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/activity.py#L271-L281
[ "def", "_fetch_secrets", "(", "vault_url", ",", "path", ",", "token", ")", ":", "url", "=", "_url_joiner", "(", "vault_url", ",", "'v1'", ",", "path", ")", "resp", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "VaultLoader", ".", "_get_headers", "(", "token", ")", ")", "resp", ".", "raise_for_status", "(", ")", "data", "=", "resp", ".", "json", "(", ")", "if", "data", ".", "get", "(", "'errors'", ")", ":", "raise", "VaultException", "(", "u'Error fetching Vault secrets from path {}: {}'", ".", "format", "(", "path", ",", "data", "[", "'errors'", "]", ")", ")", "return", "data", "[", "'data'", "]" ]
Get a customization object representing the customization of the activity .
def customization(self):
    """Return the customization object for this activity.

    Customization is currently only supported in an Ext JS context, so an
    ``ExtCustomization`` is always returned.
    """
    # Imported lazily to avoid a circular import at module load time.
    from .customization import ExtCustomization

    return ExtCustomization(client=self._client, activity=self)
4,405
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/activity.py#L398-L417
[ "def", "destroy", "(", "self", ")", ":", "subnets", "=", "(", "str", "(", "net", ".", "gw", "(", ")", ")", "for", "net", "in", "self", ".", "virt_env", ".", "get_nets", "(", ")", ".", "itervalues", "(", ")", ")", "self", ".", "_subnet_store", ".", "release", "(", "subnets", ")", "self", ".", "cleanup", "(", ")", "shutil", ".", "rmtree", "(", "self", ".", "paths", ".", "prefix_path", "(", ")", ")" ]
Get all stop places and quays
def all_stop_places_quays(self) -> list:
    """Return one combined (new) list of all stop place ids and quay ids."""
    return [*self.stops, *self.quays]
4,406
https://github.com/hfurubotten/enturclient/blob/8230f9e9cf5b3a4911e860bc8cbe621231aa5ae4/enturclient/api.py#L68-L73
[ "def", "initialize_schema", "(", "connection", ")", ":", "cursor", "=", "connection", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "\"PRAGMA application_id={}\"", ".", "format", "(", "_TENSORBOARD_APPLICATION_ID", ")", ")", "cursor", ".", "execute", "(", "\"PRAGMA user_version={}\"", ".", "format", "(", "_TENSORBOARD_USER_VERSION", ")", ")", "with", "connection", ":", "for", "statement", "in", "_SCHEMA_STATEMENTS", ":", "lines", "=", "statement", ".", "strip", "(", "'\\n'", ")", ".", "split", "(", "'\\n'", ")", "message", "=", "lines", "[", "0", "]", "+", "(", "'...'", "if", "len", "(", "lines", ")", ">", "1", "else", "''", ")", "logger", ".", "debug", "(", "'Running DB init statement: %s'", ",", "message", ")", "cursor", ".", "execute", "(", "statement", ")" ]
Find all quays from stop places .
async def expand_all_quays(self) -> None:
    """Find all quays belonging to the configured stop places.

    Posts a GraphQL query (GRAPHQL_STOP_TO_QUAY_TEMPLATE) to the Entur API
    for every stop in ``self.stops`` and appends the ids of quays that have
    estimated calls to ``self.quays``. Returns silently when there are no
    stops, on a non-200 HTTP status (after logging), or when the GraphQL
    response carries errors.
    """
    if not self.stops:
        return
    headers = {'ET-Client-Name': self._client_name}
    request = {
        'query': GRAPHQL_STOP_TO_QUAY_TEMPLATE,
        'variables': {
            'stops': self.stops,
            'omitNonBoarding': self.omit_non_boarding}}
    # NOTE(review): the 10 s timeout appears to cover only the POST itself;
    # confirm against the original file whether response handling was also
    # inside the timeout block.
    with async_timeout.timeout(10):
        resp = await self.web_session.post(
            RESOURCE, json=request, headers=headers)
    if resp.status != 200:
        _LOGGER.error(
            "Error connecting to Entur, response http status code: %s",
            resp.status)
        return None
    result = await resp.json()
    if 'errors' in result:
        return
    for stop_place in result['data']['stopPlaces']:
        # Only stop places exposing more than one quay are expanded; a
        # single-quay stop is presumably fully described by the stop itself.
        if len(stop_place['quays']) > 1:
            for quay in stop_place['quays']:
                # Record only quays with actual estimated calls.
                if quay['estimatedCalls']:
                    self.quays.append(quay['id'])
4,407
https://github.com/hfurubotten/enturclient/blob/8230f9e9cf5b3a4911e860bc8cbe621231aa5ae4/enturclient/api.py#L75-L108
[ "def", "generate_changelog", "(", "context", ")", ":", "changelog_content", "=", "[", "'\\n## [%s](%s/compare/%s...%s)\\n\\n'", "%", "(", "context", ".", "new_version", ",", "context", ".", "repo_url", ",", "context", ".", "current_version", ",", "context", ".", "new_version", ",", ")", "]", "git_log_content", "=", "None", "git_log", "=", "'log --oneline --no-merges --no-color'", ".", "split", "(", "' '", ")", "try", ":", "git_log_tag", "=", "git_log", "+", "[", "'%s..master'", "%", "context", ".", "current_version", "]", "git_log_content", "=", "git", "(", "git_log_tag", ")", "log", ".", "debug", "(", "'content: %s'", "%", "git_log_content", ")", "except", "Exception", ":", "log", ".", "warn", "(", "'Error diffing previous version, initial release'", ")", "git_log_content", "=", "git", "(", "git_log", ")", "git_log_content", "=", "replace_sha_with_commit_link", "(", "context", ".", "repo_url", ",", "git_log_content", ")", "# turn change log entries into markdown bullet points", "if", "git_log_content", ":", "[", "changelog_content", ".", "append", "(", "'* %s\\n'", "%", "line", ")", "if", "line", "else", "line", "for", "line", "in", "git_log_content", "[", ":", "-", "1", "]", "]", "write_new_changelog", "(", "context", ".", "repo_url", ",", "'CHANGELOG.md'", ",", "changelog_content", ",", "dry_run", "=", "context", ".", "dry_run", ")", "log", ".", "info", "(", "'Added content to CHANGELOG.md'", ")", "context", ".", "changelog_content", "=", "changelog_content" ]
Get the latest data from api . entur . org .
async def update(self) -> None:
    """Get the latest departure data from api.entur.org.

    Posts the GraphQL query built by ``get_gql_query()`` for the configured
    stops, quays, line whitelist and departure count, stores the response
    payload in ``self._data``, and feeds every returned stop place and quay
    into ``_process_place``. Returns early — logging an error or warning —
    on HTTP failures or GraphQL-level errors.
    """
    headers = {'ET-Client-Name': self._client_name}
    request = {
        'query': self.get_gql_query(),
        'variables': {
            'stops': self.stops,
            'quays': self.quays,
            'whitelist': {'lines': self.line_whitelist},
            'numberOfDepartures': self.number_of_departures,
            'omitNonBoarding': self.omit_non_boarding}}
    with async_timeout.timeout(10):
        resp = await self.web_session.post(
            RESOURCE, json=request, headers=headers)
    if resp.status != 200:
        _LOGGER.error(
            "Error connecting to Entur, response http status code: %s",
            resp.status)
        return None
    result = await resp.json()
    if 'errors' in result:
        # BUG FIX: logging uses %-style lazy interpolation; the original
        # passed a {}-style "{error}" placeholder with a positional arg, so
        # the error payload was never rendered (and record formatting fails).
        _LOGGER.warning(
            "Entur API responded with error message: %s", result['errors'])
        return
    self._data = result['data']
    if 'stopPlaces' in self._data:
        for stop in self._data['stopPlaces']:
            self._process_place(stop, False)
    if 'quays' in self._data:
        for quay in self._data['quays']:
            self._process_place(quay, True)
4,408
https://github.com/hfurubotten/enturclient/blob/8230f9e9cf5b3a4911e860bc8cbe621231aa5ae4/enturclient/api.py#L110-L152
[ "def", "_strip_colors", "(", "self", ",", "message", ":", "str", ")", "->", "str", ":", "for", "c", "in", "self", ".", "COLORS", ":", "message", "=", "message", ".", "replace", "(", "c", ",", "\"\"", ")", "return", "message" ]
Extract information from place dictionary .
def _process_place(self, place: dict, is_platform: bool) -> None:
    """Wrap *place* in a ``Place`` and store it in ``self.info`` under its id."""
    self.info[place['id']] = Place(place, is_platform)
4,409
https://github.com/hfurubotten/enturclient/blob/8230f9e9cf5b3a4911e860bc8cbe621231aa5ae4/enturclient/api.py#L158-L161
[ "def", "update_if_client", "(", "fctn", ")", ":", "@", "functools", ".", "wraps", "(", "fctn", ")", "def", "_update_if_client", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "b", "=", "self", ".", "_bundle", "if", "b", "is", "None", "or", "not", "hasattr", "(", "b", ",", "'is_client'", ")", ":", "return", "fctn", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "b", ".", "is_client", "and", "(", "b", ".", "_last_client_update", "is", "None", "or", "(", "datetime", ".", "now", "(", ")", "-", "b", ".", "_last_client_update", ")", ".", "seconds", ">", "1", ")", ":", "b", ".", "client_update", "(", ")", "return", "fctn", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_update_if_client" ]
Converts a list of model instances to a list of dictionaries using their todict method .
def serializable_list(
        olist, attrs_to_serialize=None, rels_to_expand=None,
        group_listrels_by=None, rels_to_serialize=None,
        key_modifications=None, groupby=None, keyvals_to_merge=None,
        preserve_order=False, dict_struct=None, dict_post_processors=None):
    """Convert a list of model instances to a list of dictionaries via ``todict``.

    When *groupby* is given, the instances are grouped with ``deep_group``
    (and JSON-encoded when *preserve_order* is set); otherwise each instance
    is serialized with ``serialized_obj`` and, if *keyvals_to_merge* is
    provided, merged pairwise with those extra dicts.

    :param olist: list of model instances to serialize
    :param groupby: keys to group the serialized dicts by
    :param keyvals_to_merge: per-item dicts merged into the serialized dicts
    :return: list (or grouped structure) of serialized dictionaries
    """
    if groupby:
        # FIX: the two branches previously duplicated this kwargs dict.
        serializer_kwargs = {
            'rels_to_serialize': rels_to_serialize,
            'rels_to_expand': rels_to_expand,
            'attrs_to_serialize': attrs_to_serialize,
            'group_listrels_by': group_listrels_by,
            'key_modifications': key_modifications,
            'dict_struct': dict_struct,
            'dict_post_processors': dict_post_processors}
        grouped = deep_group(
            olist, keys=groupby, serializer='todict',
            preserve_order=preserve_order,
            serializer_kwargs=serializer_kwargs)
        # Ordered grouping is JSON-encoded, matching the original behavior.
        return json_encoder(grouped) if preserve_order else grouped
    # FIX: materialize as a list — the lazy ``map`` object returned on
    # Python 3 contradicted the function's name and documented contract.
    result_list = [
        serialized_obj(
            o, attrs_to_serialize=attrs_to_serialize,
            rels_to_expand=rels_to_expand,
            group_listrels_by=group_listrels_by,
            rels_to_serialize=rels_to_serialize,
            key_modifications=key_modifications,
            dict_struct=dict_struct,
            dict_post_processors=dict_post_processors)
        for o in olist]
    if keyvals_to_merge:
        result_list = [
            merge(obj_dict, kvdict)
            for obj_dict, kvdict in zip(result_list, keyvals_to_merge)]
    return result_list
4,410
https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/responses.py#L96-L168
[ "def", "communityvisibilitystate", "(", "self", ")", ":", "if", "self", ".", "_communityvisibilitystate", "==", "None", ":", "return", "None", "elif", "self", ".", "_communityvisibilitystate", "in", "self", ".", "VisibilityState", ":", "return", "self", ".", "VisibilityState", "[", "self", ".", "_communityvisibilitystate", "]", "else", ":", "#Invalid State", "return", "None" ]
Provides a json dump of the struct
def jsoned(struct, wrap=True, meta=None, struct_key='result',
           pre_render_callback=None):
    """Return a JSON dump of *struct* after wrapping it with ``structured``."""
    payload = structured(
        struct, wrap=wrap, meta=meta, struct_key=struct_key,
        pre_render_callback=pre_render_callback)
    return _json.dumps(payload, default=json_encoder)
4,411
https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/responses.py#L191-L216
[ "def", "handle_market_open", "(", "self", ",", "session_label", ",", "data_portal", ")", ":", "ledger", "=", "self", ".", "_ledger", "ledger", ".", "start_of_session", "(", "session_label", ")", "adjustment_reader", "=", "data_portal", ".", "adjustment_reader", "if", "adjustment_reader", "is", "not", "None", ":", "# this is None when running with a dataframe source", "ledger", ".", "process_dividends", "(", "session_label", ",", "self", ".", "_asset_finder", ",", "adjustment_reader", ",", ")", "self", ".", "_current_session", "=", "session_label", "cal", "=", "self", ".", "_trading_calendar", "self", ".", "_market_open", ",", "self", ".", "_market_close", "=", "self", ".", "_execution_open_and_close", "(", "cal", ",", "session_label", ",", ")", "self", ".", "start_of_session", "(", "ledger", ",", "session_label", ",", "data_portal", ")" ]
A decorator used to return a JSON response of a list of model objects . It expects the decorated function to return a list of model instances . It then converts the instances to dicts and serializes them into a json response
def as_list(func):
    """Decorator returning a JSON response for a list of model instances.

    The wrapped view must return a list of model instances (or a ready
    ``Response``, which is passed through untouched); the instances are
    converted to dicts and serialized into a JSON list response.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if isinstance(result, Response):
            return result
        params = _serializable_params(request.args, check_groupby=True)
        return as_json_list(result, **params)
    return wrapper
4,412
https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/responses.py#L419-L441
[ "def", "compare", "(", "left", ":", "Optional", "[", "L", "]", ",", "right", ":", "Optional", "[", "R", "]", ")", "->", "'Comparison[L, R]'", ":", "if", "isinstance", "(", "left", ",", "File", ")", "and", "isinstance", "(", "right", ",", "Directory", ")", ":", "return", "FileDirectoryComparison", "(", "left", ",", "right", ")", "if", "isinstance", "(", "left", ",", "Directory", ")", "and", "isinstance", "(", "right", ",", "File", ")", ":", "return", "DirectoryFileComparison", "(", "left", ",", "right", ")", "if", "isinstance", "(", "left", ",", "File", ")", "or", "isinstance", "(", "right", ",", "File", ")", ":", "return", "FileComparison", "(", "left", ",", "right", ")", "if", "isinstance", "(", "left", ",", "Directory", ")", "or", "isinstance", "(", "right", ",", "Directory", ")", ":", "return", "DirectoryComparison", "(", "left", ",", "right", ")", "raise", "TypeError", "(", "f'Cannot compare entities: {left}, {right}'", ")" ]
A decorator used to return a JSON response of a list of model objects . It differs from as_list in that it accepts a variety of querying parameters and can use them to filter and modify the results . It expects the decorated function to return either Model Class to query or a SQLAlchemy filter which exposes a subset of the instances of the Model class . It then converts the instances to dicts and serializes them into a json response
def as_processed_list(func):
    """Decorator returning a filtered JSON list response of model objects.

    Unlike ``as_list`` it inspects the request's query parameters: any
    parameter that matches one of the wrapped function's argument names,
    is not restricted, and carries no operator syntax is forwarded as a
    keyword argument. The wrapped function should return a Model class or
    SQLAlchemy filter, which is then rendered as a JSON list.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # BUG FIX: inspect.getargspec was deprecated and removed in
        # Python 3.11; getfullargspec exposes the same ``.args`` list.
        func_args = inspect.getfullargspec(func).args
        for kw in request.args:
            if (kw in func_args and kw not in RESTRICTED
                    and not any(request.args.get(kw).startswith(op)
                                for op in OPERATORS)
                    and not any(kw.endswith(op) for op in OPERATORS)):
                kwargs[kw] = request.args.get(kw)
        return process_args_and_render_json_list(func(*args, **kwargs))
    return wrapper
4,413
https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/responses.py#L911-L946
[ "def", "n_failed", "(", "self", ")", ":", "return", "self", ".", "_counters", "[", "JobStatus", ".", "failed", "]", "+", "self", ".", "_counters", "[", "JobStatus", ".", "partial_failed", "]" ]
A decorator used to return a JSON response with a dict representation of the model instance . It expects the decorated function to return a Model instance . It then converts the instance to dicts and serializes it into a json response
def as_obj(func):
    """Decorator rendering a single model instance as a JSON response.

    The wrapped view must return a Model instance; it is converted to a
    dict and serialized into a JSON response.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        return render_json_obj_with_requested_structure(func(*args, **kwargs))
    return wrapper
4,414
https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/responses.py#L949-L966
[ "def", "MemoryExceeded", "(", "self", ")", ":", "rss_size", "=", "self", ".", "proc", ".", "memory_info", "(", ")", ".", "rss", "return", "rss_size", "//", "1024", "//", "1024", ">", "config", ".", "CONFIG", "[", "\"Client.rss_max\"", "]" ]
Execute the service .
def execute(self, interactive=False):
    """Execute the service.

    :param interactive: when True, request an interactive execution
    :return: a ``ServiceExecution`` for the started execution
    :raises APIError: when the server does not accept the execution request
    """
    endpoint = self._client._build_url('service_execute', service_id=self.id)
    response = self._client._request(
        'GET', endpoint, params=dict(interactive=interactive, format='json'))
    if response.status_code != requests.codes.accepted:  # pragma: no cover
        raise APIError("Could not execute service '{}': {}".format(
            self, (response.status_code, response.json())))
    data = response.json()
    return ServiceExecution(json=data.get('results')[0], client=self._client)
4,415
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L51-L71
[ "def", "max_or", "(", "a", ",", "b", ",", "c", ",", "d", ",", "w", ")", ":", "m", "=", "(", "1", "<<", "(", "w", "-", "1", ")", ")", "while", "m", "!=", "0", ":", "if", "(", "b", "&", "d", "&", "m", ")", "!=", "0", ":", "temp", "=", "(", "b", "-", "m", ")", "|", "(", "m", "-", "1", ")", "if", "temp", ">=", "a", ":", "b", "=", "temp", "break", "temp", "=", "(", "d", "-", "m", ")", "|", "(", "m", "-", "1", ")", "if", "temp", ">=", "c", ":", "d", "=", "temp", "break", "m", ">>=", "1", "return", "b", "|", "d" ]
Edit Service details .
def edit(self, name=None, description=None, version=None, **kwargs):
    """Edit Service details.

    :param name: (optional) new name of the service
    :param description: (optional) new description of the service
    :param version: (optional) new script version of the service
    :raises IllegalArgumentError: when a provided value is not a string
    :raises APIError: when the server rejects the update
    """
    update_dict = {'id': self.id}
    if name:
        if not isinstance(name, str):
            raise IllegalArgumentError("name should be provided as a string")
        update_dict.update({'name': name})
    if description:
        if not isinstance(description, str):
            raise IllegalArgumentError("description should be provided as a string")
        update_dict.update({'description': description})
    if version:
        if not isinstance(version, str):
            # BUG FIX: this message wrongly said "description" before.
            raise IllegalArgumentError("version should be provided as a string")
        update_dict.update({'script_version': version})
    if kwargs:  # pragma: no cover
        update_dict.update(**kwargs)
    response = self._client._request(
        'PUT',
        self._client._build_url('service', service_id=self.id),
        json=update_dict)
    if response.status_code != requests.codes.ok:  # pragma: no cover
        raise APIError("Could not update Service ({})".format(response))
    # Mirror the accepted changes on the local object.
    if name:
        self.name = name
    if version:
        self.version = version
4,416
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L73-L115
[ "def", "validate_arguments", "(", "log", ",", "whitelisted_args", ",", "args", ")", ":", "valid_patterns", "=", "{", "re", ".", "compile", "(", "p", ")", ":", "v", "for", "p", ",", "v", "in", "whitelisted_args", ".", "items", "(", ")", "}", "def", "validate", "(", "idx", ")", ":", "arg", "=", "args", "[", "idx", "]", "for", "pattern", ",", "has_argument", "in", "valid_patterns", ".", "items", "(", ")", ":", "if", "pattern", ".", "match", "(", "arg", ")", ":", "return", "2", "if", "has_argument", "else", "1", "log", ".", "warn", "(", "\"Zinc argument '{}' is not supported, and is subject to change/removal!\"", ".", "format", "(", "arg", ")", ")", "return", "1", "arg_index", "=", "0", "while", "arg_index", "<", "len", "(", "args", ")", ":", "arg_index", "+=", "validate", "(", "arg_index", ")" ]
Delete this service .
def delete(self):
    # type: () -> None
    """Delete this service.

    :raises APIError: when the server does not confirm the deletion
    """
    endpoint = self._client._build_url('service', service_id=self.id)
    response = self._client._request('DELETE', endpoint)
    if response.status_code != requests.codes.no_content:  # pragma: no cover
        raise APIError("Could not delete service: {} with id {}".format(
            self.name, self.id))
4,417
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L117-L126
[ "def", "max_or", "(", "a", ",", "b", ",", "c", ",", "d", ",", "w", ")", ":", "m", "=", "(", "1", "<<", "(", "w", "-", "1", ")", ")", "while", "m", "!=", "0", ":", "if", "(", "b", "&", "d", "&", "m", ")", "!=", "0", ":", "temp", "=", "(", "b", "-", "m", ")", "|", "(", "m", "-", "1", ")", "if", "temp", ">=", "a", ":", "b", "=", "temp", "break", "temp", "=", "(", "d", "-", "m", ")", "|", "(", "m", "-", "1", ")", "if", "temp", ">=", "c", ":", "d", "=", "temp", "break", "m", ">>=", "1", "return", "b", "|", "d" ]
Retrieve the executions related to the current service .
def get_executions(self, **kwargs):
    """Retrieve the executions related to the current service.

    Extra keyword arguments are forwarded as additional filters.
    """
    return self._client.service_executions(
        service=self.id, scope=self.scope_id, **kwargs)
4,418
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L177-L187
[ "def", "mergeDQarray", "(", "maskname", ",", "dqarr", ")", ":", "maskarr", "=", "None", "if", "maskname", "is", "not", "None", ":", "if", "isinstance", "(", "maskname", ",", "str", ")", ":", "# working with file on disk (default case)", "if", "os", ".", "path", ".", "exists", "(", "maskname", ")", ":", "mask", "=", "fileutil", ".", "openImage", "(", "maskname", ",", "memmap", "=", "False", ")", "maskarr", "=", "mask", "[", "0", "]", ".", "data", ".", "astype", "(", "np", ".", "bool", ")", "mask", ".", "close", "(", ")", "else", ":", "if", "isinstance", "(", "maskname", ",", "fits", ".", "HDUList", ")", ":", "# working with a virtual input file", "maskarr", "=", "maskname", "[", "0", "]", ".", "data", ".", "astype", "(", "np", ".", "bool", ")", "else", ":", "maskarr", "=", "maskname", ".", "data", ".", "astype", "(", "np", ".", "bool", ")", "if", "maskarr", "is", "not", "None", ":", "# merge array with dqarr now", "np", ".", "bitwise_and", "(", "dqarr", ",", "maskarr", ",", "dqarr", ")" ]
Retrieve the Service object to which this execution is associated .
def service(self):
    """Lazily retrieve (and cache) the Service this execution belongs to."""
    if self._service:
        return self._service
    self._service = self._client.service(id=self.service_id)
    return self._service
4,419
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L232-L236
[ "def", "rename_sectors", "(", "self", ",", "sectors", ")", ":", "if", "type", "(", "sectors", ")", "is", "list", ":", "sectors", "=", "{", "old", ":", "new", "for", "old", ",", "new", "in", "zip", "(", "self", ".", "get_sectors", "(", ")", ",", "sectors", ")", "}", "for", "df", "in", "self", ".", "get_DataFrame", "(", "data", "=", "True", ")", ":", "df", ".", "rename", "(", "index", "=", "sectors", ",", "columns", "=", "sectors", ",", "inplace", "=", "True", ")", "try", ":", "for", "ext", "in", "self", ".", "get_extensions", "(", "data", "=", "True", ")", ":", "for", "df", "in", "ext", ".", "get_DataFrame", "(", "data", "=", "True", ")", ":", "df", ".", "rename", "(", "index", "=", "sectors", ",", "columns", "=", "sectors", ",", "inplace", "=", "True", ")", "except", ":", "pass", "self", ".", "meta", ".", "_add_modify", "(", "\"Changed sector names\"", ")", "return", "self" ]
Terminate the Service execution .
def terminate(self):
    """Terminate the Service execution.

    :raises APIError: when the server does not accept the termination request
    """
    url = self._client._build_url(
        'service_execution_terminate', service_execution_id=self.id)
    response = self._client._request('GET', url, params=dict(format='json'))
    if response.status_code != requests.codes.accepted:  # pragma: no cover
        # BUG FIX: the message said "execute"; this call terminates.
        raise APIError("Could not terminate service execution '{}': {}".format(
            self, response))
4,420
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L241-L254
[ "def", "_delete_masked_points", "(", "*", "arrs", ")", ":", "if", "any", "(", "hasattr", "(", "a", ",", "'mask'", ")", "for", "a", "in", "arrs", ")", ":", "keep", "=", "~", "functools", ".", "reduce", "(", "np", ".", "logical_or", ",", "(", "np", ".", "ma", ".", "getmaskarray", "(", "a", ")", "for", "a", "in", "arrs", ")", ")", "return", "tuple", "(", "ma", ".", "asarray", "(", "a", "[", "keep", "]", ")", "for", "a", "in", "arrs", ")", "else", ":", "return", "arrs" ]
Retrieve the log of the service execution .
def get_log(self, target_dir=None, log_filename='log.txt'):
    """Download the log of the service execution to a local file.

    :param target_dir: directory to save into (defaults to the cwd)
    :param log_filename: name of the file to write (default ``log.txt``)
    :raises APIError: when the log cannot be downloaded
    """
    destination = os.path.join(target_dir or os.getcwd(), log_filename)
    url = self._client._build_url(
        'service_execution_log', service_execution_id=self.id)
    response = self._client._request('GET', url)
    if response.status_code != requests.codes.ok:  # pragma: no cover
        raise APIError("Could not download service execution log")
    with open(destination, 'w+b') as handle:
        for chunk in response:
            handle.write(chunk)
4,421
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L256-L278
[ "def", "mergeDQarray", "(", "maskname", ",", "dqarr", ")", ":", "maskarr", "=", "None", "if", "maskname", "is", "not", "None", ":", "if", "isinstance", "(", "maskname", ",", "str", ")", ":", "# working with file on disk (default case)", "if", "os", ".", "path", ".", "exists", "(", "maskname", ")", ":", "mask", "=", "fileutil", ".", "openImage", "(", "maskname", ",", "memmap", "=", "False", ")", "maskarr", "=", "mask", "[", "0", "]", ".", "data", ".", "astype", "(", "np", ".", "bool", ")", "mask", ".", "close", "(", ")", "else", ":", "if", "isinstance", "(", "maskname", ",", "fits", ".", "HDUList", ")", ":", "# working with a virtual input file", "maskarr", "=", "maskname", "[", "0", "]", ".", "data", ".", "astype", "(", "np", ".", "bool", ")", "else", ":", "maskarr", "=", "maskname", ".", "data", ".", "astype", "(", "np", ".", "bool", ")", "if", "maskarr", "is", "not", "None", ":", "# merge array with dqarr now", "np", ".", "bitwise_and", "(", "dqarr", ",", "maskarr", ",", "dqarr", ")" ]
Get the url of the notebook if the notebook is executed in interactive mode .
def get_notebook_url(self):
    """Get the notebook url when the service runs in interactive mode.

    :raises APIError: when the url cannot be retrieved
    """
    endpoint = self._client._build_url(
        'service_execution_notebook_url', service_execution_id=self.id)
    response = self._client._request(
        'GET', endpoint, params=dict(format='json'))
    if response.status_code != requests.codes.ok:
        raise APIError("Could not retrieve notebook url '{}': {}".format(
            self, response))
    data = response.json()
    return data.get('results')[0].get('url')
4,422
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L280-L297
[ "def", "make_descriptors", "(", "self", ",", "base_name", ")", ":", "units_name", "=", "base_name", "+", "\"_units\"", "units_props", "=", "self", ".", "_units_type", ".", "make_descriptors", "(", "units_name", ")", "return", "units_props", "+", "[", "UnitsSpecPropertyDescriptor", "(", "base_name", ",", "self", ",", "units_props", "[", "0", "]", ")", "]" ]
Send websocket data frame to the client . If data is a unicode object then the frame is sent as Text . If the data is a bytearray object then the frame is sent as Binary .
def sendMessage ( self , data ) : opcode = BINARY if isinstance ( data , unicode ) : opcode = TEXT self . _sendMessage ( False , opcode , data )
4,423
https://github.com/pdxjohnny/SimpleHTTPSServer/blob/5ba0490e1c15541287f89abedfdcd2ff70ad1e88/SimpleHTTPSServer/SimpleWebSocketServer.py#L343-L353
[ "def", "unmount", "(", "self", ")", ":", "self", ".", "unmount_bindmounts", "(", ")", "self", ".", "unmount_mounts", "(", ")", "self", ".", "unmount_volume_groups", "(", ")", "self", ".", "unmount_loopbacks", "(", ")", "self", ".", "unmount_base_images", "(", ")", "self", ".", "clean_dirs", "(", ")" ]
Function that equalises the input arrays by zero - padding the shortest one .
def _shape_array ( array1 , array2 ) : if len ( array1 ) > len ( array2 ) : new_array = array2 old_array = array1 else : new_array = array1 old_array = array2 length = len ( old_array ) - len ( new_array ) for i in range ( length ) : n = new_array [ - 1 ] . copy ( ) n [ 0 : : 3 ] += 1 n [ 2 : : 3 ] = 0 new_array = np . vstack ( [ new_array , [ n ] ] ) arrays = np . hstack ( [ old_array , new_array ] ) return arrays
4,424
https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/biosignalsnotebooks/synchronisation.py#L315-L348
[ "def", "_create_download_failed_message", "(", "exception", ",", "url", ")", ":", "message", "=", "'Failed to download from:\\n{}\\nwith {}:\\n{}'", ".", "format", "(", "url", ",", "exception", ".", "__class__", ".", "__name__", ",", "exception", ")", "if", "_is_temporal_problem", "(", "exception", ")", ":", "if", "isinstance", "(", "exception", ",", "requests", ".", "ConnectionError", ")", ":", "message", "+=", "'\\nPlease check your internet connection and try again.'", "else", ":", "message", "+=", "'\\nThere might be a problem in connection or the server failed to process '", "'your request. Please try again.'", "elif", "isinstance", "(", "exception", ",", "requests", ".", "HTTPError", ")", ":", "try", ":", "server_message", "=", "''", "for", "elem", "in", "decode_data", "(", "exception", ".", "response", ".", "content", ",", "MimeType", ".", "XML", ")", ":", "if", "'ServiceException'", "in", "elem", ".", "tag", "or", "'Message'", "in", "elem", ".", "tag", ":", "server_message", "+=", "elem", ".", "text", ".", "strip", "(", "'\\n\\t '", ")", "except", "ElementTree", ".", "ParseError", ":", "server_message", "=", "exception", ".", "response", ".", "text", "message", "+=", "'\\nServer response: \"{}\"'", ".", "format", "(", "server_message", ")", "return", "message" ]
This function allows to generate a text file with synchronised signals from the input file .
def _create_txt_from_str ( in_path , channels , new_path ) : header = [ "# OpenSignals Text File Format" ] files = [ bsnb . load ( in_path ) ] with open ( in_path , encoding = "latin-1" ) as opened_p : header . append ( opened_p . readlines ( ) [ 1 ] ) header . append ( "# EndOfHeader" ) data = [ ] nr_channels = [ ] for file in files : for i , device in enumerate ( file . keys ( ) ) : nr_channels . append ( len ( list ( file [ device ] ) ) ) data . append ( file [ device ] [ channels [ i ] ] ) dephase , s1 , s2 = synchronise_signals ( data [ 0 ] , data [ 1 ] ) new_header = [ h . replace ( "\n" , "" ) for h in header ] sync_file = open ( new_path , 'w' ) sync_file . write ( ' \n' . join ( new_header ) + '\n' ) old_columns = np . loadtxt ( in_path ) if np . array_equal ( s1 , data [ 0 ] ) : # Change the second device aux = 3 * nr_channels [ 0 ] columns = old_columns [ dephase : , aux : ] new_file = _shape_array ( old_columns [ : , : aux ] , columns ) elif np . array_equal ( s2 , data [ 1 ] ) : # Change the first device aux = 3 * nr_channels [ 1 ] columns = old_columns [ dephase : , : aux ] new_file = _shape_array ( columns , old_columns [ : , aux : ] ) else : print ( "The devices are synchronised." ) return for line in new_file : sync_file . write ( '\t' . join ( str ( int ( i ) ) for i in line ) + '\t\n' ) sync_file . close ( )
4,425
https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/biosignalsnotebooks/synchronisation.py#L444-L493
[ "def", "_get_port_speed_price_id", "(", "items", ",", "port_speed", ",", "no_public", ",", "location", ")", ":", "for", "item", "in", "items", ":", "if", "utils", ".", "lookup", "(", "item", ",", "'itemCategory'", ",", "'categoryCode'", ")", "!=", "'port_speed'", ":", "continue", "# Check for correct capacity and if the item matches private only", "if", "any", "(", "[", "int", "(", "utils", ".", "lookup", "(", "item", ",", "'capacity'", ")", ")", "!=", "port_speed", ",", "_is_private_port_speed_item", "(", "item", ")", "!=", "no_public", ",", "not", "_is_bonded", "(", "item", ")", "]", ")", ":", "continue", "for", "price", "in", "item", "[", "'prices'", "]", ":", "if", "not", "_matches_location", "(", "price", ",", "location", ")", ":", "continue", "return", "price", "[", "'id'", "]", "raise", "SoftLayer", ".", "SoftLayerError", "(", "\"Could not find valid price for port speed: '%s'\"", "%", "port_speed", ")" ]
Renders a string from a path template using the provided bindings .
def render ( self , bindings ) : out = [ ] binding = False for segment in self . segments : if segment . kind == _BINDING : if segment . literal not in bindings : raise ValidationException ( ( 'rendering error: value for key \'{}\' ' 'not provided' ) . format ( segment . literal ) ) out . extend ( PathTemplate ( bindings [ segment . literal ] ) . segments ) binding = True elif segment . kind == _END_BINDING : binding = False else : if binding : continue out . append ( segment ) path = _format ( out ) self . match ( path ) return path
4,426
https://github.com/googleapis/protoc-java-resource-names-plugin/blob/3fb2ec9b778f62646c05a7b960c893464c7791c0/plugin/utils/path_template.py#L82-L113
[ "def", "split", "(", "self", ",", "verbose", "=", "None", ",", "end_in_new_line", "=", "None", ")", ":", "elapsed_time", "=", "self", ".", "get_elapsed_time", "(", ")", "self", ".", "split_elapsed_time", ".", "append", "(", "elapsed_time", ")", "self", ".", "_cumulative_elapsed_time", "+=", "elapsed_time", "self", ".", "_elapsed_time", "=", "datetime", ".", "timedelta", "(", ")", "if", "verbose", "is", "None", ":", "verbose", "=", "self", ".", "verbose_end", "if", "verbose", ":", "if", "end_in_new_line", "is", "None", ":", "end_in_new_line", "=", "self", ".", "end_in_new_line", "if", "end_in_new_line", ":", "self", ".", "log", "(", "\"{} done in {}\"", ".", "format", "(", "self", ".", "description", ",", "elapsed_time", ")", ")", "else", ":", "self", ".", "log", "(", "\" done in {}\"", ".", "format", "(", "elapsed_time", ")", ")", "self", ".", "_start_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")" ]
Matches a fully qualified path template string .
def match ( self , path ) : this = self . segments that = path . split ( '/' ) current_var = None bindings = { } segment_count = self . segment_count j = 0 for i in range ( 0 , len ( this ) ) : if j >= len ( that ) : break if this [ i ] . kind == _TERMINAL : if this [ i ] . literal == '*' : bindings [ current_var ] = that [ j ] j += 1 elif this [ i ] . literal == '**' : until = j + len ( that ) - segment_count + 1 segment_count += len ( that ) - segment_count bindings [ current_var ] = '/' . join ( that [ j : until ] ) j = until elif this [ i ] . literal != that [ j ] : raise ValidationException ( 'mismatched literal: \'%s\' != \'%s\'' % ( this [ i ] . literal , that [ j ] ) ) else : j += 1 elif this [ i ] . kind == _BINDING : current_var = this [ i ] . literal if j != len ( that ) or j != segment_count : raise ValidationException ( 'match error: could not render from the path template: {}' . format ( path ) ) return bindings
4,427
https://github.com/googleapis/protoc-java-resource-names-plugin/blob/3fb2ec9b778f62646c05a7b960c893464c7791c0/plugin/utils/path_template.py#L115-L157
[ "def", "make_random_models_table", "(", "n_sources", ",", "param_ranges", ",", "random_state", "=", "None", ")", ":", "prng", "=", "check_random_state", "(", "random_state", ")", "sources", "=", "Table", "(", ")", "for", "param_name", ",", "(", "lower", ",", "upper", ")", "in", "param_ranges", ".", "items", "(", ")", ":", "# Generate a column for every item in param_ranges, even if it", "# is not in the model (e.g. flux). However, such columns will", "# be ignored when rendering the image.", "sources", "[", "param_name", "]", "=", "prng", ".", "uniform", "(", "lower", ",", "upper", ",", "n_sources", ")", "return", "sources" ]
Returns a list of path template segments parsed from data .
def parse ( self , data ) : self . binding_var_count = 0 self . segment_count = 0 segments = self . parser . parse ( data ) # Validation step: checks that there are no nested bindings. path_wildcard = False for segment in segments : if segment . kind == _TERMINAL and segment . literal == '**' : if path_wildcard : raise ValidationException ( 'validation error: path template cannot contain more ' 'than one path wildcard' ) path_wildcard = True return segments
4,428
https://github.com/googleapis/protoc-java-resource-names-plugin/blob/3fb2ec9b778f62646c05a7b960c893464c7791c0/plugin/utils/path_template.py#L190-L211
[ "def", "arcball_constrain_to_axis", "(", "point", ",", "axis", ")", ":", "v", "=", "np", ".", "array", "(", "point", ",", "dtype", "=", "np", ".", "float64", ",", "copy", "=", "True", ")", "a", "=", "np", ".", "array", "(", "axis", ",", "dtype", "=", "np", ".", "float64", ",", "copy", "=", "True", ")", "v", "-=", "a", "*", "np", ".", "dot", "(", "a", ",", "v", ")", "# on plane", "n", "=", "vector_norm", "(", "v", ")", "if", "n", ">", "_EPS", ":", "if", "v", "[", "2", "]", "<", "0.0", ":", "np", ".", "negative", "(", "v", ",", "v", ")", "v", "/=", "n", "return", "v", "if", "a", "[", "2", "]", "==", "1.0", ":", "return", "np", ".", "array", "(", "[", "1.0", ",", "0.0", ",", "0.0", "]", ")", "return", "unit_vector", "(", "[", "-", "a", "[", "1", "]", ",", "a", "[", "0", "]", ",", "0.0", "]", ")" ]
Create a notification object .
def create ( window , root ) : notifications = { } _id = root . get_property ( "id" ) from foxpuppet . windows . browser . notifications import addons notifications . update ( addons . NOTIFICATIONS ) return notifications . get ( _id , BaseNotification ) ( window , root )
4,429
https://github.com/mozilla/FoxPuppet/blob/6575eb4c72fd024c986b254e198c8b4e6f68cddd/foxpuppet/windows/browser/notifications/base.py#L19-L39
[ "def", "assignParameters", "(", "self", ",", "solution_next", ",", "IncomeDstn", ",", "LivPrb", ",", "DiscFac", ",", "CRRA", ",", "Rfree", ",", "PermGroFac", ",", "BoroCnstArt", ",", "aXtraGrid", ",", "vFuncBool", ",", "CubicBool", ")", ":", "ConsPerfForesightSolver", ".", "assignParameters", "(", "self", ",", "solution_next", ",", "DiscFac", ",", "LivPrb", ",", "CRRA", ",", "Rfree", ",", "PermGroFac", ")", "self", ".", "BoroCnstArt", "=", "BoroCnstArt", "self", ".", "IncomeDstn", "=", "IncomeDstn", "self", ".", "aXtraGrid", "=", "aXtraGrid", "self", ".", "vFuncBool", "=", "vFuncBool", "self", ".", "CubicBool", "=", "CubicBool" ]
Provide access to the notification label .
def label ( self ) : with self . selenium . context ( self . selenium . CONTEXT_CHROME ) : return self . root . get_attribute ( "label" )
4,430
https://github.com/mozilla/FoxPuppet/blob/6575eb4c72fd024c986b254e198c8b4e6f68cddd/foxpuppet/windows/browser/notifications/base.py#L42-L50
[ "def", "data_complete", "(", "datadir", ",", "sitedir", ",", "get_container_name", ")", ":", "if", "any", "(", "not", "path", ".", "isdir", "(", "sitedir", "+", "x", ")", "for", "x", "in", "(", "'/files'", ",", "'/run'", ",", "'/solr'", ")", ")", ":", "return", "False", "if", "docker", ".", "is_boot2docker", "(", ")", ":", "# Inspect returns None if the container doesn't exist.", "return", "all", "(", "docker", ".", "inspect_container", "(", "get_container_name", "(", "x", ")", ")", "for", "x", "in", "(", "'pgdata'", ",", "'venv'", ")", ")", "return", "path", ".", "isdir", "(", "datadir", "+", "'/venv'", ")", "and", "path", ".", "isdir", "(", "sitedir", "+", "'/postgres'", ")" ]
Provide access to the notification origin .
def origin ( self ) : with self . selenium . context ( self . selenium . CONTEXT_CHROME ) : return self . root . get_attribute ( "origin" )
4,431
https://github.com/mozilla/FoxPuppet/blob/6575eb4c72fd024c986b254e198c8b4e6f68cddd/foxpuppet/windows/browser/notifications/base.py#L53-L61
[ "def", "_ar_matrix", "(", "self", ")", ":", "X", "=", "np", ".", "ones", "(", "self", ".", "data_length", "-", "self", ".", "max_lag", ")", "if", "self", ".", "ar", "!=", "0", ":", "for", "i", "in", "range", "(", "0", ",", "self", ".", "ar", ")", ":", "X", "=", "np", ".", "vstack", "(", "(", "X", ",", "self", ".", "data", "[", "(", "self", ".", "max_lag", "-", "i", "-", "1", ")", ":", "-", "i", "-", "1", "]", ")", ")", "return", "X" ]
Retrieve the primary button .
def find_primary_button ( self ) : if self . window . firefox_version >= 67 : return self . root . find_element ( By . CLASS_NAME , "popup-notification-primary-button" ) return self . root . find_anonymous_element_by_attribute ( "anonid" , "button" )
4,432
https://github.com/mozilla/FoxPuppet/blob/6575eb4c72fd024c986b254e198c8b4e6f68cddd/foxpuppet/windows/browser/notifications/base.py#L63-L69
[ "def", "_set_max_value", "(", "self", ",", "max_value", ")", ":", "self", ".", "_external_max_value", "=", "max_value", "# Check that the current value of the parameter is still within the boundaries. If not, issue a warning", "if", "self", ".", "_external_max_value", "is", "not", "None", "and", "self", ".", "value", ">", "self", ".", "_external_max_value", ":", "warnings", ".", "warn", "(", "\"The current value of the parameter %s (%s) \"", "\"was above the new maximum %s.\"", "%", "(", "self", ".", "name", ",", "self", ".", "value", ",", "self", ".", "_external_max_value", ")", ",", "exceptions", ".", "RuntimeWarning", ")", "self", ".", "value", "=", "self", ".", "_external_max_value" ]
Return a list of all open windows .
def windows ( self ) : from foxpuppet . windows import BrowserWindow return [ BrowserWindow ( self . selenium , handle ) for handle in self . selenium . window_handles ]
4,433
https://github.com/mozilla/FoxPuppet/blob/6575eb4c72fd024c986b254e198c8b4e6f68cddd/foxpuppet/windows/manager.py#L26-L38
[ "def", "post", "(", "self", ",", "url", ",", "data", ",", "proto", "=", "'http'", ",", "form_name", "=", "None", ")", ":", "form", "=", "self", ".", "translator", ".", "fill_form", "(", "self", ".", "last_response_soup", ",", "form_name", "if", "form_name", "else", "url", ",", "data", ")", "self", ".", "last_response", "=", "self", ".", "session", ".", "post", "(", "proto", "+", "self", ".", "base_uri", "+", "url", ",", "headers", "=", "self", ".", "headers", ",", "cookies", "=", "self", ".", "cookies", ",", "data", "=", "form", ",", "allow_redirects", "=", "True", ",", "verify", "=", "self", ".", "verify", ")", "return", "self", ".", "last_response_soup" ]
Read thread .
def read_daemon ( self ) : while True : data = self . _socket . recv ( 9999 ) self . feed_parser ( data )
4,434
https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/connections/socket.py#L66-L70
[ "def", "_add_dependency", "(", "self", ",", "dependency", ",", "var_name", "=", "None", ")", ":", "if", "var_name", "is", "None", ":", "var_name", "=", "next", "(", "self", ".", "temp_var_names", ")", "# Don't add duplicate dependencies", "if", "(", "dependency", ",", "var_name", ")", "not", "in", "self", ".", "dependencies", ":", "self", ".", "dependencies", ".", "append", "(", "(", "dependency", ",", "var_name", ")", ")", "return", "var_name" ]
Process the inner logic of the validator .
def _logic ( self , value = None ) : # type: (Any) -> Tuple[Union[bool, None], str] self . _validation_result , self . _validation_reason = None , 'No reason' return self . _validation_result , self . _validation_reason
4,435
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/validators/validators_base.py#L194-L201
[ "def", "after_epoch", "(", "self", ",", "epoch_id", ":", "int", ",", "epoch_data", ":", "EpochData", ")", "->", "None", ":", "logging", ".", "debug", "(", "'Saving epoch %d data to \"%s\"'", ",", "epoch_id", ",", "self", ".", "_file_path", ")", "if", "not", "self", ".", "_header_written", ":", "self", ".", "_write_header", "(", "epoch_data", "=", "epoch_data", ")", "self", ".", "_write_row", "(", "epoch_id", "=", "epoch_id", ",", "epoch_data", "=", "epoch_data", ")" ]
Query the darwin webservice to obtain a board for a particular station and return a StationBoard instance
def get_station_board ( self , crs , rows = 17 , include_departures = True , include_arrivals = False , destination_crs = None , origin_crs = None ) : # Determine the darwn query we want to make if include_departures and include_arrivals : query_type = 'GetArrivalDepartureBoard' elif include_departures : query_type = 'GetDepartureBoard' elif include_arrivals : query_type = 'GetArrivalBoard' else : raise ValueError ( "get_station_board must have either include_departures or \ include_arrivals set to True" ) # build a query function q = partial ( self . _base_query ( ) [ query_type ] , crs = crs , numRows = rows ) if destination_crs : if origin_crs : log . warn ( "Station board query can only filter on one of \ destination_crs and origin_crs, using only destination_crs" ) q = partial ( q , filterCrs = destination_crs , filterType = 'to' ) elif origin_crs : q = partial ( q , filterCrs = origin_crs , filterType = 'from' ) try : soap_response = q ( ) except WebFault : raise WebServiceError return StationBoard ( soap_response )
4,436
https://github.com/robert-b-clarke/nre-darwin-py/blob/6b0b181770e085dc7f71fbd2eb3fe779f653da62/nredarwin/webservice.py#L67-L121
[ "def", "check_header", "(", "in_bam", ",", "rgnames", ",", "ref_file", ",", "config", ")", ":", "_check_bam_contigs", "(", "in_bam", ",", "ref_file", ",", "config", ")", "_check_sample", "(", "in_bam", ",", "rgnames", ")" ]
Get the details of an individual service and return a ServiceDetails instance .
def get_service_details ( self , service_id ) : service_query = self . _soap_client . service [ 'LDBServiceSoap' ] [ 'GetServiceDetails' ] try : soap_response = service_query ( serviceID = service_id ) except WebFault : raise WebServiceError return ServiceDetails ( soap_response )
4,437
https://github.com/robert-b-clarke/nre-darwin-py/blob/6b0b181770e085dc7f71fbd2eb3fe779f653da62/nredarwin/webservice.py#L123-L137
[ "def", "workspace_state_changed", "(", "ob", ",", "event", ")", ":", "workspace", "=", "event", ".", "object", "roles", "=", "[", "'Guest'", ",", "]", "if", "event", ".", "new_state", ".", "id", "==", "'open'", ":", "api", ".", "group", ".", "grant_roles", "(", "groupname", "=", "INTRANET_USERS_GROUP_ID", ",", "obj", "=", "workspace", ",", "roles", "=", "roles", ",", ")", "workspace", ".", "reindexObjectSecurity", "(", ")", "elif", "event", ".", "old_state", ".", "id", "==", "'open'", ":", "api", ".", "group", ".", "revoke_roles", "(", "groupname", "=", "INTRANET_USERS_GROUP_ID", ",", "obj", "=", "workspace", ",", "roles", "=", "roles", ",", ")", "workspace", ".", "reindexObjectSecurity", "(", ")" ]
Render and save API doc in openapi . yml .
def render_template ( self ) : self . _parse_paths ( ) context = dict ( napp = self . _napp . __dict__ , paths = self . _paths ) self . _save ( context )
4,438
https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/openapi.py#L35-L39
[ "def", "wait", "(", "self", ",", "timeout", "=", "None", ")", ":", "# The 'or' part is for Python 2.6", "result", "=", "self", ".", "__event", ".", "wait", "(", "timeout", ")", "# pylint: disable=E0702", "# Pylint seems to miss the \"is None\" check below", "if", "self", ".", "__exception", "is", "None", ":", "return", "result", "else", ":", "raise", "self", ".", "__exception" ]
Return URL rule HTTP methods and docstring .
def _parse_decorated_functions ( self , code ) : matches = re . finditer ( r""" # @rest decorators (?P<decorators> (?:@rest\(.+?\)\n)+ # one or more @rest decorators inside ) # docstring delimited by 3 double quotes .+?"{3}(?P<docstring>.+?)"{3} """ , code , re . VERBOSE | re . DOTALL ) for function_match in matches : m_dict = function_match . groupdict ( ) self . _parse_docstring ( m_dict [ 'docstring' ] ) self . _add_function_paths ( m_dict [ 'decorators' ] )
4,439
https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/openapi.py#L46-L60
[ "def", "setOverlayTransformOverlayRelative", "(", "self", ",", "ulOverlayHandle", ",", "ulOverlayHandleParent", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayTransformOverlayRelative", "pmatParentOverlayToOverlayTransform", "=", "HmdMatrix34_t", "(", ")", "result", "=", "fn", "(", "ulOverlayHandle", ",", "ulOverlayHandleParent", ",", "byref", "(", "pmatParentOverlayToOverlayTransform", ")", ")", "return", "result", ",", "pmatParentOverlayToOverlayTransform" ]
Parse the method docstring .
def _parse_docstring ( self , docstring ) : match = re . match ( r""" # Following PEP 257 \s* (?P<summary>[^\n]+?) \s* # First line ( # Description and YAML are optional (\n \s*){2} # Blank line # Description (optional) ( (?!-{3,}) # Don't use YAML as description \s* (?P<description>.+?) \s* # Third line and maybe others (?=-{3,})? # Stop if "---" is found )? # YAML spec (optional) **currently not used** ( -{3,}\n # "---" begins yaml spec (?P<open_api>.+) )? )? $""" , docstring , re . VERBOSE | re . DOTALL ) summary = 'TODO write the summary.' description = 'TODO write/remove the description' if match : m_dict = match . groupdict ( ) summary = m_dict [ 'summary' ] if m_dict [ 'description' ] : description = re . sub ( r'(\s|\n){2,}' , ' ' , m_dict [ 'description' ] ) self . _summary = summary self . _description = description
4,440
https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/openapi.py#L69-L102
[ "def", "_compare_strings", "(", "cls", ",", "source", ",", "target", ")", ":", "start", "=", "0", "end", "=", "len", "(", "source", ")", "begins", "=", "0", "ends", "=", "0", "# Reading of initial wildcard in source", "if", "source", ".", "startswith", "(", "CPEComponent2_3_WFN", ".", "WILDCARD_MULTI", ")", ":", "# Source starts with \"*\"", "start", "=", "1", "begins", "=", "-", "1", "else", ":", "while", "(", "(", "start", "<", "len", "(", "source", ")", ")", "and", "source", ".", "startswith", "(", "CPEComponent2_3_WFN", ".", "WILDCARD_ONE", ",", "start", ",", "start", ")", ")", ":", "# Source starts with one or more \"?\"", "start", "+=", "1", "begins", "+=", "1", "# Reading of final wildcard in source", "if", "(", "source", ".", "endswith", "(", "CPEComponent2_3_WFN", ".", "WILDCARD_MULTI", ")", "and", "CPESet2_3", ".", "_is_even_wildcards", "(", "source", ",", "end", "-", "1", ")", ")", ":", "# Source ends in \"*\"", "end", "-=", "1", "ends", "=", "-", "1", "else", ":", "while", "(", "(", "end", ">", "0", ")", "and", "source", ".", "endswith", "(", "CPEComponent2_3_WFN", ".", "WILDCARD_ONE", ",", "end", "-", "1", ",", "end", ")", "and", "CPESet2_3", ".", "_is_even_wildcards", "(", "source", ",", "end", "-", "1", ")", ")", ":", "# Source ends in \"?\"", "end", "-=", "1", "ends", "+=", "1", "source", "=", "source", "[", "start", ":", "end", "]", "index", "=", "-", "1", "leftover", "=", "len", "(", "target", ")", "while", "(", "leftover", ">", "0", ")", ":", "index", "=", "target", ".", "find", "(", "source", ",", "index", "+", "1", ")", "if", "(", "index", "==", "-", "1", ")", ":", "break", "escapes", "=", "target", ".", "count", "(", "\"\\\\\"", ",", "0", ",", "index", ")", "if", "(", "(", "index", ">", "0", ")", "and", "(", "begins", "!=", "-", "1", ")", "and", "(", "begins", "<", "(", "index", "-", "escapes", ")", ")", ")", ":", "break", "escapes", "=", "target", ".", "count", "(", "\"\\\\\"", ",", "index", "+", "1", ",", "len", "(", "target", ")", ")", 
"leftover", "=", "len", "(", "target", ")", "-", "index", "-", "escapes", "-", "len", "(", "source", ")", "if", "(", "(", "leftover", ">", "0", ")", "and", "(", "(", "ends", "!=", "-", "1", ")", "and", "(", "leftover", ">", "ends", ")", ")", ")", ":", "continue", "return", "CPESet2_3", ".", "LOGICAL_VALUE_SUPERSET", "return", "CPESet2_3", ".", "LOGICAL_VALUE_DISJOINT" ]
Return HTTP method list . Use json for security reasons .
def _parse_methods ( cls , list_string ) : if list_string is None : return APIServer . DEFAULT_METHODS # json requires double quotes json_list = list_string . replace ( "'" , '"' ) return json . loads ( json_list )
4,441
https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/openapi.py#L127-L133
[ "def", "setFlag", "(", "self", ",", "flag", ",", "state", "=", "True", ")", ":", "if", "state", ":", "self", ".", "__flags", "|=", "flag", "else", ":", "self", ".", "__flags", "&=", "~", "flag" ]
Convert relative Flask rule to absolute OpenAPI path .
def _rule2path ( cls , rule ) : typeless = re . sub ( r'<\w+?:' , '<' , rule ) # remove Flask types return typeless . replace ( '<' , '{' ) . replace ( '>' , '}' )
4,442
https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/openapi.py#L142-L145
[ "def", "fromgroups", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "bed", "import", "Bed", "p", "=", "OptionParser", "(", "fromgroups", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "<", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "groupsfile", "=", "args", "[", "0", "]", "bedfiles", "=", "args", "[", "1", ":", "]", "beds", "=", "[", "Bed", "(", "x", ")", "for", "x", "in", "bedfiles", "]", "fp", "=", "open", "(", "groupsfile", ")", "groups", "=", "[", "row", ".", "strip", "(", ")", ".", "split", "(", "\",\"", ")", "for", "row", "in", "fp", "]", "for", "b1", ",", "b2", "in", "product", "(", "beds", ",", "repeat", "=", "2", ")", ":", "extract_pairs", "(", "b1", ",", "b2", ",", "groups", ")" ]
Retrieve the property belonging to this part based on its name or uuid .
def property ( self , name ) : found = None if is_uuid ( name ) : found = find ( self . properties , lambda p : name == p . id ) else : found = find ( self . properties , lambda p : name == p . name ) if not found : raise NotFoundError ( "Could not find property with name or id {}" . format ( name ) ) return found
4,443
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L69-L103
[ "def", "_set_command_line_arguments", "(", "self", ",", "args", ")", ":", "Global", ".", "LOGGER", ".", "debug", "(", "\"setting command line arguments\"", ")", "if", "args", ".", "VERBOSE", ":", "Global", ".", "LOGGER", ".", "debug", "(", "\"verbose mode active\"", ")", "Global", ".", "CONFIG_MANAGER", ".", "log_level", "=", "logging", ".", "DEBUG", "Global", ".", "LOGGER_INSTANCE", ".", "reconfigure_log_level", "(", ")", "if", "args", ".", "STATS", ">", "0", ":", "Global", ".", "LOGGER", ".", "debug", "(", "f\"stats requested every {args.STATS} seconds\"", ")", "Global", ".", "CONFIG_MANAGER", ".", "show_stats", "=", "True", "Global", ".", "CONFIG_MANAGER", ".", "stats_timeout", "=", "args", ".", "STATS", "if", "args", ".", "INTERVAL", ">", "0", ":", "Global", ".", "LOGGER", ".", "debug", "(", "f\"setting sleep interval to {args.INTERVAL} milliseconds\"", ")", "Global", ".", "CONFIG_MANAGER", ".", "sleep_interval", "=", "float", "(", "args", ".", "INTERVAL", ")", "/", "1000", "if", "args", ".", "TRACE", ":", "Global", ".", "LOGGER", ".", "debug", "(", "\"tracing mode active\"", ")", "Global", ".", "CONFIG_MANAGER", ".", "tracing_mode", "=", "True", "Global", ".", "CONFIG_MANAGER", ".", "log_level", "=", "logging", ".", "DEBUG", "Global", ".", "LOGGER_INSTANCE", ".", "reconfigure_log_level", "(", ")", "if", "args", ".", "MESSAGEINTERVAL", "is", "not", "None", "and", "args", ".", "MESSAGEINTERVAL", ">", "0", ":", "Global", ".", "LOGGER", ".", "debug", "(", "f\"setting message fetcher sleep interval to {args.MESSAGEINTERVAL/10} milliseconds\"", ")", "Global", ".", "CONFIG_MANAGER", ".", "message_fetcher_sleep_interval", "=", "float", "(", "args", ".", "MESSAGEINTERVAL", ")", "/", "10000", "Global", ".", "CONFIG_MANAGER", ".", "fixed_message_fetcher_interval", "=", "True", "Global", ".", "LOGGER", ".", "debug", "(", "f\"recipes to be parsed: {args.FILENAME}\"", ")", "Global", ".", "CONFIG_MANAGER", ".", "recipes", "=", "(", "args", ".", "FILENAME", ")" ]
Retrieve the parent of this Part .
def parent ( self ) : # type: () -> Any if self . parent_id : return self . _client . part ( pk = self . parent_id , category = self . category ) else : return None
4,444
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L105-L122
[ "def", "refresh", "(", "self", ",", "refresh_token", ")", ":", "r", "=", "requests", ".", "post", "(", "self", ".", "apiurl", "+", "\"/token\"", ",", "params", "=", "{", "\"grant_type\"", ":", "\"refresh_token\"", ",", "\"client_id\"", ":", "self", ".", "cid", ",", "\"client_secret\"", ":", "self", ".", "csecret", ",", "\"refresh_token\"", ":", "refresh_token", "}", ")", "if", "r", ".", "status_code", "!=", "200", ":", "raise", "ServerError", "jsd", "=", "r", ".", "json", "(", ")", "return", "jsd", "[", "'access_token'", "]", ",", "int", "(", "jsd", "[", "'expires_in'", "]", ")", "+", "int", "(", "jsd", "[", "'created_at'", "]", ")" ]
Retrieve the children of this Part as Partset .
def children ( self , * * kwargs ) : if not kwargs : # no kwargs provided is the default, we aim to cache it. if not self . _cached_children : self . _cached_children = list ( self . _client . parts ( parent = self . id , category = self . category ) ) return self . _cached_children else : # if kwargs are provided, we assume no use of cache as specific filtering on the children is performed. return self . _client . parts ( parent = self . id , category = self . category , * * kwargs )
4,445
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L124-L163
[ "def", "_login", "(", "self", ",", "username", ",", "password", ")", ":", "data", "=", "{", "'username'", ":", "username", ",", "'password'", ":", "password", ",", "'grant_type'", ":", "'password'", "}", "r", "=", "self", ".", "spark_api", ".", "oauth", ".", "token", ".", "POST", "(", "auth", "=", "(", "'spark'", ",", "'spark'", ")", ",", "data", "=", "data", ",", "timeout", "=", "self", ".", "timeout", ")", "self", ".", "_check_error", "(", "r", ")", "return", "r", ".", "json", "(", ")", "[", "'access_token'", "]" ]
Retrieve the siblings of this Part as Partset .
def siblings ( self , * * kwargs ) : # type: (Any) -> Any if self . parent_id : return self . _client . parts ( parent = self . parent_id , category = self . category , * * kwargs ) else : from pykechain . models . partset import PartSet return PartSet ( parts = [ ] )
4,446
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L165-L181
[ "def", "_login", "(", "self", ",", "username", ",", "password", ")", ":", "data", "=", "{", "'username'", ":", "username", ",", "'password'", ":", "password", ",", "'grant_type'", ":", "'password'", "}", "r", "=", "self", ".", "spark_api", ".", "oauth", ".", "token", ".", "POST", "(", "auth", "=", "(", "'spark'", ",", "'spark'", ")", ",", "data", "=", "data", ",", "timeout", "=", "self", ".", "timeout", ")", "self", ".", "_check_error", "(", "r", ")", "return", "r", ".", "json", "(", ")", "[", "'access_token'", "]" ]
Retrieve the model of this Part as Part .
def model ( self ) : if self . category == Category . INSTANCE : model_id = self . _json_data [ 'model' ] . get ( 'id' ) return self . _client . model ( pk = model_id ) else : raise NotFoundError ( "Part {} has no model" . format ( self . name ) )
4,447
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L183-L205
[ "def", "run_tornado", "(", "self", ",", "args", ")", ":", "server", "=", "self", "import", "tornado", ".", "ioloop", "import", "tornado", ".", "web", "import", "tornado", ".", "websocket", "ioloop", "=", "tornado", ".", "ioloop", ".", "IOLoop", ".", "current", "(", ")", "class", "DevWebSocketHandler", "(", "tornado", ".", "websocket", ".", "WebSocketHandler", ")", ":", "def", "open", "(", "self", ")", ":", "super", "(", "DevWebSocketHandler", ",", "self", ")", ".", "open", "(", ")", "server", ".", "on_open", "(", "self", ")", "def", "on_message", "(", "self", ",", "message", ")", ":", "server", ".", "on_message", "(", "self", ",", "message", ")", "def", "on_close", "(", "self", ")", ":", "super", "(", "DevWebSocketHandler", ",", "self", ")", ".", "on_close", "(", ")", "server", ".", "on_close", "(", "self", ")", "class", "MainHandler", "(", "tornado", ".", "web", ".", "RequestHandler", ")", ":", "def", "get", "(", "self", ")", ":", "self", ".", "write", "(", "server", ".", "index_page", ")", "#: Set the call later method", "server", ".", "call_later", "=", "ioloop", ".", "call_later", "server", ".", "add_callback", "=", "ioloop", ".", "add_callback", "app", "=", "tornado", ".", "web", ".", "Application", "(", "[", "(", "r\"/\"", ",", "MainHandler", ")", ",", "(", "r\"/dev\"", ",", "DevWebSocketHandler", ")", ",", "]", ")", "app", ".", "listen", "(", "self", ".", "port", ")", "print", "(", "\"Tornado Dev server started on {}\"", ".", "format", "(", "self", ".", "port", ")", ")", "ioloop", ".", "start", "(", ")" ]
Retrieve the instances of this Part as a PartSet .
def instances ( self , * * kwargs ) : if self . category == Category . MODEL : return self . _client . parts ( model = self , category = Category . INSTANCE , * * kwargs ) else : raise NotFoundError ( "Part {} is not a model" . format ( self . name ) )
4,448
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L207-L234
[ "def", "cublasGetVersion", "(", "handle", ")", ":", "version", "=", "ctypes", ".", "c_int", "(", ")", "status", "=", "_libcublas", ".", "cublasGetVersion_v2", "(", "handle", ",", "ctypes", ".", "byref", "(", "version", ")", ")", "cublasCheckStatus", "(", "status", ")", "return", "version", ".", "value" ]
Retrieve the proxy model of this proxied Part as a Part .
def proxy_model ( self ) : if self . category != Category . MODEL : raise IllegalArgumentError ( "Part {} is not a model, therefore it cannot have a proxy model" . format ( self ) ) if 'proxy' in self . _json_data and self . _json_data . get ( 'proxy' ) : catalog_model_id = self . _json_data [ 'proxy' ] . get ( 'id' ) return self . _client . model ( pk = catalog_model_id ) else : raise NotFoundError ( "Part {} is not a proxy" . format ( self . name ) )
4,449
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L255-L281
[ "def", "_get_graph", "(", "self", ",", "ctx", ",", "bundle", ",", "extensions", ",", "caller", "=", "None", ")", ":", "request", "=", "ctx", ".", "get", "(", "'request'", ")", "if", "request", "is", "None", ":", "request", "=", "get_current_request", "(", ")", "if", "':'", "in", "bundle", ":", "config_name", ",", "bundle", "=", "bundle", ".", "split", "(", "':'", ")", "else", ":", "config_name", "=", "'DEFAULT'", "webpack", "=", "request", ".", "webpack", "(", "config_name", ")", "assets", "=", "(", "caller", "(", "a", ")", "for", "a", "in", "webpack", ".", "get_bundle", "(", "bundle", ",", "extensions", ")", ")", "return", "''", ".", "join", "(", "assets", ")" ]
Add a new child instance based on a model to this part .
def add ( self , model , * * kwargs ) : # type: (Part, **Any) -> Part if self . category != Category . INSTANCE : raise APIError ( "Part should be of category INSTANCE" ) return self . _client . create_part ( self , model , * * kwargs )
4,450
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L283-L311
[ "def", "StreamMetrics", "(", "self", ",", "request_iterator", ",", "context", ")", ":", "LOG", ".", "debug", "(", "\"StreamMetrics called\"", ")", "# set up arguments", "collect_args", "=", "(", "next", "(", "request_iterator", ")", ")", "max_metrics_buffer", "=", "0", "max_collect_duration", "=", "0", "cfg", "=", "Metric", "(", "pb", "=", "collect_args", ".", "Metrics_Arg", ".", "metrics", "[", "0", "]", ")", "try", ":", "max_metrics_buffer", "=", "int", "(", "cfg", ".", "config", "[", "\"max-metrics-buffer\"", "]", ")", "except", "Exception", "as", "ex", ":", "LOG", ".", "debug", "(", "\"Unable to get schedule parameters: {}\"", ".", "format", "(", "ex", ")", ")", "try", ":", "max_collect_duration", "=", "int", "(", "cfg", ".", "config", "[", "\"max-collect-duration\"", "]", ")", "except", "Exception", "as", "ex", ":", "LOG", ".", "debug", "(", "\"Unable to get schedule parameters: {}\"", ".", "format", "(", "ex", ")", ")", "if", "max_metrics_buffer", ">", "0", ":", "self", ".", "max_metrics_buffer", "=", "max_metrics_buffer", "if", "max_collect_duration", ">", "0", ":", "self", ".", "max_collect_duration", "=", "max_collect_duration", "# start collection thread", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_stream_wrapper", ",", "args", "=", "(", "collect_args", ",", ")", ",", ")", "thread", ".", "daemon", "=", "True", "thread", ".", "start", "(", ")", "# stream metrics", "metrics", "=", "[", "]", "metrics_to_stream", "=", "[", "]", "stream_timeout", "=", "self", ".", "max_collect_duration", "while", "context", ".", "is_active", "(", ")", ":", "try", ":", "# wait for metrics until timeout is reached", "t_start", "=", "time", ".", "time", "(", ")", "metrics", "=", "self", ".", "metrics_queue", ".", "get", "(", "block", "=", "True", ",", "timeout", "=", "stream_timeout", ")", "elapsed", "=", "round", "(", "time", ".", "time", "(", ")", "-", "t_start", ")", "stream_timeout", "-=", "elapsed", "except", "queue", ".", "Empty", ":", 
"LOG", ".", "debug", "(", "\"Max collect duration exceeded. Streaming {} metrics\"", ".", "format", "(", "len", "(", "metrics_to_stream", ")", ")", ")", "metrics_col", "=", "CollectReply", "(", "Metrics_Reply", "=", "MetricsReply", "(", "metrics", "=", "[", "m", ".", "pb", "for", "m", "in", "metrics_to_stream", "]", ")", ")", "metrics_to_stream", "=", "[", "]", "stream_timeout", "=", "self", ".", "max_collect_duration", "yield", "metrics_col", "else", ":", "for", "metric", "in", "metrics", ":", "metrics_to_stream", ".", "append", "(", "metric", ")", "if", "len", "(", "metrics_to_stream", ")", "==", "self", ".", "max_metrics_buffer", ":", "LOG", ".", "debug", "(", "\"Max metrics buffer reached. Streaming {} metrics\"", ".", "format", "(", "len", "(", "metrics_to_stream", ")", ")", ")", "metrics_col", "=", "CollectReply", "(", "Metrics_Reply", "=", "MetricsReply", "(", "metrics", "=", "[", "m", ".", "pb", "for", "m", "in", "metrics_to_stream", "]", ")", ")", "metrics_to_stream", "=", "[", "]", "stream_timeout", "=", "self", ".", "max_collect_duration", "yield", "metrics_col", "# stream metrics if max_metrics_buffer is 0 or enough metrics has been collected", "if", "self", ".", "max_metrics_buffer", "==", "0", ":", "LOG", ".", "debug", "(", "\"Max metrics buffer set to 0. Streaming {} metrics\"", ".", "format", "(", "len", "(", "metrics_to_stream", ")", ")", ")", "metrics_col", "=", "CollectReply", "(", "Metrics_Reply", "=", "MetricsReply", "(", "metrics", "=", "[", "m", ".", "pb", "for", "m", "in", "metrics_to_stream", "]", ")", ")", "metrics_to_stream", "=", "[", "]", "stream_timeout", "=", "self", ".", "max_collect_duration", "yield", "metrics_col", "# sent notification if stream has been stopped", "self", ".", "done_queue", ".", "put", "(", "True", ")" ]
Add a new instance of this model to a part .
def add_to ( self , parent , * * kwargs ) : # type: (Part, **Any) -> Part if self . category != Category . MODEL : raise APIError ( "Part should be of category MODEL" ) return self . _client . create_part ( parent , self , * * kwargs )
4,451
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L313-L344
[ "def", "cublasGetVersion", "(", "handle", ")", ":", "version", "=", "ctypes", ".", "c_int", "(", ")", "status", "=", "_libcublas", ".", "cublasGetVersion_v2", "(", "handle", ",", "ctypes", ".", "byref", "(", "version", ")", ")", "cublasCheckStatus", "(", "status", ")", "return", "version", ".", "value" ]
Add a new child model to this model .
def add_model ( self , * args , * * kwargs ) : # type: (*Any, **Any) -> Part if self . category != Category . MODEL : raise APIError ( "Part should be of category MODEL" ) return self . _client . create_model ( self , * args , * * kwargs )
4,452
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L346-L360
[ "def", "_do_http", "(", "opts", ",", "profile", "=", "'default'", ")", ":", "ret", "=", "{", "}", "url", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:url'", ".", "format", "(", "profile", ")", ",", "''", ")", "user", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:user'", ".", "format", "(", "profile", ")", ",", "''", ")", "passwd", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:pass'", ".", "format", "(", "profile", ")", ",", "''", ")", "realm", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:realm'", ".", "format", "(", "profile", ")", ",", "''", ")", "timeout", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:timeout'", ".", "format", "(", "profile", ")", ",", "''", ")", "if", "not", "url", ":", "raise", "Exception", "(", "'missing url in profile {0}'", ".", "format", "(", "profile", ")", ")", "if", "user", "and", "passwd", ":", "auth", "=", "_auth", "(", "url", "=", "url", ",", "realm", "=", "realm", ",", "user", "=", "user", ",", "passwd", "=", "passwd", ")", "_install_opener", "(", "auth", ")", "url", "+=", "'?{0}'", ".", "format", "(", "_urlencode", "(", "opts", ")", ")", "for", "line", "in", "_urlopen", "(", "url", ",", "timeout", "=", "timeout", ")", ".", "read", "(", ")", ".", "splitlines", "(", ")", ":", "splt", "=", "line", ".", "split", "(", "'='", ",", "1", ")", "if", "splt", "[", "0", "]", "in", "ret", ":", "ret", "[", "splt", "[", "0", "]", "]", "+=", "',{0}'", ".", "format", "(", "splt", "[", "1", "]", ")", "else", ":", "ret", "[", "splt", "[", "0", "]", "]", "=", "splt", "[", "1", "]", "return", "ret" ]
Add a new property to this model .
def add_property ( self , * args , * * kwargs ) : # type: (*Any, **Any) -> Property if self . category != Category . MODEL : raise APIError ( "Part should be of category MODEL" ) return self . _client . create_property ( self , * args , * * kwargs )
4,453
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L398-L410
[ "def", "get_doc", "(", "logger", "=", "None", ",", "plugin", "=", "None", ",", "reporthook", "=", "None", ")", ":", "from", "ginga", ".", "GingaPlugin", "import", "GlobalPlugin", ",", "LocalPlugin", "if", "isinstance", "(", "plugin", ",", "GlobalPlugin", ")", ":", "plugin_page", "=", "'plugins_global'", "plugin_name", "=", "str", "(", "plugin", ")", "elif", "isinstance", "(", "plugin", ",", "LocalPlugin", ")", ":", "plugin_page", "=", "'plugins_local'", "plugin_name", "=", "str", "(", "plugin", ")", "else", ":", "plugin_page", "=", "None", "plugin_name", "=", "None", "try", ":", "index_html", "=", "_download_rtd_zip", "(", "reporthook", "=", "reporthook", ")", "# Download failed, use online resource", "except", "Exception", "as", "e", ":", "url", "=", "'https://ginga.readthedocs.io/en/latest/'", "if", "plugin_name", "is", "not", "None", ":", "if", "toolkit", ".", "family", ".", "startswith", "(", "'qt'", ")", ":", "# This displays plugin docstring.", "url", "=", "None", "else", ":", "# This redirects to online doc.", "url", "+=", "'manual/{}/{}.html'", ".", "format", "(", "plugin_page", ",", "plugin_name", ")", "if", "logger", "is", "not", "None", ":", "logger", ".", "error", "(", "str", "(", "e", ")", ")", "# Use local resource", "else", ":", "pfx", "=", "'file:'", "url", "=", "'{}{}'", ".", "format", "(", "pfx", ",", "index_html", ")", "# https://github.com/rtfd/readthedocs.org/issues/2803", "if", "plugin_name", "is", "not", "None", ":", "url", "+=", "'#{}'", ".", "format", "(", "plugin_name", ")", "return", "url" ]
Edit part name and property values in one go .
def update ( self , name = None , update_dict = None , bulk = True , * * kwargs ) : # dict(name=name, properties=json.dumps(update_dict))) with property ids:value action = 'bulk_update_properties' request_body = dict ( ) for prop_name_or_id , property_value in update_dict . items ( ) : if is_uuid ( prop_name_or_id ) : request_body [ prop_name_or_id ] = property_value else : request_body [ self . property ( prop_name_or_id ) . id ] = property_value if bulk and len ( update_dict . keys ( ) ) > 1 : if name : if not isinstance ( name , str ) : raise IllegalArgumentError ( "Name of the part should be provided as a string" ) r = self . _client . _request ( 'PUT' , self . _client . _build_url ( 'part' , part_id = self . id ) , data = dict ( name = name , properties = json . dumps ( request_body ) , * * kwargs ) , params = dict ( select_action = action ) ) if r . status_code != requests . codes . ok : # pragma: no cover raise APIError ( '{}: {}' . format ( str ( r ) , r . content ) ) else : for property_name , property_value in update_dict . items ( ) : self . property ( property_name ) . value = property_value
4,454
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L505-L556
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
Add a part and update its properties in one go .
def add_with_properties ( self , model , name = None , update_dict = None , bulk = True , * * kwargs ) : if self . category != Category . INSTANCE : raise APIError ( "Part should be of category INSTANCE" ) name = name or model . name action = 'new_instance_with_properties' properties_update_dict = dict ( ) for prop_name_or_id , property_value in update_dict . items ( ) : if is_uuid ( prop_name_or_id ) : properties_update_dict [ prop_name_or_id ] = property_value else : properties_update_dict [ model . property ( prop_name_or_id ) . id ] = property_value if bulk : r = self . _client . _request ( 'POST' , self . _client . _build_url ( 'parts' ) , data = dict ( name = name , model = model . id , parent = self . id , properties = json . dumps ( properties_update_dict ) , * * kwargs ) , params = dict ( select_action = action ) ) if r . status_code != requests . codes . created : # pragma: no cover raise APIError ( '{}: {}' . format ( str ( r ) , r . content ) ) return Part ( r . json ( ) [ 'results' ] [ 0 ] , client = self . _client ) else : # do the old way new_part = self . add ( model , name = name ) # type: Part new_part . update ( update_dict = update_dict , bulk = bulk ) return new_part
4,455
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L558-L618
[ "def", "handle_not_found", "(", "exception", ",", "*", "*", "extra", ")", ":", "assert", "isinstance", "(", "exception", ",", "NotFound", ")", "page", "=", "Page", ".", "query", ".", "filter", "(", "db", ".", "or_", "(", "Page", ".", "url", "==", "request", ".", "path", ",", "Page", ".", "url", "==", "request", ".", "path", "+", "\"/\"", ")", ")", ".", "first", "(", ")", "if", "page", ":", "_add_url_rule", "(", "page", ".", "url", ")", "return", "render_template", "(", "[", "page", ".", "template_name", ",", "current_app", ".", "config", "[", "'PAGES_DEFAULT_TEMPLATE'", "]", "]", ",", "page", "=", "page", ")", "elif", "'wrapped'", "in", "extra", ":", "return", "extra", "[", "'wrapped'", "]", "(", "exception", ")", "else", ":", "return", "exception" ]
Order the properties of a part model using a list of property objects or property names or property id s .
def order_properties ( self , property_list = None ) : if self . category != Category . MODEL : raise APIError ( "Part should be of category MODEL" ) if not isinstance ( property_list , list ) : raise IllegalArgumentError ( 'Expected a list of strings or Property() objects, got a {} object' . format ( type ( property_list ) ) ) order_dict = dict ( ) for prop in property_list : if isinstance ( prop , ( str , text_type ) ) : order_dict [ self . property ( name = prop ) . id ] = property_list . index ( prop ) else : order_dict [ prop . id ] = property_list . index ( prop ) r = self . _client . _request ( 'PUT' , self . _client . _build_url ( 'part' , part_id = self . id ) , data = dict ( property_order = json . dumps ( order_dict ) ) ) if r . status_code != requests . codes . ok : # pragma: no cover raise APIError ( "Could not reorder properties" )
4,456
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L644-L685
[ "def", "run", "(", "wrapped", ")", ":", "@", "wraps", "(", "wrapped", ")", "def", "_run", "(", "self", ",", "query", ",", "bindings", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_reconnect_if_missing_connection", "(", ")", "start", "=", "time", ".", "time", "(", ")", "try", ":", "result", "=", "wrapped", "(", "self", ",", "query", ",", "bindings", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "Exception", "as", "e", ":", "result", "=", "self", ".", "_try_again_if_caused_by_lost_connection", "(", "e", ",", "query", ",", "bindings", ",", "wrapped", ")", "t", "=", "self", ".", "_get_elapsed_time", "(", "start", ")", "self", ".", "log_query", "(", "query", ",", "bindings", ",", "t", ")", "return", "result", "return", "_run" ]
Clone a part .
def clone ( self , * * kwargs ) : parent = self . parent ( ) return self . _client . _create_clone ( parent , self , * * kwargs )
4,457
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L719-L737
[ "def", "create_supercut", "(", "composition", ",", "outputfile", ",", "padding", ")", ":", "print", "(", "\"[+] Creating clips.\"", ")", "demo_supercut", "(", "composition", ",", "padding", ")", "# add padding when necessary", "for", "(", "clip", ",", "nextclip", ")", "in", "zip", "(", "composition", ",", "composition", "[", "1", ":", "]", ")", ":", "if", "(", "(", "nextclip", "[", "'file'", "]", "==", "clip", "[", "'file'", "]", ")", "and", "(", "nextclip", "[", "'start'", "]", "<", "clip", "[", "'end'", "]", ")", ")", ":", "nextclip", "[", "'start'", "]", "+=", "padding", "# put all clips together:", "all_filenames", "=", "set", "(", "[", "c", "[", "'file'", "]", "for", "c", "in", "composition", "]", ")", "videofileclips", "=", "dict", "(", "[", "(", "f", ",", "VideoFileClip", "(", "f", ")", ")", "for", "f", "in", "all_filenames", "]", ")", "cut_clips", "=", "[", "videofileclips", "[", "c", "[", "'file'", "]", "]", ".", "subclip", "(", "c", "[", "'start'", "]", ",", "c", "[", "'end'", "]", ")", "for", "c", "in", "composition", "]", "print", "(", "\"[+] Concatenating clips.\"", ")", "final_clip", "=", "concatenate", "(", "cut_clips", ")", "print", "(", "\"[+] Writing ouput file.\"", ")", "final_clip", ".", "to_videofile", "(", "outputfile", ",", "codec", "=", "\"libx264\"", ",", "temp_audiofile", "=", "'temp-audio.m4a'", ",", "remove_temp", "=", "True", ",", "audio_codec", "=", "'aac'", ")" ]
Copy the Part to target parent both of them having the same category .
def copy ( self , target_parent , name = None , include_children = True , include_instances = True ) : if self . category == Category . MODEL and target_parent . category == Category . MODEL : # Cannot add a model under an instance or vice versa copied_model = relocate_model ( part = self , target_parent = target_parent , name = name , include_children = include_children ) if include_instances : instances_to_be_copied = list ( self . instances ( ) ) parent_instances = list ( target_parent . instances ( ) ) for parent_instance in parent_instances : for instance in instances_to_be_copied : instance . populate_descendants ( ) move_part_instance ( part_instance = instance , target_parent = parent_instance , part_model = self , name = instance . name , include_children = include_children ) return copied_model elif self . category == Category . INSTANCE and target_parent . category == Category . INSTANCE : copied_instance = relocate_instance ( part = self , target_parent = target_parent , name = name , include_children = include_children ) return copied_instance else : raise IllegalArgumentError ( 'part "{}" and target parent "{}" must have the same category' )
4,458
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L739-L785
[ "def", "mol_supplier", "(", "lines", ",", "no_halt", ",", "assign_descriptors", ")", ":", "def", "sdf_block", "(", "lns", ")", ":", "mol", "=", "[", "]", "opt", "=", "[", "]", "is_mol", "=", "True", "for", "line", "in", "lns", ":", "if", "line", ".", "startswith", "(", "\"$$$$\"", ")", ":", "yield", "mol", "[", ":", "]", ",", "opt", "[", ":", "]", "is_mol", "=", "True", "mol", ".", "clear", "(", ")", "opt", ".", "clear", "(", ")", "elif", "line", ".", "startswith", "(", "\"M END\"", ")", ":", "is_mol", "=", "False", "elif", "is_mol", ":", "mol", ".", "append", "(", "line", ".", "rstrip", "(", ")", ")", "else", ":", "opt", ".", "append", "(", "line", ".", "rstrip", "(", ")", ")", "if", "mol", ":", "yield", "mol", ",", "opt", "for", "i", ",", "(", "mol", ",", "opt", ")", "in", "enumerate", "(", "sdf_block", "(", "lines", ")", ")", ":", "try", ":", "c", "=", "molecule", "(", "mol", ")", "if", "assign_descriptors", ":", "molutil", ".", "assign_descriptors", "(", "c", ")", "except", "ValueError", "as", "err", ":", "if", "no_halt", ":", "print", "(", "\"Unsupported symbol: {} (#{} in v2000reader)\"", ".", "format", "(", "err", ",", "i", "+", "1", ")", ")", "c", "=", "molutil", ".", "null_molecule", "(", "assign_descriptors", ")", "else", ":", "raise", "ValueError", "(", "\"Unsupported symbol: {}\"", ".", "format", "(", "err", ")", ")", "except", "RuntimeError", "as", "err", ":", "if", "no_halt", ":", "print", "(", "\"Failed to minimize ring: {} (#{} in v2000reader)\"", ".", "format", "(", "err", ",", "i", "+", "1", ")", ")", "else", ":", "raise", "RuntimeError", "(", "\"Failed to minimize ring: {}\"", ".", "format", "(", "err", ")", ")", "except", ":", "if", "no_halt", ":", "print", "(", "\"Unexpected error (#{} in v2000reader)\"", ".", "format", "(", "i", "+", "1", ")", ")", "c", "=", "molutil", ".", "null_molecule", "(", "assign_descriptors", ")", "c", ".", "data", "=", "optional_data", "(", "opt", ")", "yield", "c", "continue", "else", ":", "print", "(", 
"traceback", ".", "format_exc", "(", ")", ")", "raise", "Exception", "(", "\"Unsupported Error\"", ")", "c", ".", "data", "=", "optional_data", "(", "opt", ")", "yield", "c" ]
Move the Part to target parent both of them the same category .
def move ( self , target_parent , name = None , include_children = True , include_instances = True ) : if not name : name = self . name if self . category == Category . MODEL and target_parent . category == Category . MODEL : moved_model = relocate_model ( part = self , target_parent = target_parent , name = name , include_children = include_children ) if include_instances : retrieve_instances_to_copied = list ( self . instances ( ) ) retrieve_parent_instances = list ( target_parent . instances ( ) ) for parent_instance in retrieve_parent_instances : for instance in retrieve_instances_to_copied : instance . populate_descendants ( ) move_part_instance ( part_instance = instance , target_parent = parent_instance , part_model = self , name = instance . name , include_children = include_children ) self . delete ( ) return moved_model elif self . category == Category . INSTANCE and target_parent . category == Category . INSTANCE : moved_instance = relocate_instance ( part = self , target_parent = target_parent , name = name , include_children = include_children ) try : self . delete ( ) except APIError : model_of_instance = self . model ( ) model_of_instance . delete ( ) return moved_instance else : raise IllegalArgumentError ( 'part "{}" and target parent "{}" must have the same category' )
4,459
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L787-L839
[ "def", "generate_signed_url_v4", "(", "credentials", ",", "resource", ",", "expiration", ",", "api_access_endpoint", "=", "DEFAULT_ENDPOINT", ",", "method", "=", "\"GET\"", ",", "content_md5", "=", "None", ",", "content_type", "=", "None", ",", "response_type", "=", "None", ",", "response_disposition", "=", "None", ",", "generation", "=", "None", ",", "headers", "=", "None", ",", "query_parameters", "=", "None", ",", "_request_timestamp", "=", "None", ",", "# for testing only", ")", ":", "ensure_signed_credentials", "(", "credentials", ")", "expiration_seconds", "=", "get_expiration_seconds_v4", "(", "expiration", ")", "if", "_request_timestamp", "is", "None", ":", "now", "=", "NOW", "(", ")", "request_timestamp", "=", "now", ".", "strftime", "(", "\"%Y%m%dT%H%M%SZ\"", ")", "datestamp", "=", "now", ".", "date", "(", ")", ".", "strftime", "(", "\"%Y%m%d\"", ")", "else", ":", "request_timestamp", "=", "_request_timestamp", "datestamp", "=", "_request_timestamp", "[", ":", "8", "]", "client_email", "=", "credentials", ".", "signer_email", "credential_scope", "=", "\"{}/auto/storage/goog4_request\"", ".", "format", "(", "datestamp", ")", "credential", "=", "\"{}/{}\"", ".", "format", "(", "client_email", ",", "credential_scope", ")", "if", "headers", "is", "None", ":", "headers", "=", "{", "}", "if", "content_type", "is", "not", "None", ":", "headers", "[", "\"Content-Type\"", "]", "=", "content_type", "if", "content_md5", "is", "not", "None", ":", "headers", "[", "\"Content-MD5\"", "]", "=", "content_md5", "header_names", "=", "[", "key", ".", "lower", "(", ")", "for", "key", "in", "headers", "]", "if", "\"host\"", "not", "in", "header_names", ":", "headers", "[", "\"Host\"", "]", "=", "\"storage.googleapis.com\"", "if", "method", ".", "upper", "(", ")", "==", "\"RESUMABLE\"", ":", "method", "=", "\"POST\"", "headers", "[", "\"x-goog-resumable\"", "]", "=", "\"start\"", "canonical_headers", ",", "ordered_headers", "=", "get_canonical_headers", "(", "headers", ")", 
"canonical_header_string", "=", "(", "\"\\n\"", ".", "join", "(", "canonical_headers", ")", "+", "\"\\n\"", ")", "# Yes, Virginia, the extra newline is part of the spec.", "signed_headers", "=", "\";\"", ".", "join", "(", "[", "key", "for", "key", ",", "_", "in", "ordered_headers", "]", ")", "if", "query_parameters", "is", "None", ":", "query_parameters", "=", "{", "}", "else", ":", "query_parameters", "=", "{", "key", ":", "value", "or", "\"\"", "for", "key", ",", "value", "in", "query_parameters", ".", "items", "(", ")", "}", "query_parameters", "[", "\"X-Goog-Algorithm\"", "]", "=", "\"GOOG4-RSA-SHA256\"", "query_parameters", "[", "\"X-Goog-Credential\"", "]", "=", "credential", "query_parameters", "[", "\"X-Goog-Date\"", "]", "=", "request_timestamp", "query_parameters", "[", "\"X-Goog-Expires\"", "]", "=", "expiration_seconds", "query_parameters", "[", "\"X-Goog-SignedHeaders\"", "]", "=", "signed_headers", "if", "response_type", "is", "not", "None", ":", "query_parameters", "[", "\"response-content-type\"", "]", "=", "response_type", "if", "response_disposition", "is", "not", "None", ":", "query_parameters", "[", "\"response-content-disposition\"", "]", "=", "response_disposition", "if", "generation", "is", "not", "None", ":", "query_parameters", "[", "\"generation\"", "]", "=", "generation", "ordered_query_parameters", "=", "sorted", "(", "query_parameters", ".", "items", "(", ")", ")", "canonical_query_string", "=", "six", ".", "moves", ".", "urllib", ".", "parse", ".", "urlencode", "(", "ordered_query_parameters", ")", "canonical_elements", "=", "[", "method", ",", "resource", ",", "canonical_query_string", ",", "canonical_header_string", ",", "signed_headers", ",", "\"UNSIGNED-PAYLOAD\"", ",", "]", "canonical_request", "=", "\"\\n\"", ".", "join", "(", "canonical_elements", ")", "canonical_request_hash", "=", "hashlib", ".", "sha256", "(", "canonical_request", ".", "encode", "(", "\"ascii\"", ")", ")", ".", "hexdigest", "(", ")", "string_elements", "=", 
"[", "\"GOOG4-RSA-SHA256\"", ",", "request_timestamp", ",", "credential_scope", ",", "canonical_request_hash", ",", "]", "string_to_sign", "=", "\"\\n\"", ".", "join", "(", "string_elements", ")", "signature_bytes", "=", "credentials", ".", "sign_bytes", "(", "string_to_sign", ".", "encode", "(", "\"ascii\"", ")", ")", "signature", "=", "binascii", ".", "hexlify", "(", "signature_bytes", ")", ".", "decode", "(", "\"ascii\"", ")", "return", "\"{}{}?{}&X-Goog-Signature={}\"", ".", "format", "(", "api_access_endpoint", ",", "resource", ",", "canonical_query_string", ",", "signature", ")" ]
Internal function that is used for generation of the page where notebooks are organized by difficulty level .
def _generate_notebook_by_difficulty_body ( notebook_object , dict_by_difficulty ) : difficulty_keys = list ( dict_by_difficulty . keys ( ) ) difficulty_keys . sort ( ) for difficulty in difficulty_keys : markdown_cell = STAR_TABLE_HEADER markdown_cell = _set_star_value ( markdown_cell , int ( difficulty ) ) for notebook_file in dict_by_difficulty [ str ( difficulty ) ] : split_path = notebook_file . split ( "/" ) notebook_type = split_path [ - 2 ] notebook_name = split_path [ - 1 ] . split ( "&" ) [ 0 ] notebook_title = split_path [ - 1 ] . split ( "&" ) [ 1 ] markdown_cell += "\n\t<tr>\n\t\t<td width='20%' class='header_image_color_" + str ( NOTEBOOK_KEYS [ notebook_type ] ) + "'><img " "src='../../images/icons/" + notebook_type . title ( ) + ".png' width='15%'>\n\t\t</td>" markdown_cell += "\n\t\t<td width='60%' class='center_cell open_cell_light'>" + notebook_title + "\n\t\t</td>" markdown_cell += "\n\t\t<td width='20%' class='center_cell'>\n\t\t\t<a href='" "../" + notebook_type . title ( ) + "/" + notebook_name + "'><div class='file_icon'></div></a>\n\t\t</td>\n\t</tr>" markdown_cell += "</table>" # ==================== Insertion of HTML table in a new Notebook cell ====================== notebook_object [ "cells" ] . append ( nb . v4 . new_markdown_cell ( markdown_cell ) )
4,460
https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/biosignalsnotebooks/old/_factory.py#L442-L481
[ "async", "def", "set_max_relative_mod", "(", "self", ",", "max_mod", ",", "timeout", "=", "OTGW_DEFAULT_TIMEOUT", ")", ":", "if", "isinstance", "(", "max_mod", ",", "int", ")", "and", "not", "0", "<=", "max_mod", "<=", "100", ":", "return", "None", "cmd", "=", "OTGW_CMD_MAX_MOD", "status", "=", "{", "}", "ret", "=", "await", "self", ".", "_wait_for_cmd", "(", "cmd", ",", "max_mod", ",", "timeout", ")", "if", "ret", "not", "in", "[", "'-'", ",", "None", "]", ":", "ret", "=", "int", "(", "ret", ")", "if", "ret", "==", "'-'", ":", "status", "[", "DATA_SLAVE_MAX_RELATIVE_MOD", "]", "=", "None", "else", ":", "status", "[", "DATA_SLAVE_MAX_RELATIVE_MOD", "]", "=", "ret", "self", ".", "_update_status", "(", "status", ")", "return", "ret" ]
Internal function intended to generate the biosignalsnotebooks directories in order to the user can visualise and execute the Notebook created with notebook class in Jupyter .
def _generate_dir_structure ( path ) : # ============================ Creation of the main directory ================================== current_dir = ( path + "\\opensignalsfactory_environment" ) . replace ( "\\" , "/" ) if not os . path . isdir ( current_dir ) : os . makedirs ( current_dir ) # ================== Copy of 'images' 'styles' and 'signal_samples' folders ==================== path_cloned_files = ( os . path . abspath ( __file__ ) . split ( os . path . basename ( __file__ ) ) [ 0 ] + "\\notebook_files\\osf_files\\" ) . replace ( "\\" , "/" ) for var in [ "images" , "styles" , "signal_samples" ] : if os . path . isdir ( ( current_dir + "\\" + var ) . replace ( "\\" , "/" ) ) : shutil . rmtree ( ( current_dir + "\\" + var ) . replace ( "\\" , "/" ) ) src = ( path_cloned_files + "\\" + var ) . replace ( "\\" , "/" ) destination = ( current_dir + "\\" + var ) . replace ( "\\" , "/" ) shutil . copytree ( src , destination ) # =========================== Generation of 'Categories' folder ================================ current_dir += "/Categories" if not os . path . isdir ( current_dir ) : os . makedirs ( current_dir ) categories = list ( NOTEBOOK_KEYS . keys ( ) ) for category in categories : if not os . path . isdir ( current_dir + "/" + category ) : os . makedirs ( current_dir + "/" + category ) return current_dir
4,461
https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/biosignalsnotebooks/old/_factory.py#L669-L713
[ "def", "getChargingVoltage", "(", "self", ")", ":", "command", "=", "'$GG'", "currentAndVoltage", "=", "self", ".", "sendCommand", "(", "command", ")", "volts", "=", "float", "(", "currentAndVoltage", "[", "2", "]", ")", "/", "1000", "return", "volts" ]
Check if the provided XYPoint can be recreated by a Hue lamp .
def in_lamp_reach ( p ) : v1 = XYPoint ( Lime . x - Red . x , Lime . y - Red . y ) v2 = XYPoint ( Blue . x - Red . x , Blue . y - Red . y ) q = XYPoint ( p . x - Red . x , p . y - Red . y ) s = cross_product ( q , v2 ) / cross_product ( v1 , v2 ) t = cross_product ( v1 , q ) / cross_product ( v1 , v2 ) return ( s >= 0.0 ) and ( t >= 0.0 ) and ( s + t <= 1.0 )
4,462
https://github.com/sontek/bulby/blob/a2e741f843ee8e361b50a6079601108bfbe52526/bulby/color.py#L18-L27
[ "def", "_process_data", "(", "self", ",", "obj", ")", ":", "assert", "len", "(", "self", ".", "_waiters", ")", ">", "0", ",", "(", "type", "(", "obj", ")", ",", "obj", ")", "waiter", ",", "encoding", ",", "cb", "=", "self", ".", "_waiters", ".", "popleft", "(", ")", "if", "isinstance", "(", "obj", ",", "RedisError", ")", ":", "if", "isinstance", "(", "obj", ",", "ReplyError", ")", ":", "if", "obj", ".", "args", "[", "0", "]", ".", "startswith", "(", "'READONLY'", ")", ":", "obj", "=", "ReadOnlyError", "(", "obj", ".", "args", "[", "0", "]", ")", "_set_exception", "(", "waiter", ",", "obj", ")", "if", "self", ".", "_in_transaction", "is", "not", "None", ":", "self", ".", "_transaction_error", "=", "obj", "else", ":", "if", "encoding", "is", "not", "None", ":", "try", ":", "obj", "=", "decode", "(", "obj", ",", "encoding", ")", "except", "Exception", "as", "exc", ":", "_set_exception", "(", "waiter", ",", "exc", ")", "return", "if", "cb", "is", "not", "None", ":", "try", ":", "obj", "=", "cb", "(", "obj", ")", "except", "Exception", "as", "exc", ":", "_set_exception", "(", "waiter", ",", "exc", ")", "return", "_set_result", "(", "waiter", ",", "obj", ")", "if", "self", ".", "_in_transaction", "is", "not", "None", ":", "self", ".", "_in_transaction", ".", "append", "(", "(", "encoding", ",", "cb", ")", ")" ]
Find the closest point on a line . This point will be reproducible by a Hue lamp .
def get_closest_point_to_line ( A , B , P ) : AP = XYPoint ( P . x - A . x , P . y - A . y ) AB = XYPoint ( B . x - A . x , B . y - A . y ) ab2 = AB . x * AB . x + AB . y * AB . y ap_ab = AP . x * AB . x + AP . y * AB . y t = ap_ab / ab2 if t < 0.0 : t = 0.0 elif t > 1.0 : t = 1.0 return XYPoint ( A . x + AB . x * t , A . y + AB . y * t )
4,463
https://github.com/sontek/bulby/blob/a2e741f843ee8e361b50a6079601108bfbe52526/bulby/color.py#L30-L46
[ "def", "add_binary_media_types", "(", "self", ",", "logical_id", ",", "binary_media_types", ")", ":", "properties", "=", "self", ".", "_get_properties", "(", "logical_id", ")", "binary_media_types", "=", "binary_media_types", "or", "[", "]", "for", "value", "in", "binary_media_types", ":", "normalized_value", "=", "self", ".", "_normalize_binary_media_type", "(", "value", ")", "# If the value is not supported, then just skip it.", "if", "normalized_value", ":", "properties", ".", "binary_media_types", ".", "add", "(", "normalized_value", ")", "else", ":", "LOG", ".", "debug", "(", "\"Unsupported data type of binary media type value of resource '%s'\"", ",", "logical_id", ")" ]
Used to find the closest point to an unreproducible Color is unreproducible on each line in the CIE 1931 triangle .
def get_closest_point_to_point ( xy_point ) : pAB = get_closest_point_to_line ( Red , Lime , xy_point ) pAC = get_closest_point_to_line ( Blue , Red , xy_point ) pBC = get_closest_point_to_line ( Lime , Blue , xy_point ) # Get the distances per point and see which point is closer to our Point. dAB = get_distance_between_two_points ( xy_point , pAB ) dAC = get_distance_between_two_points ( xy_point , pAC ) dBC = get_distance_between_two_points ( xy_point , pBC ) lowest = dAB closest_point = pAB if ( dAC < lowest ) : lowest = dAC closest_point = pAC if ( dBC < lowest ) : lowest = dBC closest_point = pBC # Change the xy value to a value which is within the reach of the lamp. cx = closest_point . x cy = closest_point . y return XYPoint ( cx , cy )
4,464
https://github.com/sontek/bulby/blob/a2e741f843ee8e361b50a6079601108bfbe52526/bulby/color.py#L58-L87
[ "def", "_log_task_info", "(", "headers", ",", "extra_task_info", "=", "None", ")", ":", "ran_at", "=", "time", ".", "time", "(", ")", "task_eta", "=", "float", "(", "headers", ".", "get", "(", "'X-Appengine-Tasketa'", ",", "0.0", ")", ")", "task_info", "=", "{", "'retry_count'", ":", "headers", ".", "get", "(", "'X-Appengine-Taskretrycount'", ",", "''", ")", ",", "'execution_count'", ":", "headers", ".", "get", "(", "'X-Appengine-Taskexecutioncount'", ",", "''", ")", ",", "'task_eta'", ":", "task_eta", ",", "'ran'", ":", "ran_at", ",", "'gae_latency_seconds'", ":", "ran_at", "-", "task_eta", "}", "if", "extra_task_info", ":", "task_info", "[", "'extra'", "]", "=", "extra_task_info", "logging", ".", "debug", "(", "'TASK-INFO: %s'", ",", "json", ".", "dumps", "(", "task_info", ")", ")" ]
Returns X Y coordinates containing the closest avilable CIE 1931 based on the hex_value provided .
def get_xy_from_hex ( hex_value ) : red , green , blue = struct . unpack ( 'BBB' , codecs . decode ( hex_value , 'hex' ) ) r = ( ( red + 0.055 ) / ( 1.0 + 0.055 ) ) ** 2.4 if ( red > 0.04045 ) else ( red / 12.92 ) # pragma: noqa g = ( ( green + 0.055 ) / ( 1.0 + 0.055 ) ) ** 2.4 if ( green > 0.04045 ) else ( green / 12.92 ) # pragma: noqa b = ( ( blue + 0.055 ) / ( 1.0 + 0.055 ) ) ** 2.4 if ( blue > 0.04045 ) else ( blue / 12.92 ) # pragma: noqa X = r * 0.4360747 + g * 0.3850649 + b * 0.0930804 Y = r * 0.2225045 + g * 0.7168786 + b * 0.0406169 Z = r * 0.0139322 + g * 0.0971045 + b * 0.7141733 if X + Y + Z == 0 : cx = cy = 0 else : cx = X / ( X + Y + Z ) cy = Y / ( X + Y + Z ) # Check if the given XY value is within the colourreach of our lamps. xy_point = XYPoint ( cx , cy ) is_in_reach = in_lamp_reach ( xy_point ) if not is_in_reach : xy_point = get_closest_point_to_point ( xy_point ) return xy_point
4,465
https://github.com/sontek/bulby/blob/a2e741f843ee8e361b50a6079601108bfbe52526/bulby/color.py#L90-L117
[ "def", "clear", "(", "self", ")", ":", "io_loop", "=", "IOLoop", ".", "current", "(", ")", "while", "self", ".", "_tombstones", ":", "_", ",", "req_timeout", "=", "self", ".", "_tombstones", ".", "popitem", "(", ")", "io_loop", ".", "remove_timeout", "(", "req_timeout", ")" ]
Returns list of other keys that are mapped to the same value as specified key .
def get_other_keys ( self , key , including_current = False ) : other_keys = [ ] if key in self : other_keys . extend ( self . __dict__ [ str ( type ( key ) ) ] [ key ] ) if not including_current : other_keys . remove ( key ) return other_keys
4,466
https://github.com/formiaczek/multi_key_dict/blob/320826cadad8ae8664042c627fa90f82ecd7b6b7/multi_key_dict.py#L167-L176
[ "def", "upload", "(", "device", ",", "file_path", ",", "progress_callback", "=", "None", ",", "debug", "=", "False", ")", ":", "def", "progress_stage", "(", "stage", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Callback to update progress for the specified stage.\"\"\"", "if", "progress_callback", "is", "not", "None", ":", "progress_callback", "(", "stage", ",", "*", "*", "kwargs", ")", "return", "stage", "if", "device", "is", "None", ":", "raise", "NoDeviceError", "(", "'No device specified for firmware upload.'", ")", "fds", "=", "[", "device", ".", "_device", ".", "fileno", "(", ")", "]", "# Read firmware file into memory", "try", ":", "write_queue", "=", "read_firmware_file", "(", "file_path", ")", "except", "IOError", "as", "err", ":", "stage", "=", "progress_stage", "(", "Firmware", ".", "STAGE_ERROR", ",", "error", "=", "str", "(", "err", ")", ")", "return", "data_read", "=", "''", "got_response", "=", "False", "running", "=", "True", "stage", "=", "progress_stage", "(", "Firmware", ".", "STAGE_START", ")", "if", "device", ".", "is_reader_alive", "(", ")", ":", "# Close the reader thread and wait for it to die, otherwise", "# it interferes with our reading.", "device", ".", "stop_reader", "(", ")", "while", "device", ".", "_read_thread", ".", "is_alive", "(", ")", ":", "stage", "=", "progress_stage", "(", "Firmware", ".", "STAGE_WAITING", ")", "time", ".", "sleep", "(", "0.5", ")", "time", ".", "sleep", "(", "3", ")", "try", ":", "while", "running", ":", "rr", ",", "wr", ",", "_", "=", "select", ".", "select", "(", "fds", ",", "fds", ",", "[", "]", ",", "0.5", ")", "if", "len", "(", "rr", ")", "!=", "0", ":", "response", "=", "Firmware", ".", "read", "(", "device", ")", "for", "c", "in", "response", ":", "# HACK: Python 3 / PySerial hack.", "if", "isinstance", "(", "c", ",", "int", ")", ":", "c", "=", "chr", "(", "c", ")", "if", "c", "==", "'\\xff'", "or", "c", "==", "'\\r'", ":", "# HACK: odd case for our mystery \\xff byte.", "# Boot started, start looking 
for the !boot message", "if", "data_read", ".", "startswith", "(", "\"!sn\"", ")", ":", "stage", "=", "progress_stage", "(", "Firmware", ".", "STAGE_BOOT", ")", "# Entered bootloader upload mode, start uploading", "elif", "data_read", ".", "startswith", "(", "\"!load\"", ")", ":", "got_response", "=", "True", "stage", "=", "progress_stage", "(", "Firmware", ".", "STAGE_UPLOADING", ")", "# Checksum error", "elif", "data_read", "==", "'!ce'", ":", "running", "=", "False", "raise", "UploadChecksumError", "(", "\"Checksum error in {0}\"", ".", "format", "(", "file_path", ")", ")", "# Bad data", "elif", "data_read", "==", "'!no'", ":", "running", "=", "False", "raise", "UploadError", "(", "\"Incorrect data sent to bootloader.\"", ")", "# Firmware upload complete", "elif", "data_read", "==", "'!ok'", ":", "running", "=", "False", "stage", "=", "progress_stage", "(", "Firmware", ".", "STAGE_DONE", ")", "# All other responses are valid during upload.", "else", ":", "got_response", "=", "True", "if", "stage", "==", "Firmware", ".", "STAGE_UPLOADING", ":", "progress_stage", "(", "stage", ")", "data_read", "=", "''", "elif", "c", "==", "'\\n'", ":", "pass", "else", ":", "data_read", "+=", "c", "if", "len", "(", "wr", ")", "!=", "0", ":", "# Reboot device", "if", "stage", "in", "[", "Firmware", ".", "STAGE_START", ",", "Firmware", ".", "STAGE_WAITING", "]", ":", "device", ".", "write", "(", "'='", ")", "stage", "=", "progress_stage", "(", "Firmware", ".", "STAGE_WAITING_ON_LOADER", ")", "# Enter bootloader", "elif", "stage", "==", "Firmware", ".", "STAGE_BOOT", ":", "device", ".", "write", "(", "'='", ")", "stage", "=", "progress_stage", "(", "Firmware", ".", "STAGE_LOAD", ")", "# Upload firmware", "elif", "stage", "==", "Firmware", ".", "STAGE_UPLOADING", ":", "if", "len", "(", "write_queue", ")", ">", "0", "and", "got_response", "==", "True", ":", "got_response", "=", "False", "device", ".", "write", "(", "write_queue", ".", "popleft", "(", ")", ")", "except", "UploadError", 
"as", "err", ":", "stage", "=", "progress_stage", "(", "Firmware", ".", "STAGE_ERROR", ",", "error", "=", "str", "(", "err", ")", ")", "else", ":", "stage", "=", "progress_stage", "(", "Firmware", ".", "STAGE_DONE", ")" ]
Returns an iterator over the dictionary s keys .
def iterkeys ( self , key_type = None , return_all_keys = False ) : if ( key_type is not None ) : the_key = str ( key_type ) if the_key in self . __dict__ : for key in self . __dict__ [ the_key ] . keys ( ) : if return_all_keys : yield self . __dict__ [ the_key ] [ key ] else : yield key else : for keys in self . items_dict . keys ( ) : yield keys
4,467
https://github.com/formiaczek/multi_key_dict/blob/320826cadad8ae8664042c627fa90f82ecd7b6b7/multi_key_dict.py#L201-L217
[ "def", "area_orifice", "(", "Height", ",", "RatioVCOrifice", ",", "FlowRate", ")", ":", "#Checking input validity", "ut", ".", "check_range", "(", "[", "Height", ",", "\">0\"", ",", "\"Height\"", "]", ",", "[", "FlowRate", ",", "\">0\"", ",", "\"Flow rate\"", "]", ",", "[", "RatioVCOrifice", ",", "\"0-1, >0\"", ",", "\"VC orifice ratio\"", "]", ")", "return", "FlowRate", "/", "(", "RatioVCOrifice", "*", "np", ".", "sqrt", "(", "2", "*", "gravity", ".", "magnitude", "*", "Height", ")", ")" ]
Returns an iterator over the dictionary s values .
def itervalues ( self , key_type = None ) : if ( key_type is not None ) : intermediate_key = str ( key_type ) if intermediate_key in self . __dict__ : for direct_key in self . __dict__ [ intermediate_key ] . values ( ) : yield self . items_dict [ direct_key ] else : for value in self . items_dict . values ( ) : yield value
4,468
https://github.com/formiaczek/multi_key_dict/blob/320826cadad8ae8664042c627fa90f82ecd7b6b7/multi_key_dict.py#L219-L230
[ "def", "segment_centre_of_mass", "(", "seg", ")", ":", "h", "=", "mm", ".", "segment_length", "(", "seg", ")", "r0", "=", "seg", "[", "0", "]", "[", "COLS", ".", "R", "]", "r1", "=", "seg", "[", "1", "]", "[", "COLS", ".", "R", "]", "num", "=", "r0", "*", "r0", "+", "2", "*", "r0", "*", "r1", "+", "3", "*", "r1", "*", "r1", "denom", "=", "4", "*", "(", "r0", "*", "r0", "+", "r0", "*", "r1", "+", "r1", "*", "r1", ")", "centre_of_mass_z_loc", "=", "num", "/", "denom", "return", "seg", "[", "0", "]", "[", "COLS", ".", "XYZ", "]", "+", "(", "centre_of_mass_z_loc", "/", "h", ")", "*", "(", "seg", "[", "1", "]", "[", "COLS", ".", "XYZ", "]", "-", "seg", "[", "0", "]", "[", "COLS", ".", "XYZ", "]", ")" ]
Returns a copy of the dictionary s keys .
def keys ( self , key_type = None ) : if key_type is not None : intermediate_key = str ( key_type ) if intermediate_key in self . __dict__ : return self . __dict__ [ intermediate_key ] . keys ( ) else : all_keys = { } # in order to preserve keys() type (dict_keys for python3) for keys in self . items_dict . keys ( ) : all_keys [ keys ] = None return all_keys . keys ( )
4,469
https://github.com/formiaczek/multi_key_dict/blob/320826cadad8ae8664042c627fa90f82ecd7b6b7/multi_key_dict.py#L239-L251
[ "def", "compare_balance", "(", "self", ",", "operator", ",", "or_equals", ",", "amount", ")", ":", "amount", "=", "int", "(", "amount", ")", "if", "operator", "==", "'less'", ":", "if", "or_equals", ":", "self", ".", "assertLessEqual", "(", "self", ".", "balance", ",", "amount", ")", "else", ":", "self", ".", "assertLess", "(", "self", ".", "balance", ",", "amount", ")", "elif", "or_equals", ":", "self", ".", "assertGreaterEqual", "(", "self", ".", "balance", ",", "amount", ")", "else", ":", "self", ".", "assertGreater", "(", "self", ".", "balance", ",", "amount", ")" ]
Returns a copy of the dictionary s values .
def values ( self , key_type = None ) : if ( key_type is not None ) : all_items = { } # in order to preserve keys() type (dict_values for python3) keys_used = set ( ) direct_key = str ( key_type ) if direct_key in self . __dict__ : for intermediate_key in self . __dict__ [ direct_key ] . values ( ) : if not intermediate_key in keys_used : all_items [ intermediate_key ] = self . items_dict [ intermediate_key ] keys_used . add ( intermediate_key ) return all_items . values ( ) else : return self . items_dict . values ( )
4,470
https://github.com/formiaczek/multi_key_dict/blob/320826cadad8ae8664042c627fa90f82ecd7b6b7/multi_key_dict.py#L253-L268
[ "def", "get_expr_id", "(", "self", ",", "search_group", ",", "search", ",", "lars_id", ",", "instruments", ",", "gps_start_time", ",", "gps_end_time", ",", "comments", "=", "None", ")", ":", "# create string from instrument set", "instruments", "=", "ifos_from_instrument_set", "(", "instruments", ")", "# look for the ID", "for", "row", "in", "self", ":", "if", "(", "row", ".", "search_group", ",", "row", ".", "search", ",", "row", ".", "lars_id", ",", "row", ".", "instruments", ",", "row", ".", "gps_start_time", ",", "row", ".", "gps_end_time", ",", "row", ".", "comments", ")", "==", "(", "search_group", ",", "search", ",", "lars_id", ",", "instruments", ",", "gps_start_time", ",", "gps_end_time", ",", "comments", ")", ":", "# found it", "return", "row", ".", "experiment_id", "# experiment not found in table", "return", "None" ]
Internal method to add an item to the multi - key dictionary
def __add_item ( self , item , keys = None ) : if ( not keys or not len ( keys ) ) : raise Exception ( 'Error in %s.__add_item(%s, keys=tuple/list of items): need to specify a tuple/list containing at least one key!' % ( self . __class__ . __name__ , str ( item ) ) ) direct_key = tuple ( keys ) # put all keys in a tuple, and use it as a key for key in keys : key_type = str ( type ( key ) ) # store direct key as a value in an intermediate dictionary if ( not key_type in self . __dict__ ) : self . __setattr__ ( key_type , dict ( ) ) self . __dict__ [ key_type ] [ key ] = direct_key # store the value in the actual dictionary if ( not 'items_dict' in self . __dict__ ) : self . items_dict = dict ( ) self . items_dict [ direct_key ] = item
4,471
https://github.com/formiaczek/multi_key_dict/blob/320826cadad8ae8664042c627fa90f82ecd7b6b7/multi_key_dict.py#L277-L294
[ "def", "extract_bus_routine", "(", "page", ")", ":", "if", "not", "isinstance", "(", "page", ",", "pq", ")", ":", "page", "=", "pq", "(", "page", ")", "stations", "=", "extract_stations", "(", "page", ")", "return", "{", "# Routine name.", "'name'", ":", "extract_routine_name", "(", "page", ")", ",", "# Bus stations.", "'stations'", ":", "stations", ",", "# Current routine.", "'current'", ":", "extract_current_routine", "(", "page", ",", "stations", ")", "}" ]
Return the value at index specified as key .
def get ( self , key , default = None ) : if key in self : return self . items_dict [ self . __dict__ [ str ( type ( key ) ) ] [ key ] ] else : return default
4,472
https://github.com/formiaczek/multi_key_dict/blob/320826cadad8ae8664042c627fa90f82ecd7b6b7/multi_key_dict.py#L296-L301
[ "def", "draw_header", "(", "self", ",", "stream", ",", "header", ")", ":", "stream", ".", "writeln", "(", "'='", "*", "(", "len", "(", "header", ")", "+", "4", ")", ")", "stream", ".", "writeln", "(", "'| '", "+", "header", "+", "' |'", ")", "stream", ".", "writeln", "(", "'='", "*", "(", "len", "(", "header", ")", "+", "4", ")", ")", "stream", ".", "writeln", "(", ")" ]
Extract messages from Django template string .
def extract_translations ( self , string ) : trans = [ ] for t in Lexer ( string . decode ( "utf-8" ) , None ) . tokenize ( ) : if t . token_type == TOKEN_BLOCK : if not t . contents . startswith ( ( self . tranz_tag , self . tranzchoice_tag ) ) : continue is_tranzchoice = t . contents . startswith ( self . tranzchoice_tag + " " ) kwargs = { "id" : self . _match_to_transvar ( id_re , t . contents ) , "number" : self . _match_to_transvar ( number_re , t . contents ) , "domain" : self . _match_to_transvar ( domain_re , t . contents ) , "locale" : self . _match_to_transvar ( locale_re , t . contents ) , "is_transchoice" : is_tranzchoice , "parameters" : TransVar ( [ x . split ( "=" ) [ 0 ] . strip ( ) for x in properties_re . findall ( t . contents ) if x ] , TransVar . LITERAL ) , "lineno" : t . lineno , } trans . append ( Translation ( * * kwargs ) ) return trans
4,473
https://github.com/adamziel/django_translate/blob/43d8ef94a5c230abbdc89f3dbc623313fde998f2/django_translate/extractors/django_template.py#L32-L58
[ "def", "split", "(", "self", ",", "verbose", "=", "None", ",", "end_in_new_line", "=", "None", ")", ":", "elapsed_time", "=", "self", ".", "get_elapsed_time", "(", ")", "self", ".", "split_elapsed_time", ".", "append", "(", "elapsed_time", ")", "self", ".", "_cumulative_elapsed_time", "+=", "elapsed_time", "self", ".", "_elapsed_time", "=", "datetime", ".", "timedelta", "(", ")", "if", "verbose", "is", "None", ":", "verbose", "=", "self", ".", "verbose_end", "if", "verbose", ":", "if", "end_in_new_line", "is", "None", ":", "end_in_new_line", "=", "self", ".", "end_in_new_line", "if", "end_in_new_line", ":", "self", ".", "log", "(", "\"{} done in {}\"", ".", "format", "(", "self", ".", "description", ",", "elapsed_time", ")", ")", "else", ":", "self", ".", "log", "(", "\" done in {}\"", ".", "format", "(", "elapsed_time", ")", ")", "self", ".", "_start_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")" ]
Returns the next marker .
def next ( self ) : if self . _mode != "r" : raise UnsupportedOperation ( "not available in 'w' mode" ) self . _n += 1 if self . _n > self . _nb_markers : raise StopIteration ( ) return self . _bim . index [ self . _n - 1 ] , self . _read_current_marker ( )
4,474
https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L181-L196
[ "def", "remove_organization", "(", "self", ",", "service_desk_id", ",", "organization_id", ")", ":", "log", ".", "warning", "(", "'Removing organization...'", ")", "url", "=", "'rest/servicedeskapi/servicedesk/{}/organization'", ".", "format", "(", "service_desk_id", ")", "data", "=", "{", "'organizationId'", ":", "organization_id", "}", "return", "self", ".", "delete", "(", "url", ",", "headers", "=", "self", ".", "experimental_headers", ",", "data", "=", "data", ")" ]
Reads the current marker and returns its genotypes .
def _read_current_marker ( self ) : return self . _geno_values [ np . frombuffer ( self . _bed . read ( self . _nb_bytes ) , dtype = np . uint8 ) ] . flatten ( order = "C" ) [ : self . _nb_samples ]
4,475
https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L198-L202
[ "def", "gc_velocity_update", "(", "particle", ",", "social", ",", "state", ")", ":", "gbest", "=", "state", ".", "swarm", "[", "gbest_idx", "(", "state", ".", "swarm", ")", "]", ".", "position", "if", "not", "np", ".", "array_equal", "(", "gbest", ",", "particle", ".", "position", ")", ":", "return", "std_velocity", "(", "particle", ",", "social", ",", "state", ")", "rho", "=", "state", ".", "params", "[", "'rho'", "]", "inertia", "=", "state", ".", "params", "[", "'inertia'", "]", "v_max", "=", "state", ".", "params", "[", "'v_max'", "]", "size", "=", "particle", ".", "position", ".", "size", "r2", "=", "state", ".", "rng", ".", "uniform", "(", "0.0", ",", "1.0", ",", "size", ")", "velocity", "=", "__gc_velocity_equation__", "(", "inertia", ",", "rho", ",", "r2", ",", "particle", ",", "gbest", ")", "return", "__clamp__", "(", "velocity", ",", "v_max", ")" ]
Gets to a certain marker position in the BED file .
def seek ( self , n ) : if self . _mode != "r" : raise UnsupportedOperation ( "not available in 'w' mode" ) if 0 <= n < self . _nb_markers : self . _n = n self . _bed . seek ( self . _get_seek_position ( n ) ) else : # Invalid seek value raise ValueError ( "invalid position in BED: {}" . format ( n ) )
4,476
https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L204-L220
[ "def", "_create_auth", "(", "team", ",", "timeout", "=", "None", ")", ":", "url", "=", "get_registry_url", "(", "team", ")", "contents", "=", "_load_auth", "(", ")", "auth", "=", "contents", ".", "get", "(", "url", ")", "if", "auth", "is", "not", "None", ":", "# If the access token expires within a minute, update it.", "if", "auth", "[", "'expires_at'", "]", "<", "time", ".", "time", "(", ")", "+", "60", ":", "try", ":", "auth", "=", "_update_auth", "(", "team", ",", "auth", "[", "'refresh_token'", "]", ",", "timeout", ")", "except", "CommandException", "as", "ex", ":", "raise", "CommandException", "(", "\"Failed to update the access token (%s). Run `quilt login%s` again.\"", "%", "(", "ex", ",", "' '", "+", "team", "if", "team", "else", "''", ")", ")", "contents", "[", "url", "]", "=", "auth", "_save_auth", "(", "contents", ")", "return", "auth" ]
Reads the BIM file .
def _read_bim ( self ) : # Reading the BIM file and setting the values bim = pd . read_csv ( self . bim_filename , delim_whitespace = True , names = [ "chrom" , "snp" , "cm" , "pos" , "a1" , "a2" ] , dtype = dict ( snp = str , a1 = str , a2 = str ) ) # Saving the index as integer bim [ "i" ] = bim . index # Checking for duplicated markers try : bim = bim . set_index ( "snp" , verify_integrity = True ) self . _has_duplicated = False except ValueError as e : # Setting this flag to true self . _has_duplicated = True # Finding the duplicated markers duplicated = bim . snp . duplicated ( keep = False ) duplicated_markers = bim . loc [ duplicated , "snp" ] duplicated_marker_counts = duplicated_markers . value_counts ( ) # The dictionary that will contain information about the duplicated # markers self . _dup_markers = { m : [ ] for m in duplicated_marker_counts . index } # Logging a warning logger . warning ( "Duplicated markers found" ) for marker , count in duplicated_marker_counts . iteritems ( ) : logger . warning ( " - {}: {:,d} times" . format ( marker , count ) ) logger . warning ( "Appending ':dupX' to the duplicated markers " "according to their location in the BIM file" ) # Renaming the markers counter = Counter ( ) for i , marker in duplicated_markers . iteritems ( ) : counter [ marker ] += 1 new_name = "{}:dup{}" . format ( marker , counter [ marker ] ) bim . loc [ i , "snp" ] = new_name # Updating the dictionary containing the duplicated markers self . _dup_markers [ marker ] . append ( new_name ) # Resetting the index bim = bim . set_index ( "snp" , verify_integrity = True ) # Encoding the allele # - The original 0 is the actual 2 (a1/a1) # - The original 2 is the actual 1 (a1/a2) # - The original 3 is the actual 0 (a2/a2) # - The original 1 is the actual -1 (no call) allele_encoding = np . array ( [ bim . a2 * 2 , bim . a1 + bim . a2 , bim . a1 * 2 , list ( repeat ( "00" , bim . shape [ 0 ] ) ) ] , dtype = "U2" , ) self . 
_allele_encoding = allele_encoding . T # Saving the data in the object self . _bim = bim [ [ "chrom" , "pos" , "cm" , "a1" , "a2" , "i" ] ] self . _nb_markers = self . _bim . shape [ 0 ]
4,477
https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L231-L295
[ "def", "logout", "(", "self", ")", ":", "self", ".", "revoke_refresh_token", "(", ")", "self", ".", "revoke_access_token", "(", ")", "self", ".", "_username", ",", "self", ".", "_password", "=", "None", ",", "None" ]
Reads the FAM file .
def _read_fam ( self ) : # Reading the FAM file and setting the values fam = pd . read_csv ( self . fam_filename , delim_whitespace = True , names = [ "fid" , "iid" , "father" , "mother" , "gender" , "status" ] , dtype = dict ( fid = str , iid = str , father = str , mother = str ) ) # Getting the byte and bit location of each samples fam [ "byte" ] = [ int ( np . ceil ( ( 1 + 1 ) / 4.0 ) ) - 1 for i in range ( len ( fam ) ) ] fam [ "bit" ] = [ ( i % 4 ) * 2 for i in range ( len ( fam ) ) ] # Saving the data in the object self . _fam = fam self . _nb_samples = self . _fam . shape [ 0 ]
4,478
https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L333-L349
[ "def", "_remove_player", "(", "self", ",", "player_id", ")", ":", "player", "=", "self", ".", "_mpris_players", ".", "get", "(", "player_id", ")", "if", "player", ":", "if", "player", ".", "get", "(", "\"subscription\"", ")", ":", "player", "[", "\"subscription\"", "]", ".", "disconnect", "(", ")", "del", "self", ".", "_mpris_players", "[", "player_id", "]" ]
Reads the BED file .
def _read_bed ( self ) : # Checking if BIM and BAM files were both read if ( self . _bim is None ) or ( self . _fam is None ) : raise RuntimeError ( "no BIM or FAM file were read" ) # The number of bytes per marker self . _nb_bytes = int ( np . ceil ( self . _nb_samples / 4.0 ) ) # Checking the file is valid by looking at the first 3 bytes and the # last entry (correct size) with open ( self . bed_filename , "rb" ) as bed_file : # Checking that the first two bytes are OK if ( ord ( bed_file . read ( 1 ) ) != 108 ) or ( ord ( bed_file . read ( 1 ) ) != 27 ) : raise ValueError ( "not a valid BED file: " "{}" . format ( self . bed_filename ) ) # Checking that the format is SNP-major if ord ( bed_file . read ( 1 ) ) != 1 : raise ValueError ( "not in SNP-major format (please recode): " "{}" . format ( self . bed_filename ) ) # Checking the last entry (for BED corruption) seek_index = self . _get_seek_position ( self . _bim . iloc [ - 1 , : ] . i ) bed_file . seek ( seek_index ) geno = self . _geno_values [ np . frombuffer ( bed_file . read ( self . _nb_bytes ) , dtype = np . uint8 ) ] . flatten ( order = "C" ) [ : self . _nb_samples ] if geno . shape [ 0 ] != self . _nb_samples : raise ValueError ( "invalid number of entries: corrupted BED?" ) # Opening the file for the rest of the operations (reading 3 bytes) self . _bed = open ( self . bed_filename , "rb" ) self . _bed . read ( 3 )
4,479
https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L375-L408
[ "def", "listen_dataset_events", "(", "self", ",", "owner_id", ",", "project_id", ",", "dataset_id", ")", ":", "if", "not", "self", ".", "_user_id", ":", "raise", "AmigoCloudError", "(", "self", ".", "error_msg", "[", "'logged_in_websockets'", "]", ")", "url", "=", "'/users/%s/projects/%s/datasets/%s/start_websocket_session'", "response", "=", "self", ".", "get", "(", "url", "%", "(", "owner_id", ",", "project_id", ",", "dataset_id", ")", ")", "websocket_session", "=", "response", "[", "'websocket_session'", "]", "auth_data", "=", "{", "'userid'", ":", "self", ".", "_user_id", ",", "'datasetid'", ":", "dataset_id", ",", "'websocket_session'", ":", "websocket_session", "}", "self", ".", "amigosocket", ".", "emit", "(", "'authenticate'", ",", "auth_data", ")" ]
Writes the BED first 3 bytes .
def _write_bed_header ( self ) : # Writing the first three bytes final_byte = 1 if self . _bed_format == "SNP-major" else 0 self . _bed . write ( bytearray ( ( 108 , 27 , final_byte ) ) )
4,480
https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L410-L414
[ "def", "disconnect_all", "(", "self", ",", "context", ",", "ports", ")", ":", "resource_details", "=", "self", ".", "_parse_remote_model", "(", "context", ")", "# execute command", "res", "=", "self", ".", "command_wrapper", ".", "execute_command_with_connection", "(", "context", ",", "self", ".", "virtual_switch_disconnect_command", ".", "disconnect_all", ",", "resource_details", ".", "vm_uuid", ")", "return", "set_command_result", "(", "result", "=", "res", ",", "unpicklable", "=", "False", ")" ]
Iterates over genotypes for a list of markers .
def iter_geno_marker ( self , markers , return_index = False ) : if self . _mode != "r" : raise UnsupportedOperation ( "not available in 'w' mode" ) # If string, we change to list if isinstance ( markers , str ) : markers = [ markers ] # Iterating over all markers if return_index : for marker in markers : geno , seek = self . get_geno_marker ( marker , return_index = True ) yield marker , geno , seek else : for marker in markers : yield marker , self . get_geno_marker ( marker )
4,481
https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L445-L471
[ "def", "format_log_context", "(", "msg", ",", "connection", "=", "None", ",", "keyspace", "=", "None", ")", ":", "connection_info", "=", "connection", "or", "'DEFAULT_CONNECTION'", "if", "keyspace", ":", "msg", "=", "'[Connection: {0}, Keyspace: {1}] {2}'", ".", "format", "(", "connection_info", ",", "keyspace", ",", "msg", ")", "else", ":", "msg", "=", "'[Connection: {0}] {1}'", ".", "format", "(", "connection_info", ",", "msg", ")", "return", "msg" ]
Gets the genotypes for a given marker .
def get_geno_marker ( self , marker , return_index = False ) : if self . _mode != "r" : raise UnsupportedOperation ( "not available in 'w' mode" ) # Check if the marker exists if marker not in self . _bim . index : raise ValueError ( "{}: marker not in BIM" . format ( marker ) ) # Seeking to the correct position seek_index = self . _bim . loc [ marker , "i" ] self . seek ( seek_index ) if return_index : return self . _read_current_marker ( ) , seek_index return self . _read_current_marker ( )
4,482
https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L489-L513
[ "async", "def", "save", "(", "self", ")", ":", "orig_owner_data", "=", "self", ".", "_orig_data", "[", "'owner_data'", "]", "new_owner_data", "=", "dict", "(", "self", ".", "_data", "[", "'owner_data'", "]", ")", "self", ".", "_changed_data", ".", "pop", "(", "'owner_data'", ",", "None", ")", "await", "super", "(", "Machine", ",", "self", ")", ".", "save", "(", ")", "params_diff", "=", "calculate_dict_diff", "(", "orig_owner_data", ",", "new_owner_data", ")", "if", "len", "(", "params_diff", ")", ">", "0", ":", "params_diff", "[", "'system_id'", "]", "=", "self", ".", "system_id", "await", "self", ".", "_handler", ".", "set_owner_data", "(", "*", "*", "params_diff", ")", "self", ".", "_data", "[", "'owner_data'", "]", "=", "self", ".", "_data", "[", "'owner_data'", "]" ]
Write genotypes to binary file .
def write_genotypes ( self , genotypes ) : if self . _mode != "w" : raise UnsupportedOperation ( "not available in 'r' mode" ) # Initializing the number of samples if required if self . _nb_values is None : self . _nb_values = len ( genotypes ) # Checking the expected number of samples if self . _nb_values != len ( genotypes ) : raise ValueError ( "{:,d} samples expected, got {:,d}" . format ( self . _nb_values , len ( genotypes ) , ) ) # Writing to file byte_array = [ g [ 0 ] | ( g [ 1 ] << 2 ) | ( g [ 2 ] << 4 ) | ( g [ 3 ] << 6 ) for g in self . _grouper ( ( _byte_recode [ geno ] for geno in genotypes ) , 4 ) ] self . _bed . write ( bytearray ( byte_array ) )
4,483
https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L531-L557
[ "def", "get_user_last_submissions", "(", "self", ",", "limit", "=", "5", ",", "request", "=", "None", ")", ":", "if", "request", "is", "None", ":", "request", "=", "{", "}", "request", ".", "update", "(", "{", "\"username\"", ":", "self", ".", "_user_manager", ".", "session_username", "(", ")", "}", ")", "# Before, submissions were first sorted by submission date, then grouped", "# and then resorted by submission date before limiting. Actually, grouping", "# and pushing, keeping the max date, followed by result filtering is much more", "# efficient", "data", "=", "self", ".", "_database", ".", "submissions", ".", "aggregate", "(", "[", "{", "\"$match\"", ":", "request", "}", ",", "{", "\"$group\"", ":", "{", "\"_id\"", ":", "{", "\"courseid\"", ":", "\"$courseid\"", ",", "\"taskid\"", ":", "\"$taskid\"", "}", ",", "\"submitted_on\"", ":", "{", "\"$max\"", ":", "\"$submitted_on\"", "}", ",", "\"submissions\"", ":", "{", "\"$push\"", ":", "{", "\"_id\"", ":", "\"$_id\"", ",", "\"result\"", ":", "\"$result\"", ",", "\"status\"", ":", "\"$status\"", ",", "\"courseid\"", ":", "\"$courseid\"", ",", "\"taskid\"", ":", "\"$taskid\"", ",", "\"submitted_on\"", ":", "\"$submitted_on\"", "}", "}", ",", "}", "}", ",", "{", "\"$project\"", ":", "{", "\"submitted_on\"", ":", "1", ",", "\"submissions\"", ":", "{", "# This could be replaced by $filter if mongo v3.2 is set as dependency", "\"$setDifference\"", ":", "[", "{", "\"$map\"", ":", "{", "\"input\"", ":", "\"$submissions\"", ",", "\"as\"", ":", "\"submission\"", ",", "\"in\"", ":", "{", "\"$cond\"", ":", "[", "{", "\"$eq\"", ":", "[", "\"$submitted_on\"", ",", "\"$$submission.submitted_on\"", "]", "}", ",", "\"$$submission\"", ",", "False", "]", "}", "}", "}", ",", "[", "False", "]", "]", "}", "}", "}", ",", "{", "\"$sort\"", ":", "{", "\"submitted_on\"", ":", "pymongo", ".", "DESCENDING", "}", "}", ",", "{", "\"$limit\"", ":", "limit", "}", "]", ")", "return", "[", "item", "[", "\"submissions\"", "]", "[", "0", "]", 
"for", "item", "in", "data", "]" ]
Generic Time Series Read from File Method
def _read ( self , directory , filename , session , path , name , extension , spatial = None , spatialReferenceID = None , replaceParamFile = None ) : # Assign file extension attribute to file object self . fileExtension = extension timeSeries = [ ] # Open file and parse into a data structure with open ( path , 'r' ) as f : for line in f : sline = line . strip ( ) . split ( ) record = { 'time' : sline [ 0 ] , 'values' : [ ] } for idx in range ( 1 , len ( sline ) ) : record [ 'values' ] . append ( sline [ idx ] ) timeSeries . append ( record ) self . _createTimeSeriesObjects ( timeSeries , filename )
4,484
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/tim.py#L60-L82
[ "def", "_get_cursor", "(", "self", ",", "n_retries", "=", "1", ")", ":", "n_tries_rem", "=", "n_retries", "+", "1", "while", "n_tries_rem", ">", "0", ":", "try", ":", "conn", "=", "self", ".", "_pool", ".", "getconn", "(", ")", "if", "self", ".", "pooling", "else", "self", ".", "_conn", "# autocommit=True obviates closing explicitly", "conn", ".", "autocommit", "=", "True", "cur", "=", "conn", ".", "cursor", "(", "cursor_factory", "=", "psycopg2", ".", "extras", ".", "DictCursor", ")", "cur", ".", "execute", "(", "\"set search_path = {self.url.schema};\"", ".", "format", "(", "self", "=", "self", ")", ")", "yield", "cur", "# contextmanager executes these when context exits", "cur", ".", "close", "(", ")", "if", "self", ".", "pooling", ":", "self", ".", "_pool", ".", "putconn", "(", "conn", ")", "break", "except", "psycopg2", ".", "OperationalError", ":", "_logger", ".", "warning", "(", "\"Lost connection to {url}; attempting reconnect\"", ".", "format", "(", "url", "=", "self", ".", "url", ")", ")", "if", "self", ".", "pooling", ":", "self", ".", "_pool", ".", "closeall", "(", ")", "self", ".", "_connect", "(", ")", "_logger", ".", "warning", "(", "\"Reconnected to {url}\"", ".", "format", "(", "url", "=", "self", ".", "url", ")", ")", "n_tries_rem", "-=", "1", "else", ":", "# N.B. Probably never reached", "raise", "HGVSError", "(", "\"Permanently lost connection to {url} ({n} retries)\"", ".", "format", "(", "url", "=", "self", ".", "url", ",", "n", "=", "n_retries", ")", ")" ]
Generic Time Series Write to File Method
def _write ( self , session , openFile , replaceParamFile ) : # Retrieve all time series timeSeries = self . timeSeries # Num TimeSeries numTS = len ( timeSeries ) # Transform into list of dictionaries for pivot tool valList = [ ] for tsNum , ts in enumerate ( timeSeries ) : values = ts . values for value in values : valDict = { 'time' : value . simTime , 'tsNum' : tsNum , 'value' : value . value } valList . append ( valDict ) # Use pivot function (from lib) to pivot the values into # a format that is easy to write. result = pivot ( valList , ( 'time' , ) , ( 'tsNum' , ) , 'value' ) # Write lines for line in result : valString = '' # Compile value string for n in range ( 0 , numTS ) : val = '%.6f' % line [ ( n , ) ] valString = '%s%s%s' % ( valString , ' ' * ( 13 - len ( str ( val ) ) ) , # Fancy spacing trick val ) openFile . write ( ' %.8f%s\n' % ( line [ 'time' ] , valString ) )
4,485
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/tim.py#L84-L121
[ "def", "get_account_balance", "(", "address", ",", "token_type", ",", "hostport", "=", "None", ",", "proxy", "=", "None", ")", ":", "assert", "proxy", "or", "hostport", ",", "'Need proxy or hostport'", "if", "proxy", "is", "None", ":", "proxy", "=", "connect_hostport", "(", "hostport", ")", "balance_schema", "=", "{", "'type'", ":", "'object'", ",", "'properties'", ":", "{", "'balance'", ":", "{", "'type'", ":", "'integer'", ",", "}", ",", "}", ",", "'required'", ":", "[", "'balance'", ",", "]", ",", "}", "schema", "=", "json_response_schema", "(", "balance_schema", ")", "try", ":", "resp", "=", "proxy", ".", "get_account_balance", "(", "address", ",", "token_type", ")", "resp", "=", "json_validate", "(", "schema", ",", "resp", ")", "if", "json_is_error", "(", "resp", ")", ":", "return", "resp", "except", "ValidationError", "as", "e", ":", "if", "BLOCKSTACK_DEBUG", ":", "log", ".", "exception", "(", "e", ")", "resp", "=", "{", "'error'", ":", "'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.'", ",", "'http_status'", ":", "502", "}", "return", "resp", "except", "socket", ".", "timeout", ":", "log", ".", "error", "(", "\"Connection timed out\"", ")", "resp", "=", "{", "'error'", ":", "'Connection to remote host timed out.'", ",", "'http_status'", ":", "503", "}", "return", "resp", "except", "socket", ".", "error", "as", "se", ":", "log", ".", "error", "(", "\"Connection error {}\"", ".", "format", "(", "se", ".", "errno", ")", ")", "resp", "=", "{", "'error'", ":", "'Connection to remote host failed.'", ",", "'http_status'", ":", "502", "}", "return", "resp", "except", "Exception", "as", "ee", ":", "if", "BLOCKSTACK_DEBUG", ":", "log", ".", "exception", "(", "ee", ")", "log", ".", "error", "(", "\"Caught exception while connecting to Blockstack node: {}\"", ".", "format", "(", "ee", ")", ")", "resp", "=", "{", "'error'", ":", "'Failed to contact Blockstack node. 
Try again with `--debug`.'", ",", "'http_status'", ":", "500", "}", "return", "resp", "return", "resp", "[", "'balance'", "]" ]
Return time series as pandas dataframe
def as_dataframe ( self ) : time_series = { } for ts_index , ts in enumerate ( self . timeSeries ) : index = [ ] data = [ ] for value in ts . values : index . append ( value . simTime ) data . append ( value . value ) time_series [ ts_index ] = pd . Series ( data , index = index ) return pd . DataFrame ( time_series )
4,486
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/tim.py#L123-L135
[ "def", "clean", "(", "self", ")", ":", "if", "self", ".", "_initialized", ":", "logger", ".", "info", "(", "\"brace yourselves, removing %r\"", ",", "self", ".", "path", ")", "shutil", ".", "rmtree", "(", "self", ".", "path", ")" ]
Create GSSHAPY TimeSeries and TimeSeriesValue Objects Method
def _createTimeSeriesObjects ( self , timeSeries , filename ) : try : # Determine number of value columns valColumns = len ( timeSeries [ 0 ] [ 'values' ] ) # Create List of GSSHAPY TimeSeries objects series = [ ] for i in range ( 0 , valColumns ) : ts = TimeSeries ( ) ts . timeSeriesFile = self series . append ( ts ) for record in timeSeries : for index , value in enumerate ( record [ 'values' ] ) : # Create GSSHAPY TimeSeriesValue objects tsVal = TimeSeriesValue ( simTime = record [ 'time' ] , value = value ) # Associate with appropriate TimeSeries object via the index tsVal . timeSeries = series [ index ] except IndexError : log . warning ( ( '%s was opened, but the contents of the file were empty.' 'This file will not be read into the database.' ) % filename ) except : raise
4,487
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/tim.py#L137-L164
[ "def", "upload_cbn_dir", "(", "dir_path", ",", "manager", ")", ":", "t", "=", "time", ".", "time", "(", ")", "for", "jfg_path", "in", "os", ".", "listdir", "(", "dir_path", ")", ":", "if", "not", "jfg_path", ".", "endswith", "(", "'.jgf'", ")", ":", "continue", "path", "=", "os", ".", "path", ".", "join", "(", "dir_path", ",", "jfg_path", ")", "log", ".", "info", "(", "'opening %s'", ",", "path", ")", "with", "open", "(", "path", ")", "as", "f", ":", "cbn_jgif_dict", "=", "json", ".", "load", "(", "f", ")", "graph", "=", "pybel", ".", "from_cbn_jgif", "(", "cbn_jgif_dict", ")", "out_path", "=", "os", ".", "path", ".", "join", "(", "dir_path", ",", "jfg_path", ".", "replace", "(", "'.jgf'", ",", "'.bel'", ")", ")", "with", "open", "(", "out_path", ",", "'w'", ")", "as", "o", ":", "pybel", ".", "to_bel", "(", "graph", ",", "o", ")", "strip_annotations", "(", "graph", ")", "enrich_pubmed_citations", "(", "manager", "=", "manager", ",", "graph", "=", "graph", ")", "pybel", ".", "to_database", "(", "graph", ",", "manager", "=", "manager", ")", "log", ".", "info", "(", "''", ")", "log", ".", "info", "(", "'done in %.2f'", ",", "time", ".", "time", "(", ")", "-", "t", ")" ]
Extends deferred operations calling each operation of given Blueprints .
def extend ( self , * blues , memo = None ) : memo = { } if memo is None else memo for blue in blues : if isinstance ( blue , Dispatcher ) : blue = blue . blue ( memo = memo ) for method , kwargs in blue . deferred : getattr ( self , method ) ( * * kwargs ) return self
4,488
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/blue.py#L89-L123
[ "def", "wait", "(", "self", ",", "sensor_name", ",", "condition_or_value", ",", "timeout", "=", "5", ")", ":", "sensor_name", "=", "escape_name", "(", "sensor_name", ")", "sensor", "=", "self", ".", "sensor", "[", "sensor_name", "]", "try", ":", "yield", "sensor", ".", "wait", "(", "condition_or_value", ",", "timeout", ")", "except", "tornado", ".", "gen", ".", "TimeoutError", ":", "raise", "tornado", ".", "gen", ".", "Return", "(", "False", ")", "else", ":", "raise", "tornado", ".", "gen", ".", "Return", "(", "True", ")" ]
Generic Output Location Read from File Method
def _read ( self , directory , filename , session , path , name , extension , spatial , spatialReferenceID , replaceParamFile ) : # Assign file extension attribute to file object self . fileExtension = extension # Open file and parse into a data structure with open ( path , 'r' ) as f : for line in f : sline = line . strip ( ) . split ( ) if len ( sline ) == 1 : self . numLocations = sline [ 0 ] else : # Create GSSHAPY OutputLocation object location = OutputLocation ( linkOrCellI = sline [ 0 ] , nodeOrCellJ = sline [ 1 ] ) # Associate OutputLocation with OutputLocationFile location . outputLocationFile = self
4,489
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/loc.py#L61-L81
[ "def", "detail_dict", "(", "self", ")", ":", "d", "=", "self", ".", "dict", "def", "aug_col", "(", "c", ")", ":", "d", "=", "c", ".", "dict", "d", "[", "'stats'", "]", "=", "[", "s", ".", "dict", "for", "s", "in", "c", ".", "stats", "]", "return", "d", "d", "[", "'table'", "]", "=", "self", ".", "table", ".", "dict", "d", "[", "'table'", "]", "[", "'columns'", "]", "=", "[", "aug_col", "(", "c", ")", "for", "c", "in", "self", ".", "table", ".", "columns", "]", "return", "d" ]
Generic Output Location Write to File Method
def _write ( self , session , openFile , replaceParamFile ) : # Retrieve output locations locations = self . outputLocations # Write lines openFile . write ( '%s\n' % self . numLocations ) for location in locations : openFile . write ( '%s %s\n' % ( location . linkOrCellI , location . nodeOrCellJ ) )
4,490
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/loc.py#L83-L95
[ "def", "current_rolenames", "(", ")", ":", "jwt_data", "=", "get_jwt_data_from_app_context", "(", ")", "if", "'rls'", "not", "in", "jwt_data", ":", "# This is necessary so our set arithmetic works correctly", "return", "set", "(", "[", "'non-empty-but-definitely-not-matching-subset'", "]", ")", "else", ":", "return", "set", "(", "r", ".", "strip", "(", ")", "for", "r", "in", "jwt_data", "[", "'rls'", "]", ".", "split", "(", "','", ")", ")" ]
Creates a dispatcher Flask app .
def web ( self , depth = - 1 , node_data = NONE , node_function = NONE , directory = None , sites = None , run = True ) : options = { 'node_data' : node_data , 'node_function' : node_function } options = { k : v for k , v in options . items ( ) if v is not NONE } from . web import WebMap from . sol import Solution obj = self . dsp if isinstance ( self , Solution ) else self webmap = WebMap ( ) webmap . add_items ( obj , workflow = False , depth = depth , * * options ) if sites is not None : import tempfile directory = directory or tempfile . mkdtemp ( ) sites . add ( webmap . site ( directory , view = run ) ) return webmap
4,491
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/base.py#L27-L109
[ "def", "remove", "(", "self", ",", "product", ",", "img_file_name", ",", "identifierType", "=", "None", ")", ":", "return", "self", ".", "call", "(", "'catalog_product_attribute_media.remove'", ",", "[", "product", ",", "img_file_name", ",", "identifierType", "]", ")" ]
Plots the Dispatcher with a graph in the DOT language with Graphviz .
def plot ( self , workflow = None , view = True , depth = - 1 , name = NONE , comment = NONE , format = NONE , engine = NONE , encoding = NONE , graph_attr = NONE , node_attr = NONE , edge_attr = NONE , body = NONE , node_styles = NONE , node_data = NONE , node_function = NONE , edge_data = NONE , max_lines = NONE , max_width = NONE , directory = None , sites = None , index = False ) : d = { 'name' : name , 'comment' : comment , 'format' : format , 'engine' : engine , 'encoding' : encoding , 'graph_attr' : graph_attr , 'node_attr' : node_attr , 'edge_attr' : edge_attr , 'body' : body , } options = { 'digraph' : { k : v for k , v in d . items ( ) if v is not NONE } or NONE , 'node_styles' : node_styles , 'node_data' : node_data , 'node_function' : node_function , 'edge_data' : edge_data , 'max_lines' : max_lines , # 5 'max_width' : max_width , # 200 } options = { k : v for k , v in options . items ( ) if v is not NONE } from . drw import SiteMap from . sol import Solution if workflow is None and isinstance ( self , Solution ) : workflow = True else : workflow = workflow or False sitemap = SiteMap ( ) sitemap . add_items ( self , workflow = workflow , depth = depth , * * options ) if view : import tempfile directory = directory or tempfile . mkdtemp ( ) if sites is None : sitemap . render ( directory = directory , view = True , index = index ) else : sites . add ( sitemap . site ( directory , view = True , index = index ) ) return sitemap
4,492
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/base.py#L111-L265
[ "def", "remove", "(", "self", ",", "row_or_row_indices", ")", ":", "if", "not", "row_or_row_indices", ":", "return", "if", "isinstance", "(", "row_or_row_indices", ",", "int", ")", ":", "rows_remove", "=", "[", "row_or_row_indices", "]", "else", ":", "rows_remove", "=", "row_or_row_indices", "for", "col", "in", "self", ".", "_columns", ":", "self", ".", "_columns", "[", "col", "]", "=", "[", "elem", "for", "i", ",", "elem", "in", "enumerate", "(", "self", "[", "col", "]", ")", "if", "i", "not", "in", "rows_remove", "]", "return", "self" ]
A convenience wrapper for _get . Adds headers auth and base url by default
def _api_get ( self , url , * * kwargs ) : kwargs [ 'url' ] = self . url + url kwargs [ 'auth' ] = self . auth headers = deepcopy ( self . headers ) headers . update ( kwargs . get ( 'headers' , { } ) ) kwargs [ 'headers' ] = headers return self . _get ( * * kwargs )
4,493
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/base.py#L36-L47
[ "def", "config", "(", "self", ",", "configlet", ",", "plane", ",", "*", "*", "attributes", ")", ":", "try", ":", "config_text", "=", "configlet", ".", "format", "(", "*", "*", "attributes", ")", "except", "KeyError", "as", "exp", ":", "raise", "CommandSyntaxError", "(", "\"Configuration template error: {}\"", ".", "format", "(", "str", "(", "exp", ")", ")", ")", "return", "self", ".", "driver", ".", "config", "(", "config_text", ",", "plane", ")" ]
A convenience wrapper for _put . Adds headers auth and base url by default
def _api_put ( self , url , * * kwargs ) : kwargs [ 'url' ] = self . url + url kwargs [ 'auth' ] = self . auth headers = deepcopy ( self . headers ) headers . update ( kwargs . get ( 'headers' , { } ) ) kwargs [ 'headers' ] = headers self . _put ( * * kwargs )
4,494
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/base.py#L62-L73
[ "def", "list_snapshots", "(", "config", "=", "'root'", ")", ":", "try", ":", "snapshots", "=", "snapper", ".", "ListSnapshots", "(", "config", ")", "return", "[", "_snapshot_to_data", "(", "s", ")", "for", "s", "in", "snapshots", "]", "except", "dbus", ".", "DBusException", "as", "exc", ":", "raise", "CommandExecutionError", "(", "'Error encountered while listing snapshots: {0}'", ".", "format", "(", "_dbus_exception_to_reason", "(", "exc", ",", "locals", "(", ")", ")", ")", ")" ]
A convenience wrapper for _post . Adds headers auth and base url by default
def _api_post ( self , url , * * kwargs ) : kwargs [ 'url' ] = self . url + url kwargs [ 'auth' ] = self . auth headers = deepcopy ( self . headers ) headers . update ( kwargs . get ( 'headers' , { } ) ) kwargs [ 'headers' ] = headers self . _post ( * * kwargs )
4,495
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/base.py#L87-L98
[ "def", "config", "(", "self", ",", "configlet", ",", "plane", ",", "*", "*", "attributes", ")", ":", "try", ":", "config_text", "=", "configlet", ".", "format", "(", "*", "*", "attributes", ")", "except", "KeyError", "as", "exp", ":", "raise", "CommandSyntaxError", "(", "\"Configuration template error: {}\"", ".", "format", "(", "str", "(", "exp", ")", ")", ")", "return", "self", ".", "driver", ".", "config", "(", "config_text", ",", "plane", ")" ]
A convenience wrapper for _delete . Adds headers auth and base url by default
def _api_delete ( self , url , * * kwargs ) : kwargs [ 'url' ] = self . url + url kwargs [ 'auth' ] = self . auth headers = deepcopy ( self . headers ) headers . update ( kwargs . get ( 'headers' , { } ) ) kwargs [ 'headers' ] = headers self . _delete ( * * kwargs )
4,496
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/base.py#L112-L123
[ "def", "trim", "(", "self", ")", ":", "overlay_data_offset", "=", "self", ".", "get_overlay_data_start_offset", "(", ")", "if", "overlay_data_offset", "is", "not", "None", ":", "return", "self", ".", "__data__", "[", ":", "overlay_data_offset", "]", "return", "self", ".", "__data__", "[", ":", "]" ]
Retrieve the raster as a KML document with each cell of the raster represented as a vector polygon . The result is a vector grid of raster cells . Cells with the no data value are excluded .
def getAsKmlGrid ( self , session , path = None , documentName = None , colorRamp = ColorRampEnum . COLOR_RAMP_HUE , alpha = 1.0 , noDataValue = None ) : if type ( self . raster ) != type ( None ) : # Set Document Name if documentName is None : try : documentName = self . filename except AttributeError : documentName = 'default' # Set no data value to default if noDataValue is None : noDataValue = self . defaultNoDataValue # Make sure the raster field is valid converter = RasterConverter ( sqlAlchemyEngineOrSession = session ) # Configure color ramp if isinstance ( colorRamp , dict ) : converter . setCustomColorRamp ( colorRamp [ 'colors' ] , colorRamp [ 'interpolatedPoints' ] ) else : converter . setDefaultColorRamp ( colorRamp ) kmlString = converter . getAsKmlGrid ( tableName = self . tableName , rasterId = self . id , rasterIdFieldName = 'id' , rasterFieldName = self . rasterColumnName , documentName = documentName , alpha = alpha , noDataValue = noDataValue , discreet = self . discreet ) if path : with open ( path , 'w' ) as f : f . write ( kmlString ) return kmlString
4,497
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/base/rast.py#L33-L91
[ "def", "add_info", "(", "self", ",", "s", ")", ":", "if", "s", "not", "in", "self", ".", "info", ":", "self", ".", "info", ".", "append", "(", "s", ")" ]
Retrieve the raster in the GRASS ASCII Grid format .
def getAsGrassAsciiGrid ( self , session ) : if type ( self . raster ) != type ( None ) : # Make sure the raster field is valid converter = RasterConverter ( sqlAlchemyEngineOrSession = session ) return converter . getAsGrassAsciiRaster ( tableName = self . tableName , rasterIdFieldName = 'id' , rasterId = self . id , rasterFieldName = self . rasterColumnName )
4,498
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/base/rast.py#L229-L246
[ "def", "on_close", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "\"> Calling '{0}' Component Framework 'on_close' method.\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")", "map", "(", "self", ".", "unregister_file", ",", "self", ".", "list_files", "(", ")", ")", "if", "self", ".", "store_session", "(", ")", "and", "self", ".", "close_all_files", "(", "leave_first_editor", "=", "False", ")", ":", "return", "True" ]
Stops the Host passed by parameter or all of them if none is specified stopping at the same time all its actors . Should be called at the end of its usage to finish correctly all the connections and threads .
def shutdown ( url = None ) : if url is None : for host in util . hosts . values ( ) : host . shutdown ( ) global core_type core_type = None else : host = util . hosts [ url ] host . shutdown ( )
4,499
https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L483-L497
[ "def", "run", "(", ")", ":", "obj", "=", "CompareGOsCli", "(", ")", "obj", ".", "write", "(", "obj", ".", "kws", ".", "get", "(", "'xlsx'", ")", ",", "obj", ".", "kws", ".", "get", "(", "'ofile'", ")", ",", "obj", ".", "kws", ".", "get", "(", "'verbose'", ",", "False", ")", ")" ]