query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Rotate the files to disk .
def rotate(self):
    """Rotate the files to disk.

    Closes the currently open event store, bumps the batch counter and
    opens a fresh store for the new batch.
    """
    next_batch = self.batchno + 1
    self._logger.info('Rotating data files. New batch number will be: %s',
                      next_batch)
    self.estore.close()
    self.estore = None
    self.batchno = next_batch
    self.estore = self._open_event_store()
3,600
https://github.com/JensRantil/rewind/blob/7f645d20186c1db55cfe53a0310c9fd6292f91ea/rewind/server/eventstores.py#L785-L797
[ "def", "_parse_ISBN_EAN", "(", "details", ")", ":", "isbn_ean", "=", "_get_td_or_none", "(", "details", ",", "\"ctl00_ContentPlaceHolder1_tblRowIsbnEan\"", ")", "if", "not", "isbn_ean", ":", "return", "None", ",", "None", "ean", "=", "None", "isbn", "=", "None", "if", "\"/\"", "in", "isbn_ean", ":", "# ISBN and EAN are stored in same string", "isbn", ",", "ean", "=", "isbn_ean", ".", "split", "(", "\"/\"", ")", "isbn", "=", "isbn", ".", "strip", "(", ")", "ean", "=", "ean", ".", "strip", "(", ")", "else", ":", "isbn", "=", "isbn_ean", ".", "strip", "(", ")", "if", "not", "isbn", ":", "isbn", "=", "None", "return", "isbn", ",", "ean" ]
Find the batch number that contains a certain event .
def _find_batch_containing_event ( self , uuid ) : if self . estore . key_exists ( uuid ) : # Reusing already opened DB if possible return self . batchno else : for batchno in range ( self . batchno - 1 , - 1 , - 1 ) : # Iterating backwards here because we are more likely to find # the event in an later archive, than earlier. db = self . _open_event_store ( batchno ) with contextlib . closing ( db ) : if db . key_exists ( uuid ) : return batchno return None
3,601
https://github.com/JensRantil/rewind/blob/7f645d20186c1db55cfe53a0310c9fd6292f91ea/rewind/server/eventstores.py#L803-L823
[ "def", "catalog_datacenters", "(", "consul_url", "=", "None", ",", "token", "=", "None", ")", ":", "ret", "=", "{", "}", "if", "not", "consul_url", ":", "consul_url", "=", "_get_config", "(", ")", "if", "not", "consul_url", ":", "log", ".", "error", "(", "'No Consul URL found.'", ")", "ret", "[", "'message'", "]", "=", "'No Consul URL found.'", "ret", "[", "'res'", "]", "=", "False", "return", "ret", "function", "=", "'catalog/datacenters'", "ret", "=", "_query", "(", "consul_url", "=", "consul_url", ",", "function", "=", "function", ",", "token", "=", "token", ")", "return", "ret" ]
Instantiate a SyncedRotationEventStores from config .
def from_config(config, **options):
    """Instantiate an SyncedRotationEventStores from config.

    :param config: configuration object handed to substore construction.
    :param options: section options; must contain 'storage-backends' and
                    may contain 'events_per_batch' (defaults to 25000).
    """
    required_args = ('storage-backends',)
    optional_args = {'events_per_batch': 25000}
    rconfig.check_config_options("SyncedRotationEventStores",
                                 required_args,
                                 tuple(optional_args.keys()),
                                 options)
    # int() is a no-op for the already-integer default value.
    events_per_batch = int(options.get("events_per_batch",
                                       optional_args["events_per_batch"]))
    estore = SyncedRotationEventStores(events_per_batch)
    for section in options['storage-backends'].split(' '):
        try:
            substore = rconfig.construct_eventstore(config, section)
            estore.add_rotated_store(substore)
        except Exception:
            # Close what we managed to open before propagating the error.
            _logger.exception('Could not instantiate substore from'
                              ' section %s', section)
            estore.close()
            raise
    return estore
3,602
https://github.com/JensRantil/rewind/blob/7f645d20186c1db55cfe53a0310c9fd6292f91ea/rewind/server/eventstores.py#L915-L951
[ "def", "fetch_from_sdr", "(", "folder", "=", "data_folder", ",", "data", "=", "'test'", ")", ":", "url", "=", "\"https://stacks.stanford.edu/file/druid:fn662rv4961/\"", "if", "data", "==", "'test'", ":", "md5_dict", "=", "{", "'5182_1_1.nii.gz'", ":", "'0656e59818538baa7d45311f2581bb4e'", ",", "'5182_15_1.nii.gz'", ":", "'a5a307b581620184baf868cd0df81f89'", ",", "'data.mat'", ":", "'a6275698f2220c65994354d412e6d82e'", ",", "'pure_gaba_P64024.nii.gz'", ":", "'f3e09ec0f00bd9a03910b19bfe731afb'", "}", "elif", "data", "==", "'example'", ":", "md5_dict", "=", "{", "'12_1_PROBE_MEGA_L_Occ.nii.gz'", ":", "'a0571606c1caa16a9d9b00847771bc94'", ",", "'5062_2_1.nii.gz'", ":", "'6f77fb5134bc2841bdfc954390f0f4a4'", "}", "if", "not", "os", ".", "path", ".", "exists", "(", "folder", ")", ":", "print", "(", "'Creating new directory %s'", "%", "folder", ")", "os", ".", "makedirs", "(", "folder", ")", "for", "k", ",", "v", "in", "md5_dict", ".", "items", "(", ")", ":", "fname", "=", "pjoin", "(", "folder", ",", "k", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "fname", ")", ":", "print", "(", "'Downloading %s from SDR ...'", "%", "k", ")", "_get_file_data", "(", "fname", ",", "url", "+", "k", ")", "check_md5", "(", "fname", ",", "v", ")", "else", ":", "print", "(", "'File %s is already in place. If you want to fetch it again, please first remove it from the folder %s '", "%", "(", "fname", ",", "folder", ")", ")", "print", "(", "'Done.'", ")", "print", "(", "'Files copied in folder %s'", "%", "folder", ")" ]
Display stream contents in hexadecimal and ASCII format . The stream specified must either be a file - like object that supports the read method to receive bytes or it can be a string .
def hexdump(stream):
    """Display stream contents in hexadecimal and ASCII format.

    The stream specified must either be a file-like object that supports
    the ``read`` method to receive bytes or it can be a string.
    """
    import binascii

    if isinstance(stream, six.string_types):
        # Normalise text input to bytes so the byte-wise dump below works
        # on both Python 2 and Python 3.
        if not isinstance(stream, bytes):
            stream = stream.encode('latin-1')
        stream = BytesIO(stream)
    row = 0
    while True:
        data = stream.read(16)
        if not data:
            break
        # bytes.encode('hex') exists only on Python 2; hexlify is portable.
        hextets = binascii.hexlify(data).decode('ascii').ljust(32)
        canonical = printable(data)
        print('%08x %s %s |%s|' % (
            row * 16,
            ' '.join(hextets[x:x + 2] for x in range(0x00, 0x10, 2)),
            ' '.join(hextets[x:x + 2] for x in range(0x10, 0x20, 2)),
            canonical,
        ))
        row += 1
3,603
https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/data.py#L39-L75
[ "def", "exclude_types", "(", "self", ",", "*", "objs", ")", ":", "for", "o", "in", "objs", ":", "for", "t", "in", "_keytuple", "(", "o", ")", ":", "if", "t", "and", "t", "not", "in", "self", ".", "_excl_d", ":", "self", ".", "_excl_d", "[", "t", "]", "=", "0" ]
Return a printable string from the input sequence
def printable(sequence):
    """Return a printable string from the input sequence.

    Characters not present in ``PRINTABLE`` are rendered as ``'.'``.
    """
    return ''.join(char if char in PRINTABLE else '.' for char in sequence)
3,604
https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/data.py#L78-L97
[ "def", "_enforce_bounds", "(", "self", ",", "x", ")", ":", "assert", "len", "(", "x", ")", "==", "len", "(", "self", ".", "bounds", ")", "x_enforced", "=", "[", "]", "for", "x_i", ",", "(", "lb", ",", "ub", ")", "in", "zip", "(", "x", ",", "self", ".", "bounds", ")", ":", "if", "x_i", "<", "lb", ":", "if", "x_i", ">", "lb", "-", "(", "ub", "-", "lb", ")", "/", "1e10", ":", "x_enforced", ".", "append", "(", "lb", ")", "else", ":", "x_enforced", ".", "append", "(", "x_i", ")", "elif", "x_i", ">", "ub", ":", "if", "x_i", "<", "ub", "+", "(", "ub", "-", "lb", ")", "/", "1e10", ":", "x_enforced", ".", "append", "(", "ub", ")", "else", ":", "x_enforced", ".", "append", "(", "x_i", ")", "else", ":", "x_enforced", ".", "append", "(", "x_i", ")", "return", "np", ".", "array", "(", "x_enforced", ")" ]
Return a spark line for the given data set .
def sparkline(data):
    """Return a spark line for the given data set.

    :param data: non-empty sequence of numeric values.
    """
    min_value = float(min(data))
    max_value = float(max(data))
    steps = (max_value - min_value) / float(len(SPARKCHAR) - 1)
    if steps == 0:
        # All values are equal; avoid dividing by zero and draw a flat line.
        return SPARKCHAR[0] * len(data)
    return ''.join(
        SPARKCHAR[int((float(value) - min_value) / steps)]
        for value in data
    )
3,605
https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/data.py#L100-L117
[ "async", "def", "write_non_secret", "(", "self", ",", "storec", ":", "StorageRecord", ",", "replace_meta", ":", "bool", "=", "False", ")", "->", "StorageRecord", ":", "LOGGER", ".", "debug", "(", "'Wallet.write_non_secret >>> storec: %s, replace_meta: %s'", ",", "storec", ",", "replace_meta", ")", "if", "not", "self", ".", "handle", ":", "LOGGER", ".", "debug", "(", "'Wallet.write_non_secret <!< Wallet %s is closed'", ",", "self", ".", "name", ")", "raise", "WalletState", "(", "'Wallet {} is closed'", ".", "format", "(", "self", ".", "name", ")", ")", "if", "not", "StorageRecord", ".", "ok_tags", "(", "storec", ".", "tags", ")", ":", "LOGGER", ".", "debug", "(", "'Wallet.write_non_secret <!< bad storage record tags %s; use flat {str: str} dict'", ",", "storec", ")", "raise", "BadRecord", "(", "'Bad storage record tags {}; use flat {{str:str}} dict'", ".", "format", "(", "storec", ")", ")", "try", ":", "record", "=", "json", ".", "loads", "(", "await", "non_secrets", ".", "get_wallet_record", "(", "self", ".", "handle", ",", "storec", ".", "type", ",", "storec", ".", "id", ",", "json", ".", "dumps", "(", "{", "'retrieveType'", ":", "False", ",", "'retrieveValue'", ":", "True", ",", "'retrieveTags'", ":", "True", "}", ")", ")", ")", "if", "record", "[", "'value'", "]", "!=", "storec", ".", "value", ":", "await", "non_secrets", ".", "update_wallet_record_value", "(", "self", ".", "handle", ",", "storec", ".", "type", ",", "storec", ".", "id", ",", "storec", ".", "value", ")", "except", "IndyError", "as", "x_indy", ":", "if", "x_indy", ".", "error_code", "==", "ErrorCode", ".", "WalletItemNotFound", ":", "await", "non_secrets", ".", "add_wallet_record", "(", "self", ".", "handle", ",", "storec", ".", "type", ",", "storec", ".", "id", ",", "storec", ".", "value", ",", "json", ".", "dumps", "(", "storec", ".", "tags", ")", "if", "storec", ".", "tags", "else", "None", ")", "else", ":", "LOGGER", ".", "debug", "(", "'Wallet.write_non_secret <!< Wallet lookup raised indy 
error code %s'", ",", "x_indy", ".", "error_code", ")", "raise", "else", ":", "if", "(", "record", "[", "'tags'", "]", "or", "None", ")", "!=", "storec", ".", "tags", ":", "# record maps no tags to {}, not None", "tags", "=", "(", "storec", ".", "tags", "or", "{", "}", ")", "if", "replace_meta", "else", "{", "*", "*", "record", "[", "'tags'", "]", ",", "*", "*", "(", "storec", ".", "tags", "or", "{", "}", ")", "}", "await", "non_secrets", ".", "update_wallet_record_tags", "(", "self", ".", "handle", ",", "storec", ".", "type", ",", "storec", ".", "id", ",", "json", ".", "dumps", "(", "tags", ")", ")", "# indy-sdk takes '{}' instead of None for null tags", "record", "=", "json", ".", "loads", "(", "await", "non_secrets", ".", "get_wallet_record", "(", "self", ".", "handle", ",", "storec", ".", "type", ",", "storec", ".", "id", ",", "json", ".", "dumps", "(", "{", "'retrieveType'", ":", "False", ",", "'retrieveValue'", ":", "True", ",", "'retrieveTags'", ":", "True", "}", ")", ")", ")", "rv", "=", "StorageRecord", "(", "storec", ".", "type", ",", "record", "[", "'value'", "]", ",", "tags", "=", "record", ".", "get", "(", "'tags'", ",", "None", ")", ",", "ident", "=", "record", "[", "'id'", "]", ")", "LOGGER", ".", "debug", "(", "'Wallet.write_non_secret <<< %s'", ",", "rv", ")", "return", "rv" ]
Retrieves the SnowballStemmer for a particular language .
def get_language_stemmer(language):
    """Retrieves the SnowballStemmer for a particular language.

    :param language: key into ``SUPPORTED_LANGUAGES``.
    """
    # Imported lazily so nltk is only required when stemming is used.
    from lunr.languages import SUPPORTED_LANGUAGES
    from nltk.stem.snowball import SnowballStemmer

    language_name = SUPPORTED_LANGUAGES[language]
    return SnowballStemmer(language_name)
3,606
https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/languages/stemmer.py#L1-L10
[ "def", "_get_total_read_size", "(", "self", ")", ":", "if", "self", ".", "read_size", ":", "read_size", "=", "EVENT_SIZE", "*", "self", ".", "read_size", "else", ":", "read_size", "=", "EVENT_SIZE", "return", "read_size" ]
Wrapper around a NLTK SnowballStemmer which includes stop words for each language .
def nltk_stemmer(stemmer, token, i=None, tokens=None):
    """Wrapper around a NLTK SnowballStemmer, which includes stop words for
    each language.

    :param stemmer: the stemmer whose ``stem`` method is applied.
    :param token: token object offering an ``update`` method.
    """
    def stem_word(word, metadata=None):
        # The metadata argument is accepted but unused; delegate to NLTK.
        return stemmer.stem(word)

    return token.update(stem_word)
3,607
https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/languages/stemmer.py#L13-L27
[ "def", "_get_total_read_size", "(", "self", ")", ":", "if", "self", ".", "read_size", ":", "read_size", "=", "EVENT_SIZE", "*", "self", ".", "read_size", "else", ":", "read_size", "=", "EVENT_SIZE", "return", "read_size" ]
Returns True if object is not a string but is iterable
def is_seq(obj):
    """Return True if object is not a string but is iterable.

    Works on both Python 2 (where ``basestring`` exists) and Python 3,
    where the original ``basestring`` check raised NameError.
    """
    if not hasattr(obj, '__iter__'):
        return False
    try:
        text_types = basestring  # noqa: F821 - Python 2 only
    except NameError:
        text_types = (str, bytes)
    if isinstance(obj, text_types):
        return False
    return True
3,608
https://github.com/Othernet-Project/sqlize/blob/f32cb38e4245800ece339b998ae6647c207a8ca5/sqlize/builder.py#L26-L32
[ "def", "use_comparative_gradebook_view", "(", "self", ")", ":", "self", ".", "_gradebook_view", "=", "COMPARATIVE", "# self._get_provider_session('grade_system_gradebook_session') # To make sure the session is tracked", "for", "session", "in", "self", ".", "_get_provider_sessions", "(", ")", ":", "try", ":", "session", ".", "use_comparative_gradebook_view", "(", ")", "except", "AttributeError", ":", "pass" ]
Decorate a migration function with this method to make it available for migrating cases .
def register(cls, func):
    """Decorate a migration function with this method to make it available
    for migrating cases.

    :param func: function named like ``v<source>_to_<target>``.
    :returns: ``func`` unchanged, so this works as a decorator.
    """
    # Parse and attach .source/.target first; _add_version_info raises
    # ValueError for badly-named functions before they get registered.
    cls._add_version_info(func)
    cls._upgrade_funcs.add(func)
    return func
3,609
https://github.com/jaraco/jaraco.mongodb/blob/280f17894941f4babf2e97db033dbb1fd2b9f705/jaraco/mongodb/migration.py#L69-L76
[ "def", "blit_rect", "(", "self", ",", "console", ":", "tcod", ".", "console", ".", "Console", ",", "x", ":", "int", ",", "y", ":", "int", ",", "width", ":", "int", ",", "height", ":", "int", ",", "bg_blend", ":", "int", ",", ")", "->", "None", ":", "lib", ".", "TCOD_image_blit_rect", "(", "self", ".", "image_c", ",", "_console", "(", "console", ")", ",", "x", ",", "y", ",", "width", ",", "height", ",", "bg_blend", ")" ]
Add . source and . target attributes to the registered function .
def _add_version_info ( func ) : pattern = r'v(?P<source>\d+)_to_(?P<target>\d+)$' match = re . match ( pattern , func . __name__ ) if not match : raise ValueError ( "migration function name must match " + pattern ) func . source , func . target = map ( int , match . groups ( ) )
3,610
https://github.com/jaraco/jaraco.mongodb/blob/280f17894941f4babf2e97db033dbb1fd2b9f705/jaraco/mongodb/migration.py#L79-L87
[ "def", "destroy_volume_snapshot", "(", "volume_id", ",", "snapshot_id", ",", "profile", ",", "*", "*", "libcloud_kwargs", ")", ":", "conn", "=", "_get_driver", "(", "profile", "=", "profile", ")", "libcloud_kwargs", "=", "salt", ".", "utils", ".", "args", ".", "clean_kwargs", "(", "*", "*", "libcloud_kwargs", ")", "volume", "=", "_get_by_id", "(", "conn", ".", "list_volumes", "(", ")", ",", "volume_id", ")", "snapshot", "=", "_get_by_id", "(", "conn", ".", "list_volume_snapshots", "(", "volume", ")", ",", "snapshot_id", ")", "return", "conn", ".", "destroy_volume_snapshot", "(", "snapshot", ",", "*", "*", "libcloud_kwargs", ")" ]
Migrate the doc from its current version to the target version and return it .
def migrate_doc(self, doc):
    """Migrate the doc from its current version to the target version and
    return it.
    """
    attr = self.version_attribute_name
    current = doc.get(attr, 0)
    for step in self._get_migrate_funcs(current, self.target_version):
        step(self, doc)
        # Stamp the version reached after each successful step.
        doc[attr] = step.target
    return doc
3,611
https://github.com/jaraco/jaraco.mongodb/blob/280f17894941f4babf2e97db033dbb1fd2b9f705/jaraco/mongodb/migration.py#L89-L99
[ "def", "is_fifo", "(", "name", ")", ":", "name", "=", "os", ".", "path", ".", "expanduser", "(", "name", ")", "stat_structure", "=", "None", "try", ":", "stat_structure", "=", "os", ".", "stat", "(", "name", ")", "except", "OSError", "as", "exc", ":", "if", "exc", ".", "errno", "==", "errno", ".", "ENOENT", ":", "# If the fifo does not exist in the first place", "return", "False", "else", ":", "raise", "return", "stat", ".", "S_ISFIFO", "(", "stat_structure", ".", "st_mode", ")" ]
Return exactly one function to convert from source to target
def _get_func ( cls , source_ver , target_ver ) : matches = ( func for func in cls . _upgrade_funcs if func . source == source_ver and func . target == target_ver ) try : match , = matches except ValueError : raise ValueError ( f"No migration from {source_ver} to {target_ver}" ) return match
3,612
https://github.com/jaraco/jaraco.mongodb/blob/280f17894941f4babf2e97db033dbb1fd2b9f705/jaraco/mongodb/migration.py#L132-L145
[ "async", "def", "process_events_async", "(", "self", ",", "events", ")", ":", "if", "events", ":", "# Synchronize to serialize calls to the processor. The handler is not installed until", "# after OpenAsync returns, so ProcessEventsAsync cannot conflict with OpenAsync. There", "# could be a conflict between ProcessEventsAsync and CloseAsync, however. All calls to", "# CloseAsync are protected by synchronizing too.", "try", ":", "last", "=", "events", "[", "-", "1", "]", "if", "last", "is", "not", "None", ":", "self", ".", "partition_context", ".", "set_offset_and_sequence_number", "(", "last", ")", "await", "self", ".", "processor", ".", "process_events_async", "(", "self", ".", "partition_context", ",", "events", ")", "except", "Exception", "as", "err", ":", "# pylint: disable=broad-except", "await", "self", ".", "process_error_async", "(", "err", ")" ]
Get the Plone UID for this object
def get_uid(brain_or_object):
    """Get the Plone UID for this object.

    :param brain_or_object: a catalog brain or content object
    :returns: the UID string; '0' is returned for the portal root
    """
    if is_portal(brain_or_object):
        return '0'
    # Brains may carry the UID as plain metadata; use it to avoid waking
    # up the full object.
    has_brain_uid = is_brain(brain_or_object) and \
        base_hasattr(brain_or_object, "UID")
    if has_brain_uid:
        return brain_or_object.UID
    return get_object(brain_or_object).UID()
3,613
https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L366-L378
[ "def", "_get_filesystem_path", "(", "self", ",", "url_path", ",", "basedir", "=", "settings", ".", "MEDIA_ROOT", ")", ":", "if", "url_path", ".", "startswith", "(", "settings", ".", "MEDIA_URL", ")", ":", "url_path", "=", "url_path", "[", "len", "(", "settings", ".", "MEDIA_URL", ")", ":", "]", "# strip media root url", "return", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "basedir", ",", "url2pathname", "(", "url_path", ")", ")", ")" ]
Get the icon of the content object
def get_icon(brain_or_object, html_tag=True):
    """Get the icon of the content object.

    :param html_tag: when True return a full <img> tag, otherwise the URL
    :returns: empty string when the type has no icon configured
    """
    # Manual approach, because `plone.app.layout.getIcon` does not reliably
    # work for Bika Contents coming from other catalogs than the
    # `portal_catalog`
    type_info = get_tool("portal_types").getTypeInfo(
        brain_or_object.portal_type)
    icon = type_info.getIcon()
    if not icon:
        return ""
    url = "%s/%s" % (get_url(get_portal()), icon)
    if not html_tag:
        return url
    return '<img width="16" height="16" src="{url}" title="{title}" />'.format(
        url=url, title=get_title(brain_or_object))
3,614
https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L394-L417
[ "def", "_build_trial_meta", "(", "cls", ",", "expr_dir", ")", ":", "meta_file", "=", "os", ".", "path", ".", "join", "(", "expr_dir", ",", "EXPR_META_FILE", ")", "meta", "=", "parse_json", "(", "meta_file", ")", "if", "not", "meta", ":", "job_id", "=", "expr_dir", ".", "split", "(", "\"/\"", ")", "[", "-", "2", "]", "trial_id", "=", "expr_dir", "[", "-", "8", ":", "]", "params", "=", "parse_json", "(", "os", ".", "path", ".", "join", "(", "expr_dir", ",", "EXPR_PARARM_FILE", ")", ")", "meta", "=", "{", "\"trial_id\"", ":", "trial_id", ",", "\"job_id\"", ":", "job_id", ",", "\"status\"", ":", "\"RUNNING\"", ",", "\"type\"", ":", "\"TUNE\"", ",", "\"start_time\"", ":", "os", ".", "path", ".", "getctime", "(", "expr_dir", ")", ",", "\"end_time\"", ":", "None", ",", "\"progress_offset\"", ":", "0", ",", "\"result_offset\"", ":", "0", ",", "\"params\"", ":", "params", "}", "if", "not", "meta", ".", "get", "(", "\"start_time\"", ",", "None", ")", ":", "meta", "[", "\"start_time\"", "]", "=", "os", ".", "path", ".", "getctime", "(", "expr_dir", ")", "if", "isinstance", "(", "meta", "[", "\"start_time\"", "]", ",", "float", ")", ":", "meta", "[", "\"start_time\"", "]", "=", "timestamp2date", "(", "meta", "[", "\"start_time\"", "]", ")", "if", "meta", ".", "get", "(", "\"end_time\"", ",", "None", ")", ":", "meta", "[", "\"end_time\"", "]", "=", "timestamp2date", "(", "meta", "[", "\"end_time\"", "]", ")", "meta", "[", "\"params\"", "]", "=", "parse_json", "(", "os", ".", "path", ".", "join", "(", "expr_dir", ",", "EXPR_PARARM_FILE", ")", ")", "return", "meta" ]
Get the review history for the given brain or context .
def get_review_history(brain_or_object, rev=True):
    """Get the review history for the given brain or context.

    :param brain_or_object: a catalog brain or content object
    :param rev: when True, return the history newest-first
    :returns: list of workflow history entries (empty on error)
    """
    obj = get_object(brain_or_object)
    review_history = []
    try:
        workflow = get_tool("portal_workflow")
        review_history = workflow.getInfoFor(obj, 'review_history')
    except WorkflowException as e:
        # Lazy %-style args avoid formatting when the level is filtered out.
        logger.error("Cannot retrieve review_history on %s: %s", obj, str(e))
    if not isinstance(review_history, (list, tuple)):
        logger.error("get_review_history: expected list, received %s",
                     review_history)
        review_history = []
    if rev is True:
        # Copy to a list first: getInfoFor may return a tuple, which has
        # no reverse() method (the original code crashed here on tuples).
        review_history = list(review_history)
        review_history.reverse()
    return review_history
3,615
https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L658-L681
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_access", "is", "not", "None", ":", "_logger", ".", "debug", "(", "\"Cleaning up\"", ")", "pci_cleanup", "(", "self", ".", "_access", ")", "self", ".", "_access", "=", "None" ]
Get the cancellation_state of an object
def get_cancellation_status(brain_or_object, default="active"):
    """Get the cancellation_state of an object.

    :param default: value returned when the brain lacks the metadata or
                    no cancellation workflow applies to the object
    """
    # Brains expose the state as catalog metadata; avoid waking the object.
    if is_brain(brain_or_object):
        return getattr(brain_or_object, "cancellation_state", default)
    if 'bika_cancellation_workflow' not in get_workflows_for(brain_or_object):
        return default
    return get_workflow_status_of(brain_or_object, 'cancellation_state')
3,616
https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L759-L772
[ "def", "find_schema_yml", "(", "cls", ",", "package_name", ",", "root_dir", ",", "relative_dirs", ")", ":", "extension", "=", "\"[!.#~]*.yml\"", "file_matches", "=", "dbt", ".", "clients", ".", "system", ".", "find_matching", "(", "root_dir", ",", "relative_dirs", ",", "extension", ")", "for", "file_match", "in", "file_matches", ":", "file_contents", "=", "dbt", ".", "clients", ".", "system", ".", "load_file_contents", "(", "file_match", ".", "get", "(", "'absolute_path'", ")", ",", "strip", "=", "False", ")", "test_path", "=", "file_match", ".", "get", "(", "'relative_path'", ",", "''", ")", "original_file_path", "=", "os", ".", "path", ".", "join", "(", "file_match", ".", "get", "(", "'searched_path'", ")", ",", "test_path", ")", "try", ":", "test_yml", "=", "dbt", ".", "clients", ".", "yaml_helper", ".", "load_yaml_text", "(", "file_contents", ")", "except", "dbt", ".", "exceptions", ".", "ValidationException", "as", "e", ":", "test_yml", "=", "None", "logger", ".", "info", "(", "\"Error reading {}:{} - Skipping\\n{}\"", ".", "format", "(", "package_name", ",", "test_path", ",", "e", ")", ")", "if", "test_yml", "is", "None", ":", "continue", "yield", "original_file_path", ",", "test_yml" ]
Get the inactive_state of an object
def get_inactive_status(brain_or_object, default="active"):
    """Get the inactive_state of an object.

    :param default: value returned when the brain lacks the metadata or
                    no inactive workflow applies to the object
    """
    # Brains expose the state as catalog metadata; avoid waking the object.
    if is_brain(brain_or_object):
        return getattr(brain_or_object, "inactive_state", default)
    if 'bika_inactive_workflow' not in get_workflows_for(brain_or_object):
        return default
    return get_workflow_status_of(brain_or_object, 'inactive_state')
3,617
https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L792-L805
[ "def", "find_schema_yml", "(", "cls", ",", "package_name", ",", "root_dir", ",", "relative_dirs", ")", ":", "extension", "=", "\"[!.#~]*.yml\"", "file_matches", "=", "dbt", ".", "clients", ".", "system", ".", "find_matching", "(", "root_dir", ",", "relative_dirs", ",", "extension", ")", "for", "file_match", "in", "file_matches", ":", "file_contents", "=", "dbt", ".", "clients", ".", "system", ".", "load_file_contents", "(", "file_match", ".", "get", "(", "'absolute_path'", ")", ",", "strip", "=", "False", ")", "test_path", "=", "file_match", ".", "get", "(", "'relative_path'", ",", "''", ")", "original_file_path", "=", "os", ".", "path", ".", "join", "(", "file_match", ".", "get", "(", "'searched_path'", ")", ",", "test_path", ")", "try", ":", "test_yml", "=", "dbt", ".", "clients", ".", "yaml_helper", ".", "load_yaml_text", "(", "file_contents", ")", "except", "dbt", ".", "exceptions", ".", "ValidationException", "as", "e", ":", "test_yml", "=", "None", "logger", ".", "info", "(", "\"Error reading {}:{} - Skipping\\n{}\"", ".", "format", "(", "package_name", ",", "test_path", ",", "e", ")", ")", "if", "test_yml", "is", "None", ":", "continue", "yield", "original_file_path", ",", "test_yml" ]
Sets the logging level of the script based on command line options .
def set_log_level(verbose, quiet):
    """Sets the logging level of the script based on command line options.

    :param verbose: verbosity count; higher means chattier output
    :param quiet: when truthy, force CRITICAL-only output
    """
    if quiet:
        verbose = -1
    # Map the -v count onto a logging level.
    if verbose < 0:
        verbose = logging.CRITICAL
    elif verbose == 0:
        verbose = logging.WARNING
    elif verbose == 1:
        verbose = logging.INFO
    elif verbose > 1:
        verbose = logging.DEBUG
    LOGGER.setLevel(verbose)
3,618
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/fsed.py#L23-L41
[ "def", "exclude_types", "(", "self", ",", "*", "objs", ")", ":", "for", "o", "in", "objs", ":", "for", "t", "in", "_keytuple", "(", "o", ")", ":", "if", "t", "and", "t", "not", "in", "self", ".", "_excl_d", ":", "self", ".", "_excl_d", "[", "t", "]", "=", "0" ]
Automatically detects the pattern file format and determines whether the Aho - Corasick string matching should pay attention to word boundaries or not .
def detect_pattern_format(pattern_filename, encoding, on_word_boundaries):
    """Automatically detects the pattern file format, and determines whether
    the Aho-Corasick string matching should pay attention to word boundaries
    or not.

    :returns: tuple of two booleans ``(tsv, boundaries)``
    """
    is_tsv = True
    needs_boundaries = on_word_boundaries
    with open_file(pattern_filename) as handle:
        for raw_line in handle:
            text = raw_line.decode(encoding)
            # A tsv pattern file has exactly one tab on every line.
            if text.count('\t') != 1:
                is_tsv = False
            # An explicit \b escape forces word-boundary matching.
            if '\\b' in text:
                needs_boundaries = True
            # Both questions answered; no need to scan further.
            if needs_boundaries and not is_tsv:
                break
    return is_tsv, needs_boundaries
3,619
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/fsed.py#L43-L65
[ "def", "add_exp_key", "(", "self", ",", "key", ",", "value", ",", "ex", ")", ":", "return", "self", ".", "c", ".", "set", "(", "key", ",", "value", ",", "ex", ")" ]
Process escaped characters in sval .
def sub_escapes(sval):
    """Process escaped characters in sval.

    A single left-to-right pass is used so that an escaped backslash is
    consumed before the following character is inspected; the original
    chain of str.replace calls turned e.g. ``\\\\n`` into a backslash
    followed by a newline instead of the literal two characters ``\\n``.
    """
    escapes = {
        'a': '\a',
        # NB: \b is mapped to NUL, which marks a word boundary downstream.
        'b': '\x00',
        'f': '\f',
        'n': '\n',
        'r': '\r',
        't': '\t',
        'v': '\v',
        '\\': '\\',
    }
    return re.sub(r'\\([abfnrtv\\])',
                  lambda match: escapes[match.group(1)],
                  sval)
3,620
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/fsed.py#L67-L82
[ "def", "subtract", "(", "self", ",", "val", ")", ":", "if", "isinstance", "(", "val", ",", "ndarray", ")", ":", "if", "val", ".", "shape", "!=", "self", ".", "value_shape", ":", "raise", "Exception", "(", "'Cannot subtract image with dimensions %s '", "'from images with dimension %s'", "%", "(", "str", "(", "val", ".", "shape", ")", ",", "str", "(", "self", ".", "value_shape", ")", ")", ")", "return", "self", ".", "map", "(", "lambda", "x", ":", "x", "-", "val", ",", "value_shape", "=", "self", ".", "value_shape", ")" ]
Constructs a finite state machine for performing string rewriting .
def build_trie(pattern_filename, pattern_format, encoding, on_word_boundaries):
    """Constructs a finite state machine for performing string rewriting.

    :param pattern_filename: file containing the rewrite patterns
    :param pattern_format: 'tsv', 'sed' or 'auto' (sniff the format)
    :param encoding: encoding used to decode the pattern file
    :param on_word_boundaries: force word-boundary matching
    :returns: tuple of (trie, boundaries flag)
    """
    boundaries = on_word_boundaries
    # Sniff the file when the format is unknown, or when we still need to
    # find out whether any pattern demands word-boundary handling.
    if pattern_format == 'auto' or not on_word_boundaries:
        tsv, boundaries = detect_pattern_format(pattern_filename, encoding,
                                                on_word_boundaries)
    if pattern_format == 'auto':
        if tsv:
            pattern_format = 'tsv'
        else:
            pattern_format = 'sed'
    trie = fsed.ahocorasick.AhoCorasickTrie()
    num_candidates = 0
    with open_file(pattern_filename) as pattern_file:
        for lineno, line in enumerate(pattern_file):
            line = line.decode(encoding).rstrip('\n')
            # Skip blank lines.
            if not line.strip():
                continue
            # decode the line
            if pattern_format == 'tsv':
                fields = line.split('\t')
                if len(fields) != 2:
                    LOGGER.warning(('skipping line {} of pattern file (not '
                                    'in tab-separated format): {}').format(
                                        lineno, line))
                    continue
                before, after = fields
            elif pattern_format == 'sed':
                before = after = None
                line = line.lstrip()
                if line[0] == 's':
                    delim = line[1]
                    # delim might be a regex special character;
                    # escape it if necessary
                    if delim in '.^$*+?[](){}|\\':
                        delim = '\\' + delim
                    # Split on unescaped delimiters only.
                    fields = re.split(r'(?<!\\){}'.format(delim), line)
                    if len(fields) == 4:
                        before, after = fields[1], fields[2]
                        # Un-escape the delimiter inside the two patterns.
                        before = re.sub(r'(?<!\\)\\{}'.format(delim), delim,
                                        before)
                        after = re.sub(r'(?<!\\)\\{}'.format(delim), delim,
                                       after)
                if before is None or after is None:
                    LOGGER.warning(('skipping line {} of pattern file (not '
                                    'in sed format): {}').format(
                                        lineno, line))
                    continue
            num_candidates += 1
            if on_word_boundaries and before != before.strip():
                LOGGER.warning(('before pattern on line {} padded whitespace; '
                                'this may interact strangely with the --words '
                                'option: {}').format(lineno, line))
            before = sub_escapes(before)
            after = sub_escapes(after)
            if boundaries:
                before = fsed.ahocorasick.boundary_transform(
                    before, on_word_boundaries)
            trie[before] = after
    LOGGER.info('{} patterns loaded from {}'.format(num_candidates,
                                                    pattern_filename))
    return trie, boundaries
3,621
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/fsed.py#L84-L148
[ "def", "_is_bval_type_b", "(", "grouped_dicoms", ")", ":", "bval_tag", "=", "Tag", "(", "0x0018", ",", "0x9087", ")", "bvec_tag", "=", "Tag", "(", "0x0018", ",", "0x9089", ")", "for", "group", "in", "grouped_dicoms", ":", "if", "bvec_tag", "in", "group", "[", "0", "]", "and", "bval_tag", "in", "group", "[", "0", "]", ":", "bvec", "=", "common", ".", "get_fd_array_value", "(", "group", "[", "0", "]", "[", "bvec_tag", "]", ",", "3", ")", "bval", "=", "common", ".", "get_fd_value", "(", "group", "[", "0", "]", "[", "bval_tag", "]", ")", "if", "_is_float", "(", "bvec", "[", "0", "]", ")", "and", "_is_float", "(", "bvec", "[", "1", "]", ")", "and", "_is_float", "(", "bvec", "[", "2", "]", ")", "and", "_is_float", "(", "bval", ")", "and", "bval", "!=", "0", ":", "return", "True", "return", "False" ]
Prints warning messages for every node that has both a value and a longest_prefix .
def warn_prefix_values(trie):
    """Prints warning messages for every node that has both a value and a
    longest_prefix.

    Such a node's pattern is a superstring of another pattern and can never
    be matched.
    """
    for current, _parent in trie.dfs():
        if current.has_value and current.longest_prefix is not None:
            # Logger.warn is a deprecated alias; use warning() instead.
            LOGGER.warning(('pattern {} (value {}) is a superstring of '
                            'pattern {} (value {}) and will never be '
                            'matched').format(
                                current.prefix, current.value,
                                current.longest_prefix.prefix,
                                current.longest_prefix.value))
3,622
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/fsed.py#L150-L160
[ "def", "map_init", "(", "interface", ",", "params", ")", ":", "import", "numpy", "as", "np", "import", "random", "np", ".", "random", ".", "seed", "(", "params", "[", "'seed'", "]", ")", "random", ".", "seed", "(", "params", "[", "'seed'", "]", ")", "return", "params" ]
Rewrites a string using the given trie object .
def rewrite_str_with_trie(sval, trie, boundaries=False, slow=False):
    """Rewrites a string using the given trie object.

    :param boundaries: apply boundary transform/untransform around matching
    :param slow: use the exhaustive replace instead of the greedy one
    """
    if boundaries:
        sval = fsed.ahocorasick.boundary_transform(sval)
    replace = trie.replace if slow else trie.greedy_replace
    sval = replace(sval)
    if boundaries:
        sval = ''.join(fsed.ahocorasick.boundary_untransform(sval))
    return sval
3,623
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/fsed.py#L162-L180
[ "def", "updateSeriesRegistrationStatus", "(", ")", ":", "from", ".", "models", "import", "Series", "if", "not", "getConstant", "(", "'general__enableCronTasks'", ")", ":", "return", "logger", ".", "info", "(", "'Checking status of Series that are open for registration.'", ")", "open_series", "=", "Series", ".", "objects", ".", "filter", "(", ")", ".", "filter", "(", "*", "*", "{", "'registrationOpen'", ":", "True", "}", ")", "for", "series", "in", "open_series", ":", "series", ".", "updateRegistrationStatus", "(", ")" ]
Register a function with the pipeline .
def register_function ( cls , fn , label ) : if label in cls . registered_functions : log . warning ( "Overwriting existing registered function %s" , label ) fn . label = label cls . registered_functions [ fn . label ] = fn
3,624
https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/pipeline.py#L34-L40
[ "def", "update_volume", "(", "self", ",", "volumeID", ",", "metadata", ")", ":", "log", ".", "debug", "(", "'updating volume metadata: {}'", ".", "format", "(", "volumeID", ")", ")", "rawVolume", "=", "self", ".", "_req_raw_volume", "(", "volumeID", ")", "normalized", "=", "self", ".", "normalize_volume", "(", "rawVolume", ")", "normalized", "[", "'metadata'", "]", "=", "metadata", "_", ",", "newRawVolume", "=", "self", ".", "denormalize_volume", "(", "normalized", ")", "self", ".", "_db", ".", "modify_book", "(", "volumeID", ",", "newRawVolume", ")" ]
Loads a previously serialised pipeline .
def load ( cls , serialised ) : pipeline = cls ( ) for fn_name in serialised : try : fn = cls . registered_functions [ fn_name ] except KeyError : raise BaseLunrException ( "Cannot load unregistered function " . format ( fn_name ) ) else : pipeline . add ( fn ) return pipeline
3,625
https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/pipeline.py#L43-L56
[ "def", "coordination_geometry_symmetry_measures_fallback_random", "(", "self", ",", "coordination_geometry", ",", "NRANDOM", "=", "10", ",", "points_perfect", "=", "None", ")", ":", "permutations_symmetry_measures", "=", "[", "None", "]", "*", "NRANDOM", "permutations", "=", "list", "(", ")", "algos", "=", "list", "(", ")", "perfect2local_maps", "=", "list", "(", ")", "local2perfect_maps", "=", "list", "(", ")", "for", "iperm", "in", "range", "(", "NRANDOM", ")", ":", "perm", "=", "np", ".", "random", ".", "permutation", "(", "coordination_geometry", ".", "coordination_number", ")", "permutations", ".", "append", "(", "perm", ")", "p2l", "=", "{", "}", "l2p", "=", "{", "}", "for", "i_p", ",", "pp", "in", "enumerate", "(", "perm", ")", ":", "p2l", "[", "i_p", "]", "=", "pp", "l2p", "[", "pp", "]", "=", "i_p", "perfect2local_maps", ".", "append", "(", "p2l", ")", "local2perfect_maps", ".", "append", "(", "l2p", ")", "points_distorted", "=", "self", ".", "local_geometry", ".", "points_wcs_ctwcc", "(", "permutation", "=", "perm", ")", "sm_info", "=", "symmetry_measure", "(", "points_distorted", "=", "points_distorted", ",", "points_perfect", "=", "points_perfect", ")", "sm_info", "[", "'translation_vector'", "]", "=", "self", ".", "local_geometry", ".", "centroid_with_centre", "permutations_symmetry_measures", "[", "iperm", "]", "=", "sm_info", "algos", ".", "append", "(", "'APPROXIMATE_FALLBACK'", ")", "return", "permutations_symmetry_measures", ",", "permutations", ",", "algos", ",", "local2perfect_maps", ",", "perfect2local_maps" ]
Adds new functions to the end of the pipeline .
def add ( self , * args ) : for fn in args : self . warn_if_function_not_registered ( fn ) self . _stack . append ( fn )
3,626
https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/pipeline.py#L58-L68
[ "def", "from_dict", "(", "cls", ",", "json_dict", ")", ":", "json_macaroon", "=", "json_dict", ".", "get", "(", "'m'", ")", "if", "json_macaroon", "is", "None", ":", "# Try the v1 format if we don't have a macaroon field.", "m", "=", "pymacaroons", ".", "Macaroon", ".", "deserialize", "(", "json", ".", "dumps", "(", "json_dict", ")", ",", "json_serializer", ".", "JsonSerializer", "(", ")", ")", "macaroon", "=", "Macaroon", "(", "root_key", "=", "None", ",", "id", "=", "None", ",", "namespace", "=", "legacy_namespace", "(", ")", ",", "version", "=", "_bakery_version", "(", "m", ".", "version", ")", ")", "macaroon", ".", "_macaroon", "=", "m", "return", "macaroon", "version", "=", "json_dict", ".", "get", "(", "'v'", ",", "None", ")", "if", "version", "is", "None", ":", "raise", "ValueError", "(", "'no version specified'", ")", "if", "(", "version", "<", "VERSION_3", "or", "version", ">", "LATEST_VERSION", ")", ":", "raise", "ValueError", "(", "'unknown bakery version {}'", ".", "format", "(", "version", ")", ")", "m", "=", "pymacaroons", ".", "Macaroon", ".", "deserialize", "(", "json", ".", "dumps", "(", "json_macaroon", ")", ",", "json_serializer", ".", "JsonSerializer", "(", ")", ")", "if", "m", ".", "version", "!=", "macaroon_version", "(", "version", ")", ":", "raise", "ValueError", "(", "'underlying macaroon has inconsistent version; '", "'got {} want {}'", ".", "format", "(", "m", ".", "version", ",", "macaroon_version", "(", "version", ")", ")", ")", "namespace", "=", "checkers", ".", "deserialize_namespace", "(", "json_dict", ".", "get", "(", "'ns'", ")", ")", "cdata", "=", "json_dict", ".", "get", "(", "'cdata'", ",", "{", "}", ")", "caveat_data", "=", "{", "}", "for", "id64", "in", "cdata", ":", "id", "=", "b64decode", "(", "id64", ")", "data", "=", "b64decode", "(", "cdata", "[", "id64", "]", ")", "caveat_data", "[", "id", "]", "=", "data", "macaroon", "=", "Macaroon", "(", "root_key", "=", "None", ",", "id", "=", "None", ",", "namespace", "=", 
"namespace", ",", "version", "=", "version", ")", "macaroon", ".", "_caveat_data", "=", "caveat_data", "macaroon", ".", "_macaroon", "=", "m", "return", "macaroon" ]
Adds a single function after a function that already exists in the pipeline .
def after ( self , existing_fn , new_fn ) : self . warn_if_function_not_registered ( new_fn ) try : index = self . _stack . index ( existing_fn ) self . _stack . insert ( index + 1 , new_fn ) except ValueError as e : six . raise_from ( BaseLunrException ( "Cannot find existing_fn" ) , e )
3,627
https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/pipeline.py#L81-L89
[ "def", "_adapt_WSDateTime", "(", "dt", ")", ":", "try", ":", "ts", "=", "int", "(", "(", "dt", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")", "-", "datetime", "(", "1970", ",", "1", ",", "1", ",", "tzinfo", "=", "pytz", ".", "utc", ")", ")", ".", "total_seconds", "(", ")", ")", "except", "(", "OverflowError", ",", "OSError", ")", ":", "if", "dt", "<", "datetime", ".", "now", "(", ")", ":", "ts", "=", "0", "else", ":", "ts", "=", "2", "**", "63", "-", "1", "return", "ts" ]
Runs the current list of functions that make up the pipeline against the passed tokens .
def run ( self , tokens ) : for fn in self . _stack : results = [ ] for i , token in enumerate ( tokens ) : # JS ignores additional arguments to the functions but we # force pipeline functions to declare (token, i, tokens) # or *args result = fn ( token , i , tokens ) if not result : continue if isinstance ( result , ( list , tuple ) ) : # simulate Array.concat results . extend ( result ) else : results . append ( result ) tokens = results return tokens
3,628
https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/pipeline.py#L110-L128
[ "def", "from_pandas_dataframe", "(", "cls", ",", "bqm_df", ",", "offset", "=", "0.0", ",", "interactions", "=", "None", ")", ":", "if", "interactions", "is", "None", ":", "interactions", "=", "[", "]", "bqm", "=", "cls", "(", "{", "}", ",", "{", "}", ",", "offset", ",", "Vartype", ".", "BINARY", ")", "for", "u", ",", "row", "in", "bqm_df", ".", "iterrows", "(", ")", ":", "for", "v", ",", "bias", "in", "row", ".", "iteritems", "(", ")", ":", "if", "u", "==", "v", ":", "bqm", ".", "add_variable", "(", "u", ",", "bias", ")", "elif", "bias", ":", "bqm", ".", "add_interaction", "(", "u", ",", "v", ",", "bias", ")", "for", "u", ",", "v", "in", "interactions", ":", "bqm", ".", "add_interaction", "(", "u", ",", "v", ",", "0.0", ")", "return", "bqm" ]
Convenience method for passing a string through a pipeline and getting strings out . This method takes care of wrapping the passed string in a token and mapping the resulting tokens back to strings .
def run_string ( self , string , metadata = None ) : token = Token ( string , metadata ) return [ str ( tkn ) for tkn in self . run ( [ token ] ) ]
3,629
https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/pipeline.py#L130-L135
[ "def", "update", "(", "self", ",", "other", ")", ":", "self", ".", "update_ttl", "(", "other", ".", "ttl", ")", "super", "(", "Rdataset", ",", "self", ")", ".", "update", "(", "other", ")" ]
Use the same MongoDB client as pmxbot if available .
def get_client ( ) : with contextlib . suppress ( Exception ) : store = Storage . from_URI ( ) assert isinstance ( store , pmxbot . storage . MongoDBStorage ) return store . db . database . client
3,630
https://github.com/jaraco/jaraco.mongodb/blob/280f17894941f4babf2e97db033dbb1fd2b9f705/jaraco/mongodb/pmxbot.py#L13-L20
[ "def", "change_thickness", "(", "self", ",", "element", ",", "thickness", ")", ":", "if", "element", "==", "\"header\"", ":", "self", ".", "data", ".", "append", "(", "Command", "(", "\"renewcommand\"", ",", "arguments", "=", "[", "NoEscape", "(", "r\"\\headrulewidth\"", ")", ",", "str", "(", "thickness", ")", "+", "'pt'", "]", ")", ")", "elif", "element", "==", "\"footer\"", ":", "self", ".", "data", ".", "append", "(", "Command", "(", "\"renewcommand\"", ",", "arguments", "=", "[", "NoEscape", "(", "r\"\\footrulewidth\"", ")", ",", "str", "(", "thickness", ")", "+", "'pt'", "]", ")", ")" ]
In a sharded cluster create a database in a particular shard .
def create_db_in_shard ( db_name , shard , client = None ) : client = client or pymongo . MongoClient ( ) # flush the router config to ensure it's not stale res = client . admin . command ( 'flushRouterConfig' ) if not res . get ( 'ok' ) : raise RuntimeError ( "unable to flush router config" ) if shard not in get_ids ( client . config . shards ) : raise ValueError ( f"Unknown shard {shard}" ) if db_name in get_ids ( client . config . databases ) : raise ValueError ( "database already exists" ) # MongoDB doesn't have a 'create database' command, so insert an # item into a collection and then drop the collection. client [ db_name ] . foo . insert ( { 'foo' : 1 } ) client [ db_name ] . foo . drop ( ) if client [ db_name ] . collection_names ( ) : raise ValueError ( "database has collections" ) primary = client [ 'config' ] . databases . find_one ( db_name ) [ 'primary' ] if primary != shard : res = client . admin . command ( 'movePrimary' , value = db_name , to = shard ) if not res . get ( 'ok' ) : raise RuntimeError ( str ( res ) ) return ( f"Successfully created {db_name} in {shard} via {client.nodes} " f"from {hostname}" )
3,631
https://github.com/jaraco/jaraco.mongodb/blob/280f17894941f4babf2e97db033dbb1fd2b9f705/jaraco/mongodb/sharding.py#L16-L43
[ "def", "read_until", "(", "data", ":", "bytes", ",", "*", ",", "return_tail", ":", "bool", "=", "True", ",", "from_", "=", "None", ")", "->", "bytes", ":", "return", "(", "yield", "(", "Traps", ".", "_read_until", ",", "data", ",", "return_tail", ",", "from_", ")", ")" ]
Calculates the Luhn checksum for number
def luhn_checksum ( number , chars = DIGITS ) : length = len ( chars ) number = [ chars . index ( n ) for n in reversed ( str ( number ) ) ] return ( sum ( number [ : : 2 ] ) + sum ( sum ( divmod ( i * 2 , length ) ) for i in number [ 1 : : 2 ] ) ) % length
3,632
https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/util.py#L9-L25
[ "def", "union", "(", "self", ",", "*", "dstreams", ")", ":", "if", "not", "dstreams", ":", "raise", "ValueError", "(", "\"should have at least one DStream to union\"", ")", "if", "len", "(", "dstreams", ")", "==", "1", ":", "return", "dstreams", "[", "0", "]", "if", "len", "(", "set", "(", "s", ".", "_jrdd_deserializer", "for", "s", "in", "dstreams", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "\"All DStreams should have same serializer\"", ")", "if", "len", "(", "set", "(", "s", ".", "_slideDuration", "for", "s", "in", "dstreams", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "\"All DStreams should have same slide duration\"", ")", "cls", "=", "SparkContext", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "streaming", ".", "api", ".", "java", ".", "JavaDStream", "jdstreams", "=", "SparkContext", ".", "_gateway", ".", "new_array", "(", "cls", ",", "len", "(", "dstreams", ")", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "dstreams", ")", ")", ":", "jdstreams", "[", "i", "]", "=", "dstreams", "[", "i", "]", ".", "_jdstream", "return", "DStream", "(", "self", ".", "_jssc", ".", "union", "(", "jdstreams", ")", ",", "self", ",", "dstreams", "[", "0", "]", ".", "_jrdd_deserializer", ")" ]
Calculate the Luhn check digit for number .
def luhn_calc ( number , chars = DIGITS ) : checksum = luhn_checksum ( str ( number ) + chars [ 0 ] , chars ) return chars [ - checksum ]
3,633
https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/util.py#L28-L41
[ "def", "create_stream_subscription", "(", "self", ",", "stream", ",", "on_data", ",", "timeout", "=", "60", ")", ":", "options", "=", "rest_pb2", ".", "StreamSubscribeRequest", "(", ")", "options", ".", "stream", "=", "stream", "manager", "=", "WebSocketSubscriptionManager", "(", "self", ".", "_client", ",", "resource", "=", "'stream'", ",", "options", "=", "options", ")", "# Represent subscription as a future", "subscription", "=", "WebSocketSubscriptionFuture", "(", "manager", ")", "wrapped_callback", "=", "functools", ".", "partial", "(", "_wrap_callback_parse_stream_data", ",", "subscription", ",", "on_data", ")", "manager", ".", "open", "(", "wrapped_callback", ",", "instance", "=", "self", ".", "_instance", ")", "# Wait until a reply or exception is received", "subscription", ".", "reply", "(", "timeout", "=", "timeout", ")", "return", "subscription" ]
Converts a number to a string of decimals in base 10 .
def to_decimal ( number , strip = '- ' ) : if isinstance ( number , six . integer_types ) : return str ( number ) number = str ( number ) number = re . sub ( r'[%s]' % re . escape ( strip ) , '' , number ) # hexadecimal if number . startswith ( '0x' ) : return to_decimal ( int ( number [ 2 : ] , 16 ) ) # octal elif number . startswith ( 'o' ) : return to_decimal ( int ( number [ 1 : ] , 8 ) ) # binary elif number . startswith ( 'b' ) : return to_decimal ( int ( number [ 1 : ] , 2 ) ) else : return str ( int ( number ) )
3,634
https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/util.py#L71-L103
[ "def", "getShocks", "(", "self", ")", ":", "PersistentShockConsumerType", ".", "getShocks", "(", "self", ")", "# Get permanent and transitory income shocks", "MedShkNow", "=", "np", ".", "zeros", "(", "self", ".", "AgentCount", ")", "# Initialize medical shock array", "MedPriceNow", "=", "np", ".", "zeros", "(", "self", ".", "AgentCount", ")", "# Initialize relative price array", "for", "t", "in", "range", "(", "self", ".", "T_cycle", ")", ":", "these", "=", "t", "==", "self", ".", "t_cycle", "N", "=", "np", ".", "sum", "(", "these", ")", "if", "N", ">", "0", ":", "MedShkAvg", "=", "self", ".", "MedShkAvg", "[", "t", "]", "MedShkStd", "=", "self", ".", "MedShkStd", "[", "t", "]", "MedPrice", "=", "self", ".", "MedPrice", "[", "t", "]", "MedShkNow", "[", "these", "]", "=", "self", ".", "RNG", ".", "permutation", "(", "approxLognormal", "(", "N", ",", "mu", "=", "np", ".", "log", "(", "MedShkAvg", ")", "-", "0.5", "*", "MedShkStd", "**", "2", ",", "sigma", "=", "MedShkStd", ")", "[", "1", "]", ")", "MedPriceNow", "[", "these", "]", "=", "MedPrice", "self", ".", "MedShkNow", "=", "MedShkNow", "self", ".", "MedPriceNow", "=", "MedPriceNow" ]
Returns a method from a given class or instance . When the method doest not exist it returns None . Also works with properties and cached properties .
def get_class_method ( cls_or_inst , method_name ) : cls = cls_or_inst if isinstance ( cls_or_inst , type ) else cls_or_inst . __class__ meth = getattr ( cls , method_name , None ) if isinstance ( meth , property ) : meth = meth . fget elif isinstance ( meth , cached_property ) : meth = meth . func return meth
3,635
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/utils/__init__.py#L17-L28
[ "def", "_unbind_topics", "(", "self", ",", "topics", ")", ":", "self", ".", "client", ".", "unsubscribe", "(", "topics", ".", "status", ")", "self", ".", "client", ".", "unsubscribe", "(", "topics", ".", "tracing", ")", "self", ".", "client", ".", "unsubscribe", "(", "topics", ".", "streaming", ")", "self", ".", "client", ".", "unsubscribe", "(", "topics", ".", "response", ")" ]
Manage a list of FITS resources
def manage_fits ( list_of_frame ) : import astropy . io . fits as fits import numina . types . dataframe as df refs = [ ] for frame in list_of_frame : if isinstance ( frame , str ) : ref = fits . open ( frame ) refs . append ( ref ) elif isinstance ( frame , fits . HDUList ) : refs . append ( frame ) elif isinstance ( frame , df . DataFrame ) : ref = frame . open ( ) refs . append ( ref ) else : refs . append ( frame ) try : yield refs finally : # release for obj in refs : obj . close ( )
3,636
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/frame/combine.py#L185-L208
[ "def", "_wget", "(", "cmd", ",", "opts", "=", "None", ",", "url", "=", "'http://localhost:8080/manager'", ",", "timeout", "=", "180", ")", ":", "ret", "=", "{", "'res'", ":", "True", ",", "'msg'", ":", "[", "]", "}", "# prepare authentication", "auth", "=", "_auth", "(", "url", ")", "if", "auth", "is", "False", ":", "ret", "[", "'res'", "]", "=", "False", "ret", "[", "'msg'", "]", "=", "'missing username and password settings (grain/pillar)'", "return", "ret", "# prepare URL", "if", "url", "[", "-", "1", "]", "!=", "'/'", ":", "url", "+=", "'/'", "url6", "=", "url", "url", "+=", "'text/{0}'", ".", "format", "(", "cmd", ")", "url6", "+=", "'{0}'", ".", "format", "(", "cmd", ")", "if", "opts", ":", "url", "+=", "'?{0}'", ".", "format", "(", "_urlencode", "(", "opts", ")", ")", "url6", "+=", "'?{0}'", ".", "format", "(", "_urlencode", "(", "opts", ")", ")", "# Make the HTTP request", "_install_opener", "(", "auth", ")", "try", ":", "# Trying tomcat >= 7 url", "ret", "[", "'msg'", "]", "=", "_urlopen", "(", "url", ",", "timeout", "=", "timeout", ")", ".", "read", "(", ")", ".", "splitlines", "(", ")", "except", "Exception", ":", "try", ":", "# Trying tomcat6 url", "ret", "[", "'msg'", "]", "=", "_urlopen", "(", "url6", ",", "timeout", "=", "timeout", ")", ".", "read", "(", ")", ".", "splitlines", "(", ")", "except", "Exception", ":", "ret", "[", "'msg'", "]", "=", "'Failed to create HTTP request'", "if", "not", "ret", "[", "'msg'", "]", "[", "0", "]", ".", "startswith", "(", "'OK'", ")", ":", "ret", "[", "'res'", "]", "=", "False", "return", "ret" ]
Set debugging level based on debugplot value .
def logging_from_debugplot ( debugplot ) : if isinstance ( debugplot , int ) : if abs ( debugplot ) >= 10 : logging . basicConfig ( level = logging . DEBUG ) else : logging . basicConfig ( level = logging . INFO ) else : raise ValueError ( "Unexpected debugplot=" + str ( debugplot ) )
3,637
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/logging_from_debugplot.py#L26-L42
[ "def", "_read_para_reg_failed", "(", "self", ",", "code", ",", "cbit", ",", "clen", ",", "*", ",", "desc", ",", "length", ",", "version", ")", ":", "_life", "=", "collections", ".", "namedtuple", "(", "'Lifetime'", ",", "(", "'min'", ",", "'max'", ")", ")", "_mint", "=", "self", ".", "_read_unpack", "(", "1", ")", "_maxt", "=", "self", ".", "_read_unpack", "(", "1", ")", "_type", "=", "list", "(", ")", "for", "_", "in", "range", "(", "clen", "-", "2", ")", ":", "_code", "=", "self", ".", "_read_unpack", "(", "1", ")", "_kind", "=", "_REG_FAILURE_TYPE", ".", "get", "(", "_code", ")", "if", "_kind", "is", "None", ":", "if", "0", "<=", "_code", "<=", "200", ":", "_kind", "=", "'Unassigned (IETF Review)'", "elif", "201", "<=", "_code", "<=", "255", ":", "_kind", "=", "'Unassigned (Reserved for Private Use)'", "else", ":", "raise", "ProtocolError", "(", "f'HIPv{version}: [Parano {code}] invalid format'", ")", "_type", ".", "append", "(", "_kind", ")", "reg_failed", "=", "dict", "(", "type", "=", "desc", ",", "critical", "=", "cbit", ",", "length", "=", "clen", ",", "lifetime", "=", "_life", "(", "_mint", ",", "_maxt", ")", ",", "reg_type", "=", "tuple", "(", "_type", ")", ",", ")", "_plen", "=", "length", "-", "clen", "if", "_plen", ":", "self", ".", "_read_fileng", "(", "_plen", ")", "return", "reg_failed" ]
Auxiliary function to display 1d plot .
def ximplot ( ycut , title = None , show = True , plot_bbox = ( 0 , 0 ) , geometry = ( 0 , 0 , 640 , 480 ) , tight_layout = True , debugplot = None ) : # protections if type ( ycut ) is not np . ndarray : raise ValueError ( "ycut=" + str ( ycut ) + " must be a numpy.ndarray" ) elif ycut . ndim is not 1 : raise ValueError ( "ycut.ndim=" + str ( ycut . dim ) + " must be 1" ) # read bounding box limits nc1 , nc2 = plot_bbox plot_coord = ( nc1 == 0 and nc2 == 0 ) naxis1_ = ycut . size if not plot_coord : # check that ycut size corresponds to bounding box size if naxis1_ != nc2 - nc1 + 1 : raise ValueError ( "ycut.size=" + str ( ycut . size ) + " does not correspond to bounding box size" ) # display image from numina . array . display . matplotlib_qt import plt if not show : plt . ioff ( ) fig = plt . figure ( ) ax = fig . add_subplot ( 111 ) ax . autoscale ( False ) ymin = ycut . min ( ) ymax = ycut . max ( ) if plot_coord : xmin = - 0.5 xmax = ( naxis1_ - 1 ) + 0.5 xcut = np . arange ( naxis1_ , dtype = np . float ) ax . set_xlabel ( 'image array index in the X direction' ) ax . set_ylabel ( 'pixel value' ) else : xmin = float ( nc1 ) - 0.5 xmax = float ( nc2 ) + 0.5 xcut = np . linspace ( start = nc1 , stop = nc2 , num = nc2 - nc1 + 1 ) ax . set_xlabel ( 'image pixel in the X direction' ) ax . set_ylabel ( 'pixel value' ) ax . set_xlim ( xmin , xmax ) ax . set_ylim ( ymin , ymax ) ax . plot ( xcut , ycut , '-' ) if title is not None : ax . set_title ( title ) # set the geometry set_window_geometry ( geometry ) if show : pause_debugplot ( debugplot , pltshow = show , tight_layout = tight_layout ) else : if tight_layout : plt . tight_layout ( ) # return axes return ax
3,638
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/ximplot.py#L19-L112
[ "def", "delete", "(", "self", ",", "event", ")", ":", "super", "(", "CeleryReceiver", ",", "self", ")", ".", "delete", "(", "event", ")", "AsyncResult", "(", "event", ".", "id", ")", ".", "revoke", "(", "terminate", "=", "True", ")" ]
Oversample spectrum .
def oversample1d ( sp , crval1 , cdelt1 , oversampling = 1 , debugplot = 0 ) : if sp . ndim != 1 : raise ValueError ( 'Unexpected array dimensions' ) naxis1 = sp . size naxis1_over = naxis1 * oversampling cdelt1_over = cdelt1 / oversampling xmin = crval1 - cdelt1 / 2 # left border of first pixel crval1_over = xmin + cdelt1_over / 2 sp_over = np . zeros ( naxis1_over ) for i in range ( naxis1 ) : i1 = i * oversampling i2 = i1 + oversampling sp_over [ i1 : i2 ] = sp [ i ] if abs ( debugplot ) in ( 21 , 22 ) : crvaln = crval1 + ( naxis1 - 1 ) * cdelt1 crvaln_over = crval1_over + ( naxis1_over - 1 ) * cdelt1_over xover = np . linspace ( crval1_over , crvaln_over , naxis1_over ) ax = ximplotxy ( np . linspace ( crval1 , crvaln , naxis1 ) , sp , 'bo' , label = 'original' , show = False ) ax . plot ( xover , sp_over , 'r+' , label = 'resampled' ) pause_debugplot ( debugplot , pltshow = True ) return sp_over , crval1_over , cdelt1_over
3,639
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/resample.py#L22-L78
[ "def", "delete_entity", "(", "self", ",", "entity_id", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "api_path", "=", "'/v1/{mount_point}/entity/id/{id}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "id", "=", "entity_id", ",", ")", "return", "self", ".", "_adapter", ".", "delete", "(", "url", "=", "api_path", ",", ")" ]
Compute borders of pixels for interpolation .
def map_borders ( wls ) : midpt_wl = 0.5 * ( wls [ 1 : ] + wls [ : - 1 ] ) all_borders = np . zeros ( ( wls . shape [ 0 ] + 1 , ) ) all_borders [ 1 : - 1 ] = midpt_wl all_borders [ 0 ] = 2 * wls [ 0 ] - midpt_wl [ 0 ] all_borders [ - 1 ] = 2 * wls [ - 1 ] - midpt_wl [ - 1 ] return all_borders
3,640
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/resample.py#L215-L225
[ "def", "__get_table_limits", "(", ")", ":", "table_counts", "=", "{", "'max_adjectives'", ":", "None", ",", "'max_names'", ":", "None", ",", "'max_nouns'", ":", "None", ",", "'max_sentences'", ":", "None", ",", "'max_faults'", ":", "None", ",", "'max_verbs'", ":", "None", "}", "cursor", "=", "CONN", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'SELECT count(*) FROM suradjs'", ")", "table_counts", "[", "'max_adjectives'", "]", "=", "cursor", ".", "fetchone", "(", ")", "table_counts", "[", "'max_adjectives'", "]", "=", "table_counts", "[", "'max_adjectives'", "]", "[", "0", "]", "cursor", ".", "execute", "(", "'SELECT count(*) FROM surnames'", ")", "table_counts", "[", "'max_names'", "]", "=", "cursor", ".", "fetchone", "(", ")", "table_counts", "[", "'max_names'", "]", "=", "table_counts", "[", "'max_names'", "]", "[", "0", "]", "cursor", ".", "execute", "(", "'SELECT count(*) FROM surnouns'", ")", "table_counts", "[", "'max_nouns'", "]", "=", "cursor", ".", "fetchone", "(", ")", "table_counts", "[", "'max_nouns'", "]", "=", "table_counts", "[", "'max_nouns'", "]", "[", "0", "]", "cursor", ".", "execute", "(", "'SELECT count(*) FROM sursentences'", ")", "table_counts", "[", "'max_sen'", "]", "=", "cursor", ".", "fetchone", "(", ")", "table_counts", "[", "'max_sen'", "]", "=", "table_counts", "[", "'max_sen'", "]", "[", "0", "]", "cursor", ".", "execute", "(", "'SELECT count(*) FROM surfaults'", ")", "table_counts", "[", "'max_fau'", "]", "=", "cursor", ".", "fetchone", "(", ")", "table_counts", "[", "'max_fau'", "]", "=", "table_counts", "[", "'max_fau'", "]", "[", "0", "]", "cursor", ".", "execute", "(", "'SELECT count(*) FROM surverbs'", ")", "table_counts", "[", "'max_verb'", "]", "=", "cursor", ".", "fetchone", "(", ")", "table_counts", "[", "'max_verb'", "]", "=", "table_counts", "[", "'max_verb'", "]", "[", "0", "]", "return", "table_counts" ]
Import an object given its fully qualified name .
def import_object ( path ) : spl = path . split ( '.' ) if len ( spl ) == 1 : return importlib . import_module ( path ) # avoid last part for the moment cls = spl [ - 1 ] mods = '.' . join ( spl [ : - 1 ] ) mm = importlib . import_module ( mods ) # try to get the last part as an attribute try : obj = getattr ( mm , cls ) return obj except AttributeError : pass # Try to import the last part rr = importlib . import_module ( path ) return rr
3,641
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/util/objimport.py#L17-L36
[ "def", "_select_broker_pair", "(", "self", ",", "rg_destination", ",", "victim_partition", ")", ":", "broker_source", "=", "self", ".", "_elect_source_broker", "(", "victim_partition", ")", "broker_destination", "=", "rg_destination", ".", "_elect_dest_broker", "(", "victim_partition", ")", "return", "broker_source", ",", "broker_destination" ]
Return an argparse . ArgumentParser object with losser s arguments .
def make_parser ( add_help = True , exclude_args = None ) : if exclude_args is None : exclude_args = [ ] parser = argparse . ArgumentParser ( add_help = add_help ) parser . description = ( "Filter, transform and export a list of JSON " "objects on stdin to JSON or CSV on stdout" ) if "--columns" not in exclude_args : parser . add_argument ( "--columns" , dest = "columns_file" , help = "the JSON file specifying the columns to be output" , ) if ( "-i" not in exclude_args ) and ( "--input" not in exclude_args ) : parser . add_argument ( "-i" , "--input" , help = "read input from the given file instead of from stdin" , dest = 'input_data' , # Because input is a Python builtin. ) if ( "-c" not in exclude_args ) and ( "--column" not in exclude_args ) : parser . add_argument ( "-c" , "--column" , action = ColumnsAction ) if "--pattern" not in exclude_args : parser . add_argument ( "--pattern" , action = ColumnsAction , nargs = '+' ) if "--max-length" not in exclude_args : parser . add_argument ( "--max-length" , action = ColumnsAction ) if "--strip" not in exclude_args : parser . add_argument ( "--strip" , nargs = "?" , action = ColumnsAction ) if "--deduplicate" not in exclude_args : parser . add_argument ( "--deduplicate" , nargs = '?' , action = ColumnsAction ) if "--case-sensitive" not in exclude_args : parser . add_argument ( "--case-sensitive" , nargs = '?' , action = ColumnsAction ) if "--unique" not in exclude_args : parser . add_argument ( "--unique" , nargs = "?" , action = ColumnsAction ) if ( "-p" not in exclude_args ) and ( "--pretty" not in exclude_args ) : parser . add_argument ( "-p" , "--pretty" , action = "store_true" ) return parser
3,642
https://github.com/ckan/losser/blob/fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f/losser/cli.py#L153-L211
[ "def", "development_verify", "(", ")", ":", "with", "open", "(", "DEVELOPMENT_TEMPLATE", ",", "\"r\"", ")", "as", "file_obj", ":", "template", "=", "file_obj", ".", "read", "(", ")", "expected", "=", "template", ".", "format", "(", "revision", "=", "REVISION", ",", "rtd_version", "=", "RTD_VERSION", ")", "with", "open", "(", "DEVELOPMENT_FILE", ",", "\"r\"", ")", "as", "file_obj", ":", "contents", "=", "file_obj", ".", "read", "(", ")", "if", "contents", "!=", "expected", ":", "err_msg", "=", "\"\\n\"", "+", "get_diff", "(", "contents", ",", "expected", ",", "\"DEVELOPMENT.rst.actual\"", ",", "\"DEVELOPMENT.rst.expected\"", ",", ")", "raise", "ValueError", "(", "err_msg", ")", "else", ":", "print", "(", "\"DEVELOPMENT.rst contents are as expected.\"", ")" ]
Parse the command line arguments return an argparse namespace object .
def parse ( parser = None , args = None ) : if not parser : parser = make_parser ( ) try : parsed_args = parser . parse_args ( args ) except SystemExit as err : raise CommandLineExit ( err . code ) try : columns = parsed_args . columns except AttributeError : columns = collections . OrderedDict ( ) parsed_args . columns = columns for title , spec in columns . items ( ) : if "pattern" not in spec : raise ColumnWithoutPatternError ( 'Column "{0}" needs a pattern' . format ( title ) ) # Change length-1 patterns into strings (not lists of one string). if len ( spec [ "pattern" ] ) == 1 : spec [ "pattern" ] = spec [ "pattern" ] [ 0 ] if columns and parsed_args . columns_file : raise ColumnsAndColumnsFileError ( "You can't use the --column and --columns options together (yet)" ) elif parsed_args . columns_file and not columns : parsed_args . columns = parsed_args . columns_file elif ( not columns ) and ( not parsed_args . columns_file ) : # Crash if no columns specified. # In the future we'll support simply converting all JSON fields to CSV # columns if no columns are specified, and this will be removed. raise NoColumnsError ( "You must give either a --columns or at least one -c/--column " "argument" ) else : assert columns return parsed_args
3,643
https://github.com/ckan/losser/blob/fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f/losser/cli.py#L214-L282
[ "def", "person_same_name_map", "(", "json_content", ",", "role_from", ")", ":", "matched_editors", "=", "[", "(", "i", ",", "person", ")", "for", "i", ",", "person", "in", "enumerate", "(", "json_content", ")", "if", "person", ".", "get", "(", "'role'", ")", "in", "role_from", "]", "same_name_map", "=", "{", "}", "for", "i", ",", "editor", "in", "matched_editors", ":", "if", "not", "editor", ".", "get", "(", "\"name\"", ")", ":", "continue", "# compare name of each", "name", "=", "editor", ".", "get", "(", "\"name\"", ")", ".", "get", "(", "\"index\"", ")", "if", "name", "not", "in", "same_name_map", ":", "same_name_map", "[", "name", "]", "=", "[", "]", "same_name_map", "[", "name", "]", ".", "append", "(", "i", ")", "return", "same_name_map" ]
Read command - line args and stdin return the result .
def do ( parser = None , args = None , in_ = None , table_function = None ) : in_ = in_ or sys . stdin table_function = table_function or losser . table parsed_args = parse ( parser = parser , args = args ) # Read the input data from stdin or a file. if parsed_args . input_data : input_data = open ( parsed_args . input_data , 'r' ) . read ( ) else : input_data = in_ . read ( ) dicts = json . loads ( input_data ) csv_string = table_function ( dicts , parsed_args . columns , csv = True , pretty = parsed_args . pretty ) return csv_string
3,644
https://github.com/ckan/losser/blob/fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f/losser/cli.py#L285-L314
[ "def", "checkIsConsistent", "(", "self", ")", ":", "if", "is_an_array", "(", "self", ".", "mask", ")", "and", "self", ".", "mask", ".", "shape", "!=", "self", ".", "data", ".", "shape", ":", "raise", "ConsistencyError", "(", "\"Shape mismatch mask={}, data={}\"", ".", "format", "(", "self", ".", "mask", ".", "shape", "!=", "self", ".", "data", ".", "shape", ")", ")" ]
Generate a normalized Gaussian profile from its FWHM
def generate_gaussian_profile ( seeing_fwhm ) : FWHM_G = 2 * math . sqrt ( 2 * math . log ( 2 ) ) sigma = seeing_fwhm / FWHM_G amplitude = 1.0 / ( 2 * math . pi * sigma * sigma ) seeing_model = Gaussian2D ( amplitude = amplitude , x_mean = 0.0 , y_mean = 0.0 , x_stddev = sigma , y_stddev = sigma ) return seeing_model
3,645
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/instrument/simulation/atmosphere.py#L65-L75
[ "def", "bind_transient", "(", "type_to_bind", ":", "hexdi", ".", "core", ".", "restype", ",", "accessor", ":", "hexdi", ".", "core", ".", "clstype", ")", ":", "hexdi", ".", "core", ".", "get_root_container", "(", ")", ".", "bind_type", "(", "type_to_bind", ",", "accessor", ",", "lifetime", ".", "PerResolveLifeTimeManager", ")" ]
Generate a normalized Moffat profile from its FWHM and alpha
def generate_moffat_profile ( seeing_fwhm , alpha ) : scale = 2 * math . sqrt ( 2 ** ( 1.0 / alpha ) - 1 ) gamma = seeing_fwhm / scale amplitude = 1.0 / math . pi * ( alpha - 1 ) / gamma ** 2 seeing_model = Moffat2D ( amplitude = amplitude , x_mean = 0.0 , y_mean = 0.0 , gamma = gamma , alpha = alpha ) return seeing_model
3,646
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/instrument/simulation/atmosphere.py#L78-L89
[ "def", "delete_expired_requests", "(", ")", ":", "InclusionRequest", ".", "query", ".", "filter_by", "(", "InclusionRequest", ".", "expiry_date", ">", "datetime", ".", "utcnow", "(", ")", ")", ".", "delete", "(", ")", "db", ".", "session", ".", "commit", "(", ")" ]
Converts a model field to a dictionary
def field_to_dict ( field , instance ) : # avoid a circular import from django . db . models . fields . related import ManyToManyField return ( many_to_many_field_to_dict ( field , instance ) if isinstance ( field , ManyToManyField ) else field . value_from_object ( instance ) )
3,647
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/models/__init__.py#L37-L45
[ "def", "bounds", "(", "self", ",", "vertices", ")", ":", "if", "util", ".", "is_shape", "(", "vertices", ",", "(", "-", "1", ",", "2", ")", ")", "and", "self", ".", "closed", ":", "# if we have a closed arc (a circle), we can return the actual bounds", "# this only works in two dimensions, otherwise this would return the", "# AABB of an sphere", "info", "=", "self", ".", "center", "(", "vertices", ")", "bounds", "=", "np", ".", "array", "(", "[", "info", "[", "'center'", "]", "-", "info", "[", "'radius'", "]", ",", "info", "[", "'center'", "]", "+", "info", "[", "'radius'", "]", "]", ",", "dtype", "=", "np", ".", "float64", ")", "else", ":", "# since the AABB of a partial arc is hard, approximate", "# the bounds by just looking at the discrete values", "discrete", "=", "self", ".", "discrete", "(", "vertices", ")", "bounds", "=", "np", ".", "array", "(", "[", "discrete", ".", "min", "(", "axis", "=", "0", ")", ",", "discrete", ".", "max", "(", "axis", "=", "0", ")", "]", ",", "dtype", "=", "np", ".", "float64", ")", "return", "bounds" ]
The same implementation as django model_to_dict but editable fields are allowed
def model_to_dict ( instance , fields = None , exclude = None ) : return { field . name : field_to_dict ( field , instance ) for field in chain ( instance . _meta . concrete_fields , instance . _meta . many_to_many ) # pylint: disable=W0212 if not should_exclude_field ( field , fields , exclude ) }
3,648
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/models/__init__.py#L70-L79
[ "def", "Heartbeat", "(", "self", ")", ":", "service_key", "=", "_GetServiceKey", "(", ")", "try", ":", "winreg", ".", "SetValueEx", "(", "service_key", ",", "\"Nanny.heartbeat\"", ",", "0", ",", "winreg", ".", "REG_DWORD", ",", "int", "(", "time", ".", "time", "(", ")", ")", ")", "except", "OSError", "as", "e", ":", "logging", ".", "debug", "(", "\"Failed to heartbeat nanny at %s: %s\"", ",", "service_key", ",", "e", ")" ]
Changes a given changed_fields on each object in the queryset saves objects and returns the changed objects in the queryset .
def change_and_save ( self , update_only_changed_fields = False , * * changed_fields ) : bulk_change_and_save ( self , update_only_changed_fields = update_only_changed_fields , * * changed_fields ) return self . filter ( )
3,649
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/models/__init__.py#L258-L264
[ "def", "getVolumeInformation", "(", "self", ",", "volumeNameBuffer", ",", "volumeNameSize", ",", "volumeSerialNumber", ",", "maximumComponentLength", ",", "fileSystemFlags", ",", "fileSystemNameBuffer", ",", "fileSystemNameSize", ",", "dokanFileInfo", ",", ")", ":", "ret", "=", "self", ".", "operations", "(", "'getVolumeInformation'", ")", "# populate volume name buffer", "ctypes", ".", "memmove", "(", "volumeNameBuffer", ",", "ret", "[", "'volumeNameBuffer'", "]", ",", "min", "(", "ctypes", ".", "sizeof", "(", "ctypes", ".", "c_wchar", ")", "*", "len", "(", "ret", "[", "'volumeNameBuffer'", "]", ")", ",", "volumeNameSize", ",", ")", ",", ")", "# populate serial number buffer", "serialNum", "=", "ctypes", ".", "c_ulong", "(", "self", ".", "serialNumber", ")", "ctypes", ".", "memmove", "(", "volumeSerialNumber", ",", "ctypes", ".", "byref", "(", "serialNum", ")", ",", "ctypes", ".", "sizeof", "(", "ctypes", ".", "c_ulong", ")", ")", "# populate max component length", "maxCompLen", "=", "ctypes", ".", "c_ulong", "(", "ret", "[", "'maximumComponentLength'", "]", ")", "ctypes", ".", "memmove", "(", "maximumComponentLength", ",", "ctypes", ".", "byref", "(", "maxCompLen", ")", ",", "ctypes", ".", "sizeof", "(", "ctypes", ".", "c_ulong", ")", ",", ")", "# populate filesystem flags buffer", "fsFlags", "=", "ctypes", ".", "c_ulong", "(", "ret", "[", "'fileSystemFlags'", "]", ")", "ctypes", ".", "memmove", "(", "fileSystemFlags", ",", "ctypes", ".", "byref", "(", "fsFlags", ")", ",", "ctypes", ".", "sizeof", "(", "ctypes", ".", "c_ulong", ")", ")", "# populate filesystem name", "ctypes", ".", "memmove", "(", "fileSystemNameBuffer", ",", "ret", "[", "'fileSystemNameBuffer'", "]", ",", "min", "(", "ctypes", ".", "sizeof", "(", "ctypes", ".", "c_wchar", ")", "*", "len", "(", "ret", "[", "'fileSystemNameBuffer'", "]", ")", ",", "fileSystemNameSize", ",", ")", ",", ")", "return", "d1_onedrive", ".", "impl", ".", "drivers", ".", "dokan", ".", "const", ".", "DOKAN_SUCCESS" ]
Helper for matplotlib imshow
def extent ( self ) : return ( self . intervals [ 1 ] . pix1 - 0.5 , self . intervals [ 1 ] . pix2 - 0.5 , self . intervals [ 0 ] . pix1 - 0.5 , self . intervals [ 0 ] . pix2 - 0.5 , )
3,650
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/bbox.py#L214-L221
[ "def", "save_vocabulary", "(", "self", ",", "vocab_path", ")", ":", "index", "=", "0", "if", "os", ".", "path", ".", "isdir", "(", "vocab_path", ")", ":", "vocab_file", "=", "os", ".", "path", ".", "join", "(", "vocab_path", ",", "VOCAB_NAME", ")", "with", "open", "(", "vocab_file", ",", "\"w\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "writer", ":", "for", "token", ",", "token_index", "in", "sorted", "(", "self", ".", "vocab", ".", "items", "(", ")", ",", "key", "=", "lambda", "kv", ":", "kv", "[", "1", "]", ")", ":", "if", "index", "!=", "token_index", ":", "logger", ".", "warning", "(", "\"Saving vocabulary to {}: vocabulary indices are not consecutive.\"", "\" Please check that the vocabulary is not corrupted!\"", ".", "format", "(", "vocab_file", ")", ")", "index", "=", "token_index", "writer", ".", "write", "(", "token", "+", "u'\\n'", ")", "index", "+=", "1", "return", "vocab_file" ]
Readout the detector .
def readout ( self ) : elec = self . simulate_poisson_variate ( ) elec_pre = self . saturate ( elec ) elec_f = self . pre_readout ( elec_pre ) adu_r = self . base_readout ( elec_f ) adu_p = self . post_readout ( adu_r ) self . clean_up ( ) return adu_p
3,651
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/instrument/components/detector.py#L94-L109
[ "def", "_get_dimension_scales", "(", "self", ",", "dimension", ",", "preserve_domain", "=", "False", ")", ":", "if", "preserve_domain", ":", "return", "[", "self", ".", "scales", "[", "k", "]", "for", "k", "in", "self", ".", "scales", "if", "(", "k", "in", "self", ".", "scales_metadata", "and", "self", ".", "scales_metadata", "[", "k", "]", ".", "get", "(", "'dimension'", ")", "==", "dimension", "and", "not", "self", ".", "preserve_domain", ".", "get", "(", "k", ")", ")", "]", "else", ":", "return", "[", "self", ".", "scales", "[", "k", "]", "for", "k", "in", "self", ".", "scales", "if", "(", "k", "in", "self", ".", "scales_metadata", "and", "self", ".", "scales_metadata", "[", "k", "]", ".", "get", "(", "'dimension'", ")", "==", "dimension", ")", "]" ]
parse limited form of arguments of function
def parse_arg_line ( fargs ) : # Convert to literal dict fargs = fargs . strip ( ) if fargs == '' : return { } pairs = [ s . strip ( ) for s in fargs . split ( ',' ) ] # find first "=" result = [ ] for p in pairs : fe = p . find ( "=" ) if fe == - 1 : # no equal raise ValueError ( "malformed" ) key = p [ : fe ] val = p [ fe + 1 : ] tok = "'{}': {}" . format ( key , val ) result . append ( tok ) tokj = ',' . join ( result ) result = "{{ {0} }}" . format ( tokj ) state = ast . literal_eval ( result ) return state
3,652
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/util/parser.py#L27-L54
[ "def", "project_community", "(", "index", ",", "start", ",", "end", ")", ":", "results", "=", "{", "\"author_metrics\"", ":", "[", "Authors", "(", "index", ",", "start", ",", "end", ")", "]", ",", "\"people_top_metrics\"", ":", "[", "Authors", "(", "index", ",", "start", ",", "end", ")", "]", ",", "\"orgs_top_metrics\"", ":", "[", "Organizations", "(", "index", ",", "start", ",", "end", ")", "]", ",", "}", "return", "results" ]
Return a given number formatter a price for humans .
def natural_number_with_currency ( number , currency , show_decimal_place = True , use_nbsp = True ) : humanized = '{} {}' . format ( numberformat . format ( number = number , decimal_sep = ',' , decimal_pos = 2 if show_decimal_place else 0 , grouping = 3 , thousand_sep = ' ' , force_grouping = True ) , force_text ( currency ) ) return mark_safe ( humanized . replace ( ' ' , '\u00a0' ) ) if use_nbsp else humanized
3,653
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/formatters/__init__.py#L6-L21
[ "def", "dump", "(", "self", ")", ":", "assert", "self", ".", "database", "is", "not", "None", "cmd", "=", "\"SELECT count from {} WHERE rowid={}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_INFO_TABLE", ",", "self", ".", "STATE_INFO_ROW", ")", ")", "ret", "=", "self", ".", "_fetchall", "(", ")", "assert", "len", "(", "ret", ")", "==", "1", "assert", "len", "(", "ret", "[", "0", "]", ")", "==", "1", "count", "=", "self", ".", "_from_sqlite", "(", "ret", "[", "0", "]", "[", "0", "]", ")", "+", "self", ".", "inserts", "if", "count", ">", "self", ".", "row_limit", ":", "msg", "=", "\"cleaning up state, this might take a while.\"", "logger", ".", "warning", "(", "msg", ")", "delete", "=", "count", "-", "self", ".", "row_limit", "delete", "+=", "int", "(", "self", ".", "row_limit", "*", "(", "self", ".", "row_cleanup_quota", "/", "100.0", ")", ")", "cmd", "=", "(", "\"DELETE FROM {} WHERE timestamp IN (\"", "\"SELECT timestamp FROM {} ORDER BY timestamp ASC LIMIT {});\"", ")", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_TABLE", ",", "self", ".", "STATE_TABLE", ",", "delete", ")", ")", "self", ".", "_vacuum", "(", ")", "cmd", "=", "\"SELECT COUNT(*) FROM {}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_TABLE", ")", ")", "ret", "=", "self", ".", "_fetchall", "(", ")", "assert", "len", "(", "ret", ")", "==", "1", "assert", "len", "(", "ret", "[", "0", "]", ")", "==", "1", "count", "=", "ret", "[", "0", "]", "[", "0", "]", "cmd", "=", "\"UPDATE {} SET count = {} WHERE rowid = {}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_INFO_TABLE", ",", "self", ".", "_to_sqlite", "(", "count", ")", ",", "self", ".", "STATE_INFO_ROW", ",", ")", ")", "self", ".", "_update_cache_directory_state", "(", ")", "self", ".", "database", ".", "commit", "(", ")", "self", ".", "cursor", ".", "close", "(", ")", "self", ".", "database", ".", "close", "(", ")", 
"self", ".", "database", "=", "None", "self", ".", "cursor", "=", "None", "self", ".", "inserts", "=", "0" ]
Extract tags from serialized file
def extract_db_info ( self , obj , keys ) : objl = self . convert ( obj ) result = super ( DataFrameType , self ) . extract_db_info ( objl , keys ) ext = self . datamodel . extractor_map [ 'fits' ] if objl : with objl . open ( ) as hdulist : for field in keys : result [ field ] = ext . extract ( field , hdulist ) tags = result [ 'tags' ] for field in self . tags_keys : tags [ field ] = ext . extract ( field , hdulist ) return result else : return result
3,654
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/types/frame.py#L101-L119
[ "def", "_get_port_speed_price_id", "(", "items", ",", "port_speed", ",", "no_public", ",", "location", ")", ":", "for", "item", "in", "items", ":", "if", "utils", ".", "lookup", "(", "item", ",", "'itemCategory'", ",", "'categoryCode'", ")", "!=", "'port_speed'", ":", "continue", "# Check for correct capacity and if the item matches private only", "if", "any", "(", "[", "int", "(", "utils", ".", "lookup", "(", "item", ",", "'capacity'", ")", ")", "!=", "port_speed", ",", "_is_private_port_speed_item", "(", "item", ")", "!=", "no_public", ",", "not", "_is_bonded", "(", "item", ")", "]", ")", ":", "continue", "for", "price", "in", "item", "[", "'prices'", "]", ":", "if", "not", "_matches_location", "(", "price", ",", "location", ")", ":", "continue", "return", "price", "[", "'id'", "]", "raise", "SoftLayer", ".", "SoftLayerError", "(", "\"Could not find valid price for port speed: '%s'\"", "%", "port_speed", ")" ]
Return a single character read from keyboard
def readc ( prompt , default = None , valid = None , question_mark = True ) : cresult = None # Avoid PyCharm warning # question mark if question_mark : cquestion_mark = ' ? ' else : cquestion_mark = '' # main loop loop = True while loop : # display prompt if default is None : print ( prompt + cquestion_mark , end = '' ) sys . stdout . flush ( ) else : print ( prompt + ' [' + str ( default ) + ']' + cquestion_mark , end = '' ) sys . stdout . flush ( ) # read user's input cresult = sys . stdin . readline ( ) . strip ( ) if cresult == '' and default is not None : cresult = str ( default ) if len ( cresult ) == 1 : # check that all the characters are valid loop = False if valid is not None : for c in cresult : if c not in str ( valid ) : print ( '*** Error: invalid characters found.' ) print ( '*** Valid characters are:' , valid ) print ( '*** Try again!' ) loop = True else : print ( '*** Error: invalid string length. Try again!' ) return cresult
3,655
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/iofunctions.py#L7-L67
[ "def", "calculate_best_fit_vectors", "(", "L", ",", "E", ",", "V", ",", "n_planes", ")", ":", "U", ",", "XV", "=", "E", "[", ":", "]", ",", "[", "]", "# make a copy of E to prevent mutation", "for", "pole", "in", "L", ":", "XV", ".", "append", "(", "vclose", "(", "pole", ",", "V", ")", ")", "# get some points on the great circle", "for", "c", "in", "range", "(", "3", ")", ":", "U", "[", "c", "]", "=", "U", "[", "c", "]", "+", "XV", "[", "-", "1", "]", "[", "c", "]", "# iterate to find best agreement", "angle_tol", "=", "1.", "while", "angle_tol", ">", "0.1", ":", "angles", "=", "[", "]", "for", "k", "in", "range", "(", "n_planes", ")", ":", "for", "c", "in", "range", "(", "3", ")", ":", "U", "[", "c", "]", "=", "U", "[", "c", "]", "-", "XV", "[", "k", "]", "[", "c", "]", "R", "=", "np", ".", "sqrt", "(", "U", "[", "0", "]", "**", "2", "+", "U", "[", "1", "]", "**", "2", "+", "U", "[", "2", "]", "**", "2", ")", "for", "c", "in", "range", "(", "3", ")", ":", "V", "[", "c", "]", "=", "old_div", "(", "U", "[", "c", "]", ",", "R", ")", "XX", "=", "vclose", "(", "L", "[", "k", "]", ",", "V", ")", "ang", "=", "XX", "[", "0", "]", "*", "XV", "[", "k", "]", "[", "0", "]", "+", "XX", "[", "1", "]", "*", "XV", "[", "k", "]", "[", "1", "]", "+", "XX", "[", "2", "]", "*", "XV", "[", "k", "]", "[", "2", "]", "angles", ".", "append", "(", "np", ".", "arccos", "(", "ang", ")", "*", "180.", "/", "np", ".", "pi", ")", "for", "c", "in", "range", "(", "3", ")", ":", "XV", "[", "k", "]", "[", "c", "]", "=", "XX", "[", "c", "]", "U", "[", "c", "]", "=", "U", "[", "c", "]", "+", "XX", "[", "c", "]", "amax", "=", "-", "1", "for", "ang", "in", "angles", ":", "if", "ang", ">", "amax", ":", "amax", "=", "ang", "angle_tol", "=", "amax", "return", "XV" ]
Return value read from keyboard
def read_value ( ftype , prompt , default = None , minval = None , maxval = None , allowed_single_chars = None , question_mark = True ) : # avoid PyCharm warning 'might be referenced before assignment' result = None # question mark if question_mark : cquestion_mark = ' ? ' else : cquestion_mark = '' # check minimum value if minval is not None : try : iminval = ftype ( minval ) except ValueError : raise ValueError ( "'" + str ( minval ) + "' cannot " + "be used as an minval in readi()" ) else : iminval = None # check maximum value if maxval is not None : try : imaxval = ftype ( maxval ) except ValueError : raise ValueError ( "'" + str ( maxval ) + "' cannot " + "be used as an maxval in readi()" ) else : imaxval = None # minimum and maximum values if minval is None and maxval is None : cminmax = '' elif minval is None : cminmax = ' (number <= ' + str ( imaxval ) + ')' elif maxval is None : cminmax = ' (number >= ' + str ( iminval ) + ')' else : cminmax = ' (' + str ( minval ) + ' <= number <= ' + str ( maxval ) + ')' # main loop loop = True while loop : # display prompt if default is None : print ( prompt + cminmax + cquestion_mark , end = '' ) sys . stdout . flush ( ) else : print ( prompt + cminmax + ' [' + str ( default ) + ']' + cquestion_mark , end = '' ) sys . stdout . flush ( ) # read user's input cresult = sys . stdin . readline ( ) . strip ( ) if cresult == '' and default is not None : cresult = str ( default ) # if valid allowed single character, return character if len ( cresult ) == 1 : if allowed_single_chars is not None : if cresult in allowed_single_chars : return cresult # convert to ftype value try : result = ftype ( cresult ) except ValueError : print ( "*** Error: invalid " + str ( ftype ) + " value. Try again!" ) else : # check number is within expected range if minval is None and maxval is None : loop = False elif minval is None : if result <= imaxval : loop = False else : print ( "*** Error: number out of range. Try again!" 
) elif maxval is None : if result >= iminval : loop = False else : print ( "*** Error: number out of range. Try again!" ) else : if iminval <= result <= imaxval : loop = False else : print ( "*** Error: number out of range. Try again!" ) return result
3,656
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/iofunctions.py#L140-L256
[ "def", "parse_summary", "(", "content", ",", "reference_id", "=", "None", ")", ":", "summary", "=", "None", "m", "=", "_END_SUMMARY_PATTERN", ".", "search", "(", "content", ")", "if", "m", ":", "end_of_summary", "=", "m", ".", "start", "(", ")", "m", "=", "_START_SUMMARY_PATTERN", ".", "search", "(", "content", ",", "0", ",", "end_of_summary", ")", "or", "_ALTERNATIVE_START_SUMMARY_PATTERN", ".", "search", "(", "content", ",", "0", ",", "end_of_summary", ")", "if", "m", ":", "summary", "=", "content", "[", "m", ".", "end", "(", ")", ":", "end_of_summary", "]", "elif", "reference_id", "not", "in", "_CABLES_WITH_MALFORMED_SUMMARY", ":", "logger", ".", "debug", "(", "'Found \"end of summary\" but no start in \"%s\", content: \"%s\"'", "%", "(", "reference_id", ",", "content", "[", ":", "end_of_summary", "]", ")", ")", "else", ":", "m", "=", "_PARSE_SUMMARY_PATTERN", ".", "search", "(", "content", ")", "if", "m", ":", "summary", "=", "content", "[", "m", ".", "start", "(", "1", ")", ":", "m", ".", "end", "(", "1", ")", "]", "if", "summary", ":", "summary", "=", "_CLEAN_SUMMARY_CLS_PATTERN", ".", "sub", "(", "u''", ",", "summary", ")", "summary", "=", "_CLEAN_SUMMARY_PATTERN", ".", "sub", "(", "u' '", ",", "summary", ")", "summary", "=", "_CLEAN_SUMMARY_WS_PATTERN", ".", "sub", "(", "u' '", ",", "summary", ")", "summary", "=", "summary", ".", "strip", "(", ")", "return", "summary" ]
Load product object according to name
def load_product_object ( self , name ) : product_entry = self . products [ name ] product = self . _get_base_object ( product_entry ) return product
3,657
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipeline.py#L87-L94
[ "def", "send", "(", "self", ",", "data", ")", ":", "assert", "isinstance", "(", "data", ",", "text_type", ")", "# When data is send back to the client, we should replace the line", "# endings. (We didn't allocate a real pseudo terminal, and the telnet", "# connection is raw, so we are responsible for inserting \\r.)", "self", ".", "stdout", ".", "write", "(", "data", ".", "replace", "(", "'\\n'", ",", "'\\r\\n'", ")", ")", "self", ".", "stdout", ".", "flush", "(", ")" ]
Load all recipes to search for products
def depsolve ( self ) : # load everything requires = { } provides = { } for mode , r in self . recipes . items ( ) : l = self . load_recipe_object ( mode ) for field , vv in l . requirements ( ) . items ( ) : if vv . type . isproduct ( ) : name = vv . type . name ( ) pe = ProductEntry ( name , mode , field ) requires [ name ] = pe for field , vv in l . products ( ) . items ( ) : if vv . type . isproduct ( ) : name = vv . type . name ( ) pe = ProductEntry ( name , mode , field ) provides [ name ] = pe return requires , provides
3,658
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipeline.py#L109-L129
[ "def", "tofile", "(", "self", ",", "filename", ",", "format", "=", "'ascii'", ")", ":", "if", "not", "common", ".", "is_string", "(", "filename", ")", ":", "raise", "TypeError", "(", "'argument filename must be string but got %s'", "%", "(", "type", "(", "filename", ")", ")", ")", "if", "format", "not", "in", "[", "'ascii'", ",", "'binary'", "]", ":", "raise", "TypeError", "(", "'argument format must be ascii | binary'", ")", "filename", "=", "filename", ".", "strip", "(", ")", "if", "not", "filename", ":", "raise", "ValueError", "(", "'filename must be non-empty string'", ")", "if", "filename", "[", "-", "4", ":", "]", "!=", "'.vtk'", ":", "filename", "+=", "'.vtk'", "f", "=", "open", "(", "filename", ",", "'wb'", ")", "f", ".", "write", "(", "self", ".", "to_string", "(", "format", ")", ")", "f", ".", "close", "(", ")" ]
Search the mode that provides a given product
def search_mode_provides ( self , product , pipeline = 'default' ) : pipeline = self . pipelines [ pipeline ] for obj , mode , field in self . iterate_mode_provides ( self . modes , pipeline ) : # extract name from obj if obj . name ( ) == product : return ProductEntry ( obj . name ( ) , mode . key , field ) else : raise ValueError ( 'no mode provides %s' % product )
3,659
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipeline.py#L220-L229
[ "def", "dump", "(", "self", ")", ":", "assert", "self", ".", "database", "is", "not", "None", "cmd", "=", "\"SELECT count from {} WHERE rowid={}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_INFO_TABLE", ",", "self", ".", "STATE_INFO_ROW", ")", ")", "ret", "=", "self", ".", "_fetchall", "(", ")", "assert", "len", "(", "ret", ")", "==", "1", "assert", "len", "(", "ret", "[", "0", "]", ")", "==", "1", "count", "=", "self", ".", "_from_sqlite", "(", "ret", "[", "0", "]", "[", "0", "]", ")", "+", "self", ".", "inserts", "if", "count", ">", "self", ".", "row_limit", ":", "msg", "=", "\"cleaning up state, this might take a while.\"", "logger", ".", "warning", "(", "msg", ")", "delete", "=", "count", "-", "self", ".", "row_limit", "delete", "+=", "int", "(", "self", ".", "row_limit", "*", "(", "self", ".", "row_cleanup_quota", "/", "100.0", ")", ")", "cmd", "=", "(", "\"DELETE FROM {} WHERE timestamp IN (\"", "\"SELECT timestamp FROM {} ORDER BY timestamp ASC LIMIT {});\"", ")", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_TABLE", ",", "self", ".", "STATE_TABLE", ",", "delete", ")", ")", "self", ".", "_vacuum", "(", ")", "cmd", "=", "\"SELECT COUNT(*) FROM {}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_TABLE", ")", ")", "ret", "=", "self", ".", "_fetchall", "(", ")", "assert", "len", "(", "ret", ")", "==", "1", "assert", "len", "(", "ret", "[", "0", "]", ")", "==", "1", "count", "=", "ret", "[", "0", "]", "[", "0", "]", "cmd", "=", "\"UPDATE {} SET count = {} WHERE rowid = {}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_INFO_TABLE", ",", "self", ".", "_to_sqlite", "(", "count", ")", ",", "self", ".", "STATE_INFO_ROW", ",", ")", ")", "self", ".", "_update_cache_directory_state", "(", ")", "self", ".", "database", ".", "commit", "(", ")", "self", ".", "cursor", ".", "close", "(", ")", "self", ".", "database", ".", "close", "(", ")", 
"self", ".", "database", "=", "None", "self", ".", "cursor", "=", "None", "self", ".", "inserts", "=", "0" ]
Select instrument configuration based on OB
def select_configuration ( self , obresult ) : logger = logging . getLogger ( __name__ ) logger . debug ( 'calling default configuration selector' ) # get first possible image ref = obresult . get_sample_frame ( ) extr = self . datamodel . extractor_map [ 'fits' ] if ref : # get INSCONF configuration result = extr . extract ( 'insconf' , ref ) if result : # found the keyword, try to match logger . debug ( 'found insconf config uuid=%s' , result ) # Use insconf as uuid key if result in self . configurations : return self . configurations [ result ] else : # Additional check for conf.name for conf in self . configurations . values ( ) : if conf . name == result : return conf else : raise KeyError ( 'insconf {} does not match any config' . format ( result ) ) # If not, try to match by DATE date_obs = extr . extract ( 'observation_date' , ref ) for key , conf in self . configurations . items ( ) : if key == 'default' : # skip default continue if conf . date_end is not None : upper_t = date_obs < conf . date_end else : upper_t = True if upper_t and ( date_obs >= conf . date_start ) : logger . debug ( 'found date match, config uuid=%s' , key ) return conf else : logger . debug ( 'no match, using default configuration' ) return self . configurations [ 'default' ]
3,660
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipeline.py#L251-L292
[ "def", "run", "(", "self", ")", ":", "# Create the thread pool.", "executor", "=", "concurrent", ".", "futures", ".", "ThreadPoolExecutor", "(", "max_workers", "=", "self", ".", "_config", "[", "'num_workers'", "]", ")", "# Wait to ensure multiple senders can be synchronised.", "now", "=", "int", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "timestamp", "(", ")", ")", "start_time", "=", "(", "(", "now", "+", "29", ")", "//", "30", ")", "*", "30", "self", ".", "_log", ".", "info", "(", "'Waiting until {}'", ".", "format", "(", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "start_time", ")", ")", ")", "while", "int", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "timestamp", "(", ")", ")", "<", "start_time", ":", "time", ".", "sleep", "(", "0.1", ")", "# Run the event loop.", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "try", ":", "loop", ".", "run_until_complete", "(", "self", ".", "_run_loop", "(", "executor", ")", ")", "except", "KeyboardInterrupt", ":", "pass", "finally", ":", "# Send the end of stream message to each stream.", "self", ".", "_log", ".", "info", "(", "'Shutting down, closing streams...'", ")", "tasks", "=", "[", "]", "for", "stream", ",", "item_group", "in", "self", ".", "_streams", ":", "tasks", ".", "append", "(", "stream", ".", "async_send_heap", "(", "item_group", ".", "get_end", "(", ")", ")", ")", "loop", ".", "run_until_complete", "(", "asyncio", ".", "gather", "(", "*", "tasks", ")", ")", "self", ".", "_log", ".", "info", "(", "'... finished.'", ")", "executor", ".", "shutdown", "(", ")" ]
Select instrument profile based on OB
def select_profile ( self , obresult ) : logger = logging . getLogger ( __name__ ) logger . debug ( 'calling default profile selector' ) # check configuration insconf = obresult . configuration if insconf != 'default' : key = insconf date_obs = None keyname = 'uuid' else : # get first possible image ref = obresult . get_sample_frame ( ) if ref is None : key = obresult . instrument date_obs = None keyname = 'name' else : extr = self . datamodel . extractor_map [ 'fits' ] date_obs = extr . extract ( 'observation_date' , ref ) key = extr . extract ( 'insconf' , ref ) if key is not None : keyname = 'uuid' else : key = extr . extract ( 'instrument' , ref ) keyname = 'name' return key , date_obs , keyname
3,661
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipeline.py#L294-L323
[ "def", "console", "(", "self", ",", "console", ")", ":", "if", "console", "==", "self", ".", "_console", ":", "return", "if", "self", ".", "_console_type", "==", "\"vnc\"", "and", "console", "is", "not", "None", "and", "console", "<", "5900", ":", "raise", "NodeError", "(", "\"VNC console require a port superior or equal to 5900 currently it's {}\"", ".", "format", "(", "console", ")", ")", "if", "self", ".", "_console", ":", "self", ".", "_manager", ".", "port_manager", ".", "release_tcp_port", "(", "self", ".", "_console", ",", "self", ".", "_project", ")", "self", ".", "_console", "=", "None", "if", "console", "is", "not", "None", ":", "if", "self", ".", "console_type", "==", "\"vnc\"", ":", "self", ".", "_console", "=", "self", ".", "_manager", ".", "port_manager", ".", "reserve_tcp_port", "(", "console", ",", "self", ".", "_project", ",", "port_range_start", "=", "5900", ",", "port_range_end", "=", "6000", ")", "else", ":", "self", ".", "_console", "=", "self", ".", "_manager", ".", "port_manager", ".", "reserve_tcp_port", "(", "console", ",", "self", ".", "_project", ")", "log", ".", "info", "(", "\"{module}: '{name}' [{id}]: console port set to {port}\"", ".", "format", "(", "module", "=", "self", ".", "manager", ".", "module_name", ",", "name", "=", "self", ".", "name", ",", "id", "=", "self", ".", "id", ",", "port", "=", "console", ")", ")" ]
Build a recipe object from a given mode name
def get_recipe_object ( self , mode_name , pipeline_name = 'default' ) : active_mode = self . modes [ mode_name ] active_pipeline = self . pipelines [ pipeline_name ] recipe = active_pipeline . get_recipe_object ( active_mode ) return recipe
3,662
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipeline.py#L325-L330
[ "def", "detectWapWml", "(", "self", ")", ":", "return", "UAgentInfo", ".", "vndwap", "in", "self", ".", "__httpAccept", "or", "UAgentInfo", ".", "wml", "in", "self", ".", "__httpAccept" ]
Ask the user to press RETURN to continue after plotting .
def pause_debugplot ( debugplot , optional_prompt = None , pltshow = False , tight_layout = True ) : if debugplot not in DEBUGPLOT_CODES : raise ValueError ( 'Invalid debugplot value:' , debugplot ) if debugplot < 0 : debugplot_ = - debugplot pltclose = True else : debugplot_ = debugplot pltclose = False if pltshow : if debugplot_ in [ 1 , 2 , 11 , 12 , 21 , 22 ] : if tight_layout : plt . tight_layout ( ) if debugplot_ in [ 1 , 11 , 21 ] : plt . show ( block = False ) plt . pause ( 0.2 ) elif debugplot_ in [ 2 , 12 , 22 ] : print ( 'Press "q" to continue...' , end = '' ) sys . stdout . flush ( ) plt . show ( ) print ( '' ) else : if debugplot_ in [ 2 , 12 , 22 ] : if optional_prompt is None : print ( 'Press <RETURN> to continue...' , end = '' ) else : print ( optional_prompt , end = '' ) sys . stdout . flush ( ) cdummy = sys . stdin . readline ( ) . strip ( ) if debugplot_ in [ 1 , 2 , 11 , 12 , 21 , 22 ] and pltclose : plt . close ( )
3,663
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/pause_debugplot.py#L21-L82
[ "def", "checkIsConsistent", "(", "self", ")", ":", "if", "is_an_array", "(", "self", ".", "mask", ")", "and", "self", ".", "mask", ".", "shape", "!=", "self", ".", "data", ".", "shape", ":", "raise", "ConsistencyError", "(", "\"Shape mismatch mask={}, data={}\"", ".", "format", "(", "self", ".", "mask", ".", "shape", "!=", "self", ".", "data", ".", "shape", ")", ")" ]
Estimate the mode using the Half Sample mode .
def mode_half_sample ( a , is_sorted = False ) : a = np . asanyarray ( a ) if not is_sorted : sdata = np . sort ( a ) else : sdata = a n = len ( sdata ) if n == 1 : return sdata [ 0 ] elif n == 2 : return 0.5 * ( sdata [ 0 ] + sdata [ 1 ] ) elif n == 3 : ind = - sdata [ 0 ] + 2 * sdata [ 1 ] - sdata [ 2 ] if ind < 0 : return 0.5 * ( sdata [ 0 ] + sdata [ 1 ] ) elif ind > 0 : return 0.5 * ( sdata [ 1 ] + sdata [ 2 ] ) else : return sdata [ 1 ] else : N = int ( math . ceil ( n / 2.0 ) ) w = sdata [ ( N - 1 ) : ] - sdata [ : ( n - N + 1 ) ] ar = w . argmin ( ) return mode_half_sample ( sdata [ ar : ar + N ] , is_sorted = True )
3,664
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/mode.py#L21-L69
[ "def", "file_list", "(", "load", ")", ":", "ret", "=", "set", "(", ")", "try", ":", "for", "container", "in", "__opts__", "[", "'azurefs'", "]", ":", "if", "container", ".", "get", "(", "'saltenv'", ",", "'base'", ")", "!=", "load", "[", "'saltenv'", "]", ":", "continue", "container_list", "=", "_get_container_path", "(", "container", ")", "+", "'.list'", "lk", "=", "container_list", "+", "'.lk'", "salt", ".", "fileserver", ".", "wait_lock", "(", "lk", ",", "container_list", ",", "5", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "container_list", ")", ":", "continue", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "container_list", ",", "'r'", ")", "as", "fp_", ":", "ret", ".", "update", "(", "set", "(", "salt", ".", "utils", ".", "json", ".", "load", "(", "fp_", ")", ")", ")", "except", "Exception", "as", "exc", ":", "log", ".", "error", "(", "'azurefs: an error ocurred retrieving file lists. '", "'It should be resolved next time the fileserver '", "'updates. Please do not manually modify the azurefs '", "'cache directory.'", ")", "return", "list", "(", "ret", ")" ]
Overplot a ds9 region file .
def overplot_ds9reg ( filename , ax ) : # read ds9 region file with open ( filename ) as f : file_content = f . read ( ) . splitlines ( ) # check first line first_line = file_content [ 0 ] if "# Region file format: DS9" not in first_line : raise ValueError ( "Unrecognized ds9 region file format" ) for line in file_content : if line [ 0 : 4 ] == "line" : line_fields = line . split ( ) x1 = float ( line_fields [ 1 ] ) y1 = float ( line_fields [ 2 ] ) x2 = float ( line_fields [ 3 ] ) y2 = float ( line_fields [ 4 ] ) if "color" in line : i = line . find ( "color=" ) color = line [ i + 6 : i + 13 ] else : color = "green" ax . plot ( [ x1 , x2 ] , [ y1 , y2 ] , '-' , color = color ) elif line [ 0 : 4 ] == "text" : line_fields = line . split ( ) x0 = float ( line_fields [ 1 ] ) y0 = float ( line_fields [ 2 ] ) text = line_fields [ 3 ] [ 1 : - 1 ] if "color" in line : i = line . find ( "color=" ) color = line [ i + 6 : i + 13 ] else : color = "green" ax . text ( x0 , y0 , text , fontsize = 8 , bbox = dict ( boxstyle = "round,pad=0.1" , fc = "white" , ec = "grey" , ) , color = color , fontweight = 'bold' , backgroundcolor = 'white' , ha = 'center' ) else : # ignore pass
3,665
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/overplot_ds9reg.py#L14-L64
[ "def", "promote_owner", "(", "self", ",", "stream_id", ",", "user_id", ")", ":", "req_hook", "=", "'pod/v1/room/'", "+", "stream_id", "+", "'/membership/promoteOwner'", "req_args", "=", "'{ \"id\": %s }'", "%", "user_id", "status_code", ",", "response", "=", "self", ".", "__rest__", ".", "POST_query", "(", "req_hook", ",", "req_args", ")", "self", ".", "logger", ".", "debug", "(", "'%s: %s'", "%", "(", "status_code", ",", "response", ")", ")", "return", "status_code", ",", "response" ]
Find indexes of peaks in a 1d array .
def find_peaks_indexes ( arr , window_width = 5 , threshold = 0.0 , fpeak = 0 ) : _check_window_width ( window_width ) if ( fpeak < 0 or fpeak + 1 >= window_width ) : raise ValueError ( 'fpeak must be in the range 0- window_width - 2' ) kernel_peak = kernel_peak_function ( threshold , fpeak ) out = generic_filter ( arr , kernel_peak , window_width , mode = "reflect" ) result , = numpy . nonzero ( out ) return filter_array_margins ( arr , result , window_width )
3,666
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/peaks/peakdet.py#L61-L98
[ "def", "oauth_error_handler", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# OAuthErrors should not happen, so they are not caught here. Hence", "# they will result in a 500 Internal Server Error which is what we", "# are interested in.", "try", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "OAuthClientError", "as", "e", ":", "current_app", ".", "logger", ".", "warning", "(", "e", ".", "message", ",", "exc_info", "=", "True", ")", "return", "oauth2_handle_error", "(", "e", ".", "remote", ",", "e", ".", "response", ",", "e", ".", "code", ",", "e", ".", "uri", ",", "e", ".", "description", ")", "except", "OAuthCERNRejectedAccountError", "as", "e", ":", "current_app", ".", "logger", ".", "warning", "(", "e", ".", "message", ",", "exc_info", "=", "True", ")", "flash", "(", "_", "(", "'CERN account not allowed.'", ")", ",", "category", "=", "'danger'", ")", "return", "redirect", "(", "'/'", ")", "except", "OAuthRejectedRequestError", ":", "flash", "(", "_", "(", "'You rejected the authentication request.'", ")", ",", "category", "=", "'info'", ")", "return", "redirect", "(", "'/'", ")", "except", "AlreadyLinkedError", ":", "flash", "(", "_", "(", "'External service is already linked to another account.'", ")", ",", "category", "=", "'danger'", ")", "return", "redirect", "(", "url_for", "(", "'invenio_oauthclient_settings.index'", ")", ")", "return", "inner" ]
Refine the peak location previously found by find_peaks_indexes
def refine_peaks ( arr , ipeaks , window_width ) : _check_window_width ( window_width ) step = window_width // 2 ipeaks = filter_array_margins ( arr , ipeaks , window_width ) winoff = numpy . arange ( - step , step + 1 , dtype = 'int' ) peakwin = ipeaks [ : , numpy . newaxis ] + winoff ycols = arr [ peakwin ] ww = return_weights ( window_width ) coff2 = numpy . dot ( ww , ycols . T ) uc = - 0.5 * coff2 [ 1 ] / coff2 [ 2 ] yc = coff2 [ 0 ] + uc * ( coff2 [ 1 ] + coff2 [ 2 ] * uc ) xc = ipeaks + 0.5 * ( window_width - 1 ) * uc return xc , yc
3,667
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/peaks/peakdet.py#L137-L174
[ "def", "classes", "(", "request", ")", ":", "if", "not", "request", ".", "user", ".", "is_authenticated", "(", ")", "or", "not", "hasattr", "(", "request", ".", "user", ",", "\"userprofile\"", ")", ":", "return", "render_json", "(", "request", ",", "{", "'error'", ":", "_", "(", "'User is not logged in'", ")", ",", "'error_type'", ":", "'user_unauthorized'", "}", ",", "template", "=", "'user_json.html'", ",", "status", "=", "401", ")", "clss", "=", "[", "c", ".", "to_json", "(", ")", "for", "c", "in", "Class", ".", "objects", ".", "filter", "(", "owner", "=", "request", ".", "user", ".", "userprofile", ")", "]", "return", "render_json", "(", "request", ",", "clss", ",", "status", "=", "200", ",", "template", "=", "'user_json.html'", ",", "help_text", "=", "classes", ".", "__doc__", ")" ]
Complete config with default values
def complete_config ( config ) : if not config . has_section ( 'run' ) : config . add_section ( 'run' ) values = { 'basedir' : os . getcwd ( ) , 'task_control' : 'control.yaml' , } for k , v in values . items ( ) : if not config . has_option ( 'run' , k ) : config . set ( 'run' , k , v ) return config
3,668
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/user/clirun.py#L17-L32
[ "def", "add_parens", "(", "line", ",", "maxline", ",", "indent", ",", "statements", "=", "statements", ",", "count", "=", "count", ")", ":", "if", "line", "[", "0", "]", "in", "statements", ":", "index", "=", "1", "if", "not", "line", "[", "0", "]", ".", "endswith", "(", "' '", ")", ":", "index", "=", "2", "assert", "line", "[", "1", "]", "==", "' '", "line", ".", "insert", "(", "index", ",", "'('", ")", "if", "line", "[", "-", "1", "]", "==", "':'", ":", "line", ".", "insert", "(", "-", "1", ",", "')'", ")", "else", ":", "line", ".", "append", "(", "')'", ")", "# That was the easy stuff. Now for assignments.", "groups", "=", "list", "(", "get_assign_groups", "(", "line", ")", ")", "if", "len", "(", "groups", ")", "==", "1", ":", "# So sad, too bad", "return", "line", "counts", "=", "list", "(", "count", "(", "x", ")", "for", "x", "in", "groups", ")", "didwrap", "=", "False", "# If the LHS is large, wrap it first", "if", "sum", "(", "counts", "[", ":", "-", "1", "]", ")", ">=", "maxline", "-", "indent", "-", "4", ":", "for", "group", "in", "groups", "[", ":", "-", "1", "]", ":", "didwrap", "=", "False", "# Only want to know about last group", "if", "len", "(", "group", ")", ">", "1", ":", "group", ".", "insert", "(", "0", ",", "'('", ")", "group", ".", "insert", "(", "-", "1", ",", "')'", ")", "didwrap", "=", "True", "# Might not need to wrap the RHS if wrapped the LHS", "if", "not", "didwrap", "or", "counts", "[", "-", "1", "]", ">", "maxline", "-", "indent", "-", "10", ":", "groups", "[", "-", "1", "]", ".", "insert", "(", "0", ",", "'('", ")", "groups", "[", "-", "1", "]", ".", "append", "(", "')'", ")", "return", "[", "item", "for", "group", "in", "groups", "for", "item", "in", "group", "]" ]
returns x y background status message
def centering_centroid ( data , xi , yi , box , nloop = 10 , toldist = 1e-3 , maxdist = 10.0 ) : # Store original center cxy = ( xi , yi ) origin = ( xi , yi ) # initial background back = 0.0 if nloop == 0 : return xi , yi , 0.0 , 0 , 'not recentering' for i in range ( nloop ) : nxy , back = _centering_centroid_loop_xy ( data , cxy , box ) # _logger.debug('new center is %s', ncenter) # if we are to far away from the initial point, break dst = distance . euclidean ( origin , nxy ) if dst > maxdist : msg = 'maximum distance (%5.2f) from origin reached' % maxdist return cxy [ 0 ] , cxy [ 1 ] , back , 2 , msg # check convergence dst = distance . euclidean ( nxy , cxy ) if dst < toldist : return nxy [ 0 ] , nxy [ 1 ] , back , 1 , 'converged in iteration %i' % i else : cxy = nxy return nxy [ 0 ] , nxy [ 1 ] , back , 3 , 'not converged in %i iterations' % nloop
3,669
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/recenter.py#L57-L94
[ "def", "get_max_events_in_both_arrays", "(", "events_one", ",", "events_two", ")", ":", "events_one", "=", "np", ".", "ascontiguousarray", "(", "events_one", ")", "# change memory alignement for c++ library", "events_two", "=", "np", ".", "ascontiguousarray", "(", "events_two", ")", "# change memory alignement for c++ library", "event_result", "=", "np", ".", "empty", "(", "shape", "=", "(", "events_one", ".", "shape", "[", "0", "]", "+", "events_two", ".", "shape", "[", "0", "]", ",", ")", ",", "dtype", "=", "events_one", ".", "dtype", ")", "count", "=", "analysis_functions", ".", "get_max_events_in_both_arrays", "(", "events_one", ",", "events_two", ",", "event_result", ")", "return", "event_result", "[", ":", "count", "]" ]
Set Cache - Control headers and Expires - header .
def cache_for ( * * timedelta_kw ) : max_age_timedelta = timedelta ( * * timedelta_kw ) def decorate_func ( func ) : @ wraps ( func ) def decorate_func_call ( * a , * * kw ) : callback = SetCacheControlHeadersFromTimedeltaCallback ( max_age_timedelta ) registry_provider = AfterThisRequestCallbackRegistryProvider ( ) registry = registry_provider . provide ( ) registry . add ( callback ) return func ( * a , * * kw ) return decorate_func_call return decorate_func
3,670
https://github.com/twiebe/Flask-CacheControl/blob/8376156fafe3358b5a1201d348afb12994172962/src/flask_cachecontrol/cache.py#L20-L37
[ "def", "path", "(", "self", ",", "target", ",", "args", ",", "kw", ")", ":", "if", "type", "(", "target", ")", "in", "string_types", ":", "if", "':'", "in", "target", ":", "# Build path a nested route name", "prefix", ",", "rest", "=", "target", ".", "split", "(", "':'", ",", "1", ")", "route", "=", "self", ".", "named_routes", "[", "prefix", "]", "prefix_params", "=", "route", ".", "_pop_params", "(", "args", ",", "kw", ")", "prefix_path", "=", "route", ".", "path", "(", "[", "]", ",", "prefix_params", ")", "next_mapper", "=", "route", ".", "resource", "return", "prefix_path", "+", "next_mapper", ".", "path", "(", "rest", ",", "args", ",", "kw", ")", "else", ":", "# Build path for a named route", "return", "self", ".", "named_routes", "[", "target", "]", ".", "path", "(", "args", ",", "kw", ")", "elif", "isinstance", "(", "target", ",", "Route", ")", ":", "# Build path for a route instance, used by build_url('.')", "for", "route", "in", "self", ".", "routes", ":", "if", "route", "is", "target", ":", "return", "route", ".", "path", "(", "args", ",", "kw", ")", "raise", "InvalidArgumentError", "(", "\"Route '%s' not found in this %s object.\"", "%", "(", "target", ",", "self", ".", "__class__", ".", "__name__", ")", ")", "else", ":", "# Build path for resource by object id", "target_id", "=", "id", "(", "target", ")", "if", "target_id", "in", "self", ".", "_lookup", ":", "return", "self", ".", "_lookup", "[", "target_id", "]", ".", "path", "(", "args", ",", "kw", ")", "raise", "InvalidArgumentError", "(", "\"No Route found for target '%s' in this %s object.\"", "%", "(", "target", ",", "self", ".", "__class__", ".", "__name__", ")", ")" ]
Set Cache - Control headers .
def cache ( * cache_control_items , * * cache_control_kw ) : cache_control_kw . update ( cache_control_items ) def decorate_func ( func ) : @ wraps ( func ) def decorate_func_call ( * a , * * kw ) : callback = SetCacheControlHeadersCallback ( * * cache_control_kw ) registry_provider = AfterThisRequestCallbackRegistryProvider ( ) registry = registry_provider . provide ( ) registry . add ( callback ) return func ( * a , * * kw ) return decorate_func_call return decorate_func
3,671
https://github.com/twiebe/Flask-CacheControl/blob/8376156fafe3358b5a1201d348afb12994172962/src/flask_cachecontrol/cache.py#L41-L66
[ "def", "path", "(", "self", ",", "target", ",", "args", ",", "kw", ")", ":", "if", "type", "(", "target", ")", "in", "string_types", ":", "if", "':'", "in", "target", ":", "# Build path a nested route name", "prefix", ",", "rest", "=", "target", ".", "split", "(", "':'", ",", "1", ")", "route", "=", "self", ".", "named_routes", "[", "prefix", "]", "prefix_params", "=", "route", ".", "_pop_params", "(", "args", ",", "kw", ")", "prefix_path", "=", "route", ".", "path", "(", "[", "]", ",", "prefix_params", ")", "next_mapper", "=", "route", ".", "resource", "return", "prefix_path", "+", "next_mapper", ".", "path", "(", "rest", ",", "args", ",", "kw", ")", "else", ":", "# Build path for a named route", "return", "self", ".", "named_routes", "[", "target", "]", ".", "path", "(", "args", ",", "kw", ")", "elif", "isinstance", "(", "target", ",", "Route", ")", ":", "# Build path for a route instance, used by build_url('.')", "for", "route", "in", "self", ".", "routes", ":", "if", "route", "is", "target", ":", "return", "route", ".", "path", "(", "args", ",", "kw", ")", "raise", "InvalidArgumentError", "(", "\"Route '%s' not found in this %s object.\"", "%", "(", "target", ",", "self", ".", "__class__", ".", "__name__", ")", ")", "else", ":", "# Build path for resource by object id", "target_id", "=", "id", "(", "target", ")", "if", "target_id", "in", "self", ".", "_lookup", ":", "return", "self", ".", "_lookup", "[", "target_id", "]", ".", "path", "(", "args", ",", "kw", ")", "raise", "InvalidArgumentError", "(", "\"No Route found for target '%s' in this %s object.\"", "%", "(", "target", ",", "self", ".", "__class__", ".", "__name__", ")", ")" ]
Set Cache - Control headers for no caching
def dont_cache ( ) : def decorate_func ( func ) : @ wraps ( func ) def decorate_func_call ( * a , * * kw ) : callback = SetCacheControlHeadersForNoCachingCallback ( ) registry_provider = AfterThisRequestCallbackRegistryProvider ( ) registry = registry_provider . provide ( ) registry . add ( callback ) return func ( * a , * * kw ) return decorate_func_call return decorate_func
3,672
https://github.com/twiebe/Flask-CacheControl/blob/8376156fafe3358b5a1201d348afb12994172962/src/flask_cachecontrol/cache.py#L70-L86
[ "def", "_get_role", "(", "rolename", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "'roles'", ",", "rolename", "+", "'.json'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "abort", "(", "\"Couldn't read role file {0}\"", ".", "format", "(", "path", ")", ")", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "try", ":", "role", "=", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ")", "except", "ValueError", "as", "e", ":", "msg", "=", "\"Little Chef found the following error in your\"", "msg", "+=", "\" {0}.json file:\\n {1}\"", ".", "format", "(", "rolename", ",", "str", "(", "e", ")", ")", "abort", "(", "msg", ")", "role", "[", "'fullname'", "]", "=", "rolename", "return", "role" ]
Decorator that is filtering empty parameters .
def filter_empty_parameters ( func ) : @ wraps ( func ) def func_wrapper ( self , * args , * * kwargs ) : my_kwargs = { key : value for key , value in kwargs . items ( ) if value not in EMPTIES } args_is_empty = all ( arg in EMPTIES for arg in args ) if ( { 'source' , 'material' } . issuperset ( my_kwargs ) or not my_kwargs ) and args_is_empty : return return func ( self , * args , * * my_kwargs ) return func_wrapper
3,673
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L330-L348
[ "def", "extract_journal_reference", "(", "line", ",", "override_kbs_files", "=", "None", ")", ":", "kbs", "=", "get_kbs", "(", "custom_kbs_files", "=", "override_kbs_files", ")", "references", ",", "dummy_m", ",", "dummy_c", ",", "dummy_co", "=", "parse_reference_line", "(", "line", ",", "kbs", ")", "for", "elements", "in", "references", ":", "for", "el", "in", "elements", ":", "if", "el", "[", "'type'", "]", "==", "'JOURNAL'", ":", "return", "el" ]
Detect and normalize an author UID schema .
def author_id_normalize_and_schema ( uid , schema = None ) : def _get_uid_normalized_in_schema ( _uid , _schema ) : regex , template = _RE_AUTHORS_UID [ _schema ] match = regex . match ( _uid ) if match : return template . format ( match . group ( 'uid' ) ) if idutils . is_orcid ( uid ) and schema in ( None , 'ORCID' ) : return idutils . normalize_orcid ( uid ) , 'ORCID' if schema and schema not in _RE_AUTHORS_UID : # Schema explicitly specified, but this function can't handle it raise UnknownUIDSchema ( uid ) if schema : normalized_uid = _get_uid_normalized_in_schema ( uid , schema ) if normalized_uid : return normalized_uid , schema else : raise SchemaUIDConflict ( schema , uid ) match_schema , normalized_uid = None , None for candidate_schema in _RE_AUTHORS_UID : candidate_uid = _get_uid_normalized_in_schema ( uid , candidate_schema ) if candidate_uid : if match_schema : # Valid against more than one candidate schema, ambiguous raise UnknownUIDSchema ( uid ) match_schema = candidate_schema normalized_uid = candidate_uid if match_schema : return normalized_uid , match_schema # No guessess have been found raise UnknownUIDSchema ( uid )
3,674
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L351-L401
[ "def", "check_events", "(", "self", ",", "timeout", "=", "None", ")", ":", "while", "True", ":", "try", ":", "# blocks up to 'timeout' milliseconds", "if", "timeout", "is", "None", ":", "timeout", "=", "self", ".", "_timeout", "ret", "=", "self", ".", "_pollobj", ".", "poll", "(", "timeout", ")", "except", "select", ".", "error", ",", "err", ":", "if", "err", "[", "0", "]", "==", "errno", ".", "EINTR", ":", "continue", "# interrupted, retry", "else", ":", "raise", "else", ":", "break", "if", "not", "ret", "or", "(", "self", ".", "_pipe", "[", "0", "]", "==", "ret", "[", "0", "]", "[", "0", "]", ")", ":", "return", "False", "# only one fd is polled", "return", "ret", "[", "0", "]", "[", "1", "]", "&", "select", ".", "POLLIN" ]
Normalize arXiv category to be schema compliant .
def normalize_arxiv_category ( category ) : category = _NEW_CATEGORIES . get ( category . lower ( ) , category ) for valid_category in valid_arxiv_categories ( ) : if ( category . lower ( ) == valid_category . lower ( ) or category . lower ( ) . replace ( '-' , '.' ) == valid_category . lower ( ) ) : return valid_category return category
3,675
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L404-L422
[ "def", "receive_message", "(", "self", ",", "message", ",", "data", ")", ":", "# noqa: E501 pylint: disable=too-many-return-statements", "if", "data", "[", "MESSAGE_TYPE", "]", "==", "TYPE_DEVICE_ADDED", ":", "uuid", "=", "data", "[", "'device'", "]", "[", "'deviceId'", "]", "name", "=", "data", "[", "'device'", "]", "[", "'name'", "]", "self", ".", "_add_member", "(", "uuid", ",", "name", ")", "return", "True", "if", "data", "[", "MESSAGE_TYPE", "]", "==", "TYPE_DEVICE_REMOVED", ":", "uuid", "=", "data", "[", "'deviceId'", "]", "self", ".", "_remove_member", "(", "uuid", ")", "return", "True", "if", "data", "[", "MESSAGE_TYPE", "]", "==", "TYPE_DEVICE_UPDATED", ":", "uuid", "=", "data", "[", "'device'", "]", "[", "'deviceId'", "]", "name", "=", "data", "[", "'device'", "]", "[", "'name'", "]", "self", ".", "_add_member", "(", "uuid", ",", "name", ")", "return", "True", "if", "data", "[", "MESSAGE_TYPE", "]", "==", "TYPE_MULTIZONE_STATUS", ":", "members", "=", "data", "[", "'status'", "]", "[", "'devices'", "]", "members", "=", "{", "member", "[", "'deviceId'", "]", ":", "member", "[", "'name'", "]", "for", "member", "in", "members", "}", "removed_members", "=", "list", "(", "set", "(", "self", ".", "_members", ".", "keys", "(", ")", ")", "-", "set", "(", "members", ".", "keys", "(", ")", ")", ")", "added_members", "=", "list", "(", "set", "(", "members", ".", "keys", "(", ")", ")", "-", "set", "(", "self", ".", "_members", ".", "keys", "(", ")", ")", ")", "_LOGGER", ".", "debug", "(", "\"(%s) Added members %s, Removed members: %s\"", ",", "self", ".", "_uuid", ",", "added_members", ",", "removed_members", ")", "for", "uuid", "in", "removed_members", ":", "self", ".", "_remove_member", "(", "uuid", ")", "for", "uuid", "in", "added_members", ":", "self", ".", "_add_member", "(", "uuid", ",", "members", "[", "uuid", "]", ")", "for", "listener", "in", "list", "(", "self", ".", "_status_listeners", ")", ":", "listener", ".", "multizone_status_received", "(", ")", 
"return", "True", "if", "data", "[", "MESSAGE_TYPE", "]", "==", "TYPE_SESSION_UPDATED", ":", "# A temporary group has been formed", "return", "True", "if", "data", "[", "MESSAGE_TYPE", "]", "==", "TYPE_CASTING_GROUPS", ":", "# Answer to GET_CASTING_GROUPS", "return", "True", "return", "False" ]
List of all arXiv categories that ever existed .
def valid_arxiv_categories ( ) : schema = load_schema ( 'elements/arxiv_categories' ) categories = schema [ 'enum' ] categories . extend ( _NEW_CATEGORIES . keys ( ) ) return categories
3,676
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L425-L438
[ "def", "load_toml_rest_api_config", "(", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "LOGGER", ".", "info", "(", "\"Skipping rest api loading from non-existent config file: %s\"", ",", "filename", ")", "return", "RestApiConfig", "(", ")", "LOGGER", ".", "info", "(", "\"Loading rest api information from config: %s\"", ",", "filename", ")", "try", ":", "with", "open", "(", "filename", ")", "as", "fd", ":", "raw_config", "=", "fd", ".", "read", "(", ")", "except", "IOError", "as", "e", ":", "raise", "RestApiConfigurationError", "(", "\"Unable to load rest api configuration file: {}\"", ".", "format", "(", "str", "(", "e", ")", ")", ")", "toml_config", "=", "toml", ".", "loads", "(", "raw_config", ")", "invalid_keys", "=", "set", "(", "toml_config", ".", "keys", "(", ")", ")", ".", "difference", "(", "[", "'bind'", ",", "'connect'", ",", "'timeout'", ",", "'opentsdb_db'", ",", "'opentsdb_url'", ",", "'opentsdb_username'", ",", "'opentsdb_password'", ",", "'client_max_size'", "]", ")", "if", "invalid_keys", ":", "raise", "RestApiConfigurationError", "(", "\"Invalid keys in rest api config: {}\"", ".", "format", "(", "\", \"", ".", "join", "(", "sorted", "(", "list", "(", "invalid_keys", ")", ")", ")", ")", ")", "config", "=", "RestApiConfig", "(", "bind", "=", "toml_config", ".", "get", "(", "\"bind\"", ",", "None", ")", ",", "connect", "=", "toml_config", ".", "get", "(", "'connect'", ",", "None", ")", ",", "timeout", "=", "toml_config", ".", "get", "(", "'timeout'", ",", "None", ")", ",", "opentsdb_url", "=", "toml_config", ".", "get", "(", "'opentsdb_url'", ",", "None", ")", ",", "opentsdb_db", "=", "toml_config", ".", "get", "(", "'opentsdb_db'", ",", "None", ")", ",", "opentsdb_username", "=", "toml_config", ".", "get", "(", "'opentsdb_username'", ",", "None", ")", ",", "opentsdb_password", "=", "toml_config", ".", "get", "(", "'opentsdb_password'", ",", "None", ")", ",", "client_max_size", "=", "toml_config", 
".", "get", "(", "'client_max_size'", ",", "None", ")", ")", "return", "config" ]
Normalize value to an Inspire category .
def classify_field ( value ) : if not ( isinstance ( value , six . string_types ) and value ) : return schema = load_schema ( 'elements/inspire_field' ) inspire_categories = schema [ 'properties' ] [ 'term' ] [ 'enum' ] for inspire_category in inspire_categories : if value . upper ( ) == inspire_category . upper ( ) : return inspire_category category = normalize_arxiv_category ( value ) return ARXIV_TO_INSPIRE_CATEGORY_MAPPING . get ( category , 'Other' )
3,677
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L441-L464
[ "def", "get_user_last_submissions", "(", "self", ",", "limit", "=", "5", ",", "request", "=", "None", ")", ":", "if", "request", "is", "None", ":", "request", "=", "{", "}", "request", ".", "update", "(", "{", "\"username\"", ":", "self", ".", "_user_manager", ".", "session_username", "(", ")", "}", ")", "# Before, submissions were first sorted by submission date, then grouped", "# and then resorted by submission date before limiting. Actually, grouping", "# and pushing, keeping the max date, followed by result filtering is much more", "# efficient", "data", "=", "self", ".", "_database", ".", "submissions", ".", "aggregate", "(", "[", "{", "\"$match\"", ":", "request", "}", ",", "{", "\"$group\"", ":", "{", "\"_id\"", ":", "{", "\"courseid\"", ":", "\"$courseid\"", ",", "\"taskid\"", ":", "\"$taskid\"", "}", ",", "\"submitted_on\"", ":", "{", "\"$max\"", ":", "\"$submitted_on\"", "}", ",", "\"submissions\"", ":", "{", "\"$push\"", ":", "{", "\"_id\"", ":", "\"$_id\"", ",", "\"result\"", ":", "\"$result\"", ",", "\"status\"", ":", "\"$status\"", ",", "\"courseid\"", ":", "\"$courseid\"", ",", "\"taskid\"", ":", "\"$taskid\"", ",", "\"submitted_on\"", ":", "\"$submitted_on\"", "}", "}", ",", "}", "}", ",", "{", "\"$project\"", ":", "{", "\"submitted_on\"", ":", "1", ",", "\"submissions\"", ":", "{", "# This could be replaced by $filter if mongo v3.2 is set as dependency", "\"$setDifference\"", ":", "[", "{", "\"$map\"", ":", "{", "\"input\"", ":", "\"$submissions\"", ",", "\"as\"", ":", "\"submission\"", ",", "\"in\"", ":", "{", "\"$cond\"", ":", "[", "{", "\"$eq\"", ":", "[", "\"$submitted_on\"", ",", "\"$$submission.submitted_on\"", "]", "}", ",", "\"$$submission\"", ",", "False", "]", "}", "}", "}", ",", "[", "False", "]", "]", "}", "}", "}", ",", "{", "\"$sort\"", ":", "{", "\"submitted_on\"", ":", "pymongo", ".", "DESCENDING", "}", "}", ",", "{", "\"$limit\"", ":", "limit", "}", "]", ")", "return", "[", "item", "[", "\"submissions\"", "]", "[", "0", "]", 
"for", "item", "in", "data", "]" ]
Split pubnote into journal information .
def split_pubnote ( pubnote_str ) : pubnote = { } parts = pubnote_str . split ( ',' ) if len ( parts ) > 2 : pubnote [ 'journal_title' ] = parts [ 0 ] pubnote [ 'journal_volume' ] = parts [ 1 ] pubnote [ 'page_start' ] , pubnote [ 'page_end' ] , pubnote [ 'artid' ] = split_page_artid ( parts [ 2 ] ) return { key : val for ( key , val ) in six . iteritems ( pubnote ) if val is not None }
3,678
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L501-L511
[ "def", "update_sandbox_product", "(", "self", ",", "product_id", ",", "surge_multiplier", "=", "None", ",", "drivers_available", "=", "None", ",", ")", ":", "args", "=", "{", "'surge_multiplier'", ":", "surge_multiplier", ",", "'drivers_available'", ":", "drivers_available", ",", "}", "endpoint", "=", "'v1.2/sandbox/products/{}'", ".", "format", "(", "product_id", ")", "return", "self", ".", "_api_call", "(", "'PUT'", ",", "endpoint", ",", "args", "=", "args", ")" ]
Retrieve the installed path for the given schema .
def get_schema_path ( schema , resolved = False ) : def _strip_first_path_elem ( path ) : """Pass doctests. Strip the first element of the given path, returning an empty string if there are no more elements. For example, 'something/other' will end up as 'other', but passing then 'other' will return '' """ stripped_path = path . split ( os . path . sep , 1 ) [ 1 : ] return '' . join ( stripped_path ) def _schema_to_normalized_path ( schema ) : """Pass doctests. Extracts the path from the url, makes sure to get rid of any '..' in the path and adds the json extension if not there. """ path = os . path . normpath ( os . path . sep + urlsplit ( schema ) . path ) if path . startswith ( os . path . sep ) : path = path [ 1 : ] if not path . endswith ( '.json' ) : path += '.json' return path path = _schema_to_normalized_path ( schema ) while path : if resolved : schema_path = os . path . abspath ( os . path . join ( _resolved_schema_root_path , path ) ) else : schema_path = os . path . abspath ( os . path . join ( _schema_root_path , path ) ) if os . path . exists ( schema_path ) : return os . path . abspath ( schema_path ) path = _strip_first_path_elem ( path ) raise SchemaNotFound ( schema = schema )
3,679
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L545-L598
[ "def", "register_variable", "(", "self", ",", "v", ",", "key", ",", "eternal", "=", "True", ")", ":", "if", "type", "(", "key", ")", "is", "not", "tuple", ":", "raise", "TypeError", "(", "\"Variable tracking key must be a tuple\"", ")", "if", "eternal", ":", "self", ".", "eternal_tracked_variables", "[", "key", "]", "=", "v", "else", ":", "self", ".", "temporal_tracked_variables", "=", "dict", "(", "self", ".", "temporal_tracked_variables", ")", "ctrkey", "=", "key", "+", "(", "None", ",", ")", "ctrval", "=", "self", ".", "temporal_tracked_variables", ".", "get", "(", "ctrkey", ",", "0", ")", "+", "1", "self", ".", "temporal_tracked_variables", "[", "ctrkey", "]", "=", "ctrval", "tempkey", "=", "key", "+", "(", "ctrval", ",", ")", "self", ".", "temporal_tracked_variables", "[", "tempkey", "]", "=", "v" ]
Load the given schema from wherever it s installed .
def load_schema ( schema_name , resolved = False ) : schema_data = '' with open ( get_schema_path ( schema_name , resolved ) ) as schema_fd : schema_data = json . loads ( schema_fd . read ( ) ) return schema_data
3,680
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L601-L616
[ "def", "_check_for_boolean_pair_reduction", "(", "self", ",", "kwargs", ")", ":", "if", "'reduction_forcing_pairs'", "in", "self", ".", "_meta_data", ":", "for", "key1", ",", "key2", "in", "self", ".", "_meta_data", "[", "'reduction_forcing_pairs'", "]", ":", "kwargs", "=", "self", ".", "_reduce_boolean_pair", "(", "kwargs", ",", "key1", ",", "key2", ")", "return", "kwargs" ]
Load the schema from a given record .
def _load_schema_for_record ( data , schema = None ) : if schema is None : if '$schema' not in data : raise SchemaKeyNotFound ( data = data ) schema = data [ '$schema' ] if isinstance ( schema , six . string_types ) : schema = load_schema ( schema_name = schema ) return schema
3,681
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L627-L650
[ "def", "stop_experiment", "(", "args", ")", ":", "experiment_id_list", "=", "parse_ids", "(", "args", ")", "if", "experiment_id_list", ":", "experiment_config", "=", "Experiments", "(", ")", "experiment_dict", "=", "experiment_config", ".", "get_all_experiments", "(", ")", "for", "experiment_id", "in", "experiment_id_list", ":", "print_normal", "(", "'Stoping experiment %s'", "%", "experiment_id", ")", "nni_config", "=", "Config", "(", "experiment_dict", "[", "experiment_id", "]", "[", "'fileName'", "]", ")", "rest_port", "=", "nni_config", ".", "get_config", "(", "'restServerPort'", ")", "rest_pid", "=", "nni_config", ".", "get_config", "(", "'restServerPid'", ")", "if", "rest_pid", ":", "kill_command", "(", "rest_pid", ")", "tensorboard_pid_list", "=", "nni_config", ".", "get_config", "(", "'tensorboardPidList'", ")", "if", "tensorboard_pid_list", ":", "for", "tensorboard_pid", "in", "tensorboard_pid_list", ":", "try", ":", "kill_command", "(", "tensorboard_pid", ")", "except", "Exception", "as", "exception", ":", "print_error", "(", "exception", ")", "nni_config", ".", "set_config", "(", "'tensorboardPidList'", ",", "[", "]", ")", "print_normal", "(", "'Stop experiment success!'", ")", "experiment_config", ".", "update_experiment", "(", "experiment_id", ",", "'status'", ",", "'STOPPED'", ")", "time_now", "=", "time", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ",", "time", ".", "localtime", "(", "time", ".", "time", "(", ")", ")", ")", "experiment_config", ".", "update_experiment", "(", "experiment_id", ",", "'endTime'", ",", "str", "(", "time_now", ")", ")" ]
Validate the given dictionary against the given schema .
def validate ( data , schema = None ) : schema = _load_schema_for_record ( data , schema ) return jsonschema_validate ( instance = data , schema = schema , resolver = LocalRefResolver . from_schema ( schema ) , format_checker = inspire_format_checker , )
3,682
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L653-L678
[ "def", "_parse_guild_members", "(", "self", ",", "parsed_content", ")", ":", "member_rows", "=", "parsed_content", ".", "find_all", "(", "\"tr\"", ",", "{", "'bgcolor'", ":", "[", "\"#D4C0A1\"", ",", "\"#F1E0C6\"", "]", "}", ")", "previous_rank", "=", "{", "}", "for", "row", "in", "member_rows", ":", "columns", "=", "row", ".", "find_all", "(", "'td'", ")", "values", "=", "tuple", "(", "c", ".", "text", ".", "replace", "(", "\"\\u00a0\"", ",", "\" \"", ")", "for", "c", "in", "columns", ")", "if", "len", "(", "columns", ")", "==", "COLS_GUILD_MEMBER", ":", "self", ".", "_parse_current_member", "(", "previous_rank", ",", "values", ")", "if", "len", "(", "columns", ")", "==", "COLS_INVITED_MEMBER", ":", "self", ".", "_parse_invited_member", "(", "values", ")" ]
Validation errors for a given record .
def get_validation_errors ( data , schema = None ) : schema = _load_schema_for_record ( data , schema ) errors = Draft4Validator ( schema , resolver = LocalRefResolver . from_schema ( schema ) , format_checker = inspire_format_checker ) return errors . iter_errors ( data )
3,683
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L681-L707
[ "def", "mv_connect_generators", "(", "mv_grid_district", ",", "graph", ",", "debug", "=", "False", ")", ":", "generator_buffer_radius", "=", "cfg_ding0", ".", "get", "(", "'mv_connect'", ",", "'generator_buffer_radius'", ")", "generator_buffer_radius_inc", "=", "cfg_ding0", ".", "get", "(", "'mv_connect'", ",", "'generator_buffer_radius_inc'", ")", "# WGS84 (conformal) to ETRS (equidistant) projection", "proj1", "=", "partial", "(", "pyproj", ".", "transform", ",", "pyproj", ".", "Proj", "(", "init", "=", "'epsg:4326'", ")", ",", "# source coordinate system", "pyproj", ".", "Proj", "(", "init", "=", "'epsg:3035'", ")", ")", "# destination coordinate system", "# ETRS (equidistant) to WGS84 (conformal) projection", "proj2", "=", "partial", "(", "pyproj", ".", "transform", ",", "pyproj", ".", "Proj", "(", "init", "=", "'epsg:3035'", ")", ",", "# source coordinate system", "pyproj", ".", "Proj", "(", "init", "=", "'epsg:4326'", ")", ")", "# destination coordinate system", "for", "generator", "in", "sorted", "(", "mv_grid_district", ".", "mv_grid", ".", "generators", "(", ")", ",", "key", "=", "lambda", "x", ":", "repr", "(", "x", ")", ")", ":", "# ===== voltage level 4: generator has to be connected to MV station =====", "if", "generator", ".", "v_level", "==", "4", ":", "mv_station", "=", "mv_grid_district", ".", "mv_grid", ".", "station", "(", ")", "branch_length", "=", "calc_geo_dist_vincenty", "(", "generator", ",", "mv_station", ")", "# TODO: set branch type to something reasonable (to be calculated)", "branch_kind", "=", "mv_grid_district", ".", "mv_grid", ".", "default_branch_kind", "branch_type", "=", "mv_grid_district", ".", "mv_grid", ".", "default_branch_type", "branch", "=", "BranchDing0", "(", "length", "=", "branch_length", ",", "kind", "=", "branch_kind", ",", "type", "=", "branch_type", ",", "ring", "=", "None", ")", "graph", ".", "add_edge", "(", "generator", ",", "mv_station", ",", "branch", "=", "branch", ")", "if", "debug", ":", "logger", ".", 
"debug", "(", "'Generator {0} was connected to {1}'", ".", "format", "(", "generator", ",", "mv_station", ")", ")", "# ===== voltage level 5: generator has to be connected to MV grid (next-neighbor) =====", "elif", "generator", ".", "v_level", "==", "5", ":", "generator_shp", "=", "transform", "(", "proj1", ",", "generator", ".", "geo_data", ")", "# get branches within a the predefined radius `generator_buffer_radius`", "branches", "=", "calc_geo_branches_in_buffer", "(", "generator", ",", "mv_grid_district", ".", "mv_grid", ",", "generator_buffer_radius", ",", "generator_buffer_radius_inc", ",", "proj1", ")", "# calc distance between generator and grid's lines -> find nearest line", "conn_objects_min_stack", "=", "find_nearest_conn_objects", "(", "generator_shp", ",", "branches", ",", "proj1", ",", "conn_dist_weight", "=", "1", ",", "debug", "=", "debug", ",", "branches_only", "=", "False", ")", "# connect!", "# go through the stack (from nearest to most far connection target object)", "generator_connected", "=", "False", "for", "dist_min_obj", "in", "conn_objects_min_stack", ":", "# Note 1: conn_dist_ring_mod=0 to avoid re-routing of existent lines", "# Note 2: In connect_node(), the default cable/line type of grid is used. This is reasonable since", "# the max. allowed power of the smallest possible cable/line type (3.64 MVA for overhead", "# line of type 48-AL1/8-ST1A) exceeds the max. 
allowed power of a generator (4.5 MVA (dena))", "# (if connected separately!)", "target_obj_result", "=", "connect_node", "(", "generator", ",", "generator_shp", ",", "mv_grid_district", ".", "mv_grid", ",", "dist_min_obj", ",", "proj2", ",", "graph", ",", "conn_dist_ring_mod", "=", "0", ",", "debug", "=", "debug", ")", "if", "target_obj_result", "is", "not", "None", ":", "if", "debug", ":", "logger", ".", "debug", "(", "'Generator {0} was connected to {1}'", ".", "format", "(", "generator", ",", "target_obj_result", ")", ")", "generator_connected", "=", "True", "break", "if", "not", "generator_connected", "and", "debug", ":", "logger", ".", "debug", "(", "'Generator {0} could not be connected, try to '", "'increase the parameter `generator_buffer_radius` in '", "'config file `config_calc.cfg` to gain more possible '", "'connection points.'", ".", "format", "(", "generator", ")", ")", "return", "graph" ]
Normalize collaboration string .
def normalize_collaboration ( collaboration ) : if not collaboration : return [ ] collaboration = collaboration . strip ( ) if collaboration . startswith ( '(' ) and collaboration . endswith ( ')' ) : collaboration = collaboration [ 1 : - 1 ] collaborations = _RE_AND . split ( collaboration ) collaborations = ( _RE_COLLABORATION_LEADING . sub ( '' , collab ) for collab in collaborations ) collaborations = ( _RE_COLLABORATION_TRAILING . sub ( '' , collab ) for collab in collaborations ) return [ collab . strip ( ) for collab in collaborations ]
3,684
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L710-L737
[ "async", "def", "set_max_ch_setpoint", "(", "self", ",", "temperature", ",", "timeout", "=", "OTGW_DEFAULT_TIMEOUT", ")", ":", "cmd", "=", "OTGW_CMD_SET_MAX", "status", "=", "{", "}", "ret", "=", "await", "self", ".", "_wait_for_cmd", "(", "cmd", ",", "temperature", ",", "timeout", ")", "if", "ret", "is", "None", ":", "return", "ret", "=", "float", "(", "ret", ")", "status", "[", "DATA_MAX_CH_SETPOINT", "]", "=", "ret", "self", ".", "_update_status", "(", "status", ")", "return", "ret" ]
Get the license abbreviation from an URL .
def get_license_from_url ( url ) : if not url : return split_url = urlsplit ( url , scheme = 'http' ) if split_url . netloc . lower ( ) == 'creativecommons.org' : if 'publicdomain' in split_url . path : match = _RE_PUBLIC_DOMAIN_URL . match ( split_url . path ) if match is None : license = [ 'public domain' ] else : license = [ 'CC0' ] license . extend ( part for part in match . groups ( ) if part ) else : license = [ 'CC' ] match = _RE_LICENSE_URL . match ( split_url . path ) license . extend ( part . upper ( ) for part in match . groups ( ) if part ) elif split_url . netloc == 'arxiv.org' : license = [ 'arXiv' ] match = _RE_LICENSE_URL . match ( split_url . path ) license . extend ( part for part in match . groups ( ) if part ) else : raise ValueError ( 'Unknown license URL' ) return u' ' . join ( license )
3,685
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L740-L776
[ "def", "blob_data_to_dict", "(", "stat_names", ",", "blobs", ")", ":", "# get the dtypes of each of the stats; we'll just take this from the", "# first iteration and walker", "dtypes", "=", "[", "type", "(", "val", ")", "for", "val", "in", "blobs", "[", "0", "]", "[", "0", "]", "]", "assert", "len", "(", "stat_names", ")", "==", "len", "(", "dtypes", ")", ",", "(", "\"number of stat names must match length of tuples in the blobs\"", ")", "# convert to an array; to ensure that we get the dtypes correct, we'll", "# cast to a structured array", "raw_stats", "=", "numpy", ".", "array", "(", "blobs", ",", "dtype", "=", "zip", "(", "stat_names", ",", "dtypes", ")", ")", "# transpose so that it has shape nwalkers x niterations", "raw_stats", "=", "raw_stats", ".", "transpose", "(", ")", "# now return as a dictionary", "return", "{", "stat", ":", "raw_stats", "[", "stat", "]", "for", "stat", "in", "stat_names", "}" ]
Convert a publication_info value from the old format to the new .
def convert_old_publication_info_to_new ( publication_infos ) : result = [ ] hidden_publication_infos = [ ] for publication_info in publication_infos : _publication_info = copy . deepcopy ( publication_info ) journal_title = _publication_info . get ( 'journal_title' ) try : journal_title = _JOURNALS_RENAMED_OLD_TO_NEW [ journal_title ] _publication_info [ 'journal_title' ] = journal_title result . append ( _publication_info ) continue except KeyError : pass journal_volume = _publication_info . get ( 'journal_volume' ) if journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME and journal_volume and len ( journal_volume ) == 4 : try : was_last_century = int ( journal_volume [ : 2 ] ) > 50 except ValueError : pass else : _publication_info [ 'year' ] = int ( '19' + journal_volume [ : 2 ] if was_last_century else '20' + journal_volume [ : 2 ] ) _publication_info [ 'journal_volume' ] = journal_volume [ 2 : ] result . append ( _publication_info ) continue if journal_title and journal_volume and journal_title . lower ( ) not in JOURNALS_IGNORED_IN_OLD_TO_NEW : volume_starts_with_a_letter = _RE_VOLUME_STARTS_WITH_A_LETTER . match ( journal_volume ) volume_ends_with_a_letter = _RE_VOLUME_ENDS_WITH_A_LETTER . match ( journal_volume ) match = volume_starts_with_a_letter or volume_ends_with_a_letter if match : _publication_info . pop ( 'journal_record' , None ) if journal_title in _JOURNALS_RENAMED_OLD_TO_NEW . values ( ) : _publication_info [ 'journal_title' ] = journal_title else : _publication_info [ 'journal_title' ] = '' . join ( [ journal_title , '' if journal_title . endswith ( '.' ) else ' ' , match . group ( 'letter' ) , ] ) _publication_info [ 'journal_volume' ] = match . group ( 'volume' ) hidden = _publication_info . pop ( 'hidden' , None ) if hidden : hidden_publication_infos . append ( _publication_info ) else : result . 
append ( _publication_info ) for publication_info in hidden_publication_infos : if publication_info not in result : publication_info [ 'hidden' ] = True result . append ( publication_info ) return result
3,686
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L779-L872
[ "def", "remove_system", "(", "self", ",", "system", ")", ":", "if", "system", "in", "self", ".", "_systems", ":", "self", ".", "_systems", ".", "remove", "(", "system", ")", "else", ":", "raise", "UnmanagedSystemError", "(", "system", ")" ]
Convert back a publication_info value from the new format to the old .
def convert_new_publication_info_to_old ( publication_infos ) : def _needs_a_hidden_pubnote ( journal_title , journal_volume ) : return ( journal_title in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE and journal_volume in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE [ journal_title ] ) result = [ ] for publication_info in publication_infos : _publication_info = copy . deepcopy ( publication_info ) journal_title = _publication_info . get ( 'journal_title' ) try : journal_title = _JOURNALS_RENAMED_NEW_TO_OLD [ journal_title ] _publication_info [ 'journal_title' ] = journal_title result . append ( _publication_info ) continue except KeyError : pass journal_volume = _publication_info . get ( 'journal_volume' ) year = _publication_info . get ( 'year' ) if ( journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME and year and journal_volume and len ( journal_volume ) == 2 ) : two_digit_year = str ( year ) [ 2 : ] _publication_info [ 'journal_volume' ] = '' . join ( [ two_digit_year , journal_volume ] ) result . append ( _publication_info ) continue if journal_title and journal_volume : match = _RE_TITLE_ENDS_WITH_A_LETTER . match ( journal_title ) if match and _needs_a_hidden_pubnote ( journal_title , journal_volume ) : _publication_info [ 'journal_title' ] = match . group ( 'title' ) _publication_info [ 'journal_volume' ] = journal_volume + match . group ( 'letter' ) result . append ( _publication_info ) _publication_info = copy . deepcopy ( publication_info ) _publication_info [ 'hidden' ] = True _publication_info [ 'journal_title' ] = match . group ( 'title' ) _publication_info [ 'journal_volume' ] = match . group ( 'letter' ) + journal_volume elif match and journal_title not in _JOURNALS_ALREADY_ENDING_WITH_A_LETTER : _publication_info [ 'journal_title' ] = match . group ( 'title' ) _publication_info [ 'journal_volume' ] = match . group ( 'letter' ) + journal_volume result . append ( _publication_info ) return result
3,687
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L875-L934
[ "def", "state_not_literal", "(", "self", ",", "value", ")", ":", "value", "=", "negate", "=", "chr", "(", "value", ")", "while", "value", "==", "negate", ":", "value", "=", "choice", "(", "self", ".", "literals", ")", "yield", "value" ]
Used to parse an incorect url to try to fix it with the most common ocurrences for errors . If the fixed url is still incorrect it returns None .
def fix_reference_url ( url ) : new_url = url new_url = fix_url_bars_instead_of_slashes ( new_url ) new_url = fix_url_add_http_if_missing ( new_url ) new_url = fix_url_replace_tilde ( new_url ) try : rfc3987 . parse ( new_url , rule = "URI" ) return new_url except ValueError : return url
3,688
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L957-L976
[ "def", "PopEvents", "(", "self", ")", ":", "event", "=", "self", ".", "PopEvent", "(", ")", "while", "event", ":", "yield", "event", "event", "=", "self", ".", "PopEvent", "(", ")" ]
Return True if obj contains an arXiv identifier .
def is_arxiv ( obj ) : arxiv_test = obj . split ( ) if not arxiv_test : return False matched_arxiv = ( RE_ARXIV_PRE_2007_CLASS . match ( arxiv_test [ 0 ] ) or RE_ARXIV_POST_2007_CLASS . match ( arxiv_test [ 0 ] ) ) if not matched_arxiv : return False if not matched_arxiv . group ( 'category' ) : return True valid_arxiv_categories_lower = [ category . lower ( ) for category in valid_arxiv_categories ( ) ] category = matched_arxiv . group ( 'category' ) . lower ( ) return ( category in valid_arxiv_categories_lower or category . replace ( '-' , '.' ) in valid_arxiv_categories_lower )
3,689
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L987-L1009
[ "def", "stop", "(", "self", ")", ":", "self", ".", "shutdown", ".", "set", "(", ")", "for", "monitor", "in", "self", ".", "observers", ":", "monitor", ".", "stop", "(", ")", "self", ".", "wind_down", "(", ")", "for", "monitor", "in", "self", ".", "observers", ":", "monitor", ".", "join", "(", ")", "for", "thread", "in", "self", ".", "thread_pool", ".", "values", "(", ")", ":", "thread", ".", "join", "(", ")", "self", ".", "work_pool", ".", "shutdown", "(", ")" ]
Return a normalized arXiv identifier from obj .
def normalize_arxiv ( obj ) : obj = obj . split ( ) [ 0 ] matched_arxiv_pre = RE_ARXIV_PRE_2007_CLASS . match ( obj ) if matched_arxiv_pre : return ( '/' . join ( matched_arxiv_pre . group ( "extraidentifier" , "identifier" ) ) ) . lower ( ) matched_arxiv_post = RE_ARXIV_POST_2007_CLASS . match ( obj ) if matched_arxiv_post : return matched_arxiv_post . group ( "identifier" ) return None
3,690
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L1012-L1024
[ "def", "remove_all_callbacks", "(", "self", ")", ":", "for", "cb_id", "in", "list", "(", "self", ".", "_next_tick_callback_removers", ".", "keys", "(", ")", ")", ":", "self", ".", "remove_next_tick_callback", "(", "cb_id", ")", "for", "cb_id", "in", "list", "(", "self", ".", "_timeout_callback_removers", ".", "keys", "(", ")", ")", ":", "self", ".", "remove_timeout_callback", "(", "cb_id", ")", "for", "cb_id", "in", "list", "(", "self", ".", "_periodic_callback_removers", ".", "keys", "(", ")", ")", ":", "self", ".", "remove_periodic_callback", "(", "cb_id", ")" ]
Resolve a uri or relative path to a schema .
def resolve_remote ( self , uri ) : try : return super ( LocalRefResolver , self ) . resolve_remote ( uri ) except ValueError : return super ( LocalRefResolver , self ) . resolve_remote ( 'file://' + get_schema_path ( uri . rsplit ( '.json' , 1 ) [ 0 ] ) )
3,691
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L535-L542
[ "def", "binary_operator", "(", "op", ")", ":", "# When combining a Filter with a NumericalExpression, we use this", "# attrgetter instance to defer to the commuted interpretation of the", "# NumericalExpression operator.", "commuted_method_getter", "=", "attrgetter", "(", "method_name_for_op", "(", "op", ",", "commute", "=", "True", ")", ")", "def", "binary_operator", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "self", ",", "NumericalExpression", ")", ":", "self_expr", ",", "other_expr", ",", "new_inputs", "=", "self", ".", "build_binary_op", "(", "op", ",", "other", ",", ")", "return", "NumExprFilter", ".", "create", "(", "\"({left}) {op} ({right})\"", ".", "format", "(", "left", "=", "self_expr", ",", "op", "=", "op", ",", "right", "=", "other_expr", ",", ")", ",", "new_inputs", ",", ")", "elif", "isinstance", "(", "other", ",", "NumericalExpression", ")", ":", "# NumericalExpression overrides numerical ops to correctly handle", "# merging of inputs. Look up and call the appropriate", "# right-binding operator with ourself as the input.", "return", "commuted_method_getter", "(", "other", ")", "(", "self", ")", "elif", "isinstance", "(", "other", ",", "Term", ")", ":", "if", "other", ".", "dtype", "!=", "bool_dtype", ":", "raise", "BadBinaryOperator", "(", "op", ",", "self", ",", "other", ")", "if", "self", "is", "other", ":", "return", "NumExprFilter", ".", "create", "(", "\"x_0 {op} x_0\"", ".", "format", "(", "op", "=", "op", ")", ",", "(", "self", ",", ")", ",", ")", "return", "NumExprFilter", ".", "create", "(", "\"x_0 {op} x_1\"", ".", "format", "(", "op", "=", "op", ")", ",", "(", "self", ",", "other", ")", ",", ")", "elif", "isinstance", "(", "other", ",", "int", ")", ":", "# Note that this is true for bool as well", "return", "NumExprFilter", ".", "create", "(", "\"x_0 {op} {constant}\"", ".", "format", "(", "op", "=", "op", ",", "constant", "=", "int", "(", "other", ")", ")", ",", "binds", "=", "(", "self", ",", ")", ",", ")", 
"raise", "BadBinaryOperator", "(", "op", ",", "self", ",", "other", ")", "binary_operator", ".", "__doc__", "=", "\"Binary Operator: '%s'\"", "%", "op", "return", "binary_operator" ]
Set the path of the file .
def set_path ( self , path ) : if os . path . isabs ( path ) : path = os . path . normpath ( os . path . join ( self . cwd , path ) ) self . path = path self . relative = os . path . relpath ( self . path , self . base )
3,692
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/file.py#L44-L50
[ "def", "_generate_noise_system", "(", "dimensions_tr", ",", "spatial_sd", ",", "temporal_sd", ",", "spatial_noise_type", "=", "'gaussian'", ",", "temporal_noise_type", "=", "'gaussian'", ",", ")", ":", "def", "noise_volume", "(", "dimensions", ",", "noise_type", ",", ")", ":", "if", "noise_type", "==", "'rician'", ":", "# Generate the Rician noise (has an SD of 1)", "noise", "=", "stats", ".", "rice", ".", "rvs", "(", "b", "=", "0", ",", "loc", "=", "0", ",", "scale", "=", "1.527", ",", "size", "=", "dimensions", ")", "elif", "noise_type", "==", "'exponential'", ":", "# Make an exponential distribution (has an SD of 1)", "noise", "=", "stats", ".", "expon", ".", "rvs", "(", "0", ",", "scale", "=", "1", ",", "size", "=", "dimensions", ")", "elif", "noise_type", "==", "'gaussian'", ":", "noise", "=", "np", ".", "random", ".", "randn", "(", "np", ".", "prod", "(", "dimensions", ")", ")", ".", "reshape", "(", "dimensions", ")", "# Return the noise", "return", "noise", "# Get just the xyz coordinates", "dimensions", "=", "np", ".", "asarray", "(", "[", "dimensions_tr", "[", "0", "]", ",", "dimensions_tr", "[", "1", "]", ",", "dimensions_tr", "[", "2", "]", ",", "1", "]", ")", "# Generate noise", "spatial_noise", "=", "noise_volume", "(", "dimensions", ",", "spatial_noise_type", ")", "temporal_noise", "=", "noise_volume", "(", "dimensions_tr", ",", "temporal_noise_type", ")", "# Make the system noise have a specific spatial variability", "spatial_noise", "*=", "spatial_sd", "# Set the size of the noise", "temporal_noise", "*=", "temporal_sd", "# The mean in time of system noise needs to be zero, so subtract the", "# means of the temporal noise in time", "temporal_noise_mean", "=", "np", ".", "mean", "(", "temporal_noise", ",", "3", ")", ".", "reshape", "(", "dimensions", "[", "0", "]", ",", "dimensions", "[", "1", "]", ",", "dimensions", "[", "2", "]", ",", "1", ")", "temporal_noise", "=", "temporal_noise", "-", "temporal_noise_mean", "# Save the combination", 
"system_noise", "=", "spatial_noise", "+", "temporal_noise", "return", "system_noise" ]
Clone the file .
def clone ( self , path = None , * , with_contents = True , * * options ) : file = File ( path if path else self . path , cwd = options . get ( "cwd" , self . cwd ) ) file . base = options . get ( "base" , self . base ) if with_contents : file . contents = options . get ( "contents" , self . contents ) return file
3,693
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/file.py#L53-L61
[ "def", "_generate_noise_system", "(", "dimensions_tr", ",", "spatial_sd", ",", "temporal_sd", ",", "spatial_noise_type", "=", "'gaussian'", ",", "temporal_noise_type", "=", "'gaussian'", ",", ")", ":", "def", "noise_volume", "(", "dimensions", ",", "noise_type", ",", ")", ":", "if", "noise_type", "==", "'rician'", ":", "# Generate the Rician noise (has an SD of 1)", "noise", "=", "stats", ".", "rice", ".", "rvs", "(", "b", "=", "0", ",", "loc", "=", "0", ",", "scale", "=", "1.527", ",", "size", "=", "dimensions", ")", "elif", "noise_type", "==", "'exponential'", ":", "# Make an exponential distribution (has an SD of 1)", "noise", "=", "stats", ".", "expon", ".", "rvs", "(", "0", ",", "scale", "=", "1", ",", "size", "=", "dimensions", ")", "elif", "noise_type", "==", "'gaussian'", ":", "noise", "=", "np", ".", "random", ".", "randn", "(", "np", ".", "prod", "(", "dimensions", ")", ")", ".", "reshape", "(", "dimensions", ")", "# Return the noise", "return", "noise", "# Get just the xyz coordinates", "dimensions", "=", "np", ".", "asarray", "(", "[", "dimensions_tr", "[", "0", "]", ",", "dimensions_tr", "[", "1", "]", ",", "dimensions_tr", "[", "2", "]", ",", "1", "]", ")", "# Generate noise", "spatial_noise", "=", "noise_volume", "(", "dimensions", ",", "spatial_noise_type", ")", "temporal_noise", "=", "noise_volume", "(", "dimensions_tr", ",", "temporal_noise_type", ")", "# Make the system noise have a specific spatial variability", "spatial_noise", "*=", "spatial_sd", "# Set the size of the noise", "temporal_noise", "*=", "temporal_sd", "# The mean in time of system noise needs to be zero, so subtract the", "# means of the temporal noise in time", "temporal_noise_mean", "=", "np", ".", "mean", "(", "temporal_noise", ",", "3", ")", ".", "reshape", "(", "dimensions", "[", "0", "]", ",", "dimensions", "[", "1", "]", ",", "dimensions", "[", "2", "]", ",", "1", ")", "temporal_noise", "=", "temporal_noise", "-", "temporal_noise_mean", "# Save the combination", 
"system_noise", "=", "spatial_noise", "+", "temporal_noise", "return", "system_noise" ]
Launch the CLI .
def launch_cli ( ) : # Create the CLI argument parser parser = argparse . ArgumentParser ( prog = "pylp" , description = "Call some tasks defined in your pylpfile." ) # Version of Pylp parser . add_argument ( "-v" , "--version" , action = "version" , version = "Pylp %s" % version , help = "get the Pylp version and exit" ) # Set the pylpfile location parser . add_argument ( '--pylpfile' , nargs = 1 , help = "manually set path of pylpfile" , metavar = "<path>" ) # Set the pylpfile location parser . add_argument ( '--cwd' , nargs = 1 , help = "manually set the CWD" , metavar = "<dir path>" ) # Force Pylp to not display colors parser . add_argument ( '--no-color' , action = "store_false" , help = "force Pylp to not display colors" ) # Disable logging parser . add_argument ( '--silent' , action = "store_true" , help = "disable all Pylp logging" ) # List of tasks to execute parser . add_argument ( 'tasks' , nargs = "*" , default = [ "default" ] , help = "tasks to execute (if none, execute the 'default' task)" , metavar = "<task>" ) # Parse the CLI arguments args = parser . parse_args ( ) # Current working directory (CWD) if args . cwd : config . cwd = args . cwd [ 0 ] else : config . cwd = os . getcwd ( ) # Get the pylpfile location if args . pylpfile : pylpfile = args . pylpfile [ 0 ] if not args . pylpfile : pylpfile = path . join ( config . cwd , "pylpfile.py" ) elif not args . cwd : config . cwd = path . dirname ( pylpfile ) # Must the terminal have colors? config . color = args . no_color # Must Pylp be silent (no logging)? config . silent = args . silent # Execute the pylpfile run ( pylpfile , args . tasks )
3,694
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/cli/cli.py#L29-L110
[ "def", "delete_all", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "deleted_item_count", "=", "0", "repository", "=", "repo_factory", ".", "get_repository", "(", "self", ".", "_entity_cls", ")", "try", ":", "deleted_item_count", "=", "repository", ".", "delete_all", "(", "self", ".", "_criteria", ")", "except", "Exception", ":", "# FIXME Log Exception", "raise", "return", "deleted_item_count" ]
Add an affiliation .
def add_affiliation(self, value, curated_relation=None, record=None):
    """Add an affiliation entry to the signature.

    Args:
        value: display value of the affiliation; a falsy value is a no-op.
        curated_relation: optional flag stored only when not ``None``
            (``False`` is a meaningful value and must be kept).
        record: optional record reference stored only when truthy.
    """
    # Nothing to record for an empty/missing affiliation value.
    if not value:
        return

    entry = {'value': value}
    if record:
        entry['record'] = record
    # Explicit None-check: a curated_relation of False must still be stored.
    if curated_relation is not None:
        entry['curated_relation'] = curated_relation

    self._ensure_list_field('affiliations', entry)
3,695
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/signatures.py#L69-L85
[ "def", "destroy_page", "(", "self", ",", "tab_dict", ")", ":", "# logger.info(\"destroy page %s\" % tab_dict['controller'].model.state.get_path())", "if", "tab_dict", "[", "'source_code_changed_handler_id'", "]", "is", "not", "None", ":", "handler_id", "=", "tab_dict", "[", "'source_code_changed_handler_id'", "]", "if", "tab_dict", "[", "'controller'", "]", ".", "view", ".", "source_view", ".", "get_buffer", "(", ")", ".", "handler_is_connected", "(", "handler_id", ")", ":", "tab_dict", "[", "'controller'", "]", ".", "view", ".", "source_view", ".", "get_buffer", "(", ")", ".", "disconnect", "(", "handler_id", ")", "else", ":", "logger", ".", "warning", "(", "\"Source code changed handler of state {0} was already removed.\"", ".", "format", "(", "tab_dict", "[", "'state_m'", "]", ")", ")", "self", ".", "remove_controller", "(", "tab_dict", "[", "'controller'", "]", ")" ]
Set a unique ID .
def set_uid(self, uid, schema=None):
    """Set a unique ID on the signature, replacing any ID of the same schema.

    Args:
        uid: the identifier value.
        schema: optional schema name; when omitted, normalization tries
            to infer it from the UID itself.
    """
    try:
        uid, schema = author_id_normalize_and_schema(uid, schema)
    except UnknownUIDSchema:
        # No explicit schema and the UID alone is too short to infer one;
        # this does not make the UID invalid, so keep it as given.
        pass

    self._ensure_field('ids', [])
    # Drop any previously stored ID with the same schema before adding.
    remaining = [entry for entry in self.obj['ids']
                 if entry.get('schema') != schema]
    self.obj['ids'] = remaining
    self._add_uid(uid, schema)
3,696
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/signatures.py#L111-L136
[ "def", "getMetastable", "(", "rates", ",", "ver", ":", "np", ".", "ndarray", ",", "lamb", ",", "br", ",", "reactfn", ":", "Path", ")", ":", "with", "h5py", ".", "File", "(", "reactfn", ",", "'r'", ")", "as", "f", ":", "A", "=", "f", "[", "'/metastable/A'", "]", "[", ":", "]", "lambnew", "=", "f", "[", "'/metastable/lambda'", "]", ".", "value", ".", "ravel", "(", "order", "=", "'F'", ")", "# some are not 1-D!", "vnew", "=", "np", ".", "concatenate", "(", "(", "A", "[", ":", "2", "]", "*", "rates", ".", "loc", "[", "...", ",", "'no1s'", "]", ".", "values", "[", ":", ",", "None", "]", ",", "A", "[", "2", ":", "4", "]", "*", "rates", ".", "loc", "[", "...", ",", "'no1d'", "]", ".", "values", "[", ":", ",", "None", "]", ",", "A", "[", "4", ":", "]", "*", "rates", ".", "loc", "[", "...", ",", "'noii2p'", "]", ".", "values", "[", ":", ",", "None", "]", ")", ",", "axis", "=", "-", "1", ")", "assert", "vnew", ".", "shape", "==", "(", "rates", ".", "shape", "[", "0", "]", ",", "A", ".", "size", ")", "return", "catvl", "(", "rates", ".", "alt_km", ",", "ver", ",", "vnew", ",", "lamb", ",", "lambnew", ",", "br", ")" ]
Create singleton from class
def singleton(klass):
    """Class decorator that makes ``klass`` produce a single shared instance.

    The first call constructs the instance with the given arguments; every
    later call returns that same object, ignoring its arguments.
    """
    cache = {}

    def instance_factory(*args, **kwargs):
        # EAFP: return the cached instance, building it only on first use.
        try:
            return cache[klass]
        except KeyError:
            cache[klass] = klass(*args, **kwargs)
            return cache[klass]

    # Preserve the decorated class's name/docstring on the factory.
    return wraps(klass)(instance_factory)
3,697
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/utils/decorators.py#L13-L23
[ "def", "render", "(", "self", ",", "data", ",", "accepted_media_type", "=", "None", ",", "renderer_context", "=", "None", ")", ":", "if", "'SWAGGER_JSON_PATH'", "in", "os", ".", "environ", ":", "with", "io", ".", "open", "(", "os", ".", "environ", "[", "'SWAGGER_JSON_PATH'", "]", ",", "'rb'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", "else", ":", "return", "super", "(", "ConditionalOpenAPIRenderer", ",", "self", ")", ".", "render", "(", "data", ",", "accepted_media_type", ",", "renderer_context", ")" ]
Activate language only for one method or function
def translation_activate_block(function=None, language=None):
    """Decorator activating a translation language for one function call only.

    Usable both bare (``@translation_activate_block``) and with arguments
    (``@translation_activate_block(language='en')``). The previously active
    language is always restored afterwards. When ``language`` is falsy,
    ``settings.LANGUAGE_CODE`` is used.
    """
    def _make_wrapper(func):
        def _wrapped(*args, **kwargs):
            previous = translation.get_language()
            try:
                # activate() inside try so the finally always restores,
                # even if activation itself fails.
                translation.activate(language or settings.LANGUAGE_CODE)
                return func(*args, **kwargs)
            finally:
                translation.activate(previous)
        return wraps(func)(_wrapped)

    # Called bare: decorate immediately; called with args: return decorator.
    return _make_wrapper(function) if function else _make_wrapper
3,698
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/utils/decorators.py#L26-L44
[ "def", "_create_download_failed_message", "(", "exception", ",", "url", ")", ":", "message", "=", "'Failed to download from:\\n{}\\nwith {}:\\n{}'", ".", "format", "(", "url", ",", "exception", ".", "__class__", ".", "__name__", ",", "exception", ")", "if", "_is_temporal_problem", "(", "exception", ")", ":", "if", "isinstance", "(", "exception", ",", "requests", ".", "ConnectionError", ")", ":", "message", "+=", "'\\nPlease check your internet connection and try again.'", "else", ":", "message", "+=", "'\\nThere might be a problem in connection or the server failed to process '", "'your request. Please try again.'", "elif", "isinstance", "(", "exception", ",", "requests", ".", "HTTPError", ")", ":", "try", ":", "server_message", "=", "''", "for", "elem", "in", "decode_data", "(", "exception", ".", "response", ".", "content", ",", "MimeType", ".", "XML", ")", ":", "if", "'ServiceException'", "in", "elem", ".", "tag", "or", "'Message'", "in", "elem", ".", "tag", ":", "server_message", "+=", "elem", ".", "text", ".", "strip", "(", "'\\n\\t '", ")", "except", "ElementTree", ".", "ParseError", ":", "server_message", "=", "exception", ".", "response", ".", "text", "message", "+=", "'\\nServer response: \"{}\"'", ".", "format", "(", "server_message", ")", "return", "message" ]
Get data on when a UV protection window is in effect .
async def uv_protection_window(
        self, low: float = 3.5, high: float = 3.5) -> dict:
    """Get data on when a UV protection window is in effect.

    Args:
        low: UV index at which the window opens.
        high: UV index at which the window closes.

    Returns:
        The decoded response from the ``protection`` endpoint.
    """
    # The API expects the thresholds as string query parameters.
    query = {'from': str(low), 'to': str(high)}
    return await self.request('get', 'protection', params=query)
3,699
https://github.com/bachya/pyopenuv/blob/f7c2f9dd99dd4e3b8b1f9e501ea17ce62a7ace46/pyopenuv/client.py#L69-L76
[ "def", "purgeObject", "(", "self", ",", "pid", ",", "logMessage", "=", "None", ")", ":", "http_args", "=", "{", "}", "if", "logMessage", ":", "http_args", "[", "'logMessage'", "]", "=", "logMessage", "url", "=", "'objects/%(pid)s'", "%", "{", "'pid'", ":", "pid", "}", "return", "self", ".", "delete", "(", "url", ",", "params", "=", "http_args", ")" ]