query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Drop variant sequences which are shorter than request or don t have enough supporting reads .
def filter_variant_sequences ( variant_sequences , preferred_sequence_length , min_variant_sequence_coverage = MIN_VARIANT_SEQUENCE_COVERAGE , ) : variant_sequences = trim_variant_sequences ( variant_sequences , min_variant_sequence_coverage ) return filter_variant_sequences_by_length ( variant_sequences = variant_sequences , preferred_sequence_length = preferred_sequence_length )
1,300
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L339-L352
[ "def", "_evaluate_group_dataframe", "(", "self", ",", "gdf", ")", ":", "gdf", ".", "_is_copy", "=", "None", "result_index", "=", "gdf", ".", "index", "if", "self", ".", "keep_index", "else", "[", "]", "data", "=", "pd", ".", "DataFrame", "(", "index", "=", "result_index", ")", "for", "expr", "in", "self", ".", "expressions", ":", "value", "=", "expr", ".", "evaluate", "(", "gdf", ",", "self", ".", "env", ")", "if", "isinstance", "(", "value", ",", "pd", ".", "DataFrame", ")", ":", "data", "=", "value", "break", "else", ":", "_create_column", "(", "data", ",", "expr", ".", "column", ",", "value", ")", "data", "=", "_add_group_columns", "(", "data", ",", "gdf", ")", "return", "data" ]
For each variant collect all possible sequence contexts around the variant which are spanned by at least min_reads .
def reads_generator_to_sequences_generator ( variant_and_reads_generator , min_alt_rna_reads = MIN_ALT_RNA_READS , min_variant_sequence_coverage = MIN_VARIANT_SEQUENCE_COVERAGE , preferred_sequence_length = VARIANT_SEQUENCE_LENGTH , variant_sequence_assembly = VARIANT_SEQUENCE_ASSEMBLY ) : for variant , variant_reads in variant_and_reads_generator : variant_sequences = reads_to_variant_sequences ( variant = variant , reads = variant_reads , min_alt_rna_reads = min_alt_rna_reads , min_variant_sequence_coverage = min_variant_sequence_coverage , preferred_sequence_length = preferred_sequence_length , variant_sequence_assembly = variant_sequence_assembly ) yield variant , variant_sequences
1,301
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L474-L516
[ "def", "_ignore_keys", "(", "endpoint_props", ")", ":", "return", "dict", "(", "(", "prop_name", ",", "prop_val", ")", "for", "prop_name", ",", "prop_val", "in", "six", ".", "iteritems", "(", "endpoint_props", ")", "if", "prop_name", "not", "in", "_DO_NOT_COMPARE_FIELDS", ")" ]
Is the other VariantSequence a subsequence of this one?
def contains ( self , other ) : return ( self . alt == other . alt and self . prefix . endswith ( other . prefix ) and self . suffix . startswith ( other . suffix ) )
1,302
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L67-L77
[ "def", "download_stories", "(", "self", ",", "userids", ":", "Optional", "[", "List", "[", "Union", "[", "int", ",", "Profile", "]", "]", "]", "=", "None", ",", "fast_update", ":", "bool", "=", "False", ",", "filename_target", ":", "Optional", "[", "str", "]", "=", "':stories'", ",", "storyitem_filter", ":", "Optional", "[", "Callable", "[", "[", "StoryItem", "]", ",", "bool", "]", "]", "=", "None", ")", "->", "None", ":", "if", "not", "userids", ":", "self", ".", "context", ".", "log", "(", "\"Retrieving all visible stories...\"", ")", "else", ":", "userids", "=", "[", "p", "if", "isinstance", "(", "p", ",", "int", ")", "else", "p", ".", "userid", "for", "p", "in", "userids", "]", "for", "user_story", "in", "self", ".", "get_stories", "(", "userids", ")", ":", "name", "=", "user_story", ".", "owner_username", "self", ".", "context", ".", "log", "(", "\"Retrieving stories from profile {}.\"", ".", "format", "(", "name", ")", ")", "totalcount", "=", "user_story", ".", "itemcount", "count", "=", "1", "for", "item", "in", "user_story", ".", "get_items", "(", ")", ":", "if", "storyitem_filter", "is", "not", "None", "and", "not", "storyitem_filter", "(", "item", ")", ":", "self", ".", "context", ".", "log", "(", "\"<{} skipped>\"", ".", "format", "(", "item", ")", ",", "flush", "=", "True", ")", "continue", "self", ".", "context", ".", "log", "(", "\"[%3i/%3i] \"", "%", "(", "count", ",", "totalcount", ")", ",", "end", "=", "\"\"", ",", "flush", "=", "True", ")", "count", "+=", "1", "with", "self", ".", "context", ".", "error_catcher", "(", "'Download story from user {}'", ".", "format", "(", "name", ")", ")", ":", "downloaded", "=", "self", ".", "download_storyitem", "(", "item", ",", "filename_target", "if", "filename_target", "else", "name", ")", "if", "fast_update", "and", "not", "downloaded", ":", "break" ]
Does this VariantSequence overlap another on the left side?
def left_overlaps ( self , other , min_overlap_size = 1 ) : if self . alt != other . alt : # allele must match! return False if len ( other . prefix ) > len ( self . prefix ) : # only consider strings that overlap like: # self: ppppAssss # other: ppAsssssss # which excludes cases where the other sequence has a longer # prefix return False elif len ( other . suffix ) < len ( self . suffix ) : # similarly, we throw away cases where the other sequence is shorter # after the alt nucleotides than this sequence return False # is the other sequence a prefix of this sequence? # Example: # p1 a1 s1 = XXXXXXXX Y ZZZZZZ # p2 a2 s2 = XX Y ZZZZZZZZZ # ... # then we can combine them into a longer sequence sequence_overlaps = ( self . prefix . endswith ( other . prefix ) and other . suffix . startswith ( self . suffix ) ) prefix_overlap_size = min ( len ( self . prefix ) , len ( other . prefix ) ) suffix_overlap_size = min ( len ( other . suffix ) , len ( self . suffix ) ) overlap_size = ( prefix_overlap_size + suffix_overlap_size + len ( self . alt ) ) return sequence_overlaps and overlap_size >= min_overlap_size
1,303
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L79-L115
[ "def", "cublasGetStream", "(", "handle", ")", ":", "id", "=", "ctypes", ".", "c_int", "(", ")", "status", "=", "_libcublas", ".", "cublasGetStream_v2", "(", "handle", ",", "ctypes", ".", "byref", "(", "id", ")", ")", "cublasCheckStatus", "(", "status", ")", "return", "id", ".", "value" ]
Create another VariantSequence with more supporting reads .
def add_reads ( self , reads ) : if len ( reads ) == 0 : return self new_reads = self . reads . union ( reads ) if len ( new_reads ) > len ( self . reads ) : return VariantSequence ( prefix = self . prefix , alt = self . alt , suffix = self . suffix , reads = new_reads ) else : return self
1,304
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L117-L131
[ "def", "run_example", "(", ")", ":", "weather", "=", "get_weather_data", "(", "'weather.csv'", ")", "my_turbine", ",", "e126", ",", "dummy_turbine", "=", "initialize_wind_turbines", "(", ")", "calculate_power_output", "(", "weather", ",", "my_turbine", ",", "e126", ",", "dummy_turbine", ")", "plot_or_print", "(", "my_turbine", ",", "e126", ",", "dummy_turbine", ")" ]
When we combine prefix + alt + suffix into a single string what are is base - 0 index interval which gets us back the alt sequence? First returned index is inclusive the second is exclusive .
def variant_indices ( self ) : variant_start_index = len ( self . prefix ) variant_len = len ( self . alt ) variant_end_index = variant_start_index + variant_len return variant_start_index , variant_end_index
1,305
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L169-L178
[ "def", "subscribe", "(", "user_id", ",", "to_all", "=", "False", ",", "campaign_ids", "=", "None", ",", "on_error", "=", "None", ",", "on_success", "=", "None", ")", ":", "__subscription", "(", "user_id", ",", "unsubscribe", "=", "False", ",", "all_campaigns", "=", "to_all", ",", "campaign_ids", "=", "campaign_ids", ",", "on_error", "=", "on_error", ",", "on_success", "=", "on_success", ",", ")" ]
Returns NumPy array indicating number of reads covering each nucleotides of this sequence .
def coverage ( self ) : variant_start_index , variant_end_index = self . variant_indices ( ) n_nucleotides = len ( self ) coverage_array = np . zeros ( n_nucleotides , dtype = "int32" ) for read in self . reads : coverage_array [ max ( 0 , variant_start_index - len ( read . prefix ) ) : min ( n_nucleotides , variant_end_index + len ( read . suffix ) ) ] += 1 return coverage_array
1,306
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L180-L192
[ "def", "GetValues", "(", "self", ",", "fd", ")", ":", "result", "=", "None", "for", "result", "in", "fd", ".", "new_attributes", ".", "get", "(", "self", ",", "[", "]", ")", ":", "# We need to interpolate sub fields in this rdfvalue.", "if", "self", ".", "field_names", ":", "for", "x", "in", "self", ".", "GetSubFields", "(", "result", ",", "self", ".", "field_names", ")", ":", "yield", "x", "else", ":", "yield", "result", "for", "result", "in", "fd", ".", "synced_attributes", ".", "get", "(", "self", ",", "[", "]", ")", ":", "result", "=", "result", ".", "ToRDFValue", "(", ")", "# We need to interpolate sub fields in this rdfvalue.", "if", "result", "is", "not", "None", ":", "if", "self", ".", "field_names", ":", "for", "x", "in", "self", ".", "GetSubFields", "(", "result", ",", "self", ".", "field_names", ")", ":", "yield", "x", "else", ":", "yield", "result", "if", "result", "is", "None", ":", "default", "=", "self", ".", "GetDefault", "(", "fd", ")", "if", "default", "is", "not", "None", ":", "yield", "default" ]
Given the min number of reads overlapping each nucleotide of a variant sequence trim this sequence by getting rid of positions which are overlapped by fewer reads than specified .
def trim_by_coverage ( self , min_reads ) : read_count_array = self . coverage ( ) logger . info ( "Coverage: %s (len=%d)" % ( read_count_array , len ( read_count_array ) ) ) sufficient_coverage_mask = read_count_array >= min_reads sufficient_coverage_indices = np . argwhere ( sufficient_coverage_mask ) if len ( sufficient_coverage_indices ) == 0 : logger . debug ( "No bases in %s have coverage >= %d" % ( self , min_reads ) ) return VariantSequence ( prefix = "" , alt = "" , suffix = "" , reads = self . reads ) variant_start_index , variant_end_index = self . variant_indices ( ) # assuming that coverage drops off monotonically away from # variant nucleotides first_covered_index = sufficient_coverage_indices . min ( ) last_covered_index = sufficient_coverage_indices . max ( ) # adding 1 to last_covered_index since it's an inclusive index # whereas variant_end_index is the end of a half-open interval if ( first_covered_index > variant_start_index or last_covered_index + 1 < variant_end_index ) : # Example: # Nucleotide sequence: # ACCCTTTT|AA|GGCGCGCC # Coverage: # 12222333|44|33333211 # Then the mask for bases covered >= 4x would be: # ________|**|________ # with indices: # first_covered_index = 9 # last_covered_index = 10 # variant_start_index = 9 # variant_end_index = 11 logger . debug ( "Some variant bases in %s don't have coverage >= %d" % ( self , min_reads ) ) return VariantSequence ( prefix = "" , alt = "" , suffix = "" , reads = self . reads ) return VariantSequence ( prefix = self . prefix [ first_covered_index : ] , alt = self . alt , suffix = self . suffix [ : last_covered_index - variant_end_index + 1 ] , reads = self . reads )
1,307
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L200-L242
[ "def", "fetch", "(", "self", ",", "raise_exc", "=", "True", ")", ":", "self", ".", "_request", "(", "GET", ",", "raise_exc", "=", "raise_exc", ")", "# ingests response", "self", ".", "fetched", "=", "True", "return", "self", ".", "state", ".", "copy", "(", ")" ]
Drop all occurrences of N from prefix and suffix nucleotide strings by trimming .
def trim_N_nucleotides ( prefix , suffix ) : if 'N' in prefix : # trim prefix to exclude all occurrences of N rightmost_index = prefix . rfind ( 'N' ) logger . debug ( "Trimming %d nucleotides from read prefix '%s'" , rightmost_index + 1 , prefix ) prefix = prefix [ rightmost_index + 1 : ] if 'N' in suffix : leftmost_index = suffix . find ( 'N' ) logger . debug ( "Trimming %d nucleotides from read suffix '%s'" , len ( suffix ) - leftmost_index , suffix ) suffix = suffix [ : leftmost_index ] return prefix , suffix
1,308
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/string_helpers.py#L23-L44
[ "def", "from_data", "(", "cls", ",", "blob", ")", ":", "version", ",", "data", "=", "decompress_datablob", "(", "DATA_BLOB_MAGIC_RETRY", ",", "blob", ")", "if", "version", "==", "1", ":", "for", "clazz", "in", "cls", ".", "_all_subclasses", "(", ")", ":", "if", "clazz", ".", "__name__", "==", "data", "[", "\"_class_name\"", "]", ":", "return", "clazz", ".", "_from_data_v1", "(", "data", ")", "raise", "Exception", "(", "\"Invalid data blob data or version\"", ")" ]
Depending on how we extract data from pysam we may end up with either a string or a byte array of nucleotides . For consistency and simplicity we want to only use strings in the rest of our code .
def convert_from_bytes_if_necessary ( prefix , suffix ) : if isinstance ( prefix , bytes ) : prefix = prefix . decode ( 'ascii' ) if isinstance ( suffix , bytes ) : suffix = suffix . decode ( 'ascii' ) return prefix , suffix
1,309
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/string_helpers.py#L46-L58
[ "def", "wait", "(", "self", ")", ":", "self", ".", "_done_event", ".", "wait", "(", "MAXINT", ")", "return", "self", ".", "_status", ",", "self", ".", "_exception" ]
Validate operation type .
def publish ( self , data , * * kwargs ) : assert data . get ( 'op' ) in { 'index' , 'create' , 'delete' , 'update' } return super ( Producer , self ) . publish ( data , * * kwargs )
1,310
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L36-L39
[ "def", "encrypt", "(", "self", ",", "plaintext", ",", "nonce", ",", "encoder", "=", "encoding", ".", "RawEncoder", ")", ":", "if", "len", "(", "nonce", ")", "!=", "self", ".", "NONCE_SIZE", ":", "raise", "ValueError", "(", "\"The nonce must be exactly %s bytes long\"", "%", "self", ".", "NONCE_SIZE", ")", "ciphertext", "=", "libnacl", ".", "crypto_box_afternm", "(", "plaintext", ",", "nonce", ",", "self", ".", "_shared_key", ",", ")", "encoded_nonce", "=", "encoder", ".", "encode", "(", "nonce", ")", "encoded_ciphertext", "=", "encoder", ".", "encode", "(", "ciphertext", ")", "return", "EncryptedMessage", ".", "_from_parts", "(", "encoded_nonce", ",", "encoded_ciphertext", ",", "encoder", ".", "encode", "(", "nonce", "+", "ciphertext", ")", ",", ")" ]
Index a record .
def index ( self , record ) : index , doc_type = self . record_to_index ( record ) return self . client . index ( id = str ( record . id ) , version = record . revision_id , version_type = self . _version_type , index = index , doc_type = doc_type , body = self . _prepare_record ( record , index , doc_type ) , )
1,311
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L106-L126
[ "def", "configure_materials_manager", "(", "graph", ",", "key_provider", ")", ":", "if", "graph", ".", "config", ".", "materials_manager", ".", "enable_cache", ":", "return", "CachingCryptoMaterialsManager", "(", "cache", "=", "LocalCryptoMaterialsCache", "(", "graph", ".", "config", ".", "materials_manager", ".", "cache_capacity", ")", ",", "master_key_provider", "=", "key_provider", ",", "max_age", "=", "graph", ".", "config", ".", "materials_manager", ".", "cache_max_age", ",", "max_messages_encrypted", "=", "graph", ".", "config", ".", "materials_manager", ".", "cache_max_messages_encrypted", ",", ")", "return", "DefaultCryptoMaterialsManager", "(", "master_key_provider", "=", "key_provider", ")" ]
Process bulk indexing queue .
def process_bulk_queue ( self , es_bulk_kwargs = None ) : with current_celery_app . pool . acquire ( block = True ) as conn : consumer = Consumer ( connection = conn , queue = self . mq_queue . name , exchange = self . mq_exchange . name , routing_key = self . mq_routing_key , ) req_timeout = current_app . config [ 'INDEXER_BULK_REQUEST_TIMEOUT' ] es_bulk_kwargs = es_bulk_kwargs or { } count = bulk ( self . client , self . _actionsiter ( consumer . iterqueue ( ) ) , stats_only = True , request_timeout = req_timeout , * * es_bulk_kwargs ) consumer . close ( ) return count
1,312
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L166-L193
[ "def", "template", "(", "client", ",", "src", ",", "dest", ",", "paths", ",", "opt", ")", ":", "key_map", "=", "cli_hash", "(", "opt", ".", "key_map", ")", "obj", "=", "{", "}", "for", "path", "in", "paths", ":", "response", "=", "client", ".", "read", "(", "path", ")", "if", "not", "response", ":", "raise", "aomi", ".", "exceptions", ".", "VaultData", "(", "\"Unable to retrieve %s\"", "%", "path", ")", "if", "is_aws", "(", "response", "[", "'data'", "]", ")", "and", "'sts'", "not", "in", "path", ":", "renew_secret", "(", "client", ",", "response", ",", "opt", ")", "for", "s_k", ",", "s_v", "in", "response", "[", "'data'", "]", ".", "items", "(", ")", ":", "o_key", "=", "s_k", "if", "s_k", "in", "key_map", ":", "o_key", "=", "key_map", "[", "s_k", "]", "k_name", "=", "secret_key_name", "(", "path", ",", "o_key", ",", "opt", ")", ".", "lower", "(", ")", ".", "replace", "(", "'-'", ",", "'_'", ")", "obj", "[", "k_name", "]", "=", "s_v", "template_obj", "=", "blend_vars", "(", "obj", ",", "opt", ")", "output", "=", "render", "(", "grok_template_file", "(", "src", ")", ",", "template_obj", ")", "write_raw_file", "(", "output", ",", "abspath", "(", "dest", ")", ")" ]
Index record in Elasticsearch asynchronously .
def _bulk_op ( self , record_id_iterator , op_type , index = None , doc_type = None ) : with self . create_producer ( ) as producer : for rec in record_id_iterator : producer . publish ( dict ( id = str ( rec ) , op = op_type , index = index , doc_type = doc_type ) )
1,313
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L209-L225
[ "def", "parse_from_file", "(", "filename", ",", "nodata", "=", "False", ")", ":", "header", "=", "None", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "file", ":", "header", "=", "read_machine_header", "(", "file", ")", "meta_raw", "=", "file", ".", "read", "(", "header", "[", "'meta_len'", "]", ")", "meta", "=", "__parse_meta", "(", "meta_raw", ",", "header", ")", "data", "=", "b''", "if", "not", "nodata", ":", "data", "=", "__decompress", "(", "meta", ",", "file", ".", "read", "(", "header", "[", "'data_len'", "]", ")", ")", "return", "header", ",", "meta", ",", "data" ]
Iterate bulk actions .
def _actionsiter ( self , message_iterator ) : for message in message_iterator : payload = message . decode ( ) try : if payload [ 'op' ] == 'delete' : yield self . _delete_action ( payload ) else : yield self . _index_action ( payload ) message . ack ( ) except NoResultFound : message . reject ( ) except Exception : message . reject ( ) current_app . logger . error ( "Failed to index record {0}" . format ( payload . get ( 'id' ) ) , exc_info = True )
1,314
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L227-L246
[ "def", "ReleaseFileObject", "(", "self", ",", "file_object", ")", ":", "identifier", ",", "cache_value", "=", "self", ".", "_file_object_cache", ".", "GetCacheValueByObject", "(", "file_object", ")", "if", "not", "identifier", ":", "raise", "RuntimeError", "(", "'Object not cached.'", ")", "if", "not", "cache_value", ":", "raise", "RuntimeError", "(", "'Invalid cache value.'", ")", "self", ".", "_file_object_cache", ".", "ReleaseObject", "(", "identifier", ")", "result", "=", "cache_value", ".", "IsDereferenced", "(", ")", "if", "result", ":", "self", ".", "_file_object_cache", ".", "RemoveObject", "(", "identifier", ")", "return", "result" ]
Bulk delete action .
def _delete_action ( self , payload ) : index , doc_type = payload . get ( 'index' ) , payload . get ( 'doc_type' ) if not ( index and doc_type ) : record = Record . get_record ( payload [ 'id' ] ) index , doc_type = self . record_to_index ( record ) return { '_op_type' : 'delete' , '_index' : index , '_type' : doc_type , '_id' : payload [ 'id' ] , }
1,315
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L248-L264
[ "def", "volumes_delete", "(", "storage_pool", ",", "logger", ")", ":", "try", ":", "for", "vol_name", "in", "storage_pool", ".", "listVolumes", "(", ")", ":", "try", ":", "vol", "=", "storage_pool", ".", "storageVolLookupByName", "(", "vol_name", ")", "vol", ".", "delete", "(", "0", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volume %s.\"", ",", "vol_name", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volumes.\"", ")" ]
Bulk index action .
def _index_action ( self , payload ) : record = Record . get_record ( payload [ 'id' ] ) index , doc_type = self . record_to_index ( record ) return { '_op_type' : 'index' , '_index' : index , '_type' : doc_type , '_id' : str ( record . id ) , '_version' : record . revision_id , '_version_type' : self . _version_type , '_source' : self . _prepare_record ( record , index , doc_type ) , }
1,316
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L266-L283
[ "def", "volumes_delete", "(", "storage_pool", ",", "logger", ")", ":", "try", ":", "for", "vol_name", "in", "storage_pool", ".", "listVolumes", "(", ")", ":", "try", ":", "vol", "=", "storage_pool", ".", "storageVolLookupByName", "(", "vol_name", ")", "vol", ".", "delete", "(", "0", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volume %s.\"", ",", "vol_name", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volumes.\"", ")" ]
Prepare record data for indexing .
def _prepare_record ( record , index , doc_type ) : if current_app . config [ 'INDEXER_REPLACE_REFS' ] : data = copy . deepcopy ( record . replace_refs ( ) ) else : data = record . dumps ( ) data [ '_created' ] = pytz . utc . localize ( record . created ) . isoformat ( ) if record . created else None data [ '_updated' ] = pytz . utc . localize ( record . updated ) . isoformat ( ) if record . updated else None # Allow modification of data prior to sending to Elasticsearch. before_record_index . send ( current_app . _get_current_object ( ) , json = data , record = record , index = index , doc_type = doc_type , ) return data
1,317
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L286-L313
[ "def", "destroy_sns_event", "(", "app_name", ",", "env", ",", "region", ")", ":", "session", "=", "boto3", ".", "Session", "(", "profile_name", "=", "env", ",", "region_name", "=", "region", ")", "sns_client", "=", "session", ".", "client", "(", "'sns'", ")", "lambda_subscriptions", "=", "get_sns_subscriptions", "(", "app_name", "=", "app_name", ",", "env", "=", "env", ",", "region", "=", "region", ")", "for", "subscription_arn", "in", "lambda_subscriptions", ":", "sns_client", ".", "unsubscribe", "(", "SubscriptionArn", "=", "subscription_arn", ")", "LOG", ".", "debug", "(", "\"Lambda SNS event deleted\"", ")", "return", "True" ]
Returns a list of merged VariantSequence objects and True if any were successfully merged .
def greedy_merge_helper ( variant_sequences , min_overlap_size = MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE ) : merged_variant_sequences = { } merged_any = False # here we'll keep track of sequences that haven't been merged yet, and add them in at the end unmerged_variant_sequences = set ( variant_sequences ) for i in range ( len ( variant_sequences ) ) : sequence1 = variant_sequences [ i ] # it works to loop over the triangle (i+1 onwards) because combine() tries flipping the # arguments if sequence1 is on the right of sequence2 for j in range ( i + 1 , len ( variant_sequences ) ) : sequence2 = variant_sequences [ j ] combined = sequence1 . combine ( sequence2 ) if combined is None : continue if combined . sequence in merged_variant_sequences : existing = merged_variant_sequences [ combined . sequence ] # the existing VariantSequence and the newly merged # VariantSequence should differ only in which reads support them combined = combined . add_reads ( existing . reads ) merged_variant_sequences [ combined . sequence ] = combined unmerged_variant_sequences . discard ( sequence1 ) unmerged_variant_sequences . discard ( sequence2 ) merged_any = True result = list ( merged_variant_sequences . values ( ) ) + list ( unmerged_variant_sequences ) return result , merged_any
1,318
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/assembly.py#L27-L58
[ "def", "roots_in_unit_interval", "(", "coeffs", ")", ":", "all_roots", "=", "polynomial", ".", "polyroots", "(", "coeffs", ")", "# Only keep roots inside or very near to the unit interval.", "all_roots", "=", "all_roots", "[", "(", "_UNIT_INTERVAL_WIGGLE_START", "<", "all_roots", ".", "real", ")", "&", "(", "all_roots", ".", "real", "<", "_UNIT_INTERVAL_WIGGLE_END", ")", "]", "# Only keep roots with very small imaginary part. (Really only", "# keep the real parts.)", "real_inds", "=", "np", ".", "abs", "(", "all_roots", ".", "imag", ")", "<", "_IMAGINARY_WIGGLE", "return", "all_roots", "[", "real_inds", "]", ".", "real" ]
Greedily merge overlapping sequences into longer sequences .
def greedy_merge ( variant_sequences , min_overlap_size = MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE ) : merged_any = True while merged_any : variant_sequences , merged_any = greedy_merge_helper ( variant_sequences , min_overlap_size = min_overlap_size ) return variant_sequences
1,319
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/assembly.py#L60-L76
[ "def", "get_subscription_by_channel_id_and_endpoint_id", "(", "self", ",", "channel_id", ",", "endpoint_id", ")", ":", "subscriptions", "=", "self", ".", "search_subscriptions", "(", "channel_id", "=", "channel_id", ",", "endpoint_id", "=", "endpoint_id", ")", "try", ":", "return", "subscriptions", "[", "0", "]", "except", "IndexError", ":", "raise", "DataFailureException", "(", "url", ",", "404", ",", "\"No subscription found\"", ")" ]
Combine shorter sequences which are fully contained in longer sequences .
def collapse_substrings ( variant_sequences ) : if len ( variant_sequences ) <= 1 : # if we don't have at least two VariantSequences then just # return your input return variant_sequences # dictionary mapping VariantSequence objects to lists of reads # they absorb from substring VariantSequences extra_reads_from_substrings = defaultdict ( set ) result_list = [ ] # sort by longest to shortest total length for short_variant_sequence in sorted ( variant_sequences , key = lambda seq : - len ( seq ) ) : found_superstring = False for long_variant_sequence in result_list : found_superstring = long_variant_sequence . contains ( short_variant_sequence ) if found_superstring : extra_reads_from_substrings [ long_variant_sequence ] . update ( short_variant_sequence . reads ) if not found_superstring : result_list . append ( short_variant_sequence ) # add to each VariantSequence the reads it absorbed from dropped substrings # and then return return [ variant_sequence . add_reads ( extra_reads_from_substrings [ variant_sequence ] ) for variant_sequence in result_list ]
1,320
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/assembly.py#L78-L116
[ "def", "nl_socket_modify_err_cb", "(", "sk", ",", "kind", ",", "func", ",", "arg", ")", ":", "return", "int", "(", "nl_cb_err", "(", "sk", ".", "s_cb", ",", "kind", ",", "func", ",", "arg", ")", ")" ]
Assembles longer sequences from reads centered on a variant by between merging all pairs of overlapping sequences and collapsing shorter sequences onto every longer sequence which contains them .
def iterative_overlap_assembly ( variant_sequences , min_overlap_size = MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE ) : if len ( variant_sequences ) <= 1 : # if we don't have at least two sequences to start with then # skip the whole mess below return variant_sequences # reduce the number of inputs to the merge algorithm by first collapsing # shorter sequences onto the longer sequences which contain them n_before_collapse = len ( variant_sequences ) variant_sequences = collapse_substrings ( variant_sequences ) n_after_collapse = len ( variant_sequences ) logger . info ( "Collapsed %d -> %d sequences" , n_before_collapse , n_after_collapse ) merged_variant_sequences = greedy_merge ( variant_sequences , min_overlap_size ) return list ( sorted ( merged_variant_sequences , key = lambda seq : - len ( seq . reads ) ) )
1,321
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/assembly.py#L118-L146
[ "def", "server_error", "(", "request", ",", "template_name", "=", "\"errors/500.html\"", ")", ":", "context", "=", "{", "\"STATIC_URL\"", ":", "settings", ".", "STATIC_URL", "}", "t", "=", "get_template", "(", "template_name", ")", "return", "HttpResponseServerError", "(", "t", ".", "render", "(", "context", ",", "request", ")", ")" ]
Group elements of the list xs by keys generated from calling key_fn .
def groupby ( xs , key_fn ) : result = defaultdict ( list ) for x in xs : key = key_fn ( x ) result [ key ] . append ( x ) return result
1,322
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/common.py#L27-L37
[ "def", "InitializeFD", "(", "self", ",", "Channel", ",", "BitrateFD", ")", ":", "try", ":", "res", "=", "self", ".", "__m_dllBasic", ".", "CAN_InitializeFD", "(", "Channel", ",", "BitrateFD", ")", "return", "TPCANStatus", "(", "res", ")", "except", ":", "logger", ".", "error", "(", "\"Exception on PCANBasic.InitializeFD\"", ")", "raise" ]
Generates an orthonormal basis in the plane perpendicular to normal
def ortho_basis ( normal , ref_vec = None ) : # Imports for library functions import numpy as np from scipy import linalg as spla from scipy import random as sprnd from . . const import PRM from . . error import VectorError # Internal parameters # Magnitude of the perturbation from 'normal' in constructing a random rv RAND_MAG = 0.25 # Test 'normal' for shape and length if not len ( normal . shape ) == 1 : raise ValueError ( "'normal' is not a vector" ) ## end if if not normal . shape [ 0 ] == 3 : raise ValueError ( "Length of 'normal' is not three" ) ## end if # Normalize to concise variable 'nv' nv = normal / spla . norm ( normal ) # Test for specification of ref_vec in the function call if ref_vec is None : # ref_vec not specified. # # Generate reference vector by generation of a random perturbation # vector suitably non-parallel to norm_vec # Generate suitable randomizer, looping as needed rv = nv while parallel_check ( nv , rv ) : rv = np . float64 ( 1.0 - RAND_MAG + 2 * RAND_MAG * sprnd . rand ( 3 ) ) ## do loop # Calculate rejection of perturbed vector on the normal, then # normalize rv = rej ( rv , nv ) rv = rv / spla . norm ( rv ) else : # ref_vec specified, go ahead and use. Start with validity check. if not len ( ref_vec . shape ) == 1 : raise ValueError ( "ref_vec is not a vector" ) ## end if if not ref_vec . shape [ 0 ] == 3 : raise ValueError ( "ref_vec length is not three" ) ## end if # Normalize ref_vec to 'rv' rv = ref_vec / spla . norm ( ref_vec ) # Check for collinearity of nv and rv; raise error if too close if parallel_check ( nv , rv ) : # Essentially equal or opposite vectors, making them too nearly # parallel. raise VectorError ( VectorError . NONPRL , "'normal' and 'ref_vec' are too nearly parallel." , "" ) ## end if # rv is ok to use from here ## end try # on2 is the unit vector parallel to nv x rv on2 = np . cross ( nv , rv ) on2 = on2 / spla . norm ( on2 ) # on1 is on2 x nv (normalization should not be necessary here, but is # performed just in case) on1 = np . cross ( on2 , nv ) on1 = on1 / spla . norm ( on1 ) # Return the spanning vectors return on1 , on2
1,323
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/vector.py#L50-L174
[ "def", "define_from_values", "(", "cls", ",", "xdtu", ",", "ydtu", ",", "zdtu", ",", "xdtu_0", ",", "ydtu_0", ",", "zdtu_0", ")", ":", "self", "=", "DtuConfiguration", "(", ")", "# define DTU variables", "self", ".", "xdtu", "=", "xdtu", "self", ".", "ydtu", "=", "ydtu", "self", ".", "zdtu", "=", "zdtu", "self", ".", "xdtu_0", "=", "xdtu_0", "self", ".", "ydtu_0", "=", "ydtu_0", "self", ".", "zdtu_0", "=", "zdtu_0", "return", "self" ]
Checks orthonormality of the column vectors of a matrix .
def orthonorm_check ( a , tol = _DEF . ORTHONORM_TOL , report = False ) : # Imports import numpy as np from . base import delta_fxn #!TODO? orthonorm_check Must add traps to ensure a is a single array, # that it is 2D, that it's all real? To enforce the limits stated # in the docstring? # Initialize return variables orth = True n_fail = [ ] o_fail = [ ] # Coerce to float_ matrix. Must treat 1-D vector as column vector. # Should raise an exception for any objects with more than # two dimensions; real and all-numeric are still not yet checked, but # will probably be run-time caught if too bad an object is passed. if len ( a . shape ) == 1 : a_mx = np . matrix ( a , dtype = np . float_ ) . T else : a_mx = np . matrix ( a , dtype = np . float_ ) # Split matrix into separate vectors for convenient indexing. a_split = np . hsplit ( a_mx , a_mx . shape [ 1 ] ) # Loop over vectors and check orthonormality. for iter1 in range ( a_mx . shape [ 1 ] ) : for iter2 in range ( iter1 , a_mx . shape [ 1 ] ) : if not abs ( ( a_split [ iter1 ] . T * a_split [ iter2 ] ) [ 0 , 0 ] - np . float_ ( delta_fxn ( iter1 , iter2 ) ) ) <= tol : orth = False if report : if iter1 == iter2 : n_fail . append ( iter1 ) else : o_fail . append ( ( iter1 , iter2 ) ) # Return results if report : return orth , n_fail , o_fail else : return orth , None , None
1,324
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/vector.py#L179-L282
[ "def", "_structlog_default_keys_processor", "(", "logger_class", ",", "log_method", ",", "event", ")", ":", "global", "HOSTNAME", "if", "'id'", "not", "in", "event", ":", "event", "[", "'id'", "]", "=", "'%s_%s'", "%", "(", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "'%Y%m%dT%H%M%S'", ")", ",", "uuid", ".", "uuid1", "(", ")", ".", "hex", ")", "if", "'type'", "not", "in", "event", ":", "event", "[", "'type'", "]", "=", "'log'", "event", "[", "'host'", "]", "=", "HOSTNAME", "return", "event" ]
Checks whether two vectors are parallel OR anti - parallel .
def parallel_check ( vec1 , vec2 ) : # Imports from . . const import PRM import numpy as np # Initialize False par = False # Shape check for n , v in enumerate ( [ vec1 , vec2 ] ) : if not len ( v . shape ) == 1 : raise ValueError ( "Bad shape for vector #{0}" . format ( n ) ) ## end if ## next v,n if not vec1 . shape [ 0 ] == vec2 . shape [ 0 ] : raise ValueError ( "Vector length mismatch" ) ## end if # Check for (anti-)parallel character and return angle = vec_angle ( vec1 , vec2 ) if min ( [ abs ( angle ) , abs ( angle - 180. ) ] ) < PRM . NON_PARALLEL_TOL : par = True ## end if return par
1,325
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/vector.py#L288-L335
[ "async", "def", "activate", "(", "cls", ",", "access_key", ":", "str", ")", "->", "dict", ":", "q", "=", "'mutation($access_key: String!, $input: ModifyKeyPairInput!) {'", "+", "' modify_keypair(access_key: $access_key, props: $input) {'", "' ok msg'", "' }'", "'}'", "variables", "=", "{", "'access_key'", ":", "access_key", ",", "'input'", ":", "{", "'is_active'", ":", "True", ",", "'is_admin'", ":", "None", ",", "'resource_policy'", ":", "None", ",", "'rate_limit'", ":", "None", ",", "}", ",", "}", "rqst", "=", "Request", "(", "cls", ".", "session", ",", "'POST'", ",", "'/admin/graphql'", ")", "rqst", ".", "set_json", "(", "{", "'query'", ":", "q", ",", "'variables'", ":", "variables", ",", "}", ")", "async", "with", "rqst", ".", "fetch", "(", ")", "as", "resp", ":", "data", "=", "await", "resp", ".", "json", "(", ")", "return", "data", "[", "'modify_keypair'", "]" ]
Vector projection .
def proj ( vec , vec_onto ) : # Imports import numpy as np # Ensure vectors if not len ( vec . shape ) == 1 : raise ValueError ( "'vec' is not a vector" ) ## end if if not len ( vec_onto . shape ) == 1 : raise ValueError ( "'vec_onto' is not a vector" ) ## end if if not vec . shape [ 0 ] == vec_onto . shape [ 0 ] : raise ValueError ( "Shape mismatch between vectors" ) ## end if # Calculate the projection and return proj_vec = np . float_ ( np . asscalar ( np . dot ( vec . T , vec_onto ) ) ) / np . float_ ( np . asscalar ( np . dot ( vec_onto . T , vec_onto ) ) ) * vec_onto return proj_vec
1,326
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/vector.py#L341-L386
[ "def", "_add_dependency", "(", "self", ",", "dependency", ",", "var_name", "=", "None", ")", ":", "if", "var_name", "is", "None", ":", "var_name", "=", "next", "(", "self", ".", "temp_var_names", ")", "# Don't add duplicate dependencies", "if", "(", "dependency", ",", "var_name", ")", "not", "in", "self", ".", "dependencies", ":", "self", ".", "dependencies", ".", "append", "(", "(", "dependency", ",", "var_name", ")", ")", "return", "var_name" ]
Vector rejection .
def rej ( vec , vec_onto ) : # Imports import numpy as np # Calculate and return. rej_vec = vec - proj ( vec , vec_onto ) return rej_vec
1,327
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/vector.py#L392-L426
[ "def", "_add_dependency", "(", "self", ",", "dependency", ",", "var_name", "=", "None", ")", ":", "if", "var_name", "is", "None", ":", "var_name", "=", "next", "(", "self", ".", "temp_var_names", ")", "# Don't add duplicate dependencies", "if", "(", "dependency", ",", "var_name", ")", "not", "in", "self", ".", "dependencies", ":", "self", ".", "dependencies", ".", "append", "(", "(", "dependency", ",", "var_name", ")", ")", "return", "var_name" ]
Angle between two R - dimensional vectors .
def vec_angle ( vec1 , vec2 ) : # Imports import numpy as np from scipy import linalg as spla from . . const import PRM # Check shape and equal length if len ( vec1 . shape ) != 1 : raise ValueError ( "'vec1' is not a vector" ) ## end if if len ( vec2 . shape ) != 1 : raise ValueError ( "'vec2' is not a vector" ) ## end if if vec1 . shape [ 0 ] != vec2 . shape [ 0 ] : raise ValueError ( "Vector lengths are not equal" ) ## end if # Check magnitudes if spla . norm ( vec1 ) < PRM . ZERO_VEC_TOL : raise ValueError ( "'vec1' norm is too small" ) ## end if if spla . norm ( vec2 ) < PRM . ZERO_VEC_TOL : raise ValueError ( "'vec2' norm is too small" ) ## end if # Calculate the angle and return. Do in multiple steps to test for # possible >1 or <-1 values from numerical precision errors. dotp = np . dot ( vec1 , vec2 ) / spla . norm ( vec1 ) / spla . norm ( vec2 ) if dotp > 1 : angle = 0. # pragma: no cover elif dotp < - 1 : angle = 180. # pragma: no cover else : angle = np . degrees ( np . arccos ( dotp ) ) ## end if return angle
1,328
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/vector.py#L432-L499
[ "def", "remove_selection", "(", "self", ",", "sel", ")", ":", "self", ".", "_selections", ".", "remove", "(", "sel", ")", "# <artist>.figure will be unset so we save them first.", "figures", "=", "{", "artist", ".", "figure", "for", "artist", "in", "[", "sel", ".", "annotation", "]", "+", "sel", ".", "extras", "}", "# ValueError is raised if the artist has already been removed.", "with", "suppress", "(", "ValueError", ")", ":", "sel", ".", "annotation", ".", "remove", "(", ")", "for", "artist", "in", "sel", ".", "extras", ":", "with", "suppress", "(", "ValueError", ")", ":", "artist", ".", "remove", "(", ")", "for", "cb", "in", "self", ".", "_callbacks", "[", "\"remove\"", "]", ":", "cb", "(", "sel", ")", "for", "figure", "in", "figures", ":", "figure", ".", "canvas", ".", "draw_idle", "(", ")" ]
Do all of the gruntwork associated with creating a new module .
def new_module ( name ) : parent = None if '.' in name : parent_name = name . rsplit ( '.' , 1 ) [ 0 ] parent = __import__ ( parent_name , fromlist = [ '' ] ) module = imp . new_module ( name ) sys . modules [ name ] = module if parent : setattr ( parent , name . rsplit ( '.' , 1 ) [ 1 ] , module ) return module
1,329
https://github.com/matthewwithanm/django-classbasedsettings/blob/ac9e4362bd1f4954f3e4679b97726cab2b22aea9/cbsettings/importers.py#L5-L19
[ "def", "extract_source_planes_strikes_dips", "(", "src", ")", ":", "if", "\"characteristicFaultSource\"", "not", "in", "src", ".", "tag", ":", "strikes", "=", "dict", "(", "[", "(", "key", ",", "None", ")", "for", "key", ",", "_", "in", "PLANES_STRIKES_PARAM", "]", ")", "dips", "=", "dict", "(", "[", "(", "key", ",", "None", ")", "for", "key", ",", "_", "in", "PLANES_DIPS_PARAM", "]", ")", "return", "strikes", ",", "dips", "tags", "=", "get_taglist", "(", "src", ")", "surface_set", "=", "src", ".", "nodes", "[", "tags", ".", "index", "(", "\"surface\"", ")", "]", "strikes", "=", "[", "]", "dips", "=", "[", "]", "num_planes", "=", "0", "for", "surface", "in", "surface_set", ":", "if", "\"planarSurface\"", "in", "surface", ".", "tag", ":", "strikes", ".", "append", "(", "float", "(", "surface", ".", "attrib", "[", "\"strike\"", "]", ")", ")", "dips", ".", "append", "(", "float", "(", "surface", ".", "attrib", "[", "\"dip\"", "]", ")", ")", "num_planes", "+=", "1", "if", "num_planes", ">", "MAX_PLANES", ":", "raise", "ValueError", "(", "\"Number of planes in sourcs %s exceededs maximum \"", "\"of %s\"", "%", "(", "str", "(", "num_planes", ")", ",", "str", "(", "MAX_PLANES", ")", ")", ")", "if", "num_planes", ":", "strikes", "=", "expand_src_param", "(", "strikes", ",", "PLANES_STRIKES_PARAM", ")", "dips", "=", "expand_src_param", "(", "dips", ",", "PLANES_DIPS_PARAM", ")", "else", ":", "strikes", "=", "dict", "(", "[", "(", "key", ",", "None", ")", "for", "key", ",", "_", "in", "PLANES_STRIKES_PARAM", "]", ")", "dips", "=", "dict", "(", "[", "(", "key", ",", "None", ")", "for", "key", ",", "_", "in", "PLANES_DIPS_PARAM", "]", ")", "return", "strikes", ",", "dips" ]
Creates a DataFrame containing number of reads supporting the ref vs . alt alleles for each variant .
def allele_counts_dataframe ( variant_and_allele_reads_generator ) : df_builder = DataFrameBuilder ( AlleleCount , extra_column_fns = { "gene" : lambda variant , _ : ";" . join ( variant . gene_names ) , } ) for variant , allele_reads in variant_and_allele_reads_generator : counts = count_alleles_at_variant_locus ( variant , allele_reads ) df_builder . add ( variant , counts ) return df_builder . to_dataframe ( )
1,330
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/allele_counts.py#L46-L59
[ "def", "on_exception", "(", "self", ",", "exception", ")", ":", "logger", ".", "error", "(", "'Exception from stream!'", ",", "exc_info", "=", "True", ")", "self", ".", "streaming_exception", "=", "exception" ]
Install Postgres extension .
def install_extension ( conn , extension : str ) : query = 'CREATE EXTENSION IF NOT EXISTS "%s";' with conn . cursor ( ) as cursor : cursor . execute ( query , ( AsIs ( extension ) , ) ) installed = check_extension ( conn , extension ) if not installed : raise psycopg2 . ProgrammingError ( 'Postgres extension failed installation.' , extension )
1,331
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/extensions.py#L5-L18
[ "def", "delete_managed_disk", "(", "call", "=", "None", ",", "kwargs", "=", "None", ")", ":", "# pylint: disable=unused-argument", "compconn", "=", "get_conn", "(", "client_type", "=", "'compute'", ")", "try", ":", "compconn", ".", "disks", ".", "delete", "(", "kwargs", "[", "'resource_group'", "]", ",", "kwargs", "[", "'blob'", "]", ")", "except", "Exception", "as", "exc", ":", "log", ".", "error", "(", "'Error deleting managed disk %s - %s'", ",", "kwargs", ".", "get", "(", "'blob'", ")", ",", "six", ".", "text_type", "(", "exc", ")", ")", "return", "False", "return", "True" ]
Check to see if an extension is installed .
def check_extension ( conn , extension : str ) -> bool : query = 'SELECT installed_version FROM pg_available_extensions WHERE name=%s;' with conn . cursor ( ) as cursor : cursor . execute ( query , ( extension , ) ) result = cursor . fetchone ( ) if result is None : raise psycopg2 . ProgrammingError ( 'Extension is not available for installation.' , extension ) else : extension_version = result [ 0 ] return bool ( extension_version )
1,332
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/extensions.py#L21-L37
[ "def", "on_response", "(", "self", ",", "ch", ",", "method_frame", ",", "props", ",", "body", ")", ":", "LOGGER", ".", "debug", "(", "\"rabbitmq.Requester.on_response\"", ")", "if", "self", ".", "corr_id", "==", "props", ".", "correlation_id", ":", "self", ".", "response", "=", "{", "'props'", ":", "props", ",", "'body'", ":", "body", "}", "else", ":", "LOGGER", ".", "warn", "(", "\"rabbitmq.Requester.on_response - discarded response : \"", "+", "str", "(", "props", ".", "correlation_id", ")", ")", "LOGGER", ".", "debug", "(", "\"natsd.Requester.on_response - discarded response : \"", "+", "str", "(", "{", "'properties'", ":", "props", ",", "'body'", ":", "body", "}", ")", ")" ]
Ensure obj is iterable .
def make_iterable ( obj , default = None ) : if obj is None : return default or [ ] if isinstance ( obj , ( compat . string_types , compat . integer_types ) ) : return [ obj ] return obj
1,333
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/reader.py#L12-L18
[ "def", "login", "(", "self", ")", ":", "access_token", "=", "self", ".", "_get_access_token", "(", ")", "try", ":", "super", "(", "IAMSession", ",", "self", ")", ".", "request", "(", "'POST'", ",", "self", ".", "_session_url", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", ",", "data", "=", "json", ".", "dumps", "(", "{", "'access_token'", ":", "access_token", "}", ")", ")", ".", "raise_for_status", "(", ")", "except", "RequestException", ":", "raise", "CloudantException", "(", "'Failed to exchange IAM token with Cloudant'", ")" ]
Return an iterator over corpus documents .
def iter_documents ( self , fileids = None , categories = None , _destroy = False ) : doc_ids = self . _filter_ids ( fileids , categories ) for doc in imap ( self . get_document , doc_ids ) : yield doc if _destroy : doc . destroy ( )
1,334
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/reader.py#L176-L182
[ "def", "disassociate_hosting_device_with_config_agent", "(", "self", ",", "client", ",", "config_agent_id", ",", "hosting_device_id", ")", ":", "return", "client", ".", "delete", "(", "(", "ConfigAgentHandlingHostingDevice", ".", "resource_path", "+", "CFG_AGENT_HOSTING_DEVICES", "+", "\"/%s\"", ")", "%", "(", "config_agent_id", ",", "hosting_device_id", ")", ")" ]
Try to dump metadata to a file .
def _create_meta_cache ( self ) : try : with open ( self . _cache_filename , 'wb' ) as f : compat . pickle . dump ( self . _document_meta , f , 1 ) except ( IOError , compat . pickle . PickleError ) : pass
1,335
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/reader.py#L295-L301
[ "def", "user_deleted_from_site_event", "(", "event", ")", ":", "userid", "=", "event", ".", "principal", "catalog", "=", "api", ".", "portal", ".", "get_tool", "(", "'portal_catalog'", ")", "query", "=", "{", "'object_provides'", ":", "WORKSPACE_INTERFACE", "}", "query", "[", "'workspace_members'", "]", "=", "userid", "workspaces", "=", "[", "IWorkspace", "(", "b", ".", "_unrestrictedGetObject", "(", ")", ")", "for", "b", "in", "catalog", ".", "unrestrictedSearchResults", "(", "query", ")", "]", "for", "workspace", "in", "workspaces", ":", "workspace", ".", "remove_from_team", "(", "userid", ")" ]
Try to load metadata from file .
def _load_meta_cache ( self ) : try : if self . _should_invalidate_cache ( ) : os . remove ( self . _cache_filename ) else : with open ( self . _cache_filename , 'rb' ) as f : self . _document_meta = compat . pickle . load ( f ) except ( OSError , IOError , compat . pickle . PickleError , ImportError , AttributeError ) : pass
1,336
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/reader.py#L303-L313
[ "def", "arcball_constrain_to_axis", "(", "point", ",", "axis", ")", ":", "v", "=", "np", ".", "array", "(", "point", ",", "dtype", "=", "np", ".", "float64", ",", "copy", "=", "True", ")", "a", "=", "np", ".", "array", "(", "axis", ",", "dtype", "=", "np", ".", "float64", ",", "copy", "=", "True", ")", "v", "-=", "a", "*", "np", ".", "dot", "(", "a", ",", "v", ")", "# on plane", "n", "=", "vector_norm", "(", "v", ")", "if", "n", ">", "_EPS", ":", "if", "v", "[", "2", "]", "<", "0.0", ":", "np", ".", "negative", "(", "v", ",", "v", ")", "v", "/=", "n", "return", "v", "if", "a", "[", "2", "]", "==", "1.0", ":", "return", "np", ".", "array", "(", "[", "1.0", ",", "0.0", ",", "0.0", "]", ")", "return", "unit_vector", "(", "[", "-", "a", "[", "1", "]", ",", "a", "[", "0", "]", ",", "0.0", "]", ")" ]
Return documents meta information that can be used for fast document lookups . Meta information consists of documents titles categories and positions in file .
def _compute_document_meta ( self ) : meta = OrderedDict ( ) bounds_iter = xml_utils . bounds ( self . filename , start_re = r'<text id="(\d+)"[^>]*name="([^"]*)"' , end_re = r'</text>' ) for match , bounds in bounds_iter : doc_id , title = str ( match . group ( 1 ) ) , match . group ( 2 ) title = xml_utils . unescape_attribute ( title ) # cache categories xml_data = xml_utils . load_chunk ( self . filename , bounds ) doc = Document ( compat . ElementTree . XML ( xml_data . encode ( 'utf8' ) ) ) meta [ doc_id ] = _DocumentMeta ( title , bounds , doc . categories ( ) ) return meta
1,337
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/reader.py#L320-L342
[ "def", "value", "(", "self", ")", ":", "val", "=", "{", "}", "for", "k", "in", "self", ".", "__allowed_keys", ":", "value", "=", "getattr", "(", "self", ",", "\"_\"", "+", "k", ")", "if", "value", "is", "not", "None", ":", "val", "[", "k", "]", "=", "value", "return", "val" ]
Return xml Element for the document document_id .
def _document_xml ( self , doc_id ) : doc_str = self . _get_doc_by_raw_offset ( str ( doc_id ) ) return compat . ElementTree . XML ( doc_str . encode ( 'utf8' ) )
1,338
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/reader.py#L344-L347
[ "def", "bind", "(", "self", ")", ":", "conn", "=", "self", ".", "initialize", "try", ":", "conn", ".", "simple_bind_s", "(", "current_app", ".", "config", "[", "'LDAP_USERNAME'", "]", ",", "current_app", ".", "config", "[", "'LDAP_PASSWORD'", "]", ")", "return", "conn", "except", "ldap", ".", "LDAPError", "as", "e", ":", "raise", "LDAPException", "(", "self", ".", "error", "(", "e", ".", "args", ")", ")" ]
Load document from xml using line offset information . This is much slower than _get_doc_by_raw_offset but should work everywhere .
def _get_doc_by_line_offset ( self , doc_id ) : bounds = self . _get_meta ( ) [ str ( doc_id ) ] . bounds return xml_utils . load_chunk ( self . filename , bounds , slow = True )
1,339
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/reader.py#L357-L364
[ "def", "_class", "(", "self", ")", ":", "try", ":", "self", ".", "_project_name", "(", ")", "except", "ValueError", ":", "return", "MalformedReq", "if", "self", ".", "_is_satisfied", "(", ")", ":", "return", "SatisfiedReq", "if", "not", "self", ".", "_expected_hashes", "(", ")", ":", "return", "MissingReq", "if", "self", ".", "_actual_hash", "(", ")", "not", "in", "self", ".", "_expected_hashes", "(", ")", ":", "return", "MismatchedReq", "return", "InstallableReq" ]
Return Ma where M is a 3x3 transformation matrix for each pixel
def _threeDdot_simple ( M , a ) : result = np . empty ( a . shape , dtype = a . dtype ) for i in range ( a . shape [ 0 ] ) : for j in range ( a . shape [ 1 ] ) : A = np . array ( [ a [ i , j , 0 ] , a [ i , j , 1 ] , a [ i , j , 2 ] ] ) . reshape ( ( 3 , 1 ) ) L = np . dot ( M , A ) result [ i , j , 0 ] = L [ 0 ] result [ i , j , 1 ] = L [ 1 ] result [ i , j , 2 ] = L [ 2 ] return result
1,340
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L49-L62
[ "def", "ttl", "(", "self", ",", "value", ")", ":", "# get timer", "timer", "=", "getattr", "(", "self", ",", "Annotation", ".", "__TIMER", ",", "None", ")", "# if timer is running, stop the timer", "if", "timer", "is", "not", "None", ":", "timer", ".", "cancel", "(", ")", "# initialize timestamp", "timestamp", "=", "None", "# if value is None", "if", "value", "is", "None", ":", "# nonify timer", "timer", "=", "None", "else", ":", "# else, renew a timer", "# get timestamp", "timestamp", "=", "time", "(", ")", "+", "value", "# start a new timer", "timer", "=", "Timer", "(", "value", ",", "self", ".", "__del__", ")", "timer", ".", "start", "(", ")", "# set/update attributes", "setattr", "(", "self", ",", "Annotation", ".", "__TIMER", ",", "timer", ")", "setattr", "(", "self", ",", "Annotation", ".", "__TS", ",", "timestamp", ")" ]
Reverse the order of an LCH numpy dstack or tuple for analysis .
def _swaplch ( LCH ) : try : # Numpy array L , C , H = np . dsplit ( LCH , 3 ) return np . dstack ( ( H , C , L ) ) except : # Tuple L , C , H = LCH return H , C , L
1,341
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L435-L442
[ "def", "GetAdGroups", "(", "self", ",", "client_customer_id", ",", "campaign_id", ")", ":", "self", ".", "client", ".", "SetClientCustomerId", "(", "client_customer_id", ")", "selector", "=", "{", "'fields'", ":", "[", "'Id'", ",", "'Name'", ",", "'Status'", "]", ",", "'predicates'", ":", "[", "{", "'field'", ":", "'CampaignId'", ",", "'operator'", ":", "'EQUALS'", ",", "'values'", ":", "[", "campaign_id", "]", "}", ",", "{", "'field'", ":", "'Status'", ",", "'operator'", ":", "'NOT_EQUALS'", ",", "'values'", ":", "[", "'REMOVED'", "]", "}", "]", "}", "adgroups", "=", "self", ".", "client", ".", "GetService", "(", "'AdGroupService'", ")", ".", "get", "(", "selector", ")", "if", "int", "(", "adgroups", "[", "'totalNumEntries'", "]", ")", ">", "0", ":", "return", "adgroups", "[", "'entries'", "]", "else", ":", "return", "None" ]
linear rgb to hsv
def rgb_to_hsv ( self , RGB ) : gammaRGB = self . _gamma_rgb ( RGB ) return self . _ABC_to_DEF_by_fn ( gammaRGB , rgb_to_hsv )
1,342
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L413-L416
[ "def", "timed_cache", "(", "*", "*", "timed_cache_kwargs", ")", ":", "def", "_wrapper", "(", "f", ")", ":", "maxsize", "=", "timed_cache_kwargs", ".", "pop", "(", "'maxsize'", ",", "128", ")", "typed", "=", "timed_cache_kwargs", ".", "pop", "(", "'typed'", ",", "False", ")", "update_delta", "=", "timedelta", "(", "*", "*", "timed_cache_kwargs", ")", "# nonlocal workaround to support Python 2", "# https://technotroph.wordpress.com/2012/10/01/python-closures-and-the-python-2-7-nonlocal-solution/", "d", "=", "{", "'next_update'", ":", "datetime", ".", "utcnow", "(", ")", "-", "update_delta", "}", "try", ":", "f", "=", "functools", ".", "lru_cache", "(", "maxsize", "=", "maxsize", ",", "typed", "=", "typed", ")", "(", "f", ")", "except", "AttributeError", ":", "print", "(", "\"LRU caching is not available in Pyton 2.7, \"", "\"this will have no effect!\"", ")", "pass", "@", "functools", ".", "wraps", "(", "f", ")", "def", "_wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "now", "=", "datetime", ".", "utcnow", "(", ")", "if", "now", ">=", "d", "[", "'next_update'", "]", ":", "try", ":", "f", ".", "cache_clear", "(", ")", "except", "AttributeError", ":", "pass", "d", "[", "'next_update'", "]", "=", "now", "+", "update_delta", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_wrapped", "return", "_wrapper" ]
hsv to linear rgb
def hsv_to_rgb ( self , HSV ) : gammaRGB = self . _ABC_to_DEF_by_fn ( HSV , hsv_to_rgb ) return self . _ungamma_rgb ( gammaRGB )
1,343
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L418-L421
[ "def", "factory", "(", "fileobject", ",", "jfs", ",", "parentpath", ")", ":", "# fileobject from lxml.objectify", "if", "hasattr", "(", "fileobject", ",", "'currentRevision'", ")", ":", "# a normal file", "return", "JFSFile", "(", "fileobject", ",", "jfs", ",", "parentpath", ")", "elif", "str", "(", "fileobject", ".", "latestRevision", ".", "state", ")", "==", "ProtoFile", ".", "STATE_INCOMPLETE", ":", "return", "JFSIncompleteFile", "(", "fileobject", ",", "jfs", ",", "parentpath", ")", "elif", "str", "(", "fileobject", ".", "latestRevision", ".", "state", ")", "==", "ProtoFile", ".", "STATE_CORRUPT", ":", "return", "JFSCorruptFile", "(", "fileobject", ",", "jfs", ",", "parentpath", ")", "else", ":", "raise", "NotImplementedError", "(", "'No JFS*File support for state %r. Please file a bug!'", "%", "fileobject", ".", "latestRevision", ".", "state", ")" ]
Transform images i provided into the specified working color space .
def image2working ( self , i ) : return self . colorspace . convert ( self . image_space , self . working_space , i )
1,344
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L477-L481
[ "def", "get_or_generate_vocabulary", "(", "data_dir", ",", "tmp_dir", ",", "data_prefix", ",", "max_page_size_exp", ",", "approx_vocab_size", "=", "32768", ",", "strip", "=", "True", ")", ":", "num_pages_for_vocab_generation", "=", "approx_vocab_size", "//", "3", "vocab_file", "=", "vocab_filename", "(", "approx_vocab_size", ",", "strip", ")", "def", "my_generator", "(", "data_prefix", ")", ":", "\"\"\"Line generator for vocab.\"\"\"", "count", "=", "0", "for", "page", "in", "corpus_page_generator", "(", "all_corpus_files", "(", "data_prefix", ")", "[", ":", ":", "-", "1", "]", ",", "tmp_dir", ",", "max_page_size_exp", ")", ":", "revisions", "=", "page", "[", "\"revisions\"", "]", "if", "revisions", ":", "text", "=", "get_text", "(", "revisions", "[", "-", "1", "]", ",", "strip", "=", "strip", ")", "yield", "text", "count", "+=", "1", "if", "count", "%", "100", "==", "0", ":", "tf", ".", "logging", ".", "info", "(", "\"reading pages for vocab %d\"", "%", "count", ")", "if", "count", ">", "num_pages_for_vocab_generation", ":", "break", "return", "generator_utils", ".", "get_or_generate_vocab_inner", "(", "data_dir", ",", "vocab_file", ",", "approx_vocab_size", ",", "my_generator", "(", "data_prefix", ")", ")" ]
Transform working space inputs to the analysis color space .
def working2analysis ( self , r ) : a = self . colorspace . convert ( self . working_space , self . analysis_space , r ) return self . swap_polar_HSVorder [ self . analysis_space ] ( a )
1,345
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L483-L486
[ "def", "get_or_generate_vocabulary", "(", "data_dir", ",", "tmp_dir", ",", "data_prefix", ",", "max_page_size_exp", ",", "approx_vocab_size", "=", "32768", ",", "strip", "=", "True", ")", ":", "num_pages_for_vocab_generation", "=", "approx_vocab_size", "//", "3", "vocab_file", "=", "vocab_filename", "(", "approx_vocab_size", ",", "strip", ")", "def", "my_generator", "(", "data_prefix", ")", ":", "\"\"\"Line generator for vocab.\"\"\"", "count", "=", "0", "for", "page", "in", "corpus_page_generator", "(", "all_corpus_files", "(", "data_prefix", ")", "[", ":", ":", "-", "1", "]", ",", "tmp_dir", ",", "max_page_size_exp", ")", ":", "revisions", "=", "page", "[", "\"revisions\"", "]", "if", "revisions", ":", "text", "=", "get_text", "(", "revisions", "[", "-", "1", "]", ",", "strip", "=", "strip", ")", "yield", "text", "count", "+=", "1", "if", "count", "%", "100", "==", "0", ":", "tf", ".", "logging", ".", "info", "(", "\"reading pages for vocab %d\"", "%", "count", ")", "if", "count", ">", "num_pages_for_vocab_generation", ":", "break", "return", "generator_utils", ".", "get_or_generate_vocab_inner", "(", "data_dir", ",", "vocab_file", ",", "approx_vocab_size", ",", "my_generator", "(", "data_prefix", ")", ")" ]
Convert back from the analysis color space to the working space .
def analysis2working ( self , a ) : a = self . swap_polar_HSVorder [ self . analysis_space ] ( a ) return self . colorspace . convert ( self . analysis_space , self . working_space , a )
1,346
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L488-L491
[ "def", "_FindLargestIdPostfixNumber", "(", "self", ",", "schedule", ")", ":", "postfix_number_re", "=", "re", ".", "compile", "(", "'(\\d+)$'", ")", "def", "ExtractPostfixNumber", "(", "entity_id", ")", ":", "\"\"\"Try to extract an integer from the end of entity_id.\n\n If entity_id is None or if there is no integer ending the id, zero is\n returned.\n\n Args:\n entity_id: An id string or None.\n\n Returns:\n An integer ending the entity_id or zero.\n \"\"\"", "if", "entity_id", "is", "None", ":", "return", "0", "match", "=", "postfix_number_re", ".", "search", "(", "entity_id", ")", "if", "match", "is", "not", "None", ":", "return", "int", "(", "match", ".", "group", "(", "1", ")", ")", "else", ":", "return", "0", "id_data_sets", "=", "{", "'agency_id'", ":", "schedule", ".", "GetAgencyList", "(", ")", ",", "'stop_id'", ":", "schedule", ".", "GetStopList", "(", ")", ",", "'route_id'", ":", "schedule", ".", "GetRouteList", "(", ")", ",", "'trip_id'", ":", "schedule", ".", "GetTripList", "(", ")", ",", "'service_id'", ":", "schedule", ".", "GetServicePeriodList", "(", ")", ",", "'fare_id'", ":", "schedule", ".", "GetFareAttributeList", "(", ")", ",", "'shape_id'", ":", "schedule", ".", "GetShapeList", "(", ")", "}", "max_postfix_number", "=", "0", "for", "id_name", ",", "entity_list", "in", "id_data_sets", ".", "items", "(", ")", ":", "for", "entity", "in", "entity_list", ":", "entity_id", "=", "getattr", "(", "entity", ",", "id_name", ")", "postfix_number", "=", "ExtractPostfixNumber", "(", "entity_id", ")", "max_postfix_number", "=", "max", "(", "max_postfix_number", ",", "postfix_number", ")", "return", "max_postfix_number" ]
Load a chunk from file using Bounds info . Pass slow = True for an alternative loading method based on line numbers .
def load_chunk ( filename , bounds , encoding = 'utf8' , slow = False ) : if slow : return _load_chunk_slow ( filename , bounds , encoding ) with open ( filename , 'rb' ) as f : f . seek ( bounds . byte_start ) size = bounds . byte_end - bounds . byte_start return f . read ( size ) . decode ( encoding )
1,347
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/xml_utils.py#L52-L63
[ "def", "delete_category", "(", "self", ",", "category", ")", ":", "# Remove mapping of metrics-to-category", "category_key", "=", "self", ".", "_category_key", "(", "category", ")", "self", ".", "r", ".", "delete", "(", "category_key", ")", "# Remove category from Set", "self", ".", "r", ".", "srem", "(", "self", ".", "_categories_key", ",", "category", ")" ]
Generate postgresql numeric range and label for insertion .
def generate_numeric_range ( items , lower_bound , upper_bound ) : quantile_grid = create_quantiles ( items , lower_bound , upper_bound ) labels , bounds = ( zip ( * quantile_grid ) ) ranges = ( ( label , NumericRange ( * bound ) ) for label , bound in zip ( labels , bounds ) ) return ranges
1,348
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/data_types.py#L30-L44
[ "def", "getAllData", "(", "self", ",", "temp", "=", "True", ",", "accel", "=", "True", ",", "gyro", "=", "True", ")", ":", "allData", "=", "{", "}", "if", "temp", ":", "allData", "[", "\"temp\"", "]", "=", "self", ".", "getTemp", "(", ")", "if", "accel", ":", "allData", "[", "\"accel\"", "]", "=", "self", ".", "getAccelData", "(", "raw", "=", "False", ")", "if", "gyro", ":", "allData", "[", "\"gyro\"", "]", "=", "self", ".", "getGyroData", "(", ")", "return", "allData" ]
Return the mean value around the edge of an array .
def edge_average ( a ) : if len ( np . ravel ( a ) ) < 2 : return float ( a [ 0 ] ) else : top_edge = a [ 0 ] bottom_edge = a [ - 1 ] left_edge = a [ 1 : - 1 , 0 ] right_edge = a [ 1 : - 1 , - 1 ] edge_sum = np . sum ( top_edge ) + np . sum ( bottom_edge ) + np . sum ( left_edge ) + np . sum ( right_edge ) num_values = len ( top_edge ) + len ( bottom_edge ) + len ( left_edge ) + len ( right_edge ) return float ( edge_sum ) / num_values
1,349
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/image.py#L206-L220
[ "def", "_convert_strls", "(", "self", ",", "data", ")", ":", "convert_cols", "=", "[", "col", "for", "i", ",", "col", "in", "enumerate", "(", "data", ")", "if", "self", ".", "typlist", "[", "i", "]", "==", "32768", "or", "col", "in", "self", ".", "_convert_strl", "]", "if", "convert_cols", ":", "ssw", "=", "StataStrLWriter", "(", "data", ",", "convert_cols", ")", "tab", ",", "new_data", "=", "ssw", ".", "generate_table", "(", ")", "data", "=", "new_data", "self", ".", "_strl_blob", "=", "ssw", ".", "generate_blob", "(", "tab", ")", "return", "data" ]
Add the channel information to the channel_data attribute .
def _process_channels ( self , p , * * params_to_override ) : orig_image = self . _image for i in range ( len ( self . _channel_data ) ) : self . _image = self . _original_channel_data [ i ] self . _channel_data [ i ] = self . _reduced_call ( * * params_to_override ) self . _image = orig_image return self . _channel_data
1,350
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/image.py#L322-L332
[ "def", "reexport_tf_summary", "(", ")", ":", "import", "sys", "# pylint: disable=g-import-not-at-top", "# API packages to check for the original V2 summary API, in preference order", "# to avoid going \"under the hood\" to the _api packages unless necessary.", "packages", "=", "[", "'tensorflow'", ",", "'tensorflow.compat.v2'", ",", "'tensorflow._api.v2'", ",", "'tensorflow._api.v2.compat.v2'", ",", "'tensorflow._api.v1.compat.v2'", ",", "]", "# If we aren't sure we're on V2, don't use tf.summary since it could be V1.", "# Note there may be false positives since the __version__ attribute may not be", "# defined at this point in the import process.", "if", "not", "getattr", "(", "tf", ",", "'__version__'", ",", "''", ")", ".", "startswith", "(", "'2.'", ")", ":", "# noqa: F821", "packages", ".", "remove", "(", "'tensorflow'", ")", "def", "dynamic_wildcard_import", "(", "module", ")", ":", "\"\"\"Implements the logic of \"from module import *\" for the given module.\"\"\"", "symbols", "=", "getattr", "(", "module", ",", "'__all__'", ",", "None", ")", "if", "symbols", "is", "None", ":", "symbols", "=", "[", "k", "for", "k", "in", "module", ".", "__dict__", ".", "keys", "(", ")", "if", "not", "k", ".", "startswith", "(", "'_'", ")", "]", "globals", "(", ")", ".", "update", "(", "{", "symbol", ":", "getattr", "(", "module", ",", "symbol", ")", "for", "symbol", "in", "symbols", "}", ")", "notfound", "=", "object", "(", ")", "# sentinel value", "for", "package_name", "in", "packages", ":", "package", "=", "sys", ".", "modules", ".", "get", "(", "package_name", ",", "notfound", ")", "if", "package", "is", "notfound", ":", "# Either it isn't in this installation at all (e.g. the _api.vX packages", "# are only in API version X), it isn't imported yet, or it was imported", "# but not inserted into sys.modules under its user-facing name (for the", "# non-'_api' packages), at which point we continue down the list to look", "# \"under the hood\" for it via its '_api' package name.", "continue", "module", "=", "getattr", "(", "package", ",", "'summary'", ",", "None", ")", "if", "module", "is", "None", ":", "# This happens if the package hasn't been fully imported yet. For example,", "# the 'tensorflow' package won't yet have 'summary' attribute if we are", "# loading this code via the 'tensorflow.compat...' path and 'compat' is", "# imported before 'summary' in the 'tensorflow' __init__.py file.", "continue", "# Success, we hope. Import all the public symbols into this module.", "dynamic_wildcard_import", "(", "module", ")", "return" ]
Subclassed to delete the cached image when matrix dimensions are changed .
def set_matrix_dimensions ( self , * args ) : self . _image = None super ( FileImage , self ) . set_matrix_dimensions ( * args )
1,351
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/image.py#L425-L431
[ "def", "_read_page_header", "(", "file_obj", ")", ":", "tin", "=", "TFileTransport", "(", "file_obj", ")", "pin", "=", "TCompactProtocolFactory", "(", ")", ".", "get_protocol", "(", "tin", ")", "page_header", "=", "parquet_thrift", ".", "PageHeader", "(", ")", "page_header", ".", "read", "(", "pin", ")", "return", "page_header" ]
Load image using PIL .
def _load_pil_image ( self , filename ) : self . _channel_data = [ ] self . _original_channel_data = [ ] im = Image . open ( filename ) self . _image = ImageOps . grayscale ( im ) im . load ( ) file_data = np . asarray ( im , float ) file_data = file_data / file_data . max ( ) # if the image has more than one channel, load them if ( len ( file_data . shape ) == 3 ) : num_channels = file_data . shape [ 2 ] for i in range ( num_channels ) : self . _channel_data . append ( file_data [ : , : , i ] ) self . _original_channel_data . append ( file_data [ : , : , i ] )
1,352
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/image.py#L450-L469
[ "def", "_cells", "(", "self", ")", ":", "col_count", "=", "self", ".", "_column_count", "cells", "=", "[", "]", "for", "tc", "in", "self", ".", "_tbl", ".", "iter_tcs", "(", ")", ":", "for", "grid_span_idx", "in", "range", "(", "tc", ".", "grid_span", ")", ":", "if", "tc", ".", "vMerge", "==", "ST_Merge", ".", "CONTINUE", ":", "cells", ".", "append", "(", "cells", "[", "-", "col_count", "]", ")", "elif", "grid_span_idx", ">", "0", ":", "cells", ".", "append", "(", "cells", "[", "-", "1", "]", ")", "else", ":", "cells", ".", "append", "(", "_Cell", "(", "tc", ",", "self", ")", ")", "return", "cells" ]
Load image using Numpy .
def _load_npy ( self , filename ) : self . _channel_data = [ ] self . _original_channel_data = [ ] file_channel_data = np . load ( filename ) file_channel_data = file_channel_data / file_channel_data . max ( ) for i in range ( file_channel_data . shape [ 2 ] ) : self . _channel_data . append ( file_channel_data [ : , : , i ] ) self . _original_channel_data . append ( file_channel_data [ : , : , i ] ) self . _image = file_channel_data . sum ( 2 ) / file_channel_data . shape [ 2 ]
1,353
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/image.py#L472-L485
[ "def", "canonical_stylename", "(", "font", ")", ":", "from", "fontbakery", ".", "constants", "import", "(", "STATIC_STYLE_NAMES", ",", "VARFONT_SUFFIXES", ")", "from", "fontbakery", ".", "profiles", ".", "shared_conditions", "import", "is_variable_font", "from", "fontTools", ".", "ttLib", "import", "TTFont", "# remove spaces in style names", "valid_style_suffixes", "=", "[", "name", ".", "replace", "(", "' '", ",", "''", ")", "for", "name", "in", "STATIC_STYLE_NAMES", "]", "filename", "=", "os", ".", "path", ".", "basename", "(", "font", ")", "basename", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "0", "]", "s", "=", "suffix", "(", "font", ")", "varfont", "=", "os", ".", "path", ".", "exists", "(", "font", ")", "and", "is_variable_font", "(", "TTFont", "(", "font", ")", ")", "if", "(", "'-'", "in", "basename", "and", "(", "s", "in", "VARFONT_SUFFIXES", "and", "varfont", ")", "or", "(", "s", "in", "valid_style_suffixes", "and", "not", "varfont", ")", ")", ":", "return", "s" ]
Helper method for screening keyword arguments
def ok_kwarg ( val ) : import keyword try : return str . isidentifier ( val ) and not keyword . iskeyword ( val ) except TypeError : # Non-string values are never a valid keyword arg return False
1,354
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/decorate.py#L186-L195
[ "def", "sort_response", "(", "response", ":", "Dict", "[", "str", ",", "Any", "]", ")", "->", "OrderedDict", ":", "root_order", "=", "[", "\"jsonrpc\"", ",", "\"result\"", ",", "\"error\"", ",", "\"id\"", "]", "error_order", "=", "[", "\"code\"", ",", "\"message\"", ",", "\"data\"", "]", "req", "=", "OrderedDict", "(", "sorted", "(", "response", ".", "items", "(", ")", ",", "key", "=", "lambda", "k", ":", "root_order", ".", "index", "(", "k", "[", "0", "]", ")", ")", ")", "if", "\"error\"", "in", "response", ":", "req", "[", "\"error\"", "]", "=", "OrderedDict", "(", "sorted", "(", "response", "[", "\"error\"", "]", ".", "items", "(", ")", ",", "key", "=", "lambda", "k", ":", "error_order", ".", "index", "(", "k", "[", "0", "]", ")", ")", ")", "return", "req" ]
Run bulk record indexing .
def run ( delayed , concurrency , version_type = None , queue = None , raise_on_error = True ) : if delayed : celery_kwargs = { 'kwargs' : { 'version_type' : version_type , 'es_bulk_kwargs' : { 'raise_on_error' : raise_on_error } , } } click . secho ( 'Starting {0} tasks for indexing records...' . format ( concurrency ) , fg = 'green' ) if queue is not None : celery_kwargs . update ( { 'queue' : queue } ) for c in range ( 0 , concurrency ) : process_bulk_queue . apply_async ( * * celery_kwargs ) else : click . secho ( 'Indexing records...' , fg = 'green' ) RecordIndexer ( version_type = version_type ) . process_bulk_queue ( es_bulk_kwargs = { 'raise_on_error' : raise_on_error } )
1,355
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L43-L63
[ "def", "get_closest_sibling_state", "(", "state_m", ",", "from_logical_port", "=", "None", ")", ":", "if", "not", "state_m", ".", "parent", ":", "logger", ".", "warning", "(", "\"A state can not have a closest sibling state if it has not parent as {0}\"", ".", "format", "(", "state_m", ")", ")", "return", "margin", "=", "cal_margin", "(", "state_m", ".", "parent", ".", "get_meta_data_editor", "(", ")", "[", "'size'", "]", ")", "pos", "=", "state_m", ".", "get_meta_data_editor", "(", ")", "[", "'rel_pos'", "]", "size", "=", "state_m", ".", "get_meta_data_editor", "(", ")", "[", "'size'", "]", "# otherwise measure from reference state itself", "if", "from_logical_port", "in", "[", "\"outcome\"", ",", "\"income\"", "]", ":", "size", "=", "(", "margin", ",", "margin", ")", "if", "from_logical_port", "==", "\"outcome\"", ":", "outcomes_m", "=", "[", "outcome_m", "for", "outcome_m", "in", "state_m", ".", "outcomes", "if", "outcome_m", ".", "outcome", ".", "outcome_id", ">=", "0", "]", "free_outcomes_m", "=", "[", "oc_m", "for", "oc_m", "in", "outcomes_m", "if", "not", "state_m", ".", "state", ".", "parent", ".", "get_transition_for_outcome", "(", "state_m", ".", "state", ",", "oc_m", ".", "outcome", ")", "]", "if", "free_outcomes_m", ":", "outcome_m", "=", "free_outcomes_m", "[", "0", "]", "else", ":", "outcome_m", "=", "outcomes_m", "[", "0", "]", "pos", "=", "add_pos", "(", "pos", ",", "outcome_m", ".", "get_meta_data_editor", "(", ")", "[", "'rel_pos'", "]", ")", "elif", "from_logical_port", "==", "\"income\"", ":", "pos", "=", "add_pos", "(", "pos", ",", "state_m", ".", "income", ".", "get_meta_data_editor", "(", ")", "[", "'rel_pos'", "]", ")", "min_distance", "=", "None", "for", "sibling_state_m", "in", "state_m", ".", "parent", ".", "states", ".", "values", "(", ")", ":", "if", "sibling_state_m", "is", "state_m", ":", "continue", "sibling_pos", "=", "sibling_state_m", ".", "get_meta_data_editor", "(", ")", "[", "'rel_pos'", "]", "sibling_size", "=", "sibling_state_m", ".", "get_meta_data_editor", "(", ")", "[", "'size'", "]", "distance", "=", "geometry", ".", "cal_dist_between_2_coord_frame_aligned_boxes", "(", "pos", ",", "size", ",", "sibling_pos", ",", "sibling_size", ")", "if", "not", "min_distance", "or", "min_distance", "[", "0", "]", ">", "distance", ":", "min_distance", "=", "(", "distance", ",", "sibling_state_m", ")", "return", "min_distance" ]
Reindex all records .
def reindex ( pid_type ) : click . secho ( 'Sending records to indexing queue ...' , fg = 'green' ) query = ( x [ 0 ] for x in PersistentIdentifier . query . filter_by ( object_type = 'rec' , status = PIDStatus . REGISTERED ) . filter ( PersistentIdentifier . pid_type . in_ ( pid_type ) ) . values ( PersistentIdentifier . object_uuid ) ) RecordIndexer ( ) . bulk_index ( query ) click . secho ( 'Execute "run" command to process the queue!' , fg = 'yellow' )
1,356
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L72-L88
[ "def", "update_thumbnail_via_upload", "(", "api_key", ",", "api_secret", ",", "video_key", ",", "local_video_image_path", "=", "''", ",", "api_format", "=", "'json'", ",", "*", "*", "kwargs", ")", ":", "jwplatform_client", "=", "jwplatform", ".", "Client", "(", "api_key", ",", "api_secret", ")", "logging", ".", "info", "(", "\"Updating video thumbnail.\"", ")", "try", ":", "response", "=", "jwplatform_client", ".", "videos", ".", "thumbnails", ".", "update", "(", "video_key", "=", "video_key", ",", "*", "*", "kwargs", ")", "except", "jwplatform", ".", "errors", ".", "JWPlatformError", "as", "e", ":", "logging", ".", "error", "(", "\"Encountered an error updating thumbnail.\\n{}\"", ".", "format", "(", "e", ")", ")", "sys", ".", "exit", "(", "e", ".", "message", ")", "logging", ".", "info", "(", "response", ")", "# Construct base url for upload", "upload_url", "=", "'{}://{}{}'", ".", "format", "(", "response", "[", "'link'", "]", "[", "'protocol'", "]", ",", "response", "[", "'link'", "]", "[", "'address'", "]", ",", "response", "[", "'link'", "]", "[", "'path'", "]", ")", "# Query parameters for the upload", "query_parameters", "=", "response", "[", "'link'", "]", "[", "'query'", "]", "query_parameters", "[", "'api_format'", "]", "=", "api_format", "with", "open", "(", "local_video_image_path", ",", "'rb'", ")", "as", "f", ":", "files", "=", "{", "'file'", ":", "f", "}", "r", "=", "requests", ".", "post", "(", "upload_url", ",", "params", "=", "query_parameters", ",", "files", "=", "files", ")", "logging", ".", "info", "(", "'uploading file {} to url {}'", ".", "format", "(", "local_video_image_path", ",", "r", ".", "url", ")", ")", "logging", ".", "info", "(", "'upload response: {}'", ".", "format", "(", "r", ".", "text", ")", ")" ]
Process queue actions .
def process_actions ( actions ) : queue = current_app . config [ 'INDEXER_MQ_QUEUE' ] with establish_connection ( ) as c : q = queue ( c ) for action in actions : q = action ( q )
1,357
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L98-L104
[ "def", "ReleaseFileObject", "(", "self", ",", "file_object", ")", ":", "identifier", ",", "cache_value", "=", "self", ".", "_file_object_cache", ".", "GetCacheValueByObject", "(", "file_object", ")", "if", "not", "identifier", ":", "raise", "RuntimeError", "(", "'Object not cached.'", ")", "if", "not", "cache_value", ":", "raise", "RuntimeError", "(", "'Invalid cache value.'", ")", "self", ".", "_file_object_cache", ".", "ReleaseObject", "(", "identifier", ")", "result", "=", "cache_value", ".", "IsDereferenced", "(", ")", "if", "result", ":", "self", ".", "_file_object_cache", ".", "RemoveObject", "(", "identifier", ")", "return", "result" ]
Initialize indexing queue .
def init_queue ( ) : def action ( queue ) : queue . declare ( ) click . secho ( 'Indexing queue has been initialized.' , fg = 'green' ) return queue return action
1,358
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L108-L114
[ "def", "_build_template_map", "(", "cookbook", ",", "cookbook_name", ",", "stencil", ")", ":", "template_map", "=", "{", "'cookbook'", ":", "{", "\"name\"", ":", "cookbook_name", "}", ",", "'options'", ":", "stencil", "[", "'options'", "]", "}", "# Cookbooks may not yet have metadata, so we pass an empty dict if so", "try", ":", "template_map", "[", "'cookbook'", "]", "=", "cookbook", ".", "metadata", ".", "to_dict", "(", ")", ".", "copy", "(", ")", "except", "ValueError", ":", "# ValueError may be returned if this cookbook does not yet have any", "# metadata.rb written by a stencil. This is okay, as everyone should", "# be using the base stencil first, and then we'll try to call", "# cookbook.metadata again in this method later down.", "pass", "template_map", "[", "'cookbook'", "]", "[", "'year'", "]", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "year", "return", "template_map" ]
Purge indexing queue .
def purge_queue ( ) : def action ( queue ) : queue . purge ( ) click . secho ( 'Indexing queue has been purged.' , fg = 'green' ) return queue return action
1,359
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L118-L124
[ "def", "_build_template_map", "(", "cookbook", ",", "cookbook_name", ",", "stencil", ")", ":", "template_map", "=", "{", "'cookbook'", ":", "{", "\"name\"", ":", "cookbook_name", "}", ",", "'options'", ":", "stencil", "[", "'options'", "]", "}", "# Cookbooks may not yet have metadata, so we pass an empty dict if so", "try", ":", "template_map", "[", "'cookbook'", "]", "=", "cookbook", ".", "metadata", ".", "to_dict", "(", ")", ".", "copy", "(", ")", "except", "ValueError", ":", "# ValueError may be returned if this cookbook does not yet have any", "# metadata.rb written by a stencil. This is okay, as everyone should", "# be using the base stencil first, and then we'll try to call", "# cookbook.metadata again in this method later down.", "pass", "template_map", "[", "'cookbook'", "]", "[", "'year'", "]", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "year", "return", "template_map" ]
Delete indexing queue .
def delete_queue ( ) : def action ( queue ) : queue . delete ( ) click . secho ( 'Indexing queue has been deleted.' , fg = 'green' ) return queue return action
1,360
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L128-L134
[ "def", "_build_template_map", "(", "cookbook", ",", "cookbook_name", ",", "stencil", ")", ":", "template_map", "=", "{", "'cookbook'", ":", "{", "\"name\"", ":", "cookbook_name", "}", ",", "'options'", ":", "stencil", "[", "'options'", "]", "}", "# Cookbooks may not yet have metadata, so we pass an empty dict if so", "try", ":", "template_map", "[", "'cookbook'", "]", "=", "cookbook", ".", "metadata", ".", "to_dict", "(", ")", ".", "copy", "(", ")", "except", "ValueError", ":", "# ValueError may be returned if this cookbook does not yet have any", "# metadata.rb written by a stencil. This is okay, as everyone should", "# be using the base stencil first, and then we'll try to call", "# cookbook.metadata again in this method later down.", "pass", "template_map", "[", "'cookbook'", "]", "[", "'year'", "]", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "year", "return", "template_map" ]
Make sure that reference nucleotides we expect to see on the reference transcript from a variant are the same ones we encounter .
def variant_matches_reference_sequence ( variant , ref_seq_on_transcript , strand ) : if strand == "-" : ref_seq_on_transcript = reverse_complement_dna ( ref_seq_on_transcript ) return ref_seq_on_transcript == variant . ref
1,361
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/reference_sequence_key.py#L131-L138
[ "def", "filter_parts", "(", "cls", ",", "part_info", ")", ":", "# type: (Type[T], PartInfo) -> Dict[str, List[T]]", "filtered", "=", "OrderedDict", "(", ")", "for", "part_name", ",", "info_list", "in", "part_info", ".", "items", "(", ")", ":", "if", "info_list", "is", "None", "or", "isinstance", "(", "info_list", ",", "Exception", ")", ":", "continue", "info_list", "=", "[", "i", "for", "i", "in", "info_list", "if", "isinstance", "(", "i", ",", "cls", ")", "]", "if", "info_list", ":", "filtered", "[", "part_name", "]", "=", "info_list", "return", "filtered" ]
Extracts the reference sequence around a variant locus on a particular transcript .
def from_variant_and_transcript ( cls , variant , transcript , context_size ) : full_transcript_sequence = transcript . sequence if full_transcript_sequence is None : logger . warn ( "Expected transcript %s (overlapping %s) to have sequence" , transcript . name , variant ) return None # get the interbase range of offsets which capture all reference # bases modified by the variant variant_start_offset , variant_end_offset = interbase_range_affected_by_variant_on_transcript ( variant = variant , transcript = transcript ) reference_cdna_at_variant = full_transcript_sequence [ variant_start_offset : variant_end_offset ] if not variant_matches_reference_sequence ( variant = variant , strand = transcript . strand , ref_seq_on_transcript = reference_cdna_at_variant ) : logger . warn ( "Variant %s doesn't match reference sequence on transcript %s: " "may span splice junction" , variant , transcript ) return None if len ( full_transcript_sequence ) < 6 : # need at least 6 nucleotides for a start and stop codon logger . warn ( "Sequence of %s (overlapping %s) too short: %d" , transcript , variant , len ( full_transcript_sequence ) ) return None logger . info ( "Interbase offset range on %s for variant %s = %d:%d" , transcript . name , variant , variant_start_offset , variant_end_offset ) reference_cdna_before_variant = full_transcript_sequence [ max ( 0 , variant_start_offset - context_size ) : variant_start_offset ] reference_cdna_after_variant = full_transcript_sequence [ variant_end_offset : variant_end_offset + context_size ] return ReferenceSequenceKey ( strand = transcript . strand , sequence_before_variant_locus = reference_cdna_before_variant , sequence_at_variant_locus = reference_cdna_at_variant , sequence_after_variant_locus = reference_cdna_after_variant )
1,362
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/reference_sequence_key.py#L51-L128
[ "def", "update_classroom", "(", "self", ",", "course", ",", "classroomid", ",", "new_data", ")", ":", "student_list", ",", "tutor_list", ",", "other_students", ",", "_", "=", "self", ".", "get_user_lists", "(", "course", ",", "classroomid", ")", "# Check tutors", "new_data", "[", "\"tutors\"", "]", "=", "[", "tutor", "for", "tutor", "in", "map", "(", "str", ".", "strip", ",", "new_data", "[", "\"tutors\"", "]", ")", "if", "tutor", "in", "tutor_list", "]", "students", ",", "groups", ",", "errored_students", "=", "[", "]", ",", "[", "]", ",", "[", "]", "new_data", "[", "\"students\"", "]", "=", "map", "(", "str", ".", "strip", ",", "new_data", "[", "\"students\"", "]", ")", "# Check the students", "for", "student", "in", "new_data", "[", "\"students\"", "]", ":", "if", "student", "in", "student_list", ":", "students", ".", "append", "(", "student", ")", "else", ":", "if", "student", "in", "other_students", ":", "# Remove user from the other classroom", "self", ".", "database", ".", "classrooms", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"groups.students\"", ":", "student", "}", ",", "{", "\"$pull\"", ":", "{", "\"groups.$.students\"", ":", "student", ",", "\"students\"", ":", "student", "}", "}", ")", "self", ".", "database", ".", "classrooms", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"students\"", ":", "student", "}", ",", "{", "\"$pull\"", ":", "{", "\"students\"", ":", "student", "}", "}", ")", "students", ".", "append", "(", "student", ")", "else", ":", "# Check if user can be registered", "user_info", "=", "self", ".", "user_manager", ".", "get_user_info", "(", "student", ")", "if", "user_info", "is", "None", "or", "student", "in", "tutor_list", ":", "errored_students", ".", "append", "(", "student", ")", "else", ":", "students", ".", "append", "(", "student", ")", "removed_students", "=", "[", "student", "for", "student", "in", "student_list", "if", "student", "not", "in", "new_data", "[", "\"students\"", "]", "]", "self", ".", "database", ".", "classrooms", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"default\"", ":", "True", "}", ",", "{", "\"$push\"", ":", "{", "\"students\"", ":", "{", "\"$each\"", ":", "removed_students", "}", "}", "}", ")", "new_data", "[", "\"students\"", "]", "=", "students", "# Check the groups", "for", "group", "in", "new_data", "[", "\"groups\"", "]", ":", "group", "[", "\"students\"", "]", "=", "[", "student", "for", "student", "in", "map", "(", "str", ".", "strip", ",", "group", "[", "\"students\"", "]", ")", "if", "student", "in", "new_data", "[", "\"students\"", "]", "]", "if", "len", "(", "group", "[", "\"students\"", "]", ")", "<=", "group", "[", "\"size\"", "]", ":", "groups", ".", "append", "(", "group", ")", "new_data", "[", "\"groups\"", "]", "=", "groups", "classroom", "=", "self", ".", "database", ".", "classrooms", ".", "find_one_and_update", "(", "{", "\"_id\"", ":", "ObjectId", "(", "classroomid", ")", "}", ",", "{", "\"$set\"", ":", "{", "\"description\"", ":", "new_data", "[", "\"description\"", "]", ",", "\"students\"", ":", "students", ",", "\"tutors\"", ":", "new_data", "[", "\"tutors\"", "]", ",", "\"groups\"", ":", "groups", "}", "}", ",", "return_document", "=", "ReturnDocument", ".", "AFTER", ")", "return", "classroom", ",", "errored_students" ]
Circularly alias the numeric value x into the range [ lower upper ) .
def wrap ( lower , upper , x ) : #I have no idea how I came up with this algorithm; it should be simplified. # # Note that Python's % operator works on floats and arrays; # usually one can simply use that instead. E.g. to wrap array or # scalar x into 0,2*pi, just use "x % (2*pi)". range_ = upper - lower return lower + np . fmod ( x - lower + 2 * range_ * ( 1 - np . floor ( x / ( 2 * range_ ) ) ) , range_ )
1,363
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/__init__.py#L503-L515
[ "def", "create_gtk_grid", "(", "self", ",", "row_spacing", "=", "6", ",", "col_spacing", "=", "6", ",", "row_homogenous", "=", "False", ",", "col_homogenous", "=", "True", ")", ":", "grid_lang", "=", "Gtk", ".", "Grid", "(", ")", "grid_lang", ".", "set_column_spacing", "(", "row_spacing", ")", "grid_lang", ".", "set_row_spacing", "(", "col_spacing", ")", "grid_lang", ".", "set_border_width", "(", "12", ")", "grid_lang", ".", "set_row_homogeneous", "(", "row_homogenous", ")", "grid_lang", ".", "set_column_homogeneous", "(", "col_homogenous", ")", "return", "grid_lang" ]
Calculate line width necessary to cover at least one pixel on all axes .
def _pixelsize ( self , p ) : xpixelsize = 1. / float ( p . xdensity ) ypixelsize = 1. / float ( p . ydensity ) return max ( [ xpixelsize , ypixelsize ] )
1,364
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/__init__.py#L199-L203
[ "def", "schedule", "(", "self", ",", "materials", "=", "None", ",", "variables", "=", "None", ",", "secure_variables", "=", "None", ")", ":", "return", "self", ".", "_pipeline", ".", "schedule", "(", "name", "=", "self", ".", "data", ".", "name", ",", "materials", "=", "materials", ",", "variables", "=", "variables", ",", "secure_variables", "=", "secure_variables", ")" ]
Count the number of pixels rendered on this line .
def _count_pixels_on_line ( self , y , p ) : h = line ( y , self . _effective_thickness ( p ) , 0.0 ) return h . sum ( )
1,365
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/__init__.py#L209-L212
[ "def", "workspace_backup_restore", "(", "ctx", ",", "choose_first", ",", "bak", ")", ":", "backup_manager", "=", "WorkspaceBackupManager", "(", "Workspace", "(", "ctx", ".", "resolver", ",", "directory", "=", "ctx", ".", "directory", ",", "mets_basename", "=", "ctx", ".", "mets_basename", ",", "automatic_backup", "=", "ctx", ".", "automatic_backup", ")", ")", "backup_manager", ".", "restore", "(", "bak", ",", "choose_first", ")" ]
Get the number of channels in the input generators .
def num_channels ( self ) : if ( self . inspect_value ( 'index' ) is None ) : if ( len ( self . generators ) > 0 ) : return self . generators [ 0 ] . num_channels ( ) return 0 return self . get_current_generator ( ) . num_channels ( )
1,366
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/__init__.py#L567-L576
[ "def", "_HandleHomepage", "(", "self", ",", "request", ")", ":", "_", "=", "request", "env", "=", "jinja2", ".", "Environment", "(", "loader", "=", "jinja2", ".", "FileSystemLoader", "(", "config", ".", "CONFIG", "[", "\"AdminUI.template_root\"", "]", ")", ",", "autoescape", "=", "True", ")", "create_time", "=", "psutil", ".", "Process", "(", "os", ".", "getpid", "(", ")", ")", ".", "create_time", "(", ")", "context", "=", "{", "\"heading\"", ":", "config", ".", "CONFIG", "[", "\"AdminUI.heading\"", "]", ",", "\"report_url\"", ":", "config", ".", "CONFIG", "[", "\"AdminUI.report_url\"", "]", ",", "\"help_url\"", ":", "config", ".", "CONFIG", "[", "\"AdminUI.help_url\"", "]", ",", "\"timestamp\"", ":", "utils", ".", "SmartStr", "(", "create_time", ")", ",", "\"use_precompiled_js\"", ":", "config", ".", "CONFIG", "[", "\"AdminUI.use_precompiled_js\"", "]", ",", "# Used in conjunction with FirebaseWebAuthManager.", "\"firebase_api_key\"", ":", "config", ".", "CONFIG", "[", "\"AdminUI.firebase_api_key\"", "]", ",", "\"firebase_auth_domain\"", ":", "config", ".", "CONFIG", "[", "\"AdminUI.firebase_auth_domain\"", "]", ",", "\"firebase_auth_provider\"", ":", "config", ".", "CONFIG", "[", "\"AdminUI.firebase_auth_provider\"", "]", ",", "\"grr_version\"", ":", "config", ".", "CONFIG", "[", "\"Source.version_string\"", "]", "}", "template", "=", "env", ".", "get_template", "(", "\"base.html\"", ")", "response", "=", "werkzeug_wrappers", ".", "Response", "(", "template", ".", "render", "(", "context", ")", ",", "mimetype", "=", "\"text/html\"", ")", "# For a redirect-based Firebase authentication scheme we won't have any", "# user information at this point - therefore checking if the user is", "# present.", "try", ":", "StoreCSRFCookie", "(", "request", ".", "user", ",", "response", ")", "except", "RequestHasNoUser", ":", "pass", "return", "response" ]
Frequency spacing to use i . e . how to map the available frequency range to the discrete sheet rows .
def _set_frequency_spacing ( self , min_freq , max_freq ) : self . frequency_spacing = np . linspace ( min_freq , max_freq , num = self . _sheet_dimensions [ 0 ] + 1 , endpoint = True )
1,367
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/__init__.py#L1401-L1415
[ "def", "getPeers", "(", "self", ")", ":", "parentNode", "=", "self", ".", "parentNode", "# If no parent, no peers", "if", "not", "parentNode", ":", "return", "None", "peers", "=", "parentNode", ".", "children", "# Otherwise, get all children of parent excluding this node", "return", "TagCollection", "(", "[", "peer", "for", "peer", "in", "peers", "if", "peer", "is", "not", "self", "]", ")" ]
Python to postgres encoding map .
def get_postgres_encoding ( python_encoding : str ) -> str : encoding = normalize_encoding ( python_encoding . lower ( ) ) encoding_ = aliases . aliases [ encoding . replace ( '_' , '' , 1 ) ] . upper ( ) pg_encoding = PG_ENCODING_MAP [ encoding_ . replace ( '_' , '' ) ] return pg_encoding
1,368
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/pg_encodings.py#L15-L22
[ "def", "verify", "(", "self", ",", "proof", ",", "chal", ",", "state", ")", ":", "state", ".", "decrypt", "(", "self", ".", "key", ")", "index", "=", "KeyedPRF", "(", "chal", ".", "key", ",", "state", ".", "chunks", ")", "v", "=", "KeyedPRF", "(", "chal", ".", "key", ",", "chal", ".", "v_max", ")", "f", "=", "KeyedPRF", "(", "state", ".", "f_key", ",", "self", ".", "prime", ")", "alpha", "=", "KeyedPRF", "(", "state", ".", "alpha_key", ",", "self", ".", "prime", ")", "rhs", "=", "0", "for", "i", "in", "range", "(", "0", ",", "chal", ".", "chunks", ")", ":", "rhs", "+=", "v", ".", "eval", "(", "i", ")", "*", "f", ".", "eval", "(", "index", ".", "eval", "(", "i", ")", ")", "for", "j", "in", "range", "(", "0", ",", "self", ".", "sectors", ")", ":", "rhs", "+=", "alpha", ".", "eval", "(", "j", ")", "*", "proof", ".", "mu", "[", "j", "]", "rhs", "%=", "self", ".", "prime", "return", "proof", ".", "sigma", "==", "rhs" ]
Report the energies from the last SCF present in the output .
def en_last ( self ) : # Initialize the return dict last_ens = dict ( ) # Iterate and store for ( k , l ) in self . en . items ( ) : last_ens . update ( { k : l [ - 1 ] if l != [ ] else None } ) ##next (k,l) # Should be ready to return? return last_ens
1,369
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/output.py#L792-L818
[ "def", "shares", "(", "self", ",", "token", ":", "dict", "=", "None", ",", "prot", ":", "str", "=", "\"https\"", ")", "->", "dict", ":", "# passing auth parameter", "shares_url", "=", "\"{}://v1.{}.isogeo.com/shares/\"", ".", "format", "(", "prot", ",", "self", ".", "api_url", ")", "shares_req", "=", "self", ".", "get", "(", "shares_url", ",", "headers", "=", "self", ".", "header", ",", "proxies", "=", "self", ".", "proxies", ",", "verify", "=", "self", ".", "ssl", ")", "# checking response", "checker", ".", "check_api_response", "(", "shares_req", ")", "# end of method", "return", "shares_req", ".", "json", "(", ")" ]
Create a database connection .
def connect ( host = None , database = None , user = None , password = None , * * kwargs ) : host = host or os . environ [ 'PGHOST' ] database = database or os . environ [ 'PGDATABASE' ] user = user or os . environ [ 'PGUSER' ] password = password or os . environ [ 'PGPASSWORD' ] return psycopg2 . connect ( host = host , database = database , user = user , password = password , * * kwargs )
1,370
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/connections.py#L8-L20
[ "def", "ae_partial_waves", "(", "self", ")", ":", "ae_partial_waves", "=", "OrderedDict", "(", ")", "for", "mesh", ",", "values", ",", "attrib", "in", "self", ".", "_parse_all_radfuncs", "(", "\"ae_partial_wave\"", ")", ":", "state", "=", "attrib", "[", "\"state\"", "]", "#val_state = self.valence_states[state]", "ae_partial_waves", "[", "state", "]", "=", "RadialFunction", "(", "mesh", ",", "values", ")", "return", "ae_partial_waves" ]
Set up module .
def _setup ( ) : _SOCKET . setsockopt ( socket . SOL_SOCKET , socket . SO_BROADCAST , 1 ) _SOCKET . bind ( ( '' , PORT ) ) udp = threading . Thread ( target = _listen , daemon = True ) udp . start ( )
1,371
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L50-L58
[ "def", "array_equiv", "(", "arr1", ",", "arr2", ")", ":", "arr1", ",", "arr2", "=", "as_like_arrays", "(", "arr1", ",", "arr2", ")", "if", "arr1", ".", "shape", "!=", "arr2", ".", "shape", ":", "return", "False", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "filterwarnings", "(", "'ignore'", ",", "\"In the future, 'NAT == x'\"", ")", "flag_array", "=", "(", "arr1", "==", "arr2", ")", "flag_array", "|=", "(", "isnull", "(", "arr1", ")", "&", "isnull", "(", "arr2", ")", ")", "return", "bool", "(", "flag_array", ".", "all", "(", ")", ")" ]
Discover devices on the local network .
def discover ( timeout = DISCOVERY_TIMEOUT ) : hosts = { } payload = MAGIC + DISCOVERY for _ in range ( RETRIES ) : _SOCKET . sendto ( bytearray ( payload ) , ( '255.255.255.255' , PORT ) ) start = time . time ( ) while time . time ( ) < start + timeout : for host , data in _BUFFER . copy ( ) . items ( ) : if not _is_discovery_response ( data ) : continue if host not in hosts : _LOGGER . debug ( "Discovered device at %s" , host ) entry = { } entry [ 'mac' ] = data [ 7 : 13 ] entry [ 'imac' ] = data [ 19 : 25 ] entry [ 'next' ] = 0 entry [ 'st' ] = int ( data [ - 1 ] ) entry [ 'time' ] = _device_time ( data [ 37 : 41 ] ) entry [ 'serverTime' ] = int ( time . time ( ) ) hosts [ host ] = entry return hosts
1,372
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L66-L91
[ "def", "_unscramble_regressor_columns", "(", "parent_data", ",", "data", ")", ":", "matches", "=", "[", "'_power[0-9]+'", ",", "'_derivative[0-9]+'", "]", "var", "=", "OrderedDict", "(", "(", "c", ",", "deque", "(", ")", ")", "for", "c", "in", "parent_data", ".", "columns", ")", "for", "c", "in", "data", ".", "columns", ":", "col", "=", "c", "for", "m", "in", "matches", ":", "col", "=", "re", ".", "sub", "(", "m", ",", "''", ",", "col", ")", "if", "col", "==", "c", ":", "var", "[", "col", "]", ".", "appendleft", "(", "c", ")", "else", ":", "var", "[", "col", "]", ".", "append", "(", "c", ")", "unscrambled", "=", "reduce", "(", "(", "lambda", "x", ",", "y", ":", "x", "+", "y", ")", ",", "var", ".", "values", "(", ")", ")", "return", "data", "[", "[", "*", "unscrambled", "]", "]" ]
Discovers MAC address of device .
def _discover_mac ( self ) : mac = None mac_reversed = None cmd = MAGIC + DISCOVERY resp = self . _udp_transact ( cmd , self . _discovery_resp , broadcast = True , timeout = DISCOVERY_TIMEOUT ) if resp : ( mac , mac_reversed ) = resp if mac is None : raise S20Exception ( "Couldn't discover {}" . format ( self . host ) ) return ( mac , mac_reversed )
1,373
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L167-L188
[ "def", "set_cdn_log_retention", "(", "self", ",", "container", ",", "enabled", ")", ":", "headers", "=", "{", "\"X-Log-Retention\"", ":", "\"%s\"", "%", "enabled", "}", "self", ".", "api", ".", "cdn_request", "(", "\"/%s\"", "%", "utils", ".", "get_name", "(", "container", ")", ",", "method", "=", "\"PUT\"", ",", "headers", "=", "headers", ")" ]
Subscribe to the device .
def _subscribe ( self ) : cmd = MAGIC + SUBSCRIBE + self . _mac + PADDING_1 + self . _mac_reversed + PADDING_1 status = self . _udp_transact ( cmd , self . _subscribe_resp ) if status is not None : self . last_subscribed = time . time ( ) return status == ON else : raise S20Exception ( "No status could be found for {}" . format ( self . host ) )
1,374
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L190-L206
[ "def", "clean_subject_location", "(", "self", ")", ":", "cleaned_data", "=", "super", "(", "ImageAdminForm", ",", "self", ")", ".", "clean", "(", ")", "subject_location", "=", "cleaned_data", "[", "'subject_location'", "]", "if", "not", "subject_location", ":", "# if supplied subject location is empty, do not check it", "return", "subject_location", "# use thumbnail's helper function to check the format", "coordinates", "=", "normalize_subject_location", "(", "subject_location", ")", "if", "not", "coordinates", ":", "err_msg", "=", "ugettext_lazy", "(", "'Invalid subject location format. '", ")", "err_code", "=", "'invalid_subject_format'", "elif", "(", "coordinates", "[", "0", "]", ">", "self", ".", "instance", ".", "width", "or", "coordinates", "[", "1", "]", ">", "self", ".", "instance", ".", "height", ")", ":", "err_msg", "=", "ugettext_lazy", "(", "'Subject location is outside of the image. '", ")", "err_code", "=", "'subject_out_of_bounds'", "else", ":", "return", "subject_location", "self", ".", "_set_previous_subject_location", "(", "cleaned_data", ")", "raise", "forms", ".", "ValidationError", "(", "string_concat", "(", "err_msg", ",", "ugettext_lazy", "(", "'Your input: \"{subject_location}\". '", ".", "format", "(", "subject_location", "=", "subject_location", ")", ")", ",", "'Previous value is restored.'", ")", ",", "code", "=", "err_code", ")" ]
Control device state .
def _control ( self , state ) : # Renew subscription if necessary if not self . _subscription_is_recent ( ) : self . _subscribe ( ) cmd = MAGIC + CONTROL + self . _mac + PADDING_1 + PADDING_2 + state _LOGGER . debug ( "Sending new state to %s: %s" , self . host , ord ( state ) ) ack_state = self . _udp_transact ( cmd , self . _control_resp , state ) if ack_state is None : raise S20Exception ( "Device didn't acknowledge control request: {}" . format ( self . host ) )
1,375
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L215-L233
[ "def", "removeAllChildrenAtIndex", "(", "self", ",", "parentIndex", ")", ":", "if", "not", "parentIndex", ".", "isValid", "(", ")", ":", "logger", ".", "debug", "(", "\"No valid item selected for deletion (ignored).\"", ")", "return", "parentItem", "=", "self", ".", "getItem", "(", "parentIndex", ",", "None", ")", "logger", ".", "debug", "(", "\"Removing children of {!r}\"", ".", "format", "(", "parentItem", ")", ")", "assert", "parentItem", ",", "\"parentItem not found\"", "#firstChildRow = self.index(0, 0, parentIndex).row()", "#lastChildRow = self.index(parentItem.nChildren()-1, 0, parentIndex).row()", "#logger.debug(\"Removing rows: {} to {}\".format(firstChildRow, lastChildRow))", "#self.beginRemoveRows(parentIndex, firstChildRow, lastChildRow)", "self", ".", "beginRemoveRows", "(", "parentIndex", ",", "0", ",", "parentItem", ".", "nChildren", "(", ")", "-", "1", ")", "try", ":", "parentItem", ".", "removeAllChildren", "(", ")", "finally", ":", "self", ".", "endRemoveRows", "(", ")", "logger", ".", "debug", "(", "\"removeAllChildrenAtIndex completed\"", ")" ]
Handle a discovery response .
def _discovery_resp ( self , data ) : if _is_discovery_response ( data ) : _LOGGER . debug ( "Discovered MAC of %s: %s" , self . host , binascii . hexlify ( data [ 7 : 13 ] ) . decode ( ) ) return ( data [ 7 : 13 ] , data [ 19 : 25 ] )
1,376
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L235-L245
[ "def", "_project_perturbation", "(", "perturbation", ",", "epsilon", ",", "input_image", ",", "clip_min", "=", "None", ",", "clip_max", "=", "None", ")", ":", "if", "clip_min", "is", "None", "or", "clip_max", "is", "None", ":", "raise", "NotImplementedError", "(", "\"_project_perturbation currently has clipping \"", "\"hard-coded in.\"", ")", "# Ensure inputs are in the correct range", "with", "tf", ".", "control_dependencies", "(", "[", "utils_tf", ".", "assert_less_equal", "(", "input_image", ",", "tf", ".", "cast", "(", "clip_max", ",", "input_image", ".", "dtype", ")", ")", ",", "utils_tf", ".", "assert_greater_equal", "(", "input_image", ",", "tf", ".", "cast", "(", "clip_min", ",", "input_image", ".", "dtype", ")", ")", "]", ")", ":", "clipped_perturbation", "=", "utils_tf", ".", "clip_by_value", "(", "perturbation", ",", "-", "epsilon", ",", "epsilon", ")", "new_image", "=", "utils_tf", ".", "clip_by_value", "(", "input_image", "+", "clipped_perturbation", ",", "clip_min", ",", "clip_max", ")", "return", "new_image", "-", "input_image" ]
Handle a subscribe response .
def _subscribe_resp ( self , data ) : if _is_subscribe_response ( data ) : status = bytes ( [ data [ 23 ] ] ) _LOGGER . debug ( "Successfully subscribed to %s, state: %s" , self . host , ord ( status ) ) return status
1,377
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L247-L257
[ "def", "get", "(", "self", ",", "blob_hash", ")", ":", "if", "blob_hash", "is", "None", ":", "return", "None", "store", "=", "blob_hash", "[", "STORE_HASH_LENGTH", ":", "]", "store", "=", "'external'", "+", "(", "'-'", "if", "store", "else", "''", ")", "+", "store", "cache_folder", "=", "config", ".", "get", "(", "'cache'", ",", "None", ")", "blob", "=", "None", "if", "cache_folder", ":", "try", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "cache_folder", ",", "blob_hash", ")", ",", "'rb'", ")", "as", "f", ":", "blob", "=", "f", ".", "read", "(", ")", "except", "FileNotFoundError", ":", "pass", "if", "blob", "is", "None", ":", "spec", "=", "self", ".", "_get_store_spec", "(", "store", ")", "if", "spec", "[", "'protocol'", "]", "==", "'file'", ":", "full_path", "=", "os", ".", "path", ".", "join", "(", "spec", "[", "'location'", "]", ",", "self", ".", "database", ",", "blob_hash", ")", "try", ":", "with", "open", "(", "full_path", ",", "'rb'", ")", "as", "f", ":", "blob", "=", "f", ".", "read", "(", ")", "except", "FileNotFoundError", ":", "raise", "DataJointError", "(", "'Lost access to external blob %s.'", "%", "full_path", ")", "from", "None", "elif", "spec", "[", "'protocol'", "]", "==", "'s3'", ":", "try", ":", "blob", "=", "S3Folder", "(", "database", "=", "self", ".", "database", ",", "*", "*", "spec", ")", ".", "get", "(", "blob_hash", ")", "except", "TypeError", ":", "raise", "DataJointError", "(", "'External store {store} configuration is incomplete.'", ".", "format", "(", "store", "=", "store", ")", ")", "else", ":", "raise", "DataJointError", "(", "'Unknown external storage protocol \"%s\"'", "%", "spec", "[", "'protocol'", "]", ")", "if", "cache_folder", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "cache_folder", ")", ":", "os", ".", "makedirs", "(", "cache_folder", ")", "safe_write", "(", "os", ".", "path", ".", "join", "(", "cache_folder", ",", "blob_hash", ")", ",", "blob", ")", "return", "unpack", "(", "blob", ")" ]
Handle a control response .
def _control_resp ( self , data , state ) : if _is_control_response ( data ) : ack_state = bytes ( [ data [ 22 ] ] ) if state == ack_state : _LOGGER . debug ( "Received state ack from %s, state: %s" , self . host , ord ( ack_state ) ) return ack_state
1,378
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L259-L271
[ "def", "factory", "(", "fileobject", ",", "jfs", ",", "parentpath", ")", ":", "# fileobject from lxml.objectify", "if", "hasattr", "(", "fileobject", ",", "'currentRevision'", ")", ":", "# a normal file", "return", "JFSFile", "(", "fileobject", ",", "jfs", ",", "parentpath", ")", "elif", "str", "(", "fileobject", ".", "latestRevision", ".", "state", ")", "==", "ProtoFile", ".", "STATE_INCOMPLETE", ":", "return", "JFSIncompleteFile", "(", "fileobject", ",", "jfs", ",", "parentpath", ")", "elif", "str", "(", "fileobject", ".", "latestRevision", ".", "state", ")", "==", "ProtoFile", ".", "STATE_CORRUPT", ":", "return", "JFSCorruptFile", "(", "fileobject", ",", "jfs", ",", "parentpath", ")", "else", ":", "raise", "NotImplementedError", "(", "'No JFS*File support for state %r. Please file a bug!'", "%", "fileobject", ".", "latestRevision", ".", "state", ")" ]
Complete a UDP transaction .
def _udp_transact ( self , payload , handler , * args , broadcast = False , timeout = TIMEOUT ) : if self . host in _BUFFER : del _BUFFER [ self . host ] host = self . host if broadcast : host = '255.255.255.255' retval = None for _ in range ( RETRIES ) : _SOCKET . sendto ( bytearray ( payload ) , ( host , PORT ) ) start = time . time ( ) while time . time ( ) < start + timeout : data = _BUFFER . get ( self . host , None ) if data : retval = handler ( data , * args ) # Return as soon as a response is received if retval : return retval
1,379
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L273-L303
[ "def", "compute_score", "(", "subtitle", ",", "video", ",", "hearing_impaired", "=", "None", ")", ":", "logger", ".", "info", "(", "'Computing score of %r for video %r with %r'", ",", "subtitle", ",", "video", ",", "dict", "(", "hearing_impaired", "=", "hearing_impaired", ")", ")", "# get the scores dict", "scores", "=", "get_scores", "(", "video", ")", "logger", ".", "debug", "(", "'Using scores %r'", ",", "scores", ")", "# get the matches", "matches", "=", "subtitle", ".", "get_matches", "(", "video", ")", "logger", ".", "debug", "(", "'Found matches %r'", ",", "matches", ")", "# on hash match, discard everything else", "if", "'hash'", "in", "matches", ":", "logger", ".", "debug", "(", "'Keeping only hash match'", ")", "matches", "&=", "{", "'hash'", "}", "# handle equivalent matches", "if", "isinstance", "(", "video", ",", "Episode", ")", ":", "if", "'title'", "in", "matches", ":", "logger", ".", "debug", "(", "'Adding title match equivalent'", ")", "matches", ".", "add", "(", "'episode'", ")", "if", "'series_imdb_id'", "in", "matches", ":", "logger", ".", "debug", "(", "'Adding series_imdb_id match equivalent'", ")", "matches", "|=", "{", "'series'", ",", "'year'", "}", "if", "'imdb_id'", "in", "matches", ":", "logger", ".", "debug", "(", "'Adding imdb_id match equivalents'", ")", "matches", "|=", "{", "'series'", ",", "'year'", ",", "'season'", ",", "'episode'", "}", "if", "'tvdb_id'", "in", "matches", ":", "logger", ".", "debug", "(", "'Adding tvdb_id match equivalents'", ")", "matches", "|=", "{", "'series'", ",", "'year'", ",", "'season'", ",", "'episode'", "}", "if", "'series_tvdb_id'", "in", "matches", ":", "logger", ".", "debug", "(", "'Adding series_tvdb_id match equivalents'", ")", "matches", "|=", "{", "'series'", ",", "'year'", "}", "elif", "isinstance", "(", "video", ",", "Movie", ")", ":", "if", "'imdb_id'", "in", "matches", ":", "logger", ".", "debug", "(", "'Adding imdb_id match equivalents'", ")", "matches", "|=", "{", "'title'", ",", "'year'", "}", "# handle hearing impaired", "if", "hearing_impaired", "is", "not", "None", "and", "subtitle", ".", "hearing_impaired", "==", "hearing_impaired", ":", "logger", ".", "debug", "(", "'Matched hearing_impaired'", ")", "matches", ".", "add", "(", "'hearing_impaired'", ")", "# compute the score", "score", "=", "sum", "(", "(", "scores", ".", "get", "(", "match", ",", "0", ")", "for", "match", "in", "matches", ")", ")", "logger", ".", "info", "(", "'Computed score %r with final matches %r'", ",", "score", ",", "matches", ")", "# ensure score is within valid bounds", "assert", "0", "<=", "score", "<=", "scores", "[", "'hash'", "]", "+", "scores", "[", "'hearing_impaired'", "]", "return", "score" ]
Load OpenCorpora corpus .
def load ( source ) : parser = get_xml_parser ( ) return etree . parse ( source , parser = parser ) . getroot ( )
1,380
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/reader_lxml.py#L11-L24
[ "def", "initialize_options", "(", "self", ",", "*", "args", ")", ":", "import", "distutils", ".", "sysconfig", "cfg_vars", "=", "distutils", ".", "sysconfig", ".", "get_config_vars", "(", ")", "# if 'CFLAGS' in cfg_vars:", "# cfg_vars['CFLAGS'] = cfg_vars['CFLAGS'].replace('-Wstrict-prototypes', '')", "for", "k", ",", "v", "in", "cfg_vars", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "str", ")", "and", "v", ".", "find", "(", "\"-Wstrict-prototypes\"", ")", ":", "v", "=", "v", ".", "replace", "(", "'-Wstrict-prototypes'", ",", "''", ")", "cfg_vars", "[", "k", "]", "=", "v", "if", "isinstance", "(", "v", ",", "str", ")", "and", "v", ".", "find", "(", "\"-Wimplicit-function-declaration\"", ")", ":", "v", "=", "v", ".", "replace", "(", "'-Wimplicit-function-declaration'", ",", "''", ")", "cfg_vars", "[", "k", "]", "=", "v", "build_ext", ".", "initialize_options", "(", "self", ")" ]
Given all detected VariantSequence objects for a particular variant and all the ReferenceContext objects for that locus translate multiple protein sequences up to the number specified by the argument max_protein_sequences_per_variant .
def translation_generator ( variant_sequences , reference_contexts , min_transcript_prefix_length , max_transcript_mismatches , include_mismatches_after_variant , protein_sequence_length = None ) : for reference_context in reference_contexts : for variant_sequence in variant_sequences : translation = Translation . from_variant_sequence_and_reference_context ( variant_sequence = variant_sequence , reference_context = reference_context , min_transcript_prefix_length = min_transcript_prefix_length , max_transcript_mismatches = max_transcript_mismatches , include_mismatches_after_variant = include_mismatches_after_variant , protein_sequence_length = protein_sequence_length ) if translation is not None : yield translation
1,381
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/translation.py#L355-L405
[ "def", "retry_failed", "(", "FailAdmin", ",", "request", ",", "queryset", ")", ":", "for", "task", "in", "queryset", ":", "async_task", "(", "task", ".", "func", ",", "*", "task", ".", "args", "or", "(", ")", ",", "hook", "=", "task", ".", "hook", ",", "*", "*", "task", ".", "kwargs", "or", "{", "}", ")", "task", ".", "delete", "(", ")" ]
Given a variant and its associated alt reads construct variant sequences and translate them into Translation objects .
def translate_variant_reads ( variant , variant_reads , protein_sequence_length , transcript_id_whitelist = None , min_alt_rna_reads = MIN_ALT_RNA_READS , min_variant_sequence_coverage = MIN_VARIANT_SEQUENCE_COVERAGE , min_transcript_prefix_length = MIN_TRANSCRIPT_PREFIX_LENGTH , max_transcript_mismatches = MAX_REFERENCE_TRANSCRIPT_MISMATCHES , include_mismatches_after_variant = INCLUDE_MISMATCHES_AFTER_VARIANT , variant_sequence_assembly = VARIANT_SEQUENCE_ASSEMBLY ) : if len ( variant_reads ) == 0 : logger . info ( "No supporting reads for variant %s" , variant ) return [ ] # Adding an extra codon to the desired RNA sequence length in case we # need to clip nucleotides at the start/end of the sequence cdna_sequence_length = ( protein_sequence_length + 1 ) * 3 variant_sequences = reads_to_variant_sequences ( variant = variant , reads = variant_reads , preferred_sequence_length = cdna_sequence_length , min_alt_rna_reads = min_alt_rna_reads , min_variant_sequence_coverage = min_variant_sequence_coverage , variant_sequence_assembly = variant_sequence_assembly ) if not variant_sequences : logger . info ( "No spanning cDNA sequences for variant %s" , variant ) return [ ] # try translating the variant sequences from the same set of # ReferenceContext objects, which requires using the longest # context_size to be compatible with all of the sequences. Some # sequences maybe have fewer nucleotides than this before the variant # and will thus have to be trimmed. context_size = max ( len ( variant_sequence . prefix ) for variant_sequence in variant_sequences ) reference_contexts = reference_contexts_for_variant ( variant , context_size = context_size , transcript_id_whitelist = transcript_id_whitelist ) return list ( translation_generator ( variant_sequences = variant_sequences , reference_contexts = reference_contexts , min_transcript_prefix_length = min_transcript_prefix_length , max_transcript_mismatches = max_transcript_mismatches , include_mismatches_after_variant = include_mismatches_after_variant , protein_sequence_length = protein_sequence_length ) )
1,382
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/translation.py#L408-L505
[ "def", "boundingBox", "(", "booleanArray", ")", ":", "w", "=", "np", ".", "where", "(", "booleanArray", ")", "p", "=", "[", "]", "for", "i", "in", "w", ":", "if", "len", "(", "i", ")", ":", "p", ".", "append", "(", "slice", "(", "i", ".", "min", "(", ")", ",", "i", ".", "max", "(", ")", ")", ")", "else", ":", "p", ".", "append", "(", "slice", "(", "0", ",", "0", ")", ")", "# return None", "return", "tuple", "(", "p", ")" ]
Project Translation object or any other derived class into just a TranslationKey which has fewer fields and can be used as a dictionary key .
def as_translation_key ( self ) : return TranslationKey ( * * { name : getattr ( self , name ) for name in TranslationKey . _fields } )
1,383
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/translation.py#L150-L158
[ "def", "open", "(", "self", ",", "file_path", ")", ":", "if", "self", ".", "is_opened", "(", ")", "and", "self", ".", "workbook", ".", "file_path", "==", "file_path", ":", "self", ".", "_logger", ".", "logger", ".", "debug", "(", "\"workbook already opened: {}\"", ".", "format", "(", "self", ".", "workbook", ".", "file_path", ")", ")", "return", "self", ".", "close", "(", ")", "self", ".", "_open", "(", "file_path", ")" ]
Attempt to translate a single VariantSequence using the reading frame from a single ReferenceContext .
def from_variant_sequence_and_reference_context ( cls , variant_sequence , reference_context , min_transcript_prefix_length , max_transcript_mismatches , include_mismatches_after_variant , protein_sequence_length = None ) : variant_sequence_in_reading_frame = match_variant_sequence_to_reference_context ( variant_sequence , reference_context , min_transcript_prefix_length = min_transcript_prefix_length , max_transcript_mismatches = max_transcript_mismatches , include_mismatches_after_variant = include_mismatches_after_variant ) if variant_sequence_in_reading_frame is None : logger . info ( "Unable to determine reading frame for %s" , variant_sequence ) return None cdna_sequence = variant_sequence_in_reading_frame . cdna_sequence cdna_codon_offset = variant_sequence_in_reading_frame . offset_to_first_complete_codon # get the offsets into the cDNA sequence which pick out the variant nucleotides cdna_variant_start_offset = variant_sequence_in_reading_frame . variant_cdna_interval_start cdna_variant_end_offset = variant_sequence_in_reading_frame . variant_cdna_interval_end # TODO: determine if the first codon is the start codon of a # transcript, for now any of the unusual start codons like CTG # will translate to leucine instead of methionine. variant_amino_acids , ends_with_stop_codon = translate_cdna ( cdna_sequence [ cdna_codon_offset : ] , first_codon_is_start = False , mitochondrial = reference_context . mitochondrial ) variant_aa_interval_start , variant_aa_interval_end , frameshift = find_mutant_amino_acid_interval ( cdna_sequence = cdna_sequence , cdna_first_codon_offset = cdna_codon_offset , cdna_variant_start_offset = cdna_variant_start_offset , cdna_variant_end_offset = cdna_variant_end_offset , n_ref = len ( reference_context . sequence_at_variant_locus ) , n_amino_acids = len ( variant_amino_acids ) ) if protein_sequence_length and len ( variant_amino_acids ) > protein_sequence_length : if protein_sequence_length <= variant_aa_interval_start : logger . warn ( ( "Truncating amino acid sequence %s " "to only %d elements loses all variant residues" ) , variant_amino_acids , protein_sequence_length ) return None # if the protein is too long then shorten it, which implies # we're no longer stopping due to a stop codon and that the variant # amino acids might need a new stop index variant_amino_acids = variant_amino_acids [ : protein_sequence_length ] variant_aa_interval_end = min ( variant_aa_interval_end , protein_sequence_length ) ends_with_stop_codon = False return Translation ( amino_acids = variant_amino_acids , frameshift = frameshift , ends_with_stop_codon = ends_with_stop_codon , variant_aa_interval_start = variant_aa_interval_start , variant_aa_interval_end = variant_aa_interval_end , untrimmed_variant_sequence = variant_sequence , reference_context = reference_context , variant_sequence_in_reading_frame = variant_sequence_in_reading_frame )
1,384
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/translation.py#L161-L258
[ "async", "def", "save", "(", "self", ")", ":", "orig_owner_data", "=", "self", ".", "_orig_data", "[", "'owner_data'", "]", "new_owner_data", "=", "dict", "(", "self", ".", "_data", "[", "'owner_data'", "]", ")", "self", ".", "_changed_data", ".", "pop", "(", "'owner_data'", ",", "None", ")", "await", "super", "(", "Machine", ",", "self", ")", ".", "save", "(", ")", "params_diff", "=", "calculate_dict_diff", "(", "orig_owner_data", ",", "new_owner_data", ")", "if", "len", "(", "params_diff", ")", ">", "0", ":", "params_diff", "[", "'system_id'", "]", "=", "self", ".", "system_id", "await", "self", ".", "_handler", ".", "set_owner_data", "(", "*", "*", "params_diff", ")", "self", ".", "_data", "[", "'owner_data'", "]", "=", "self", ".", "_data", "[", "'owner_data'", "]" ]
Create a new component .
def postComponents ( self , name , status , * * kwargs ) : kwargs [ 'name' ] = name kwargs [ 'status' ] = status return self . __postRequest ( '/components' , kwargs )
1,385
https://github.com/imlonghao/cachet.python/blob/624b0d8e09b551a3be45dec207da6aa89f1e56e8/cachet.py#L51-L67
[ "def", "back_off_until", "(", "self", ")", ":", "if", "self", ".", "_back_off_until", "is", "None", ":", "return", "None", "with", "self", ".", "_back_off_lock", ":", "if", "self", ".", "_back_off_until", "is", "None", ":", "return", "None", "if", "self", ".", "_back_off_until", "<", "datetime", ".", "datetime", ".", "now", "(", ")", ":", "self", ".", "_back_off_until", "=", "None", "# The backoff value has expired. Reset", "return", "None", "return", "self", ".", "_back_off_until" ]
Create a new incident .
def postIncidents ( self , name , message , status , visible , * * kwargs ) : kwargs [ 'name' ] = name kwargs [ 'message' ] = message kwargs [ 'status' ] = status kwargs [ 'visible' ] = visible return self . __postRequest ( '/incidents' , kwargs )
1,386
https://github.com/imlonghao/cachet.python/blob/624b0d8e09b551a3be45dec207da6aa89f1e56e8/cachet.py#L168-L186
[ "def", "volumes_delete", "(", "storage_pool", ",", "logger", ")", ":", "try", ":", "for", "vol_name", "in", "storage_pool", ".", "listVolumes", "(", ")", ":", "try", ":", "vol", "=", "storage_pool", ".", "storageVolLookupByName", "(", "vol_name", ")", "vol", ".", "delete", "(", "0", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volume %s.\"", ",", "vol_name", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volumes.\"", ")" ]
Create a new metric .
def postMetrics ( self , name , suffix , description , default_value , * * kwargs ) : kwargs [ 'name' ] = name kwargs [ 'suffix' ] = suffix kwargs [ 'description' ] = description kwargs [ 'default_value' ] = default_value return self . __postRequest ( '/metrics' , kwargs )
1,387
https://github.com/imlonghao/cachet.python/blob/624b0d8e09b551a3be45dec207da6aa89f1e56e8/cachet.py#L223-L239
[ "def", "libvlc_video_set_subtitle_file", "(", "p_mi", ",", "psz_subtitle", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_video_set_subtitle_file'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_video_set_subtitle_file'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ",", ")", ",", "None", ",", "ctypes", ".", "c_int", ",", "MediaPlayer", ",", "ctypes", ".", "c_char_p", ")", "return", "f", "(", "p_mi", ",", "psz_subtitle", ")" ]
Add a metric point to a given metric .
def postMetricsPointsByID ( self , id , value , * * kwargs ) : kwargs [ 'value' ] = value return self . __postRequest ( '/metrics/%s/points' % id , kwargs )
1,388
https://github.com/imlonghao/cachet.python/blob/624b0d8e09b551a3be45dec207da6aa89f1e56e8/cachet.py#L271-L282
[ "def", "from_compact", "(", "cls", ":", "Type", "[", "TransactionType", "]", ",", "currency", ":", "str", ",", "compact", ":", "str", ")", "->", "TransactionType", ":", "lines", "=", "compact", ".", "splitlines", "(", "True", ")", "n", "=", "0", "header_data", "=", "Transaction", ".", "re_header", ".", "match", "(", "lines", "[", "n", "]", ")", "if", "header_data", "is", "None", ":", "raise", "MalformedDocumentError", "(", "\"Compact TX header\"", ")", "version", "=", "int", "(", "header_data", ".", "group", "(", "1", ")", ")", "issuers_num", "=", "int", "(", "header_data", ".", "group", "(", "2", ")", ")", "inputs_num", "=", "int", "(", "header_data", ".", "group", "(", "3", ")", ")", "unlocks_num", "=", "int", "(", "header_data", ".", "group", "(", "4", ")", ")", "outputs_num", "=", "int", "(", "header_data", ".", "group", "(", "5", ")", ")", "has_comment", "=", "int", "(", "header_data", ".", "group", "(", "6", ")", ")", "locktime", "=", "int", "(", "header_data", ".", "group", "(", "7", ")", ")", "n", "+=", "1", "blockstamp", "=", "None", "# type: Optional[BlockUID]", "if", "version", ">=", "3", ":", "blockstamp", "=", "BlockUID", ".", "from_str", "(", "Transaction", ".", "parse_field", "(", "\"CompactBlockstamp\"", ",", "lines", "[", "n", "]", ")", ")", "n", "+=", "1", "issuers", "=", "[", "]", "inputs", "=", "[", "]", "unlocks", "=", "[", "]", "outputs", "=", "[", "]", "signatures", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "issuers_num", ")", ":", "issuer", "=", "Transaction", ".", "parse_field", "(", "\"Pubkey\"", ",", "lines", "[", "n", "]", ")", "issuers", ".", "append", "(", "issuer", ")", "n", "+=", "1", "for", "i", "in", "range", "(", "0", ",", "inputs_num", ")", ":", "input_source", "=", "InputSource", ".", "from_inline", "(", "version", ",", "lines", "[", "n", "]", ")", "inputs", ".", "append", "(", "input_source", ")", "n", "+=", "1", "for", "i", "in", "range", "(", "0", ",", "unlocks_num", ")", ":", "unlock", "=", "Unlock", ".", "from_inline", "(", "lines", "[", "n", "]", ")", "unlocks", ".", "append", "(", "unlock", ")", "n", "+=", "1", "for", "i", "in", "range", "(", "0", ",", "outputs_num", ")", ":", "output_source", "=", "OutputSource", ".", "from_inline", "(", "lines", "[", "n", "]", ")", "outputs", ".", "append", "(", "output_source", ")", "n", "+=", "1", "comment", "=", "\"\"", "if", "has_comment", "==", "1", ":", "data", "=", "Transaction", ".", "re_compact_comment", ".", "match", "(", "lines", "[", "n", "]", ")", "if", "data", ":", "comment", "=", "data", ".", "group", "(", "1", ")", "n", "+=", "1", "else", ":", "raise", "MalformedDocumentError", "(", "\"Compact TX Comment\"", ")", "while", "n", "<", "len", "(", "lines", ")", ":", "data", "=", "Transaction", ".", "re_signature", ".", "match", "(", "lines", "[", "n", "]", ")", "if", "data", ":", "signatures", ".", "append", "(", "data", ".", "group", "(", "1", ")", ")", "n", "+=", "1", "else", ":", "raise", "MalformedDocumentError", "(", "\"Compact TX Signatures\"", ")", "return", "cls", "(", "version", ",", "currency", ",", "blockstamp", ",", "locktime", ",", "issuers", ",", "inputs", ",", "unlocks", ",", "outputs", ",", "comment", ",", "signatures", ")" ]
Calculate the center of mass of the indicated geometry .
def ctr_mass ( geom , masses ) : # Imports import numpy as np from . base import safe_cast as scast # Shape check if len ( geom . shape ) != 1 : raise ValueError ( "Geometry is not a vector" ) ## end if if len ( masses . shape ) != 1 : raise ValueError ( "Masses cannot be parsed as a vector" ) ## end if if not geom . shape [ 0 ] % 3 == 0 : raise ValueError ( "Geometry is not length-3N" ) ## end if if geom . shape [ 0 ] != 3 * masses . shape [ 0 ] and geom . shape [ 0 ] != masses . shape [ 0 ] : raise ValueError ( "Inconsistent geometry and masses vector lengths" ) ## end if # If N masses are provided, expand to 3N; if 3N, retain. if geom . shape [ 0 ] == 3 * masses . shape [ 0 ] : masses = masses . repeat ( 3 ) ## end if # Calculate the mass-weighted coordinates, reshape to group by coordinate # column-wise, sum each column, then divide by the sum of masses, which # must further be divided by three because there are three replicates # (possibly perturbed) of the mass of each atom. ctr = np . multiply ( geom , masses ) . reshape ( ( geom . shape [ 0 ] // 3 , 3 ) ) . sum ( axis = 0 ) . squeeze ( ) / ( masses . sum ( ) / 3 ) # Return the vector return ctr
1,389
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/inertia.py#L52-L113
[ "def", "_init_libcrypto", "(", ")", ":", "libcrypto", "=", "_load_libcrypto", "(", ")", "try", ":", "libcrypto", ".", "OPENSSL_init_crypto", "(", ")", "except", "AttributeError", ":", "# Support for OpenSSL < 1.1 (OPENSSL_API_COMPAT < 0x10100000L)", "libcrypto", ".", "OPENSSL_no_config", "(", ")", "libcrypto", ".", "OPENSSL_add_all_algorithms_noconf", "(", ")", "libcrypto", ".", "RSA_new", ".", "argtypes", "=", "(", ")", "libcrypto", ".", "RSA_new", ".", "restype", "=", "c_void_p", "libcrypto", ".", "RSA_free", ".", "argtypes", "=", "(", "c_void_p", ",", ")", "libcrypto", ".", "RSA_size", ".", "argtype", "=", "(", "c_void_p", ")", "libcrypto", ".", "BIO_new_mem_buf", ".", "argtypes", "=", "(", "c_char_p", ",", "c_int", ")", "libcrypto", ".", "BIO_new_mem_buf", ".", "restype", "=", "c_void_p", "libcrypto", ".", "BIO_free", ".", "argtypes", "=", "(", "c_void_p", ",", ")", "libcrypto", ".", "PEM_read_bio_RSAPrivateKey", ".", "argtypes", "=", "(", "c_void_p", ",", "c_void_p", ",", "c_void_p", ",", "c_void_p", ")", "libcrypto", ".", "PEM_read_bio_RSAPrivateKey", ".", "restype", "=", "c_void_p", "libcrypto", ".", "PEM_read_bio_RSA_PUBKEY", ".", "argtypes", "=", "(", "c_void_p", ",", "c_void_p", ",", "c_void_p", ",", "c_void_p", ")", "libcrypto", ".", "PEM_read_bio_RSA_PUBKEY", ".", "restype", "=", "c_void_p", "libcrypto", ".", "RSA_private_encrypt", ".", "argtypes", "=", "(", "c_int", ",", "c_char_p", ",", "c_char_p", ",", "c_void_p", ",", "c_int", ")", "libcrypto", ".", "RSA_public_decrypt", ".", "argtypes", "=", "(", "c_int", ",", "c_char_p", ",", "c_char_p", ",", "c_void_p", ",", "c_int", ")", "return", "libcrypto" ]
Returns geometry shifted to center of mass .
def ctr_geom ( geom , masses ) : # Imports import numpy as np # Calculate the shift vector. Possible bad shape of geom or masses is # addressed internally by the ctr_mass call. shift = np . tile ( ctr_mass ( geom , masses ) , geom . shape [ 0 ] / 3 ) # Shift the geometry and return ctr_geom = geom - shift return ctr_geom
1,390
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/inertia.py#L119-L158
[ "def", "create_or_update_secret", "(", "self", ",", "path", ",", "secret", ",", "cas", "=", "None", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'options'", ":", "{", "}", ",", "'data'", ":", "secret", ",", "}", "if", "cas", "is", "not", "None", ":", "params", "[", "'options'", "]", "[", "'cas'", "]", "=", "cas", "api_path", "=", "'/v1/{mount_point}/data/{path}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "path", "=", "path", ")", "response", "=", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")", "return", "response", ".", "json", "(", ")" ]
Generate the 3x3 moment - of - inertia tensor .
def inertia_tensor ( geom , masses ) : # Imports import numpy as np # Center the geometry. Takes care of any improper shapes of geom or # masses via the internal call to 'ctr_mass' within the call to 'ctr_geom' geom = ctr_geom ( geom , masses ) # Expand the masses if required. Shape should only ever be (N,) or (3N,), # else would raise an exception within the above 'ctr_geom' call if geom . shape [ 0 ] == 3 * masses . shape [ 0 ] : masses = masses . repeat ( 3 ) ## end if # Initialize the tensor matrix tensor = np . zeros ( ( 3 , 3 ) ) # Fill the matrix for i in range ( 3 ) : for j in range ( i , 3 ) : if i == j : # On-diagonal element; calculate indices to include ind = np . concatenate ( [ np . array ( list ( map ( lambda v : v % 3 , range ( i + 1 , i + 3 ) ) ) ) + o for o in range ( 0 , geom . shape [ 0 ] , 3 ) ] ) # Calculate the tensor element tensor [ i , i ] = np . multiply ( np . square ( geom [ ind ] ) , masses [ ind ] ) . sum ( ) else : # Off-diagonal element; calculate the indices ind_i = np . array ( range ( i , geom . shape [ 0 ] + i , 3 ) ) ind_j = np . array ( range ( j , geom . shape [ 0 ] + j , 3 ) ) # Calculate the tensor element and its symmetric partner tensor [ i , j ] = np . multiply ( np . sqrt ( np . multiply ( masses [ ind_i ] , masses [ ind_j ] ) ) , np . multiply ( geom [ ind_i ] , geom [ ind_j ] ) ) . sum ( ) * - 1 tensor [ j , i ] = tensor [ i , j ] ## end if ## next j ## next i # Return the tensor return tensor
1,391
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/inertia.py#L164-L242
[ "def", "pair", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "pair", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "p", ".", "print_help", "(", ")", ")", "def", "callback", "(", "s", ")", ":", "print", "(", "s", ".", "pairline", ")", "Sam", "(", "args", "[", "0", "]", ",", "callback", "=", "callback", ")" ]
Rotational constants for a given molecular system .
def rot_consts ( geom , masses , units = _EURC . INV_INERTIA , on_tol = _DEF . ORTHONORM_TOL ) : # Imports import numpy as np from . . const import EnumTopType as ETT , EnumUnitsRotConst as EURC , PRM , PHYS # Ensure units are valid if not units in EURC : raise ValueError ( "'{0}' is not a valid units value" . format ( units ) ) ## end if # Retrieve the moments, axes and top type. Geom and masses are proofed # internally in this call. mom , ax , top = principals ( geom , masses , on_tol ) # Check for special cases if top == ETT . ATOM : # All moments are zero; set to zero-moment threshold mom = np . repeat ( PRM . ZERO_MOMENT_TOL , 3 ) elif top == ETT . LINEAR : # First moment is zero; set to zero-moment threshold mom [ 0 ] = PRM . ZERO_MOMENT_TOL ## end if # Calculate the values in the indicated units if units == EURC . INV_INERTIA : # 1/(amu*B^2) rc = 1.0 / ( 2.0 * mom ) elif units == EURC . ANGFREQ_ATOMIC : # 1/Ta rc = PHYS . PLANCK_BAR / ( 2.0 * mom * PHYS . ME_PER_AMU ) elif units == EURC . ANGFREQ_SECS : # 1/s rc = PHYS . PLANCK_BAR / ( 2.0 * mom * PHYS . ME_PER_AMU ) / PHYS . SEC_PER_TA elif units == EURC . CYCFREQ_ATOMIC : # cyc/Ta rc = PHYS . PLANCK_BAR / ( 4.0 * np . pi * mom * PHYS . ME_PER_AMU ) elif units == EURC . CYCFREQ_HZ : # cyc/s rc = PHYS . PLANCK_BAR / ( 4.0 * np . pi * mom * PHYS . ME_PER_AMU ) / PHYS . SEC_PER_TA elif units == EURC . CYCFREQ_MHZ : # Mcyc/s rc = PHYS . PLANCK_BAR / ( 4.0 * np . pi * mom * PHYS . ME_PER_AMU ) / PHYS . SEC_PER_TA / 1.0e6 elif units == EURC . WAVENUM_ATOMIC : # cyc/B rc = PHYS . PLANCK / ( mom * PHYS . ME_PER_AMU ) / ( 8.0 * np . pi ** 2.0 * PHYS . LIGHT_SPEED ) elif units == EURC . WAVENUM_CM : # cyc/cm rc = PHYS . PLANCK / ( mom * PHYS . ME_PER_AMU ) / ( 8.0 * np . pi ** 2.0 * PHYS . LIGHT_SPEED * PHYS . ANG_PER_BOHR ) * 1.0e8 else : # pragma: no cover -- Valid units; not implemented raise NotImplementedError ( "Units conversion not yet implemented." ) ## end if # Return the result return rc
1,392
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/inertia.py#L499-L598
[ "def", "_percent_match", "(", "result", ",", "out", ",", "yP", "=", "None", ",", "*", "argl", ")", ":", "if", "len", "(", "argl", ")", ">", "1", ":", "if", "yP", "is", "None", ":", "Xt", "=", "argl", "[", "1", "]", "key", "=", "id", "(", "Xt", ")", "if", "key", "in", "_splits", ":", "yP", "=", "_splits", "[", "key", "]", "[", "3", "]", "if", "yP", "is", "not", "None", ":", "import", "math", "out", "[", "\"%\"", "]", "=", "round", "(", "1.", "-", "sum", "(", "abs", "(", "yP", "-", "result", ")", ")", "/", "float", "(", "len", "(", "result", ")", ")", ",", "3", ")" ]
First non - zero Atomic Displacement Non - Orthogonal to Vec
def _fadn_orth ( vec , geom ) : # Imports import numpy as np from scipy import linalg as spla from . . const import PRM from . . error import InertiaError from . vector import orthonorm_check as onchk # Geom and vec must both be the right shape if not ( len ( geom . shape ) == 1 and geom . shape [ 0 ] % 3 == 0 ) : raise ValueError ( "Geometry is not length 3N" ) ## end if if not vec . shape == ( 3 , ) : raise ValueError ( "Reference vector is not length 3" ) ## end if # vec must not be the zero vector if spla . norm ( vec ) < PRM . ZERO_VEC_TOL : raise ValueError ( "Reference vector norm is too small" ) ## end if # Normalize the ref vec vec = vec / spla . norm ( vec ) # Iterate over reshaped geometry for disp in geom . reshape ( ( geom . shape [ 0 ] // 3 , 3 ) ) : # See if the displacement is nonzero and not orthonormal. Trailing # [0] index is to retrieve only the success/fail bool. if spla . norm ( disp ) >= PRM . ZERO_VEC_TOL and not onchk ( np . column_stack ( ( disp / spla . norm ( disp ) , vec / spla . norm ( vec ) ) ) ) [ 0 ] : # This is the displacement you are looking for out_vec = disp / spla . norm ( disp ) return out_vec ## end if ## end if ## next disp else : # Nothing fit the bill - must be atom, linear, or planar raise InertiaError ( InertiaError . BAD_GEOM , "No suitable atomic displacement found" , "" )
1,393
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/inertia.py#L604-L667
[ "def", "load_clubs", "(", "self", ")", ":", "user_clubs", "=", "self", ".", "session", ".", "session", ".", "get", "(", "u'http://myanimelist.net/profile/'", "+", "utilities", ".", "urlencode", "(", "self", ".", "username", ")", "+", "u'/clubs'", ")", ".", "text", "self", ".", "set", "(", "self", ".", "parse_clubs", "(", "utilities", ".", "get_clean_dom", "(", "user_clubs", ")", ")", ")", "return", "self" ]
First non - zero Atomic Displacement that is Non - Parallel with Vec
def _fadn_par ( vec , geom ) : # Imports import numpy as np from scipy import linalg as spla from . . const import PRM from . . error import InertiaError from . vector import parallel_check as parchk # Geom and vec must both be the right shape if not ( len ( geom . shape ) == 1 and geom . shape [ 0 ] % 3 == 0 ) : raise ValueError ( "Geometry is not length 3N" ) ## end if if not vec . shape == ( 3 , ) : raise ValueError ( "Reference vector is not length 3" ) ## end if # vec must not be the zero vector if spla . norm ( vec ) < PRM . ZERO_VEC_TOL : raise ValueError ( "Reference vector norm is too small" ) ## end if # Normalize the ref vec vec = vec / spla . norm ( vec ) # Iterate over reshaped geometry for disp in geom . reshape ( ( geom . shape [ 0 ] // 3 , 3 ) ) : # See if the displacement is nonzero and nonparallel to the ref vec if spla . norm ( disp ) >= PRM . ZERO_VEC_TOL and not parchk ( disp . reshape ( 3 ) , vec ) : # This is the displacement you are looking for out_vec = disp / spla . norm ( disp ) break ## end if ## end if ## next disp else : # Nothing fit the bill - must be a linear molecule? raise InertiaError ( InertiaError . BAD_GEOM , "Linear molecule, no non-parallel displacement" , "" ) ## end for disp # Return the resulting vector return out_vec
1,394
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/inertia.py#L674-L740
[ "async", "def", "get_wallet_record", "(", "wallet_handle", ":", "int", ",", "type_", ":", "str", ",", "id", ":", "str", ",", "options_json", ":", "str", ")", "->", "str", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "\"get_wallet_record: >>> wallet_handle: %r, type_: %r, id: %r, options_json: %r\"", ",", "wallet_handle", ",", "type_", ",", "id", ",", "options_json", ")", "if", "not", "hasattr", "(", "get_wallet_record", ",", "\"cb\"", ")", ":", "logger", ".", "debug", "(", "\"get_wallet_record: Creating callback\"", ")", "get_wallet_record", ".", "cb", "=", "create_cb", "(", "CFUNCTYPE", "(", "None", ",", "c_int32", ",", "c_int32", ",", "c_char_p", ")", ")", "c_wallet_handle", "=", "c_int32", "(", "wallet_handle", ")", "c_type", "=", "c_char_p", "(", "type_", ".", "encode", "(", "'utf-8'", ")", ")", "c_id", "=", "c_char_p", "(", "id", ".", "encode", "(", "'utf-8'", ")", ")", "c_options_json", "=", "c_char_p", "(", "options_json", ".", "encode", "(", "'utf-8'", ")", ")", "wallet_record", "=", "await", "do_call", "(", "'indy_get_wallet_record'", ",", "c_wallet_handle", ",", "c_type", ",", "c_id", ",", "c_options_json", ",", "get_wallet_record", ".", "cb", ")", "res", "=", "wallet_record", ".", "decode", "(", ")", "logger", ".", "debug", "(", "\"get_wallet_record: <<< res: %r\"", ",", "res", ")", "return", "res" ]
Extract a set of reference contexts for each variant in the collection .
def reference_contexts_for_variants ( variants , context_size , transcript_id_whitelist = None ) : result = OrderedDict ( ) for variant in variants : result [ variant ] = reference_contexts_for_variant ( variant = variant , context_size = context_size , transcript_id_whitelist = transcript_id_whitelist ) return result
1,395
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/reference_context.py#L141-L168
[ "def", "_get_device_info", "(", "self", ",", "device_id", ")", ":", "params", "=", "{", "'access_token'", ":", "self", ".", "access_token", "}", "r", "=", "self", ".", "spark_api", "(", "device_id", ")", ".", "GET", "(", "params", "=", "params", ",", "timeout", "=", "30", ")", "self", ".", "_check_error", "(", "r", ")", "return", "r", ".", "json", "(", ")" ]
Given a collection of variants find all reference sequence contexts around each variant .
def variants_to_reference_contexts_dataframe ( variants , context_size , transcript_id_whitelist = None ) : df_builder = DataFrameBuilder ( ReferenceContext , exclude = [ "variant" ] , converters = dict ( transcripts = lambda ts : ";" . join ( t . name for t in ts ) ) , extra_column_fns = { "gene" : lambda variant , _ : ";" . join ( variant . gene_names ) , } ) for variant , reference_contexts in reference_contexts_for_variants ( variants = variants , context_size = context_size , transcript_id_whitelist = transcript_id_whitelist ) . items ( ) : df_builder . add_many ( variant , reference_contexts ) return df_builder . to_dataframe ( )
1,396
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/reference_context.py#L170-L205
[ "def", "logout", "(", "self", ")", ":", "# Check if all transfers are complete before logout", "self", ".", "transfers_complete", "payload", "=", "{", "'apikey'", ":", "self", ".", "config", ".", "get", "(", "'apikey'", ")", ",", "'logintoken'", ":", "self", ".", "session", ".", "cookies", ".", "get", "(", "'logintoken'", ")", "}", "method", ",", "url", "=", "get_URL", "(", "'logout'", ")", "res", "=", "getattr", "(", "self", ".", "session", ",", "method", ")", "(", "url", ",", "params", "=", "payload", ")", "if", "res", ".", "status_code", "==", "200", ":", "self", ".", "session", ".", "cookies", "[", "'logintoken'", "]", "=", "None", "return", "True", "hellraiser", "(", "res", ")" ]
Two - dimensional oriented exponential decay pattern .
def exponential ( x , y , xscale , yscale ) : if xscale == 0.0 or yscale == 0.0 : return x * 0.0 with float_error_ignore ( ) : x_w = np . divide ( x , xscale ) y_h = np . divide ( y , yscale ) return np . exp ( - np . sqrt ( x_w * x_w + y_h * y_h ) )
1,397
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patternfn.py#L82-L92
[ "def", "unregister", "(", "self", ",", "entity_class", ",", "entity", ")", ":", "EntityState", ".", "release", "(", "entity", ",", "self", ")", "self", ".", "__entity_set_map", "[", "entity_class", "]", ".", "remove", "(", "entity", ")" ]
Infinite - length line with a solid central region then Gaussian fall - off at the edges .
def line ( y , thickness , gaussian_width ) : distance_from_line = abs ( y ) gaussian_y_coord = distance_from_line - thickness / 2.0 sigmasq = gaussian_width * gaussian_width if sigmasq == 0.0 : falloff = y * 0.0 else : with float_error_ignore ( ) : falloff = np . exp ( np . divide ( - gaussian_y_coord * gaussian_y_coord , 2 * sigmasq ) ) return np . where ( gaussian_y_coord <= 0 , 1.0 , falloff )
1,398
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patternfn.py#L114-L128
[ "def", "getDatabaseStats", "(", "self", ")", ":", "headers", "=", "(", "'datname'", ",", "'numbackends'", ",", "'xact_commit'", ",", "'xact_rollback'", ",", "'blks_read'", ",", "'blks_hit'", ",", "'tup_returned'", ",", "'tup_fetched'", ",", "'tup_inserted'", ",", "'tup_updated'", ",", "'tup_deleted'", ",", "'disk_size'", ")", "cur", "=", "self", ".", "_conn", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "\"SELECT %s, pg_database_size(datname) FROM pg_stat_database;\"", "%", "\",\"", ".", "join", "(", "headers", "[", ":", "-", "1", "]", ")", ")", "rows", "=", "cur", ".", "fetchall", "(", ")", "dbstats", "=", "self", ".", "_createStatsDict", "(", "headers", ",", "rows", ")", "totals", "=", "self", ".", "_createTotalsDict", "(", "headers", ",", "rows", ")", "return", "{", "'databases'", ":", "dbstats", ",", "'totals'", ":", "totals", "}" ]
Circular disk with Gaussian fall - off after the solid central region .
def disk ( x , y , height , gaussian_width ) : disk_radius = height / 2.0 distance_from_origin = np . sqrt ( x ** 2 + y ** 2 ) distance_outside_disk = distance_from_origin - disk_radius sigmasq = gaussian_width * gaussian_width if sigmasq == 0.0 : falloff = x * 0.0 else : with float_error_ignore ( ) : falloff = np . exp ( np . divide ( - distance_outside_disk * distance_outside_disk , 2 * sigmasq ) ) return np . where ( distance_outside_disk <= 0 , 1.0 , falloff )
1,399
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patternfn.py#L131-L148
[ "def", "action_display", "(", "self", ")", ":", "action", "=", "self", "[", "'action'", "]", "annotations", "=", "[", "]", "abbr", "=", "self", ".", "bill", "[", "settings", ".", "LEVEL_FIELD", "]", "if", "'related_entities'", "in", "self", ":", "for", "entity", "in", "self", "[", "'related_entities'", "]", ":", "name", "=", "entity", "[", "'name'", "]", "_id", "=", "entity", "[", "'id'", "]", "# If the importer couldn't ID the entity,", "# skip.", "if", "_id", "is", "None", ":", "continue", "url", "=", "mongoid_2_url", "(", "abbr", ",", "_id", ")", "link", "=", "'<a href=\"%s\">%s</a>'", "%", "(", "url", ",", "name", ")", "if", "name", "in", "action", ":", "action", "=", "action", ".", "replace", "(", "entity", "[", "'name'", "]", ",", "link", ")", "else", ":", "annotations", ".", "append", "(", "link", ")", "if", "annotations", ":", "action", "+=", "' (%s)'", "%", "', '", ".", "join", "(", "annotations", ")", "return", "action" ]