query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Yields the fluents tensors .
def _tensors ( cls , fluents : Sequence [ FluentPair ] ) -> Iterable [ tf . Tensor ] : for _ , fluent in fluents : tensor = cls . _output_size ( fluent . tensor ) yield tensor
12,200
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L157-L161
[ "def", "cross_idl", "(", "lon1", ",", "lon2", ",", "*", "lons", ")", ":", "lons", "=", "(", "lon1", ",", "lon2", ")", "+", "lons", "l1", ",", "l2", "=", "min", "(", "lons", ")", ",", "max", "(", "lons", ")", "# a line crosses the international date line if the end positions", "# have different sign and they are more than 180 degrees longitude apart", "return", "l1", "*", "l2", "<", "0", "and", "abs", "(", "l1", "-", "l2", ")", ">", "180" ]
Converts tensor to tf . float32 datatype if needed .
def _dtype(cls, tensor: tf.Tensor) -> tf.Tensor:
    '''Converts `tensor` to the tf.float32 dtype if needed.

    Args:
        tensor: the tensor to (possibly) convert.

    Returns:
        The tensor unchanged when already float32, otherwise a cast copy.
    '''
    if tensor.dtype == tf.float32:
        return tensor
    return tf.cast(tensor, tf.float32)
12,201
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L164-L168
[ "def", "stop", "(", "self", ")", ":", "log", ".", "info", "(", "'Stopping te kafka listener class'", ")", "self", ".", "consumer", ".", "unsubscribe", "(", ")", "self", ".", "consumer", ".", "close", "(", ")" ]
Returns output tensors for fluents .
def _output ( cls , fluents : Sequence [ FluentPair ] ) -> Sequence [ tf . Tensor ] : return tuple ( cls . _dtype ( t ) for t in cls . _tensors ( fluents ) )
12,202
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L171-L173
[ "def", "remove_server", "(", "self", ",", "server_id", ")", ":", "# Validate server_id", "server", "=", "self", ".", "_get_server", "(", "server_id", ")", "# Delete any instances we recorded to be cleaned up", "if", "server_id", "in", "self", ".", "_owned_subscriptions", ":", "inst_list", "=", "self", ".", "_owned_subscriptions", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_subscriptions", "[", "server_id", "]", "if", "server_id", "in", "self", ".", "_owned_filters", ":", "inst_list", "=", "self", ".", "_owned_filters", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_filters", "[", "server_id", "]", "if", "server_id", "in", "self", ".", "_owned_destinations", ":", "inst_list", "=", "self", ".", "_owned_destinations", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_destinations", "[", "server_id", "]", "# Remove server from this listener", "del", "self", ".", "_servers", "[", "server_id", "]" ]
Returns the simulation output size .
def output_size(self) -> Tuple[Sequence[Shape], Sequence[Shape], Sequence[Shape], int]:
    '''Returns the simulation output size, as reported by the underlying cell.'''
    cell = self._cell
    return cell.output_size
12,203
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L214-L216
[ "def", "share", "(", "self", ",", "group_id", ",", "group_access", ",", "expires_at", "=", "None", ",", "*", "*", "kwargs", ")", ":", "path", "=", "'/projects/%s/share'", "%", "self", ".", "get_id", "(", ")", "data", "=", "{", "'group_id'", ":", "group_id", ",", "'group_access'", ":", "group_access", ",", "'expires_at'", ":", "expires_at", "}", "self", ".", "manager", ".", "gitlab", ".", "http_post", "(", "path", ",", "post_data", "=", "data", ",", "*", "*", "kwargs", ")" ]
Returns the input tensor for the given horizon .
def timesteps(self, horizon: int) -> tf.Tensor:
    '''Returns the input timestep tensor for the given horizon.

    Timesteps count down from `horizon - 1` to 0, each wrapped in its own
    length-1 vector and replicated across the batch dimension.

    Args:
        horizon: Number of simulation steps.

    Returns:
        A (batch_size, horizon, 1) float32 tensor of decreasing timesteps.
    '''
    countdown = tf.range(horizon - 1, -1, -1, dtype=tf.float32)
    countdown = tf.expand_dims(countdown, -1)
    return tf.stack([countdown] * self.batch_size)
12,204
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L218-L224
[ "def", "return_port", "(", "port", ")", ":", "if", "port", "in", "_random_ports", ":", "_random_ports", ".", "remove", "(", "port", ")", "elif", "port", "in", "_owned_ports", ":", "_owned_ports", ".", "remove", "(", "port", ")", "_free_ports", ".", "add", "(", "port", ")", "elif", "port", "in", "_free_ports", ":", "logging", ".", "info", "(", "\"Returning a port that was already returned: %s\"", ",", "port", ")", "else", ":", "logging", ".", "info", "(", "\"Returning a port that wasn't given by portpicker: %s\"", ",", "port", ")" ]
Returns the ops for the trajectory generation with given horizon and initial_state .
def trajectory(self, horizon: int, initial_state: Optional[StateTensor] = None) -> TrajectoryOutput:
    """Builds the ops for trajectory generation with the given horizon.

    Args:
        horizon: Number of timesteps over which the simulation cell is unrolled.
        initial_state: Optional initial state tensors; when None, the cell's
            own `initial_state()` is used.

    Returns:
        Tuple (initial_state, states, actions, interms, rewards), where the
        fluent groups are converted to the dtypes declared by their RDDL
        range types.
    """
    # Fall back to the cell-provided initial state when none was given.
    if initial_state is None:
        initial_state = self._cell.initial_state()

    with self.graph.as_default():
        # RNN inputs are the batched countdown timesteps for this horizon.
        self.inputs = self.timesteps(horizon)
        outputs, _ = tf.nn.dynamic_rnn(
            self._cell,
            self.inputs,
            initial_state=initial_state,
            dtype=tf.float32,
            scope="trajectory")
        states, actions, interms, rewards = outputs
        # fluent types
        # Each group of fluents is cast to the dtype of its RDDL range type.
        # NOTE: `map` yields lazily; `_output` consumes it via zip.
        state_dtype = map(rddl2tf.utils.range_type_to_dtype,
                          self._cell._compiler.rddl.state_range_type)
        states = self._output(states, state_dtype)
        interm_dtype = map(rddl2tf.utils.range_type_to_dtype,
                           self._cell._compiler.rddl.interm_range_type)
        interms = self._output(interms, interm_dtype)
        action_dtype = map(rddl2tf.utils.range_type_to_dtype,
                           self._cell._compiler.rddl.action_range_type)
        actions = self._output(actions, action_dtype)

    outputs = (initial_state, states, actions, interms, rewards)
    return outputs
12,205
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L226-L272
[ "def", "add_size_info", "(", "self", ")", ":", "if", "self", ".", "headers", "and", "\"Content-Length\"", "in", "self", ".", "headers", "and", "\"Transfer-Encoding\"", "not", "in", "self", ".", "headers", ":", "# Note that content-encoding causes size differences since", "# the content data is always decoded.", "try", ":", "self", ".", "size", "=", "int", "(", "self", ".", "getheader", "(", "\"Content-Length\"", ")", ")", "except", "(", "ValueError", ",", "OverflowError", ")", ":", "pass", "else", ":", "self", ".", "size", "=", "-", "1" ]
Builds the MDP graph and simulates in batch the trajectories with given horizon . Returns the non - fluents states actions interms and rewards . Fluents and non - fluents are returned in factored form .
def run(self, horizon: int, initial_state: Optional[StateTensor] = None) -> SimulationOutput:
    """Builds the MDP graph and simulates trajectories in batch for `horizon`.

    Runs the trajectory ops inside a fresh session and pairs each fluent
    group with its ordering, so fluents and non-fluents are returned in
    factored (name, value) form.

    Args:
        horizon: Number of timesteps to simulate.
        initial_state: Optional initial state; defaults to the cell's.

    Returns:
        Tuple (non_fluents, initial_state, states, actions, interms, rewards).
    """
    trajectory = self.trajectory(horizon, initial_state)

    with tf.Session(graph=self.graph) as sess:
        sess.run(tf.global_variables_initializer())
        non_fluents = sess.run(self._non_fluents)
        initial_state, states, actions, interms, rewards = sess.run(trajectory)

    # non-fluents
    non_fluent_ordering = self._cell._compiler.rddl.domain.non_fluent_ordering
    non_fluents = tuple(zip(non_fluent_ordering, non_fluents))

    # states
    state_fluent_ordering = self._cell._compiler.rddl.domain.state_fluent_ordering
    states = tuple(zip(state_fluent_ordering, states))

    # interms
    interm_fluent_ordering = self._cell._compiler.rddl.domain.interm_fluent_ordering
    interms = tuple(zip(interm_fluent_ordering, interms))

    # actions
    action_fluent_ordering = self._cell._compiler.rddl.domain.action_fluent_ordering
    actions = tuple(zip(action_fluent_ordering, actions))

    # rewards
    rewards = np.squeeze(rewards)

    outputs = (non_fluents, initial_state, states, actions, interms, rewards)
    return outputs
12,206
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L274-L320
[ "def", "get_best_local_timezone", "(", ")", ":", "zone_name", "=", "tzlocal", ".", "get_localzone", "(", ")", ".", "zone", "if", "zone_name", "in", "pytz", ".", "all_timezones", ":", "return", "zone_name", "if", "time", ".", "daylight", ":", "local_offset", "=", "time", ".", "altzone", "localtz", "=", "time", ".", "tzname", "[", "1", "]", "else", ":", "local_offset", "=", "time", ".", "timezone", "localtz", "=", "time", ".", "tzname", "[", "0", "]", "local_offset", "=", "datetime", ".", "timedelta", "(", "seconds", "=", "-", "local_offset", ")", "for", "zone_name", "in", "pytz", ".", "all_timezones", ":", "timezone", "=", "pytz", ".", "timezone", "(", "zone_name", ")", "if", "not", "hasattr", "(", "timezone", ",", "'_tzinfos'", ")", ":", "continue", "for", "utcoffset", ",", "daylight", ",", "tzname", "in", "timezone", ".", "_tzinfos", ":", "if", "utcoffset", "==", "local_offset", "and", "tzname", "==", "localtz", ":", "return", "zone_name" ]
Converts tensors to the corresponding dtypes .
def _output ( cls , tensors : Sequence [ tf . Tensor ] , dtypes : Sequence [ tf . DType ] ) -> Sequence [ tf . Tensor ] : outputs = [ ] for tensor , dtype in zip ( tensors , dtypes ) : tensor = tensor [ 0 ] if tensor . dtype != dtype : tensor = tf . cast ( tensor , dtype ) outputs . append ( tensor ) return tuple ( outputs )
12,207
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L323-L333
[ "def", "cfg", "(", "self", ")", ":", "config", "=", "LStruct", "(", "self", ".", "defaults", ")", "module", "=", "config", "[", "'CONFIG'", "]", "=", "os", ".", "environ", ".", "get", "(", "CONFIGURATION_ENVIRON_VARIABLE", ",", "config", "[", "'CONFIG'", "]", ")", "if", "module", ":", "try", ":", "module", "=", "import_module", "(", "module", ")", "config", ".", "update", "(", "{", "name", ":", "getattr", "(", "module", ",", "name", ")", "for", "name", "in", "dir", "(", "module", ")", "if", "name", "==", "name", ".", "upper", "(", ")", "and", "not", "name", ".", "startswith", "(", "'_'", ")", "}", ")", "except", "ImportError", "as", "exc", ":", "config", ".", "CONFIG", "=", "None", "self", ".", "logger", ".", "error", "(", "\"Error importing %s: %s\"", ",", "module", ",", "exc", ")", "# Patch configuration from ENV", "for", "name", "in", "config", ":", "if", "name", ".", "startswith", "(", "'_'", ")", "or", "name", "!=", "name", ".", "upper", "(", ")", "or", "name", "not", "in", "os", ".", "environ", ":", "continue", "try", ":", "config", "[", "name", "]", "=", "json", ".", "loads", "(", "os", ".", "environ", "[", "name", "]", ")", "except", "ValueError", ":", "pass", "return", "config" ]
Creates a bi - allelic variant .
def _get_biallelic_variant(self, variant, info, _check_alleles=True):
    """Reads the genotypes of a bi-allelic variant from the IMPUTE2 file.

    Args:
        variant: the requested variant (provides `alleles` and allele encoding).
        info: index information for the variant; the first row's `seek`
            offset locates the record in the file.
        _check_alleles: when True, the parsed alleles must match the
            requested variant's alleles, otherwise an empty list is returned.

    Returns:
        A single-element list with the parsed genotypes, or an empty list
        when the alleles do not match.
    """
    # NOTE(review): `info` looks like a pandas DataFrame; only its first
    # row is used — confirm duplicates are handled upstream.
    info = info.iloc[0, :]
    assert not info.multiallelic

    # Seeking and parsing the file
    self._impute2_file.seek(info.seek)
    genotypes = self._parse_impute2_line(self._impute2_file.readline())

    variant_alleles = variant._encode_alleles([
        genotypes.reference, genotypes.coded,
    ])
    if (_check_alleles and variant_alleles != variant.alleles):
        # Variant with requested alleles is unavailable.
        logging.variant_not_found(variant)
        return []

    return [genotypes]
12,208
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/readers/impute2.py#L222-L239
[ "def", "restart", "(", "self", ",", "timeout", "=", "None", ")", ":", "msg", "=", "{", "\"value\"", ":", "\"Restart requested by \"", "+", "self", ".", "username", "+", "\"via the Splunk SDK for Python\"", "}", "# This message will be deleted once the server actually restarts.", "self", ".", "messages", ".", "create", "(", "name", "=", "\"restart_required\"", ",", "*", "*", "msg", ")", "result", "=", "self", ".", "post", "(", "\"server/control/restart\"", ")", "if", "timeout", "is", "None", ":", "return", "result", "start", "=", "datetime", ".", "now", "(", ")", "diff", "=", "timedelta", "(", "seconds", "=", "timeout", ")", "while", "datetime", ".", "now", "(", ")", "-", "start", "<", "diff", ":", "try", ":", "self", ".", "login", "(", ")", "if", "not", "self", ".", "restart_required", ":", "return", "result", "except", "Exception", "as", "e", ":", "sleep", "(", "1", ")", "raise", "Exception", "(", "\"Operation time out.\"", ")" ]
Fixes a genotypes object (variant name, multi-allelic value).
def _fix_genotypes_object ( self , genotypes , variant_info ) : # Checking the name (if there were duplications) if self . has_index and variant_info . name != genotypes . variant . name : if not variant_info . name . startswith ( genotypes . variant . name ) : raise ValueError ( "Index file not synced with IMPUTE2 file" ) genotypes . variant . name = variant_info . name # Trying to set multi-allelic information if self . has_index and self . _index_has_location : # Location was in the index, so we can automatically set the # multi-allelic state of the genotypes genotypes . multiallelic = variant_info . multiallelic else : # Location was not in the index, so we check one marker before and # after the one we found logging . warning ( "Multiallelic variants are not detected on " "unindexed files." )
12,209
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/readers/impute2.py#L394-L412
[ "def", "_log10_Inorm_extern_planckint", "(", "self", ",", "Teff", ")", ":", "log10_Inorm", "=", "libphoebe", ".", "wd_planckint", "(", "Teff", ",", "self", ".", "extern_wd_idx", ",", "self", ".", "wd_data", "[", "\"planck_table\"", "]", ")", "return", "log10_Inorm" ]
Normalize a plink genotype vector .
def _normalize_missing ( g ) : g = g . astype ( float ) g [ g == - 1.0 ] = np . nan return g
12,210
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/readers/plink.py#L276-L280
[ "def", "clean_start_time", "(", "self", ")", ":", "start", "=", "self", ".", "cleaned_data", ".", "get", "(", "'start_time'", ")", "if", "not", "start", ":", "return", "start", "active_entries", "=", "self", ".", "user", ".", "timepiece_entries", ".", "filter", "(", "start_time__gte", "=", "start", ",", "end_time__isnull", "=", "True", ")", "for", "entry", "in", "active_entries", ":", "output", "=", "(", "'The start time is on or before the current entry: '", "'%s - %s starting at %s'", "%", "(", "entry", ".", "project", ",", "entry", ".", "activity", ",", "entry", ".", "start_time", ".", "strftime", "(", "'%H:%M:%S'", ")", ")", ")", "raise", "forms", ".", "ValidationError", "(", "output", ")", "return", "start" ]
Download and extract the tarball from Alex's website.
def maybe_download_and_extract():
    """Downloads and extracts the dataset tarball from `DATA_URL`.

    The file is fetched into /tmp/cifar (created if missing); the download
    is skipped when the file is already present. The gzipped tarball is
    then extracted into the same directory.
    """
    dest_directory = "/tmp/cifar"
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # In-place progress line: percentage of total bytes fetched so far.
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename,
                float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)
12,211
https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/examples/shared/cifar_utils.py#L73-L89
[ "def", "associate_flavor", "(", "self", ",", "flavor", ",", "body", ")", ":", "return", "self", ".", "post", "(", "self", ".", "flavor_profile_bindings_path", "%", "(", "flavor", ")", ",", "body", "=", "body", ")" ]
Plot a single CIFAR image .
def plot(config, image, file):
    '''Plots a single CIFAR image by saving it to `file`.

    Args:
        config: unused here; kept for interface compatibility.
        image: the image array; singleton dimensions are squeezed out first.
        file: destination path handed to `imsave`.
    '''
    squeezed = np.squeeze(image)
    print(file, squeezed.shape)
    imsave(file, squeezed)
12,212
https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/examples/shared/cifar_utils.py#L91-L95
[ "def", "start_vm", "(", "access_token", ",", "subscription_id", ",", "resource_group", ",", "vm_name", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/resourceGroups/'", ",", "resource_group", ",", "'/providers/Microsoft.Compute/virtualMachines/'", ",", "vm_name", ",", "'/start'", ",", "'?api-version='", ",", "COMP_API", "]", ")", "return", "do_post", "(", "endpoint", ",", "''", ",", "access_token", ")" ]
Predict the filetype from a handle's name.
def _get_seqtype_from_ext ( handle ) : if isinstance ( handle , basestring ) : name = handle elif hasattr ( handle , 'filename' ) : name = handle . filename elif hasattr ( handle , 'name' ) : name = handle . name else : raise ValueError ( "Unknown datatype for handle!" ) modifier = '' dummy , ext = path . splitext ( name . lower ( ) ) if ext == ".gz" : modifier = 'gz-' dummy , ext = path . splitext ( dummy ) if not ext : ext = "." + dummy if ext in ( ".gbk" , ".gb" , ".genbank" , ".gbff" ) : return modifier + "genbank" elif ext in ( ".embl" , ".emb" ) : return modifier + "embl" elif ext in ( ".fa" , ".fasta" , ".fna" , ".faa" , ".fas" ) : return modifier + "fasta" else : raise ValueError ( "Unknown file format '%s'." % ext )
12,213
https://github.com/kblin/bioinf-helperlibs/blob/3a732d62b4b3cc42675631db886ba534672cb134/helperlibs/bio/seqio.py#L28-L55
[ "def", "summarization", "(", "self", ",", "summarization", ")", ":", "allowed_values", "=", "[", "\"MEAN\"", ",", "\"MEDIAN\"", ",", "\"MIN\"", ",", "\"MAX\"", ",", "\"SUM\"", ",", "\"COUNT\"", ",", "\"LAST\"", ",", "\"FIRST\"", "]", "# noqa: E501", "if", "summarization", "not", "in", "allowed_values", ":", "raise", "ValueError", "(", "\"Invalid value for `summarization` ({0}), must be one of {1}\"", "# noqa: E501", ".", "format", "(", "summarization", ",", "allowed_values", ")", ")", "self", ".", "_summarization", "=", "summarization" ]
Guess the sequence type from the file's contents.
def _guess_seqtype_from_file ( handle ) : if isinstance ( handle , basestring ) : handle = StringIO ( handle ) for line in handle : if not line . strip ( ) : continue if line . lstrip ( ) . split ( ) [ 0 ] in ( 'LOCUS' , 'FEATURES' , 'source' , 'CDS' , 'gene' ) : return 'genbank' if len ( line ) > 2 and line [ : 3 ] in ( 'ID ' , 'FT ' ) : return 'embl' if line . startswith ( '>' ) : return 'fasta' handle . seek ( 0 ) import string from Bio . Data import IUPACData as iupac all_input_letters = set ( handle . read ( ) . lower ( ) ) all_valid = set ( string . digits ) all_valid . update ( set ( iupac . protein_letters . lower ( ) ) ) all_valid . update ( set ( iupac . unambiguous_dna_letters . lower ( ) ) ) all_valid . update ( set ( '- \n' ) ) if all_valid . issuperset ( all_input_letters ) : return 'fasta' raise ValueError ( "Failed to guess format for input" )
12,214
https://github.com/kblin/bioinf-helperlibs/blob/3a732d62b4b3cc42675631db886ba534672cb134/helperlibs/bio/seqio.py#L58-L84
[ "def", "user_deleted_from_site_event", "(", "event", ")", ":", "userid", "=", "event", ".", "principal", "catalog", "=", "api", ".", "portal", ".", "get_tool", "(", "'portal_catalog'", ")", "query", "=", "{", "'object_provides'", ":", "WORKSPACE_INTERFACE", "}", "query", "[", "'workspace_members'", "]", "=", "userid", "workspaces", "=", "[", "IWorkspace", "(", "b", ".", "_unrestrictedGetObject", "(", ")", ")", "for", "b", "in", "catalog", ".", "unrestrictedSearchResults", "(", "query", ")", "]", "for", "workspace", "in", "workspaces", ":", "workspace", ".", "remove_from_team", "(", "userid", ")" ]
Transparently unzip the file handle
def _unzip_handle(handle):
    '''Transparently wraps a gzip reader around a filename or open handle.'''
    if isinstance(handle, basestring):
        return _gzip_open_filename(handle)
    return _gzip_open_handle(handle)
12,215
https://github.com/kblin/bioinf-helperlibs/blob/3a732d62b4b3cc42675631db886ba534672cb134/helperlibs/bio/seqio.py#L87-L93
[ "def", "choose_database_name", "(", "metadata", ",", "config", ")", ":", "if", "config", ".", "database_name", "is", "not", "None", ":", "# we allow -- but do not encourage -- database name configuration", "return", "config", ".", "database_name", "if", "metadata", ".", "testing", ":", "# by convention, we provision different databases for unit testing and runtime", "return", "f\"{metadata.name}_test_db\"", "return", "f\"{metadata.name}_db\"" ]
Sanity check for insdcio style files
def sanity_check_insdcio(handle, id_marker, fake_id_line):
    '''Sanity check for INSDC-style (GenBank/EMBL) files.

    When the handle contains a record end marker ('//') but no ID line,
    a fake ID line is prepended so downstream parsers don't choke.
    Otherwise the rewound handle is returned unchanged.
    '''
    found_id = False
    found_end_marker = False
    for raw_line in handle:
        line = raw_line.strip()
        if not line:
            continue
        if line.startswith(id_marker):
            found_id = True
            break
        if line.startswith('//'):
            found_end_marker = True
            break
    handle.seek(0)

    # An ID means the file looks good; no end marker means we give up —
    # either way, hand back the original handle.
    if found_id or not found_end_marker:
        return handle

    # End marker but no ID: fake one.
    patched = StringIO()
    patched.write("%s\n" % fake_id_line)
    patched.write(handle.read())
    patched.seek(0)
    return patched
12,216
https://github.com/kblin/bioinf-helperlibs/blob/3a732d62b4b3cc42675631db886ba534672cb134/helperlibs/bio/seqio.py#L117-L146
[ "def", "GetRootKey", "(", "self", ")", ":", "root_registry_key", "=", "virtual", ".", "VirtualWinRegistryKey", "(", "''", ")", "for", "mapped_key", "in", "self", ".", "_MAPPED_KEYS", ":", "key_path_segments", "=", "key_paths", ".", "SplitKeyPath", "(", "mapped_key", ")", "if", "not", "key_path_segments", ":", "continue", "registry_key", "=", "root_registry_key", "for", "name", "in", "key_path_segments", "[", ":", "-", "1", "]", ":", "sub_registry_key", "=", "registry_key", ".", "GetSubkeyByName", "(", "name", ")", "if", "not", "sub_registry_key", ":", "sub_registry_key", "=", "virtual", ".", "VirtualWinRegistryKey", "(", "name", ")", "registry_key", ".", "AddSubkey", "(", "sub_registry_key", ")", "registry_key", "=", "sub_registry_key", "sub_registry_key", "=", "registry_key", ".", "GetSubkeyByName", "(", "key_path_segments", "[", "-", "1", "]", ")", "if", "(", "not", "sub_registry_key", "and", "isinstance", "(", "registry_key", ",", "virtual", ".", "VirtualWinRegistryKey", ")", ")", ":", "sub_registry_key", "=", "virtual", ".", "VirtualWinRegistryKey", "(", "key_path_segments", "[", "-", "1", "]", ",", "registry", "=", "self", ")", "registry_key", ".", "AddSubkey", "(", "sub_registry_key", ")", "return", "root_registry_key" ]
Sanity check FASTA files .
def sanity_check_fasta(handle):
    '''Sanity check FASTA files, prepending a dummy header if none exists.'''
    has_header = False
    for line in handle:
        if line.startswith('>'):
            has_header = True
            break
    handle.seek(0)
    if has_header:
        return handle

    fake_header_line = ">DUMMY"
    patched = StringIO()
    patched.write("%s\n" % fake_header_line)
    patched.write(handle.read())
    patched.seek(0)
    return patched
12,217
https://github.com/kblin/bioinf-helperlibs/blob/3a732d62b4b3cc42675631db886ba534672cb134/helperlibs/bio/seqio.py#L163-L181
[ "def", "_put_bucket_policy", "(", "self", ")", ":", "if", "self", ".", "s3props", "[", "'bucket_policy'", "]", ":", "policy_str", "=", "json", ".", "dumps", "(", "self", ".", "s3props", "[", "'bucket_policy'", "]", ")", "_response", "=", "self", ".", "s3client", ".", "put_bucket_policy", "(", "Bucket", "=", "self", ".", "bucket", ",", "Policy", "=", "policy_str", ")", "else", ":", "_response", "=", "self", ".", "s3client", ".", "delete_bucket_policy", "(", "Bucket", "=", "self", ".", "bucket", ")", "LOG", ".", "debug", "(", "'Response adding bucket policy: %s'", ",", "_response", ")", "LOG", ".", "info", "(", "'S3 Bucket Policy Attached'", ")" ]
Wrap SeqIO . parse
def parse(handle, seqtype=None, robust=False):
    '''Wraps SeqIO.parse, guessing the format and optionally fixing input.

    Args:
        handle: filename or file-like object.
        seqtype: explicit format; guessed from the extension when None.
        robust: when True, run a sanity check that patches header-less files.

    Returns:
        The SeqIO.parse iterator for the (possibly unzipped/patched) handle.
    '''
    if seqtype is None:
        seqtype = _get_seqtype_from_ext(handle)
    if seqtype.startswith('gz-'):
        handle = _unzip_handle(handle)
        seqtype = seqtype[3:]

    if robust:
        # Dispatch to the format-specific sanity checker, if any.
        checkers = {
            "embl": sanity_check_embl,
            "genbank": sanity_check_genbank,
            "fasta": sanity_check_fasta,
        }
        checker = checkers.get(seqtype)
        if checker is not None:
            handle = checker(handle)

    return SeqIO.parse(handle, seqtype)
12,218
https://github.com/kblin/bioinf-helperlibs/blob/3a732d62b4b3cc42675631db886ba534672cb134/helperlibs/bio/seqio.py#L184-L204
[ "def", "remove", "(", "self", ",", "value", ",", "_sa_initiator", "=", "None", ")", ":", "key", "=", "self", ".", "keyfunc", "(", "value", ")", "# Let self[key] raise if key is not in this collection", "# testlib.pragma exempt:__ne__", "if", "not", "self", ".", "__contains__", "(", "key", ")", "or", "value", "not", "in", "self", "[", "key", "]", ":", "raise", "sa_exc", ".", "InvalidRequestError", "(", "\"Can not remove '%s': collection holds '%s' for key '%s'. \"", "\"Possible cause: is the MappedCollection key function \"", "\"based on mutable properties or properties that only obtain \"", "\"values after flush?\"", "%", "(", "value", ",", "self", "[", "key", "]", ",", "key", ")", ")", "self", ".", "__getitem__", "(", "key", ",", "_sa_initiator", ")", ".", "remove", "(", "value", ")" ]
Returns whether the order of ports is fixed .
def isOrderFixed(self):
    '''Returns whether the order of ports is fixed for this constraint.'''
    fixed_order_constraints = (PortConstraints.FIXED_ORDER,
                               PortConstraints.FIXED_RATIO,
                               PortConstraints.FIXED_POS)
    return self in fixed_order_constraints
12,219
https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/containers/constants.py#L98-L106
[ "def", "destroy_sns_event", "(", "app_name", ",", "env", ",", "region", ")", ":", "session", "=", "boto3", ".", "Session", "(", "profile_name", "=", "env", ",", "region_name", "=", "region", ")", "sns_client", "=", "session", ".", "client", "(", "'sns'", ")", "lambda_subscriptions", "=", "get_sns_subscriptions", "(", "app_name", "=", "app_name", ",", "env", "=", "env", ",", "region", "=", "region", ")", "for", "subscription_arn", "in", "lambda_subscriptions", ":", "sns_client", ".", "unsubscribe", "(", "SubscriptionArn", "=", "subscription_arn", ")", "LOG", ".", "debug", "(", "\"Lambda SNS event deleted\"", ")", "return", "True" ]
Given a List of Dictionaries with uniform keys returns a single Dictionary with keys holding a List of values matching the key in the original List .
def _dicts_to_columns ( dicts ) : keys = dicts [ 0 ] . keys ( ) result = dict ( ( k , [ ] ) for k in keys ) for d in dicts : for k , v in d . items ( ) : result [ k ] += [ v ] return result
12,220
https://github.com/testedminds/sand/blob/234f0eedb0742920cdf26da9bc84bf3f863a2f02/sand/graph.py#L25-L43
[ "def", "_update_eof", "(", "self", ")", ":", "self", ".", "_aftermathmp", "(", ")", "self", ".", "_ifile", ".", "close", "(", ")", "self", ".", "_flag_e", "=", "True" ]
This representation assumes that vertices and edges are encoded in two lists each list containing a Python dict for each vertex and each edge respectively . A distinguished element of the vertex dicts contain a vertex ID which is used in the edge dicts to refer to source and target vertices . All the remaining elements of the dicts are considered vertex and edge attributes .
def from_vertices_and_edges(vertices, edges, vertex_name_key='name', vertex_id_key='id',
                            edge_foreign_keys=('source', 'target'), directed=True):
    """Builds an IGraph from lists of vertex and edge dictionaries.

    Vertices and edges are each a list of dicts with uniform keys. A
    distinguished element of the vertex dicts (`vertex_id_key`) carries a
    vertex ID, which the edge dicts use (via `edge_foreign_keys`) to refer
    to source and target vertices. All remaining dict entries become vertex
    and edge attributes.

    Args:
        vertices: list of uniform-keyed vertex dicts.
        edges: list of uniform-keyed edge dicts.
        vertex_name_key: vertex-dict key used for names/labels.
        vertex_id_key: vertex-dict key holding the unique vertex ID.
        edge_foreign_keys: pair of edge-dict keys naming source and target IDs.
        directed: whether to build a directed graph.

    Returns:
        The constructed IGraph, with name/label/indegree/outdegree/group
        vertex attributes filled in.
    """
    vertex_data = _dicts_to_columns(vertices)
    edge_data = _dicts_to_columns(edges)
    n = len(vertices)
    # Map external vertex IDs onto contiguous igraph indices 0..n-1.
    vertex_index = dict(zip(vertex_data[vertex_id_key], range(n)))
    # Iterate over `edges` to create `edge_list`, where every list item is a pair of integers.
    edge_list = list(map(lambda source, target: (vertex_index[source], vertex_index[target]),
                         edge_data[edge_foreign_keys[0]],
                         edge_data[edge_foreign_keys[1]]))
    g = IGraph(n=n, edges=edge_list, directed=directed,
               vertex_attrs=vertex_data, edge_attrs=edge_data)
    g.vs['name'] = g.vs[vertex_name_key]
    g.vs['indegree'] = g.degree(mode="in")
    g.vs['outdegree'] = g.degree(mode="out")
    g.vs['label'] = g.vs[vertex_name_key]
    # Derive a default grouping from labels unless one was supplied.
    if 'group' not in g.vs.attributes():
        g.vs['group'] = labels_to_groups(g.vs['label'])
    return g
12,221
https://github.com/testedminds/sand/blob/234f0eedb0742920cdf26da9bc84bf3f863a2f02/sand/graph.py#L46-L84
[ "def", "dead_chips", "(", "self", ")", ":", "for", "x", "in", "range", "(", "self", ".", "width", ")", ":", "for", "y", "in", "range", "(", "self", ".", "height", ")", ":", "if", "(", "x", ",", "y", ")", "not", "in", "self", ":", "yield", "(", "x", ",", "y", ")" ]
Given a List of Dictionaries with source target and weight attributes return a weighted directed graph .
def from_edges(edges, source_key='source', target_key='target', weight_key='weight', directed=True):
    '''Builds a weighted graph from a list of edge dictionaries.

    Args:
        edges: list of dicts, each holding source, target and weight entries.
        source_key: dict key naming the edge source.
        target_key: dict key naming the edge target.
        weight_key: dict key naming the (int-convertible) edge weight.
        directed: whether to build a directed graph.

    Returns:
        The constructed IGraph, with label/indegree/outdegree/group
        vertex attributes filled in.
    '''
    raw = [[edge[source_key], edge[target_key], int(edge[weight_key])] for edge in edges]
    g = IGraph.TupleList(raw, weights=True, directed=directed)
    g.vs['indegree'] = g.degree(mode="in")
    g.vs['outdegree'] = g.degree(mode="out")
    g.vs['label'] = g.vs['name']
    if 'group' not in g.vs.attributes():
        g.vs['group'] = labels_to_groups(g.vs['label'])
    return g
12,222
https://github.com/testedminds/sand/blob/234f0eedb0742920cdf26da9bc84bf3f863a2f02/sand/graph.py#L87-L98
[ "def", "add_quantity_modifier", "(", "self", ",", "quantity", ",", "modifier", ",", "overwrite", "=", "False", ")", ":", "if", "quantity", "in", "self", ".", "_quantity_modifiers", "and", "not", "overwrite", ":", "raise", "ValueError", "(", "'quantity `{}` already exists'", ".", "format", "(", "quantity", ")", ")", "self", ".", "_quantity_modifiers", "[", "quantity", "]", "=", "modifier", "self", ".", "_check_quantities_exist", "(", "[", "quantity", "]", ",", "raise_exception", "=", "False", ")" ]
Flip the alleles of an Genotypes instance .
def flip_alleles(genotypes):
    '''Flips the reference and coded alleles of a Genotypes instance.

    Deprecated: use 'Genotypes.flip_coded' instead.

    Args:
        genotypes: the instance to flip in place.

    Returns:
        The same instance, with alleles swapped and dosages mirrored (2 - g).
    '''
    warnings.warn("deprecated: use 'Genotypes.flip_coded'", DeprecationWarning)
    genotypes.reference, genotypes.coded = genotypes.coded, genotypes.reference
    genotypes.genotypes = 2 - genotypes.genotypes
    return genotypes
12,223
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/utils.py#L45-L51
[ "def", "pull_session", "(", "session_id", "=", "None", ",", "url", "=", "'default'", ",", "io_loop", "=", "None", ",", "arguments", "=", "None", ")", ":", "coords", "=", "_SessionCoordinates", "(", "session_id", "=", "session_id", ",", "url", "=", "url", ")", "session", "=", "ClientSession", "(", "session_id", "=", "session_id", ",", "websocket_url", "=", "websocket_url_for_server_url", "(", "coords", ".", "url", ")", ",", "io_loop", "=", "io_loop", ",", "arguments", "=", "arguments", ")", "session", ".", "pull", "(", ")", "return", "session" ]
Encode the genotypes with respect to the minor allele .
def code_minor ( genotypes ) : warnings . warn ( "deprecated: use 'Genotypes.code_minor'" , DeprecationWarning ) _ , minor_coded = maf ( genotypes ) if not minor_coded : return flip_alleles ( genotypes ) return genotypes
12,224
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/utils.py#L54-L69
[ "def", "export_dashboards", "(", "session", ")", ":", "logging", ".", "info", "(", "'Starting export'", ")", "dashboards", "=", "session", ".", "query", "(", "Dashboard", ")", "dashboard_ids", "=", "[", "]", "for", "dashboard", "in", "dashboards", ":", "dashboard_ids", ".", "append", "(", "dashboard", ".", "id", ")", "data", "=", "Dashboard", ".", "export_dashboards", "(", "dashboard_ids", ")", "return", "data" ]
Computes the MAF and returns a boolean indicating if the minor allele is currently the coded allele .
def maf ( genotypes ) : warnings . warn ( "deprecated: use 'Genotypes.maf'" , DeprecationWarning ) g = genotypes . genotypes maf = np . nansum ( g ) / ( 2 * np . sum ( ~ np . isnan ( g ) ) ) if maf > 0.5 : maf = 1 - maf return maf , False return maf , True
12,225
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/utils.py#L72-L84
[ "def", "_wait_and_except_if_failed", "(", "self", ",", "event", ",", "timeout", "=", "None", ")", ":", "event", ".", "wait", "(", "timeout", "or", "self", ".", "__sync_timeout", ")", "self", ".", "_except_if_failed", "(", "event", ")" ]
Convert a genotype object to a pandas dataframe .
def genotype_to_df ( g , samples , as_string = False ) : name = g . variant . name if g . variant . name else "genotypes" df = pd . DataFrame ( g . genotypes , index = samples , columns = [ name ] ) if as_string : df [ "alleles" ] = None hard_calls = df [ name ] . round ( ) df . loc [ hard_calls == 0 , "alleles" ] = "{0}/{0}" . format ( g . reference ) df . loc [ hard_calls == 1 , "alleles" ] = "{0}/{1}" . format ( g . reference , g . coded ) df . loc [ hard_calls == 2 , "alleles" ] = "{0}/{0}" . format ( g . coded ) df = df [ [ "alleles" ] ] df . columns = [ name ] return df
12,226
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/utils.py#L133-L155
[ "def", "dump", "(", "self", ")", ":", "assert", "self", ".", "database", "is", "not", "None", "cmd", "=", "\"SELECT count from {} WHERE rowid={}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_INFO_TABLE", ",", "self", ".", "STATE_INFO_ROW", ")", ")", "ret", "=", "self", ".", "_fetchall", "(", ")", "assert", "len", "(", "ret", ")", "==", "1", "assert", "len", "(", "ret", "[", "0", "]", ")", "==", "1", "count", "=", "self", ".", "_from_sqlite", "(", "ret", "[", "0", "]", "[", "0", "]", ")", "+", "self", ".", "inserts", "if", "count", ">", "self", ".", "row_limit", ":", "msg", "=", "\"cleaning up state, this might take a while.\"", "logger", ".", "warning", "(", "msg", ")", "delete", "=", "count", "-", "self", ".", "row_limit", "delete", "+=", "int", "(", "self", ".", "row_limit", "*", "(", "self", ".", "row_cleanup_quota", "/", "100.0", ")", ")", "cmd", "=", "(", "\"DELETE FROM {} WHERE timestamp IN (\"", "\"SELECT timestamp FROM {} ORDER BY timestamp ASC LIMIT {});\"", ")", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_TABLE", ",", "self", ".", "STATE_TABLE", ",", "delete", ")", ")", "self", ".", "_vacuum", "(", ")", "cmd", "=", "\"SELECT COUNT(*) FROM {}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_TABLE", ")", ")", "ret", "=", "self", ".", "_fetchall", "(", ")", "assert", "len", "(", "ret", ")", "==", "1", "assert", "len", "(", "ret", "[", "0", "]", ")", "==", "1", "count", "=", "ret", "[", "0", "]", "[", "0", "]", "cmd", "=", "\"UPDATE {} SET count = {} WHERE rowid = {}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_INFO_TABLE", ",", "self", ".", "_to_sqlite", "(", "count", ")", ",", "self", ".", "STATE_INFO_ROW", ",", ")", ")", "self", ".", "_update_cache_directory_state", "(", ")", "self", ".", "database", ".", "commit", "(", ")", "self", ".", "cursor", ".", "close", "(", ")", "self", ".", "database", ".", "close", "(", ")", 
"self", ".", "database", "=", "None", "self", ".", "cursor", "=", "None", "self", ".", "inserts", "=", "0" ]
Compute LD between a marker and a list of markers .
def compute_ld ( cur_geno , other_genotypes , r2 = False ) : # Normalizing the current genotypes norm_cur = normalize_genotypes ( cur_geno ) # Normalizing and creating the matrix for the other genotypes norm_others = np . stack ( tuple ( normalize_genotypes ( g ) for g in other_genotypes ) , axis = 1 , ) # Making sure the size is the same assert norm_cur . shape [ 0 ] == norm_others . shape [ 0 ] # Getting the number of "samples" per marker (taking into account NaN) n = ( ~ np . isnan ( norm_cur . reshape ( norm_cur . shape [ 0 ] , 1 ) ) * ~ np . isnan ( norm_others ) ) . sum ( axis = 0 ) # Computing r (replacing NaN by 0) r = pd . Series ( np . dot ( np . nan_to_num ( norm_cur ) , np . nan_to_num ( norm_others ) / n ) , index = [ g . variant . name for g in other_genotypes ] , name = "r2" if r2 else "r" , ) # Checking no "invalid" values (i.e. < -1 or > 1) r . loc [ r > 1 ] = 1 r . loc [ r < - 1 ] = - 1 if r2 : return r ** 2 else : return r
12,227
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/utils.py#L158-L207
[ "def", "calc_ifft_with_PyCUDA", "(", "Signalfft", ")", ":", "print", "(", "\"starting ifft\"", ")", "Signalfft", "=", "Signalfft", ".", "astype", "(", "_np", ".", "complex64", ")", "Signalfft_gpu", "=", "_gpuarray", ".", "to_gpu", "(", "Signalfft", "[", "0", ":", "len", "(", "Signalfft", ")", "//", "2", "+", "1", "]", ")", "Signal_gpu", "=", "_gpuarray", ".", "empty", "(", "len", "(", "Signalfft", ")", ",", "_np", ".", "float32", ")", "plan", "=", "_Plan", "(", "len", "(", "Signalfft", ")", ",", "_np", ".", "complex64", ",", "_np", ".", "float32", ")", "_ifft", "(", "Signalfft_gpu", ",", "Signal_gpu", ",", "plan", ")", "Signal", "=", "Signal_gpu", ".", "get", "(", ")", "/", "(", "2", "*", "len", "(", "Signalfft", ")", ")", "#normalising as CUDA IFFT is un-normalised", "print", "(", "\"ifft done\"", ")", "return", "Signal" ]
Normalize the genotypes .
def normalize_genotypes ( genotypes ) : genotypes = genotypes . genotypes return ( genotypes - np . nanmean ( genotypes ) ) / np . nanstd ( genotypes )
12,228
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/utils.py#L210-L221
[ "def", "sink_pubsub", "(", "client", ",", "to_delete", ")", ":", "topic", "=", "_sink_pubsub_setup", "(", "client", ")", "to_delete", ".", "append", "(", "topic", ")", "SINK_NAME", "=", "\"robots-pubsub-%d\"", "%", "(", "_millis", "(", ")", ",", ")", "FILTER", "=", "\"logName:apache-access AND textPayload:robot\"", "UPDATED_FILTER", "=", "\"textPayload:robot\"", "# [START sink_pubsub_create]", "DESTINATION", "=", "\"pubsub.googleapis.com/%s\"", "%", "(", "topic", ".", "full_name", ",", ")", "sink", "=", "client", ".", "sink", "(", "SINK_NAME", ",", "filter_", "=", "FILTER", ",", "destination", "=", "DESTINATION", ")", "assert", "not", "sink", ".", "exists", "(", ")", "# API call", "sink", ".", "create", "(", ")", "# API call", "assert", "sink", ".", "exists", "(", ")", "# API call", "# [END sink_pubsub_create]", "to_delete", ".", "insert", "(", "0", ",", "sink", ")", "# delete sink before topic", "# [START client_list_sinks]", "for", "sink", "in", "client", ".", "list_sinks", "(", ")", ":", "# API call(s)", "do_something_with", "(", "sink", ")", "# [END client_list_sinks]", "# [START sink_reload]", "existing_sink", "=", "client", ".", "sink", "(", "SINK_NAME", ")", "existing_sink", ".", "reload", "(", ")", "# [END sink_reload]", "assert", "existing_sink", ".", "filter_", "==", "FILTER", "assert", "existing_sink", ".", "destination", "==", "DESTINATION", "# [START sink_update]", "existing_sink", ".", "filter_", "=", "UPDATED_FILTER", "existing_sink", ".", "update", "(", ")", "# [END sink_update]", "existing_sink", ".", "reload", "(", ")", "assert", "existing_sink", ".", "filter_", "==", "UPDATED_FILTER", "# [START sink_delete]", "sink", ".", "delete", "(", ")", "# [END sink_delete]", "to_delete", ".", "pop", "(", "0", ")" ]
For a given model return a tdMan instance
def _get_tdm ( self , m ) : m = np . atleast_2d ( m ) assert len ( m . shape ) == 2 tdm = crtomo . tdMan ( grid = self . grid , tempdir = self . tempdir ) tdm . configs . add_to_configs ( self . configs ) pid_mag = tdm . parman . add_data ( m [ 0 , : ] ) tdm . register_magnitude_model ( pid_mag ) if m . shape [ 0 ] == 2 : pid_pha = tdm . parman . add_data ( m [ 1 , : ] ) else : pid_pha = tdm . parman . add_data ( np . zeros ( m . shape [ 1 ] ) ) tdm . register_phase_model ( pid_pha ) return tdm
12,229
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/interface.py#L43-L63
[ "def", "sync", "(", "self", ",", "since", "=", "None", ",", "timeout_ms", "=", "30000", ",", "filter", "=", "None", ",", "full_state", "=", "None", ",", "set_presence", "=", "None", ")", ":", "request", "=", "{", "# non-integer timeouts appear to cause issues", "\"timeout\"", ":", "int", "(", "timeout_ms", ")", "}", "if", "since", ":", "request", "[", "\"since\"", "]", "=", "since", "if", "filter", ":", "request", "[", "\"filter\"", "]", "=", "filter", "if", "full_state", ":", "request", "[", "\"full_state\"", "]", "=", "json", ".", "dumps", "(", "full_state", ")", "if", "set_presence", ":", "request", "[", "\"set_presence\"", "]", "=", "set_presence", "return", "self", ".", "_send", "(", "\"GET\"", ",", "\"/sync\"", ",", "query_params", "=", "request", ",", "api_path", "=", "MATRIX_V2_API_PATH", ")" ]
Return the sensitivity matrix
def J ( self , log_sigma ) : m = 1.0 / np . exp ( log_sigma ) tdm = self . _get_tdm ( m ) tdm . model ( sensitivities = True , # output_directory=stage_dir + 'modeling', ) measurements = tdm . measurements ( ) # build up the sensitivity matrix sens_list = [ ] for config_nr , cids in sorted ( tdm . assignments [ 'sensitivities' ] . items ( ) ) : sens_list . append ( tdm . parman . parsets [ cids [ 0 ] ] ) sensitivities_lin = np . array ( sens_list ) # now convert to the log-sensitivities relevant for CRTomo and the # resolution matrix sensitivities_log = sensitivities_lin # multiply measurements on first dimension measurements_rep = np . repeat ( measurements [ : , 0 , np . newaxis ] , sensitivities_lin . shape [ 1 ] , axis = 1 ) # sensitivities_log = sensitivities_log * mfit # multiply resistivities on second dimension m_rep = np . repeat ( m [ np . newaxis , : ] , sensitivities_lin . shape [ 0 ] , axis = 0 ) # eq. 3.41 in Kemna, 2000: notice that m_rep here is in rho, not sigma factor = - 1 / ( m_rep * measurements_rep ) sensitivities_log = factor * sensitivities_lin # import IPython # IPython.embed() return sensitivities_log
12,230
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/interface.py#L88-L136
[ "def", "to_volume", "(", "mesh", ",", "file_name", "=", "None", ",", "max_element", "=", "None", ",", "mesher_id", "=", "1", ")", ":", "# checks mesher selection", "if", "mesher_id", "not", "in", "[", "1", ",", "4", ",", "7", ",", "10", "]", ":", "raise", "ValueError", "(", "'unavilable mesher selected!'", ")", "else", ":", "mesher_id", "=", "int", "(", "mesher_id", ")", "# set max element length to a best guess if not specified", "if", "max_element", "is", "None", ":", "max_element", "=", "np", ".", "sqrt", "(", "np", ".", "mean", "(", "mesh", ".", "area_faces", ")", ")", "if", "file_name", "is", "not", "None", ":", "# check extensions to make sure it is supported format", "if", "not", "any", "(", "file_name", ".", "lower", "(", ")", ".", "endswith", "(", "e", ")", "for", "e", "in", "[", "'.bdf'", ",", "'.msh'", ",", "'.inp'", ",", "'.diff'", ",", "'.mesh'", "]", ")", ":", "raise", "ValueError", "(", "'Only Nastran (.bdf), Gmsh (.msh), Abaqus (*.inp), '", "+", "'Diffpack (*.diff) and Inria Medit (*.mesh) formats '", "+", "'are available!'", ")", "# exports to disk for gmsh to read using a temp file", "mesh_file", "=", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "'.stl'", ",", "delete", "=", "False", ")", "mesh_file", ".", "close", "(", ")", "mesh", ".", "export", "(", "mesh_file", ".", "name", ")", "# starts Gmsh Python API script", "gmsh", ".", "initialize", "(", ")", "gmsh", ".", "option", ".", "setNumber", "(", "\"General.Terminal\"", ",", "1", ")", "gmsh", ".", "model", ".", "add", "(", "'Nastran_stl'", ")", "gmsh", ".", "merge", "(", "mesh_file", ".", "name", ")", "dimtag", "=", "gmsh", ".", "model", ".", "getEntities", "(", ")", "[", "0", "]", "dim", "=", "dimtag", "[", "0", "]", "tag", "=", "dimtag", "[", "1", "]", "surf_loop", "=", "gmsh", ".", "model", ".", "geo", ".", "addSurfaceLoop", "(", "[", "tag", "]", ")", "gmsh", ".", "model", ".", "geo", ".", "addVolume", "(", "[", "surf_loop", "]", ")", "gmsh", ".", "model", ".", "geo", ".", 
"synchronize", "(", ")", "# We can then generate a 3D mesh...", "gmsh", ".", "option", ".", "setNumber", "(", "\"Mesh.Algorithm3D\"", ",", "mesher_id", ")", "gmsh", ".", "option", ".", "setNumber", "(", "\"Mesh.CharacteristicLengthMax\"", ",", "max_element", ")", "gmsh", ".", "model", ".", "mesh", ".", "generate", "(", "3", ")", "dimtag2", "=", "gmsh", ".", "model", ".", "getEntities", "(", ")", "[", "1", "]", "dim2", "=", "dimtag2", "[", "0", "]", "tag2", "=", "dimtag2", "[", "1", "]", "p2", "=", "gmsh", ".", "model", ".", "addPhysicalGroup", "(", "dim2", ",", "[", "tag2", "]", ")", "gmsh", ".", "model", ".", "setPhysicalName", "(", "dim", ",", "p2", ",", "'Nastran_bdf'", ")", "data", "=", "None", "# if file name is None, return msh data using a tempfile", "if", "file_name", "is", "None", ":", "out_data", "=", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "'.msh'", ",", "delete", "=", "False", ")", "# windows gets mad if two processes try to open the same file", "out_data", ".", "close", "(", ")", "gmsh", ".", "write", "(", "out_data", ".", "name", ")", "with", "open", "(", "out_data", ".", "name", ",", "'rb'", ")", "as", "f", ":", "data", "=", "f", ".", "read", "(", ")", "else", ":", "gmsh", ".", "write", "(", "file_name", ")", "# close up shop", "gmsh", ".", "finalize", "(", ")", "return", "data" ]
Map a list of Ironic UUID to BM nodes .
def set_ironic_uuid ( self , uuid_list ) : # TODO(Gonéri): ensure we adjust the correct node i = iter ( self . nodes ) for uuid in uuid_list : node = next ( i ) node . uuid = uuid
12,231
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/baremetal.py#L89-L96
[ "def", "load_data_table", "(", "file", ",", "index_col", ",", "show_progress", "=", "False", ")", ":", "with", "ZipFile", "(", "file", ")", "as", "zip_file", ":", "file_names", "=", "zip_file", ".", "namelist", "(", ")", "assert", "len", "(", "file_names", ")", "==", "1", ",", "\"Expected a single file from Quandl.\"", "wiki_prices", "=", "file_names", ".", "pop", "(", ")", "with", "zip_file", ".", "open", "(", "wiki_prices", ")", "as", "table_file", ":", "if", "show_progress", ":", "log", ".", "info", "(", "'Parsing raw data.'", ")", "data_table", "=", "pd", ".", "read_csv", "(", "table_file", ",", "parse_dates", "=", "[", "'date'", "]", ",", "index_col", "=", "index_col", ",", "usecols", "=", "[", "'ticker'", ",", "'date'", ",", "'open'", ",", "'high'", ",", "'low'", ",", "'close'", ",", "'volume'", ",", "'ex-dividend'", ",", "'split_ratio'", ",", "]", ",", ")", "data_table", ".", "rename", "(", "columns", "=", "{", "'ticker'", ":", "'symbol'", ",", "'ex-dividend'", ":", "'ex_dividend'", ",", "}", ",", "inplace", "=", "True", ",", "copy", "=", "False", ",", ")", "return", "data_table" ]
Infer the antibiotics resistance of the given record .
def find_resistance ( record ) : for feature in record . features : labels = set ( feature . qualifiers . get ( "label" , [ ] ) ) cassettes = labels . intersection ( _ANTIBIOTICS ) if len ( cassettes ) > 1 : raise RuntimeError ( "multiple resistance cassettes detected" ) elif len ( cassettes ) == 1 : return _ANTIBIOTICS . get ( cassettes . pop ( ) ) raise RuntimeError ( "could not find the resistance of '{}'" . format ( record . id ) )
12,232
https://github.com/althonos/moclo/blob/28a03748df8a2fa43f0c0c8098ca64d11559434e/moclo/moclo/registry/_utils.py#L16-L33
[ "def", "_NormalizeKeyPath", "(", "self", ",", "key_path", ")", ":", "normalized_key_path", "=", "key_path", ".", "lower", "(", ")", "# The Registry key path should start with:", "# HKEY_LOCAL_MACHINE\\System\\ControlSet followed by 3 digits", "# which makes 39 characters.", "if", "(", "len", "(", "normalized_key_path", ")", "<", "39", "or", "not", "normalized_key_path", ".", "startswith", "(", "self", ".", "_CONTROL_SET_PREFIX", ")", ")", ":", "return", "normalized_key_path", "# Key paths that contain ControlSet### must be normalized to", "# CurrentControlSet.", "return", "''", ".", "join", "(", "[", "self", ".", "_NORMALIZED_CONTROL_SET_PREFIX", ",", "normalized_key_path", "[", "39", ":", "]", "]", ")" ]
Returns stdout as string or None on failure
def shell_cmd ( args , cwd = None ) : if cwd is None : cwd = os . path . abspath ( '.' ) if not isinstance ( args , ( list , tuple ) ) : args = [ args ] ps = Popen ( args , shell = True , cwd = cwd , stdout = PIPE , stderr = PIPE , close_fds = True ) stdout , stderr = ps . communicate ( ) if ps . returncode != 0 : if stderr : stderr = stderr . strip ( ) raise IOError ( 'Shell command %s failed (exit status %r): %s' % ( args , ps . returncode , stderr ) ) return stdout . strip ( )
12,233
https://github.com/rsms/tc/blob/db5da0def734246818f4a6e4531be63b7cbaa236/setup.py#L36-L51
[ "def", "check_orthogonal", "(", "angle", ")", ":", "flow_dir_taudem", "=", "-", "1", "flow_dir", "=", "-", "1", "if", "MathClass", ".", "floatequal", "(", "angle", ",", "FlowModelConst", ".", "e", ")", ":", "flow_dir_taudem", "=", "FlowModelConst", ".", "e", "flow_dir", "=", "1", "elif", "MathClass", ".", "floatequal", "(", "angle", ",", "FlowModelConst", ".", "ne", ")", ":", "flow_dir_taudem", "=", "FlowModelConst", ".", "ne", "flow_dir", "=", "128", "elif", "MathClass", ".", "floatequal", "(", "angle", ",", "FlowModelConst", ".", "n", ")", ":", "flow_dir_taudem", "=", "FlowModelConst", ".", "n", "flow_dir", "=", "64", "elif", "MathClass", ".", "floatequal", "(", "angle", ",", "FlowModelConst", ".", "nw", ")", ":", "flow_dir_taudem", "=", "FlowModelConst", ".", "nw", "flow_dir", "=", "32", "elif", "MathClass", ".", "floatequal", "(", "angle", ",", "FlowModelConst", ".", "w", ")", ":", "flow_dir_taudem", "=", "FlowModelConst", ".", "w", "flow_dir", "=", "16", "elif", "MathClass", ".", "floatequal", "(", "angle", ",", "FlowModelConst", ".", "sw", ")", ":", "flow_dir_taudem", "=", "FlowModelConst", ".", "sw", "flow_dir", "=", "8", "elif", "MathClass", ".", "floatequal", "(", "angle", ",", "FlowModelConst", ".", "s", ")", ":", "flow_dir_taudem", "=", "FlowModelConst", ".", "s", "flow_dir", "=", "4", "elif", "MathClass", ".", "floatequal", "(", "angle", ",", "FlowModelConst", ".", "se", ")", ":", "flow_dir_taudem", "=", "FlowModelConst", ".", "se", "flow_dir", "=", "2", "return", "flow_dir_taudem", ",", "flow_dir" ]
Return a new CircularRecord with reverse complement sequence .
def reverse_complement ( self , id = False , name = False , description = False , features = True , annotations = False , letter_annotations = True , dbxrefs = False , ) : return type ( self ) ( super ( CircularRecord , self ) . reverse_complement ( id = id , name = name , description = description , features = features , annotations = annotations , letter_annotations = letter_annotations , dbxrefs = dbxrefs , ) )
12,234
https://github.com/althonos/moclo/blob/28a03748df8a2fa43f0c0c8098ca64d11559434e/moclo/moclo/record.py#L141-L163
[ "def", "get_agents", "(", "self", ",", "addr", "=", "True", ",", "agent_cls", "=", "None", ",", "include_manager", "=", "False", ")", ":", "agents", "=", "list", "(", "self", ".", "agents", ".", "dict", ".", "values", "(", ")", ")", "if", "hasattr", "(", "self", ",", "'manager'", ")", "and", "self", ".", "manager", "is", "not", "None", ":", "if", "not", "include_manager", ":", "agents", "=", "[", "a", "for", "a", "in", "agents", "if", "a", ".", "addr", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "1", "]", "!=", "'0'", "]", "if", "agent_cls", "is", "not", "None", ":", "agents", "=", "[", "a", "for", "a", "in", "agents", "if", "type", "(", "a", ")", "is", "agent_cls", "]", "if", "addr", ":", "agents", "=", "[", "agent", ".", "addr", "for", "agent", "in", "agents", "]", "return", "agents" ]
Register the SSH private key .
def load_private_key ( self , priv_key ) : with open ( priv_key ) as fd : self . _private_key = paramiko . RSAKey . from_private_key ( fd )
12,235
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/ssh.py#L68-L71
[ "def", "standard_tensorboard_wsgi", "(", "flags", ",", "plugin_loaders", ",", "assets_zip_provider", ")", ":", "multiplexer", "=", "event_multiplexer", ".", "EventMultiplexer", "(", "size_guidance", "=", "DEFAULT_SIZE_GUIDANCE", ",", "tensor_size_guidance", "=", "tensor_size_guidance_from_flags", "(", "flags", ")", ",", "purge_orphaned_data", "=", "flags", ".", "purge_orphaned_data", ",", "max_reload_threads", "=", "flags", ".", "max_reload_threads", ")", "loading_multiplexer", "=", "multiplexer", "reload_interval", "=", "flags", ".", "reload_interval", "# For db import op mode, prefer reloading in a child process. See", "# https://github.com/tensorflow/tensorboard/issues/1467", "reload_task", "=", "flags", ".", "reload_task", "if", "reload_task", "==", "'auto'", "and", "flags", ".", "db_import", "and", "flags", ".", "db_import_use_op", ":", "reload_task", "==", "'process'", "db_uri", "=", "flags", ".", "db", "# For DB import mode, create a DB file if we weren't given one.", "if", "flags", ".", "db_import", "and", "not", "flags", ".", "db", ":", "tmpdir", "=", "tempfile", ".", "mkdtemp", "(", "prefix", "=", "'tbimport'", ")", "atexit", ".", "register", "(", "shutil", ".", "rmtree", ",", "tmpdir", ")", "db_uri", "=", "'sqlite:%s/tmp.sqlite'", "%", "tmpdir", "db_module", ",", "db_connection_provider", "=", "get_database_info", "(", "db_uri", ")", "if", "flags", ".", "db_import", ":", "# DB import mode.", "if", "db_module", "!=", "sqlite3", ":", "raise", "base_plugin", ".", "FlagsError", "(", "'--db_import is only compatible with sqlite DBs'", ")", "logger", ".", "info", "(", "'Importing logdir into DB at %s'", ",", "db_uri", ")", "loading_multiplexer", "=", "db_import_multiplexer", ".", "DbImportMultiplexer", "(", "db_connection_provider", "=", "db_connection_provider", ",", "purge_orphaned_data", "=", "flags", ".", "purge_orphaned_data", ",", "max_reload_threads", "=", "flags", ".", "max_reload_threads", ",", "use_import_op", "=", "flags", ".", "db_import_use_op", 
")", "elif", "flags", ".", "db", ":", "# DB read-only mode, never load event logs.", "reload_interval", "=", "-", "1", "plugin_name_to_instance", "=", "{", "}", "context", "=", "base_plugin", ".", "TBContext", "(", "db_module", "=", "db_module", ",", "db_connection_provider", "=", "db_connection_provider", ",", "db_uri", "=", "db_uri", ",", "flags", "=", "flags", ",", "logdir", "=", "flags", ".", "logdir", ",", "multiplexer", "=", "multiplexer", ",", "assets_zip_provider", "=", "assets_zip_provider", ",", "plugin_name_to_instance", "=", "plugin_name_to_instance", ",", "window_title", "=", "flags", ".", "window_title", ")", "plugins", "=", "[", "]", "for", "loader", "in", "plugin_loaders", ":", "plugin", "=", "loader", ".", "load", "(", "context", ")", "if", "plugin", "is", "None", ":", "continue", "plugins", ".", "append", "(", "plugin", ")", "plugin_name_to_instance", "[", "plugin", ".", "plugin_name", "]", "=", "plugin", "return", "TensorBoardWSGIApp", "(", "flags", ".", "logdir", ",", "plugins", ",", "loading_multiplexer", ",", "reload_interval", ",", "flags", ".", "path_prefix", ",", "reload_task", ")" ]
Start the ssh client and connect to the host .
def start ( self ) : if self . via_ip : connect_to = self . via_ip self . description = '[%s@%s via %s]' % ( self . _user , self . _hostname , self . via_ip ) else : connect_to = self . _hostname self . description = '[%s@%s]' % ( self . _user , self . _hostname ) exception = None for i in range ( 60 ) : try : self . _client . connect ( connect_to , username = self . _user , allow_agent = True , key_filename = self . _key_filename ) # NOTE(Gonéri): TypeError is in the list because of # https://github.com/paramiko/paramiko/issues/615 self . _transport = self . _get_transport ( ) except ( OSError , TypeError , ssh_exception . SSHException , ssh_exception . NoValidConnectionsError ) as e : exception = e LOG . info ( '%s waiting for %s: %s' % ( self . description , connect_to , str ( exception ) ) ) time . sleep ( 1 ) else : LOG . debug ( '%s connected' % self . description ) self . _started = True return _error = ( "unable to connect to ssh service on '%s': %s" % ( self . _hostname , str ( exception ) ) ) LOG . error ( _error ) raise exception
12,236
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/ssh.py#L98-L142
[ "def", "setOverlayTexelAspect", "(", "self", ",", "ulOverlayHandle", ",", "fTexelAspect", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayTexelAspect", "result", "=", "fn", "(", "ulOverlayHandle", ",", "fTexelAspect", ")", "return", "result" ]
Returns a channel according to if there is a redirection to do or not .
def _get_channel ( self ) : channel = self . _transport . open_session ( ) channel . set_combine_stderr ( True ) channel . get_pty ( ) return channel
12,237
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/ssh.py#L237-L244
[ "def", "port_list_compress", "(", "port_list", ")", ":", "if", "not", "port_list", "or", "len", "(", "port_list", ")", "==", "0", ":", "LOGGER", ".", "info", "(", "\"Invalid or empty port list.\"", ")", "return", "''", "port_list", "=", "sorted", "(", "set", "(", "port_list", ")", ")", "compressed_list", "=", "[", "]", "for", "key", ",", "group", "in", "itertools", ".", "groupby", "(", "enumerate", "(", "port_list", ")", ",", "lambda", "t", ":", "t", "[", "1", "]", "-", "t", "[", "0", "]", ")", ":", "group", "=", "list", "(", "group", ")", "if", "group", "[", "0", "]", "[", "1", "]", "==", "group", "[", "-", "1", "]", "[", "1", "]", ":", "compressed_list", ".", "append", "(", "str", "(", "group", "[", "0", "]", "[", "1", "]", ")", ")", "else", ":", "compressed_list", ".", "append", "(", "str", "(", "group", "[", "0", "]", "[", "1", "]", ")", "+", "'-'", "+", "str", "(", "group", "[", "-", "1", "]", "[", "1", "]", ")", ")", "return", "','", ".", "join", "(", "compressed_list", ")" ]
Print all the fields of a Bitfield object to stdout . This is primarly a diagnostic aid during debugging .
def print_fields ( bf , * args , * * kwargs ) : vals = { k : hex ( v ) for k , v in bf . items ( ) } print ( bf . base , vals , * args , * * kwargs )
12,238
https://github.com/NJDFan/ctypes-bitfield/blob/ae76b1dcfef7ecc90bd1900735b94ddee41a6376/bitfield/__init__.py#L201-L208
[ "def", "clean_cell_meta", "(", "self", ",", "meta", ")", ":", "for", "k", ",", "v", "in", "DEFAULT_CELL_METADATA", ".", "items", "(", ")", ":", "if", "meta", ".", "get", "(", "k", ",", "None", ")", "==", "v", ":", "meta", ".", "pop", "(", "k", ",", "None", ")", "return", "meta" ]
Return a new bitfield with the same value . The returned value is a copy and so is no longer linked to the original bitfield . This is important when the original is located at anything other than normal memory with accesses to it either slow or having side effects . Creating a clone and working against that clone means that only one read will occur .
def clone ( self ) : temp = self . __class__ ( ) temp . base = self . base return temp
12,239
https://github.com/NJDFan/ctypes-bitfield/blob/ae76b1dcfef7ecc90bd1900735b94ddee41a6376/bitfield/__init__.py#L44-L56
[ "def", "summary_table", "(", "errors", ":", "int", ",", "fails", ":", "int", ",", "warns", ":", "int", ",", "skips", ":", "int", ",", "infos", ":", "int", ",", "passes", ":", "int", ",", "total", ":", "int", ")", "->", "str", ":", "return", "f\"\"\"<h2>Summary</h2>\n <table>\n <tr>\n <th>{EMOTICON['ERROR']} ERROR</th>\n <th>{EMOTICON['FAIL']} FAIL</th>\n <th>{EMOTICON['WARN']} WARN</th>\n <th>{EMOTICON['SKIP']} SKIP</th>\n <th>{EMOTICON['INFO']} INFO</th>\n <th>{EMOTICON['PASS']} PASS</th>\n </tr>\n <tr>\n <td>{errors}</td>\n <td>{fails}</td>\n <td>{warns}</td>\n <td>{skips}</td>\n <td>{infos}</td>\n <td>{passes}</td>\n </tr>\n <tr>\n <td>{round(errors / total * 100)}%</td>\n <td>{round(fails / total * 100)}%</td>\n <td>{round(warns / total * 100)}%</td>\n <td>{round(skips / total * 100)}%</td>\n <td>{round(infos / total * 100)}%</td>\n <td>{round(passes / total * 100)}%</td>\n </tr>\n </table>\n \"\"\"" ]
Find the list of new paths in this comparison .
def new ( self , base : pathlib . PurePath = pathlib . PurePath ( ) , include_intermediates : bool = True ) -> Iterator [ str ] : if self . is_new : yield str ( base / self . right . name )
12,240
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L61-L76
[ "async", "def", "retrieve", "(", "self", ",", "url", ",", "*", "*", "kwargs", ")", ":", "try", ":", "async", "with", "self", ".", "websession", ".", "request", "(", "'GET'", ",", "url", ",", "*", "*", "kwargs", ")", "as", "res", ":", "if", "res", ".", "status", "!=", "200", ":", "raise", "Exception", "(", "\"Could not retrieve information from API\"", ")", "if", "res", ".", "content_type", "==", "'application/json'", ":", "return", "await", "res", ".", "json", "(", ")", "return", "await", "res", ".", "text", "(", ")", "except", "aiohttp", ".", "ClientError", "as", "err", ":", "logging", ".", "error", "(", "err", ")" ]
Find the paths of modified files . There is no option to include intermediate directories as all files and directories exist in both the left and right trees .
def modified ( self , base : pathlib . PurePath = pathlib . PurePath ( ) ) -> Iterator [ str ] : # N.B. this method will only ever return files, as directories cannot # be "modified" if self . is_modified : yield str ( base / self . right . name )
12,241
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L91-L105
[ "def", "get_host_address", "(", "host", "=", "None", ",", "default_address", "=", "DEFAULT_HOST_IP", ")", ":", "try", ":", "return", "unicode", "(", "socket", ".", "gethostbyname", "(", "host", "or", "socket", ".", "gethostname", "(", ")", ")", ",", "Constants", ".", "default_codec", ",", "Constants", ".", "codec_error", ")", "except", "Exception", "as", "error", ":", "return", "default_address" ]
Find the paths of entities deleted between the left and right entities in this comparison .
def deleted ( self , base : pathlib . PurePath = pathlib . PurePath ( ) , include_children : bool = True , include_directories : bool = True ) -> Iterator [ str ] : if self . is_deleted : yield str ( base / self . left . name )
12,242
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L116-L133
[ "def", "updatepLvlNextFunc", "(", "self", ")", ":", "orig_time", "=", "self", ".", "time_flow", "self", ".", "timeFwd", "(", ")", "pLvlNextFunc", "=", "[", "]", "for", "t", "in", "range", "(", "self", ".", "T_cycle", ")", ":", "pLvlNextFunc", ".", "append", "(", "LinearInterp", "(", "np", ".", "array", "(", "[", "0.", ",", "1.", "]", ")", ",", "np", ".", "array", "(", "[", "0.", ",", "self", ".", "PermGroFac", "[", "t", "]", "]", ")", ")", ")", "self", ".", "pLvlNextFunc", "=", "pLvlNextFunc", "self", ".", "addToTimeVary", "(", "'pLvlNextFunc'", ")", "if", "not", "orig_time", ":", "self", ".", "timeRev", "(", ")" ]
Calculate the comparison of two entities .
def compare ( left : Optional [ L ] , right : Optional [ R ] ) -> 'Comparison[L, R]' : if isinstance ( left , File ) and isinstance ( right , Directory ) : return FileDirectoryComparison ( left , right ) if isinstance ( left , Directory ) and isinstance ( right , File ) : return DirectoryFileComparison ( left , right ) if isinstance ( left , File ) or isinstance ( right , File ) : return FileComparison ( left , right ) if isinstance ( left , Directory ) or isinstance ( right , Directory ) : return DirectoryComparison ( left , right ) raise TypeError ( f'Cannot compare entities: {left}, {right}' )
12,243
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L157-L189
[ "async", "def", "postprocess_websocket", "(", "self", ",", "response", ":", "Optional", "[", "Response", "]", ",", "websocket_context", ":", "Optional", "[", "WebsocketContext", "]", "=", "None", ",", ")", "->", "Response", ":", "websocket_", "=", "(", "websocket_context", "or", "_websocket_ctx_stack", ".", "top", ")", ".", "websocket", "functions", "=", "(", "websocket_context", "or", "_websocket_ctx_stack", ".", "top", ")", ".", "_after_websocket_functions", "blueprint", "=", "websocket_", ".", "blueprint", "if", "blueprint", "is", "not", "None", ":", "functions", "=", "chain", "(", "functions", ",", "self", ".", "after_websocket_funcs", "[", "blueprint", "]", ")", "functions", "=", "chain", "(", "functions", ",", "self", ".", "after_websocket_funcs", "[", "None", "]", ")", "for", "function", "in", "functions", ":", "response", "=", "await", "function", "(", "response", ")", "session_", "=", "(", "websocket_context", "or", "_request_ctx_stack", ".", "top", ")", ".", "session", "if", "not", "self", ".", "session_interface", ".", "is_null_session", "(", "session_", ")", ":", "if", "response", "is", "None", "and", "isinstance", "(", "session_", ",", "SecureCookieSession", ")", "and", "session_", ".", "modified", ":", "self", ".", "logger", ".", "exception", "(", "\"Secure Cookie Session modified during websocket handling. \"", "\"These modifications will be lost as a cookie cannot be set.\"", ")", "else", ":", "await", "self", ".", "save_session", "(", "session_", ",", "response", ")", "return", "response" ]
Print this comparison and its children with indentation to represent nesting .
def print_hierarchy ( self , level : int = 0 , file : IO [ str ] = sys . stdout ) -> None : print ( ' ' * self . _INDENT_SIZE * level + str ( self ) , file = file )
12,244
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L191-L202
[ "def", "gen_input_view", "(", "sig_dic", ")", ":", "if", "sig_dic", "[", "'en'", "]", "==", "'tag_file_download'", ":", "html_str", "=", "HTML_TPL_DICT", "[", "'input_view_download'", "]", ".", "format", "(", "sig_zh", "=", "sig_dic", "[", "'zh'", "]", ",", "sig_unit", "=", "sig_dic", "[", "'dic'", "]", "[", "1", "]", ")", "elif", "sig_dic", "[", "'en'", "]", "in", "[", "'tag_access_link'", ",", "'tag_dmoz_url'", ",", "'tag_online_link'", ",", "'tag_event_url'", ",", "'tag_expert_home'", ",", "'tag_pic_url'", "]", ":", "html_str", "=", "HTML_TPL_DICT", "[", "'input_view_link'", "]", ".", "format", "(", "sig_dic", "[", "'en'", "]", ",", "sig_dic", "[", "'zh'", "]", ",", "sig_dic", "[", "'dic'", "]", "[", "1", "]", ")", "else", ":", "html_str", "=", "HTML_TPL_DICT", "[", "'input_view'", "]", ".", "format", "(", "sig_dic", "[", "'en'", "]", ",", "sig_dic", "[", "'zh'", "]", ",", "sig_dic", "[", "'dic'", "]", "[", "1", "]", ")", "return", "html_str" ]
Find whether the files on the left and right are different . Note modified implies the contents of the file have changed which is predicated on the file existing on both the left and right . Therefore this will be false if the file on the left has been deleted or the file on the right is new .
def is_modified ( self ) -> bool : if self . is_new or self . is_deleted : return False return self . left . md5 != self . right . md5
12,245
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L224-L236
[ "def", "GetStatus", "(", "self", ",", "Channel", ")", ":", "try", ":", "res", "=", "self", ".", "__m_dllBasic", ".", "CAN_GetStatus", "(", "Channel", ")", "return", "TPCANStatus", "(", "res", ")", "except", ":", "logger", ".", "error", "(", "\"Exception on PCANBasic.GetStatus\"", ")", "raise" ]
Build a index for the given file .
def generate_index ( fn , cols = None , names = None , sep = " " ) : # Some assertions assert cols is not None , "'cols' was not set" assert names is not None , "'names' was not set" assert len ( cols ) == len ( names ) # Getting the open function bgzip , open_func = get_open_func ( fn , return_fmt = True ) # Reading the required columns data = pd . read_csv ( fn , sep = sep , engine = "c" , usecols = cols , names = names , compression = "gzip" if bgzip else None ) # Getting the seek information f = open_func ( fn , "rb" ) data [ "seek" ] = np . fromiter ( _seek_generator ( f ) , dtype = np . uint ) [ : - 1 ] f . close ( ) # Saving the index to file write_index ( get_index_fn ( fn ) , data ) return data
12,246
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/index/impute2.py#L59-L92
[ "def", "user_deleted_from_site_event", "(", "event", ")", ":", "userid", "=", "event", ".", "principal", "catalog", "=", "api", ".", "portal", ".", "get_tool", "(", "'portal_catalog'", ")", "query", "=", "{", "'object_provides'", ":", "WORKSPACE_INTERFACE", "}", "query", "[", "'workspace_members'", "]", "=", "userid", "workspaces", "=", "[", "IWorkspace", "(", "b", ".", "_unrestrictedGetObject", "(", ")", ")", "for", "b", "in", "catalog", ".", "unrestrictedSearchResults", "(", "query", ")", "]", "for", "workspace", "in", "workspaces", ":", "workspace", ".", "remove_from_team", "(", "userid", ")" ]
Get the opening function .
def get_open_func ( fn , return_fmt = False ) : # The file might be compressed using bgzip bgzip = None with open ( fn , "rb" ) as i_file : bgzip = i_file . read ( 3 ) == b"\x1f\x8b\x08" if bgzip and not HAS_BIOPYTHON : raise ValueError ( "needs BioPython to index a bgzip file" ) open_func = open if bgzip : open_func = BgzfReader # Trying to read try : with open_func ( fn , "r" ) as i_file : if bgzip : if not i_file . seekable ( ) : raise ValueError pass except ValueError : raise ValueError ( "{}: use bgzip for compression..." . format ( fn ) ) if return_fmt : return bgzip , open_func return open_func
12,247
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/index/impute2.py#L95-L133
[ "def", "delete_logs", "(", "room", ")", ":", "from", "indico_chat", ".", "plugin", "import", "ChatPlugin", "base_url", "=", "ChatPlugin", ".", "settings", ".", "get", "(", "'log_url'", ")", "if", "not", "base_url", "or", "room", ".", "custom_server", ":", "return", "try", ":", "response", "=", "requests", ".", "get", "(", "posixpath", ".", "join", "(", "base_url", ",", "'delete'", ")", ",", "params", "=", "{", "'cr'", ":", "room", ".", "jid", "}", ")", ".", "json", "(", ")", "except", "(", "RequestException", ",", "ValueError", ")", ":", "current_plugin", ".", "logger", ".", "exception", "(", "'Could not delete logs for %s'", ",", "room", ".", "jid", ")", "return", "if", "not", "response", ".", "get", "(", "'success'", ")", ":", "current_plugin", ".", "logger", ".", "warning", "(", "'Could not delete logs for %s: %s'", ",", "room", ".", "jid", ",", "response", ".", "get", "(", "'error'", ")", ")" ]
Restores the index for a given file .
def get_index ( fn , cols , names , sep ) : if not has_index ( fn ) : # The index doesn't exists, generate it return generate_index ( fn , cols , names , sep ) # Retrieving the index file_index = read_index ( get_index_fn ( fn ) ) # Checking the names are there if len ( set ( names ) - ( set ( file_index . columns ) - { 'seek' } ) ) != 0 : raise ValueError ( "{}: missing index columns: reindex" . format ( fn ) ) if "seek" not in file_index . columns : raise ValueError ( "{}: invalid index: reindex" . format ( fn ) ) return file_index
12,248
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/index/impute2.py#L136-L165
[ "def", "unsubscribe_user_from_discussion", "(", "recID", ",", "uid", ")", ":", "query", "=", "\"\"\"DELETE FROM \"cmtSUBSCRIPTION\"\n WHERE id_bibrec=%s AND id_user=%s\"\"\"", "params", "=", "(", "recID", ",", "uid", ")", "try", ":", "res", "=", "run_sql", "(", "query", ",", "params", ")", "except", ":", "return", "0", "if", "res", ">", "0", ":", "return", "1", "return", "0" ]
Writes the index to file .
def write_index ( fn , index ) : with open ( fn , "wb" ) as o_file : o_file . write ( _CHECK_STRING ) o_file . write ( zlib . compress ( bytes ( index . to_csv ( None , index = False , encoding = "utf-8" ) , encoding = "utf-8" , ) ) )
12,249
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/index/impute2.py#L168-L181
[ "def", "AND", "(", "queryArr", ",", "exclude", "=", "None", ")", ":", "assert", "isinstance", "(", "queryArr", ",", "list", ")", ",", "\"provided argument as not a list\"", "assert", "len", "(", "queryArr", ")", ">", "0", ",", "\"queryArr had an empty list\"", "q", "=", "CombinedQuery", "(", ")", "q", ".", "setQueryParam", "(", "\"$and\"", ",", "[", "]", ")", "for", "item", "in", "queryArr", ":", "assert", "isinstance", "(", "item", ",", "(", "CombinedQuery", ",", "BaseQuery", ")", ")", ",", "\"item in the list was not a CombinedQuery or BaseQuery instance\"", "q", ".", "getQuery", "(", ")", "[", "\"$and\"", "]", ".", "append", "(", "item", ".", "getQuery", "(", ")", ")", "if", "exclude", "!=", "None", ":", "assert", "isinstance", "(", "exclude", ",", "(", "CombinedQuery", ",", "BaseQuery", ")", ")", ",", "\"exclude parameter was not a CombinedQuery or BaseQuery instance\"", "q", ".", "setQueryParam", "(", "\"$not\"", ",", "exclude", ".", "getQuery", "(", ")", ")", "return", "q" ]
Reads index from file .
def read_index ( fn ) : index = None with open ( fn , "rb" ) as i_file : if i_file . read ( len ( _CHECK_STRING ) ) != _CHECK_STRING : raise ValueError ( "{}: not a valid index file" . format ( fn ) ) index = pd . read_csv ( io . StringIO ( zlib . decompress ( i_file . read ( ) ) . decode ( encoding = "utf-8" ) , ) ) return index
12,250
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/index/impute2.py#L184-L206
[ "def", "defBoundary", "(", "self", ")", ":", "self", ".", "BoroCnstNatAll", "=", "np", ".", "zeros", "(", "self", ".", "StateCount", ")", "+", "np", ".", "nan", "# Find the natural borrowing constraint conditional on next period's state", "for", "j", "in", "range", "(", "self", ".", "StateCount", ")", ":", "PermShkMinNext", "=", "np", ".", "min", "(", "self", ".", "IncomeDstn_list", "[", "j", "]", "[", "1", "]", ")", "TranShkMinNext", "=", "np", ".", "min", "(", "self", ".", "IncomeDstn_list", "[", "j", "]", "[", "2", "]", ")", "self", ".", "BoroCnstNatAll", "[", "j", "]", "=", "(", "self", ".", "solution_next", ".", "mNrmMin", "[", "j", "]", "-", "TranShkMinNext", ")", "*", "(", "self", ".", "PermGroFac_list", "[", "j", "]", "*", "PermShkMinNext", ")", "/", "self", ".", "Rfree_list", "[", "j", "]", "self", ".", "BoroCnstNat_list", "=", "np", ".", "zeros", "(", "self", ".", "StateCount", ")", "+", "np", ".", "nan", "self", ".", "mNrmMin_list", "=", "np", ".", "zeros", "(", "self", ".", "StateCount", ")", "+", "np", ".", "nan", "self", ".", "BoroCnstDependency", "=", "np", ".", "zeros", "(", "(", "self", ".", "StateCount", ",", "self", ".", "StateCount", ")", ")", "+", "np", ".", "nan", "# The natural borrowing constraint in each current state is the *highest*", "# among next-state-conditional natural borrowing constraints that could", "# occur from this current state.", "for", "i", "in", "range", "(", "self", ".", "StateCount", ")", ":", "possible_next_states", "=", "self", ".", "MrkvArray", "[", "i", ",", ":", "]", ">", "0", "self", ".", "BoroCnstNat_list", "[", "i", "]", "=", "np", ".", "max", "(", "self", ".", "BoroCnstNatAll", "[", "possible_next_states", "]", ")", "# Explicitly handle the \"None\" case: ", "if", "self", ".", "BoroCnstArt", "is", "None", ":", "self", ".", "mNrmMin_list", "[", "i", "]", "=", "self", ".", "BoroCnstNat_list", "[", "i", "]", "else", ":", "self", ".", "mNrmMin_list", "[", "i", "]", "=", "np", ".", "max", "(", "[", "self", ".", 
"BoroCnstNat_list", "[", "i", "]", ",", "self", ".", "BoroCnstArt", "]", ")", "self", ".", "BoroCnstDependency", "[", "i", ",", ":", "]", "=", "self", ".", "BoroCnstNat_list", "[", "i", "]", "==", "self", ".", "BoroCnstNatAll" ]
Create the path to the folder at which the metadata and optimizer pickle should be saved
def make_path ( phase ) -> str : return "{}/{}{}{}" . format ( conf . instance . output_path , phase . phase_path , phase . phase_name , phase . phase_tag )
12,251
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L177-L181
[ "def", "win32_refresh_window", "(", "cls", ")", ":", "# Get console handle", "handle", "=", "windll", ".", "kernel32", ".", "GetConsoleWindow", "(", ")", "RDW_INVALIDATE", "=", "0x0001", "windll", ".", "user32", ".", "RedrawWindow", "(", "handle", ",", "None", ",", "None", ",", "c_uint", "(", "RDW_INVALIDATE", ")", ")" ]
Save the optimizer associated with the phase as a pickle
def save_optimizer_for_phase ( phase ) : with open ( make_optimizer_pickle_path ( phase ) , "w+b" ) as f : f . write ( pickle . dumps ( phase . optimizer ) )
12,252
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L184-L189
[ "def", "SetConsoleTextAttribute", "(", "stream_id", ",", "attrs", ")", ":", "handle", "=", "handles", "[", "stream_id", "]", "return", "windll", ".", "kernel32", ".", "SetConsoleTextAttribute", "(", "handle", ",", "attrs", ")" ]
Assert that the previously saved optimizer is equal to the phase s optimizer if a saved optimizer is found .
def assert_optimizer_pickle_matches_for_phase ( phase ) : path = make_optimizer_pickle_path ( phase ) if os . path . exists ( path ) : with open ( path , "r+b" ) as f : loaded_optimizer = pickle . loads ( f . read ( ) ) if phase . optimizer != loaded_optimizer : raise exc . PipelineException ( f"Can't restart phase at path {path} because settings don't match. " f"Did you change the optimizer settings or model?" )
12,253
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L192-L212
[ "def", "free", "(", "self", ",", "lpAddress", ")", ":", "hProcess", "=", "self", ".", "get_handle", "(", "win32", ".", "PROCESS_VM_OPERATION", ")", "win32", ".", "VirtualFreeEx", "(", "hProcess", ",", "lpAddress", ")" ]
Add the result of a phase .
def add ( self , phase_name , result ) : if phase_name in self . __result_dict : raise exc . PipelineException ( "Results from a phase called {} already exist in the pipeline" . format ( phase_name ) ) self . __result_list . append ( result ) self . __result_dict [ phase_name ] = result
12,254
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L38-L53
[ "def", "_check_rest_version", "(", "self", ",", "version", ")", ":", "version", "=", "str", "(", "version", ")", "if", "version", "not", "in", "self", ".", "supported_rest_versions", ":", "msg", "=", "\"Library is incompatible with REST API version {0}\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "version", ")", ")", "array_rest_versions", "=", "self", ".", "_list_available_rest_versions", "(", ")", "if", "version", "not", "in", "array_rest_versions", ":", "msg", "=", "\"Array is incompatible with REST API version {0}\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "version", ")", ")", "return", "LooseVersion", "(", "version", ")" ]
Returns the result of a previous phase by its name
def from_phase ( self , phase_name ) : try : return self . __result_dict [ phase_name ] except KeyError : raise exc . PipelineException ( "No previous phase named {} found in results ({})" . format ( phase_name , ", " . join ( self . __result_dict . keys ( ) ) ) )
12,255
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L74-L97
[ "def", "update", "(", "self", ",", "data", ")", ":", "self", ".", "_md", ".", "update", "(", "data", ")", "bufpos", "=", "self", ".", "_nbytes", "&", "63", "self", ".", "_nbytes", "+=", "len", "(", "data", ")", "if", "self", ".", "_rarbug", "and", "len", "(", "data", ")", ">", "64", ":", "dpos", "=", "self", ".", "block_size", "-", "bufpos", "while", "dpos", "+", "self", ".", "block_size", "<=", "len", "(", "data", ")", ":", "self", ".", "_corrupt", "(", "data", ",", "dpos", ")", "dpos", "+=", "self", ".", "block_size" ]
Save metadata associated with the phase such as the name of the pipeline the name of the phase and the name of the data being fit
def save_metadata ( self , phase , data_name ) : with open ( "{}/.metadata" . format ( make_path ( phase ) ) , "w+" ) as f : f . write ( "pipeline={}\nphase={}\ndata={}" . format ( self . pipeline_name , phase . phase_name , data_name ) )
12,256
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L134-L141
[ "def", "truncate_schema", "(", "self", ")", ":", "assert", "self", ".", "server", "==", "'localhost'", "con", "=", "self", ".", "connection", "or", "self", ".", "_connect", "(", ")", "self", ".", "_initialize", "(", "con", ")", "cur", "=", "con", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "'DELETE FROM publication;'", ")", "cur", ".", "execute", "(", "'TRUNCATE systems CASCADE;'", ")", "con", ".", "commit", "(", ")", "con", ".", "close", "(", ")", "return" ]
Run the function for each phase in the pipeline .
def run_function ( self , func , data_name = None , assert_optimizer_pickle_matches = True ) : results = ResultsCollection ( ) for i , phase in enumerate ( self . phases ) : logger . info ( "Running Phase {} (Number {})" . format ( phase . optimizer . phase_name , i ) ) if assert_optimizer_pickle_matches : assert_optimizer_pickle_matches_for_phase ( phase ) save_optimizer_for_phase ( phase ) self . save_metadata ( phase , data_name ) results . add ( phase . phase_name , func ( phase , results ) ) return results
12,257
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L143-L167
[ "def", "_adapt_WSDateTime", "(", "dt", ")", ":", "try", ":", "ts", "=", "int", "(", "(", "dt", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")", "-", "datetime", "(", "1970", ",", "1", ",", "1", ",", "tzinfo", "=", "pytz", ".", "utc", ")", ")", ".", "total_seconds", "(", ")", ")", "except", "(", "OverflowError", ",", "OSError", ")", ":", "if", "dt", "<", "datetime", ".", "now", "(", ")", ":", "ts", "=", "0", "else", ":", "ts", "=", "2", "**", "63", "-", "1", "return", "ts" ]
Take a str and transform it into a byte array .
def strtobytes ( input , encoding ) : py_version = sys . version_info [ 0 ] if py_version >= 3 : return _strtobytes_py3 ( input , encoding ) return _strtobytes_py2 ( input , encoding )
12,258
https://github.com/Riminder/python-riminder-api/blob/01279f0ece08cf3d1dd45f76de6d9edf7fafec90/riminder/bytesutils.py#L14-L19
[ "def", "GET_save_conditionvalues", "(", "self", ")", "->", "None", ":", "state", ".", "conditions", "[", "self", ".", "_id", "]", "=", "state", ".", "conditions", ".", "get", "(", "self", ".", "_id", ",", "{", "}", ")", "state", ".", "conditions", "[", "self", ".", "_id", "]", "[", "state", ".", "idx2", "]", "=", "state", ".", "hp", ".", "conditions" ]
Indexes an IMPUTE2 file .
def index_impute2 ( fn ) : logger . info ( "Indexing {} (IMPUTE2)" . format ( fn ) ) impute2_index ( fn , cols = [ 0 , 1 , 2 ] , names = [ "chrom" , "name" , "pos" ] , sep = " " ) logger . info ( "Index generated" )
12,259
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/index/__main__.py#L60-L69
[ "def", "hide", "(", "self", ",", "selections", ")", ":", "if", "'atoms'", "in", "selections", ":", "self", ".", "hidden_state", "[", "'atoms'", "]", "=", "selections", "[", "'atoms'", "]", "self", ".", "on_atom_hidden_changed", "(", ")", "if", "'bonds'", "in", "selections", ":", "self", ".", "hidden_state", "[", "'bonds'", "]", "=", "selections", "[", "'bonds'", "]", "self", ".", "on_bond_hidden_changed", "(", ")", "if", "'box'", "in", "selections", ":", "self", ".", "hidden_state", "[", "'box'", "]", "=", "box_s", "=", "selections", "[", "'box'", "]", "if", "box_s", ".", "mask", "[", "0", "]", ":", "if", "self", ".", "viewer", ".", "has_renderer", "(", "self", ".", "box_renderer", ")", ":", "self", ".", "viewer", ".", "remove_renderer", "(", "self", ".", "box_renderer", ")", "else", ":", "if", "not", "self", ".", "viewer", ".", "has_renderer", "(", "self", ".", "box_renderer", ")", ":", "self", ".", "viewer", ".", "add_renderer", "(", "self", ".", "box_renderer", ")", "return", "self", ".", "hidden_state" ]
Indexes a BGEN file .
def index_bgen ( fn , legacy = False ) : logger . info ( "Indexing {} (BGEN) using 'bgenix'{}" . format ( fn , " (legacy mode)" if legacy else "" , ) ) command = [ "bgenix" , "-g" , fn , "-index" ] if legacy : command . append ( "-with-rowid" ) try : logger . info ( "Executing '{}'" . format ( " " . join ( command ) ) ) subprocess . Popen ( command ) . communicate ( ) except FileNotFoundError : logger . error ( "Cannot find 'bgenix', impossible to index {}" . format ( fn ) ) sys . exit ( 1 ) logger . info ( "Index generated" )
12,260
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/index/__main__.py#L72-L91
[ "def", "_get_session", "(", "server", ")", ":", "if", "server", "in", "_sessions", ":", "return", "_sessions", "[", "server", "]", "config", "=", "_get_spacewalk_configuration", "(", "server", ")", "if", "not", "config", ":", "raise", "Exception", "(", "'No config for \\'{0}\\' found on master'", ".", "format", "(", "server", ")", ")", "session", "=", "_get_client_and_key", "(", "config", "[", "'api_url'", "]", ",", "config", "[", "'username'", "]", ",", "config", "[", "'password'", "]", ")", "atexit", ".", "register", "(", "_disconnect_session", ",", "session", ")", "client", "=", "session", "[", "'client'", "]", "key", "=", "session", "[", "'key'", "]", "_sessions", "[", "server", "]", "=", "(", "client", ",", "key", ")", "return", "client", ",", "key" ]
Create a UNTL XML subelement .
def create_untl_xml_subelement ( parent , element , prefix = '' ) : subelement = SubElement ( parent , prefix + element . tag ) if element . content is not None : subelement . text = element . content if element . qualifier is not None : subelement . attrib [ "qualifier" ] = element . qualifier if element . children > 0 : for child in element . children : SubElement ( subelement , prefix + child . tag ) . text = child . content else : subelement . text = element . content return subelement
12,261
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L22-L35
[ "def", "register_keepalive", "(", "self", ",", "cmd", ",", "callback", ")", ":", "regid", "=", "random", ".", "random", "(", ")", "if", "self", ".", "_customkeepalives", "is", "None", ":", "self", ".", "_customkeepalives", "=", "{", "regid", ":", "(", "cmd", ",", "callback", ")", "}", "else", ":", "while", "regid", "in", "self", ".", "_customkeepalives", ":", "regid", "=", "random", ".", "random", "(", ")", "self", ".", "_customkeepalives", "[", "regid", "]", "=", "(", "cmd", ",", "callback", ")", "return", "regid" ]
Determine if there are elements not in the children that need to be included as blank elements in the form .
def add_missing_children ( required_children , element_children ) : element_tags = [ element . tag for element in element_children ] # Loop through the elements that should be in the form. for contained_element in required_children : # If the element doesn't exist in the form, # add the element to the children. if contained_element not in element_tags : try : added_child = PYUNTL_DISPATCH [ contained_element ] ( content = '' ) except : added_child = PYUNTL_DISPATCH [ contained_element ] ( ) element_children . append ( added_child ) return element_children
12,262
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L38-L53
[ "def", "load", "(", "dbname", ")", ":", "db", "=", "Database", "(", "dbname", ")", "# Get the name of the objects", "tables", "=", "get_table_list", "(", "db", ".", "cur", ")", "# Create a Trace instance for each object", "chains", "=", "0", "for", "name", "in", "tables", ":", "db", ".", "_traces", "[", "name", "]", "=", "Trace", "(", "name", "=", "name", ",", "db", "=", "db", ")", "db", ".", "_traces", "[", "name", "]", ".", "_shape", "=", "get_shape", "(", "db", ".", "cur", ",", "name", ")", "setattr", "(", "db", ",", "name", ",", "db", ".", "_traces", "[", "name", "]", ")", "db", ".", "cur", ".", "execute", "(", "'SELECT MAX(trace) FROM [%s]'", "%", "name", ")", "chains", "=", "max", "(", "chains", ",", "db", ".", "cur", ".", "fetchall", "(", ")", "[", "0", "]", "[", "0", "]", "+", "1", ")", "db", ".", "chains", "=", "chains", "db", ".", "trace_names", "=", "chains", "*", "[", "tables", ",", "]", "db", ".", "_state_", "=", "{", "}", "return", "db" ]
Set the qualifier for the element .
def set_qualifier ( self , value ) : if self . allows_qualifier : self . qualifier = value . strip ( ) else : raise UNTLStructureException ( 'Element "%s" does not allow a qualifier' % ( self . tag , ) )
12,263
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L85-L96
[ "def", "compare", "(", "self", ",", "other", ",", "components", "=", "[", "]", ")", ":", "lg", ".", "debug", "(", "\"Comparing publish %s (%s) and %s (%s)\"", "%", "(", "self", ".", "name", ",", "self", ".", "storage", "or", "\"local\"", ",", "other", ".", "name", ",", "other", ".", "storage", "or", "\"local\"", ")", ")", "diff", ",", "equal", "=", "(", "{", "}", ",", "{", "}", ")", "for", "component", ",", "snapshots", "in", "self", ".", "components", ".", "items", "(", ")", ":", "if", "component", "not", "in", "list", "(", "other", ".", "components", ".", "keys", "(", ")", ")", ":", "# Component is missing in other", "diff", "[", "component", "]", "=", "snapshots", "continue", "equal_snapshots", "=", "list", "(", "set", "(", "snapshots", ")", ".", "intersection", "(", "other", ".", "components", "[", "component", "]", ")", ")", "if", "equal_snapshots", ":", "lg", ".", "debug", "(", "\"Equal snapshots for %s: %s\"", "%", "(", "component", ",", "equal_snapshots", ")", ")", "equal", "[", "component", "]", "=", "equal_snapshots", "diff_snapshots", "=", "list", "(", "set", "(", "snapshots", ")", ".", "difference", "(", "other", ".", "components", "[", "component", "]", ")", ")", "if", "diff_snapshots", ":", "lg", ".", "debug", "(", "\"Different snapshots for %s: %s\"", "%", "(", "component", ",", "diff_snapshots", ")", ")", "diff", "[", "component", "]", "=", "diff_snapshots", "return", "(", "diff", ",", "equal", ")" ]
Add the form attribute to the UNTL Python object .
def add_form ( self , * * kwargs ) : vocabularies = kwargs . get ( 'vocabularies' , None ) qualifier = kwargs . get ( 'qualifier' , None ) content = kwargs . get ( 'content' , None ) parent_tag = kwargs . get ( 'parent_tag' , None ) superuser = kwargs . get ( 'superuser' , False ) # Element has both the qualifier and content. if qualifier is not None and content is not None : # Create the form attribute. self . form = UNTL_FORM_DISPATCH [ self . tag ] ( vocabularies = vocabularies , qualifier_value = qualifier , input_value = content , untl_object = self , superuser = superuser , ) # Element just has a qualifier. elif qualifier is not None : # Create the form attribute. self . form = UNTL_FORM_DISPATCH [ self . tag ] ( vocabularies = vocabularies , qualifier_value = qualifier , untl_object = self , superuser = superuser , ) # Element just has content. elif content is not None : # If the element is a child element, # create the form attribute. if parent_tag is None : self . form = UNTL_FORM_DISPATCH [ self . tag ] ( vocabularies = vocabularies , input_value = content , untl_object = self , superuser = superuser , ) else : # Create the form attribute. self . form = UNTL_FORM_DISPATCH [ self . tag ] ( vocabularies = vocabularies , input_value = content , untl_object = self , parent_tag = parent_tag , superuser = superuser , ) # Element has children and no qualifiers or content # or is blank (not originally in the UNTL record). else : # Element is a child element. if parent_tag is None : # Create the form attribute. self . form = UNTL_FORM_DISPATCH [ self . tag ] ( vocabularies = vocabularies , untl_object = self , superuser = superuser , ) else : # Create the form attribute. self . form = UNTL_FORM_DISPATCH [ self . tag ] ( vocabularies = vocabularies , untl_object = self , parent_tag = parent_tag , superuser = superuser , )
12,264
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L127-L191
[ "def", "command", "(", "state", ",", "args", ")", ":", "args", "=", "parser", ".", "parse_args", "(", "args", "[", "1", ":", "]", ")", "where_queries", "=", "[", "]", "params", "=", "{", "}", "if", "args", ".", "watching", "or", "args", ".", "available", ":", "where_queries", ".", "append", "(", "'regexp IS NOT NULL'", ")", "if", "args", ".", "query", ":", "where_queries", ".", "append", "(", "'title LIKE :title'", ")", "params", "[", "'title'", "]", "=", "_compile_sql_query", "(", "args", ".", "query", ")", "if", "not", "where_queries", ":", "print", "(", "'Must include at least one filter.'", ")", "return", "where_query", "=", "' AND '", ".", "join", "(", "where_queries", ")", "logger", ".", "debug", "(", "'Search where %s with params %s'", ",", "where_query", ",", "params", ")", "results", "=", "list", "(", ")", "all_files", "=", "[", "filename", "for", "filename", "in", "_find_files", "(", "state", ".", "config", "[", "'anime'", "]", ".", "getpath", "(", "'watchdir'", ")", ")", "if", "_is_video", "(", "filename", ")", "]", "for", "anime", "in", "query", ".", "select", ".", "select", "(", "state", ".", "db", ",", "where_query", ",", "params", ")", ":", "logger", ".", "debug", "(", "'For anime %s with regexp %s'", ",", "anime", ".", "aid", ",", "anime", ".", "regexp", ")", "if", "anime", ".", "regexp", "is", "not", "None", ":", "anime_files", "=", "AnimeFiles", "(", "anime", ".", "regexp", ",", "all_files", ")", "logger", ".", "debug", "(", "'Found files %s'", ",", "anime_files", ".", "filenames", ")", "query", ".", "files", ".", "cache_files", "(", "state", ".", "db", ",", "anime", ".", "aid", ",", "anime_files", ")", "available", "=", "anime_files", ".", "available_string", "(", "anime", ".", "watched_episodes", ")", "else", ":", "available", "=", "''", "if", "not", "args", ".", "available", "or", "available", ":", "results", ".", "append", "(", "(", "anime", ".", "aid", ",", "anime", ".", "title", ",", "anime", ".", "type", ",", "'{}/{}'", ".", "format", 
"(", "anime", ".", "watched_episodes", ",", "anime", ".", "episodecount", ")", ",", "'yes'", "if", "anime", ".", "complete", "else", "''", ",", "available", ",", ")", ")", "state", ".", "results", "[", "'db'", "]", ".", "set", "(", "results", ")", "state", ".", "results", "[", "'db'", "]", ".", "print", "(", ")" ]
Calculate length of record excluding metadata .
def record_content_length ( self ) : untldict = py2dict ( self ) untldict . pop ( 'meta' , None ) return len ( str ( untldict ) )
12,265
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L204-L208
[ "def", "setup", "(", "self", ",", "environ", ")", ":", "request", "=", "wsgi_request", "(", "environ", ")", "cfg", "=", "request", ".", "cache", ".", "cfg", "loop", "=", "request", ".", "cache", ".", "_loop", "self", ".", "store", "=", "create_store", "(", "cfg", ".", "data_store", ",", "loop", "=", "loop", ")", "pubsub", "=", "self", ".", "store", ".", "pubsub", "(", "protocol", "=", "Protocol", "(", ")", ")", "channel", "=", "'%s_webchat'", "%", "self", ".", "name", "ensure_future", "(", "pubsub", ".", "subscribe", "(", "channel", ")", ",", "loop", "=", "loop", ")", "return", "WsgiHandler", "(", "[", "Router", "(", "'/'", ",", "get", "=", "self", ".", "home_page", ")", ",", "WebSocket", "(", "'/message'", ",", "Chat", "(", "pubsub", ",", "channel", ")", ")", ",", "Router", "(", "'/rpc'", ",", "post", "=", "Rpc", "(", "pubsub", ",", "channel", ")", ",", "response_content_types", "=", "JSON_CONTENT_TYPES", ")", "]", ",", "[", "AsyncResponseMiddleware", ",", "GZipMiddleware", "(", "min_length", "=", "20", ")", "]", ")" ]
Create groupings of form elements .
def create_form_data ( self , * * kwargs ) : # Get the specified keyword arguments. children = kwargs . get ( 'children' , [ ] ) sort_order = kwargs . get ( 'sort_order' , None ) solr_response = kwargs . get ( 'solr_response' , None ) superuser = kwargs . get ( 'superuser' , False ) # Get the vocabularies to pull the qualifiers from. vocabularies = self . get_vocabularies ( ) # Loop through all UNTL elements in the Python object. for element in children : # Add children that are missing from the form. element . children = add_missing_children ( element . contained_children , element . children , ) # Add the form attribute to the element. element . add_form ( vocabularies = vocabularies , qualifier = element . qualifier , content = element . content , superuser = superuser , ) # Element can contain children. if element . form . has_children : # If the parent has a qualifier, # create a representative form element for the parent. if getattr ( element . form , 'qualifier_name' , False ) : add_parent = PARENT_FORM [ element . form . qualifier_name ] ( content = element . qualifier , ) # Add the parent to the list of child elements. element . children . append ( add_parent ) # Sort the elements by the index of child sort. element . children . sort ( key = lambda obj : element . form . child_sort . index ( obj . tag ) ) # Loop through the element's children (if it has any). for child in element . children : # Add the form attribute to the element. child . add_form ( vocabularies = vocabularies , qualifier = child . qualifier , content = child . content , parent_tag = element . tag , superuser = superuser , ) element_group_dict = { } # Group related objects together. for element in children : # Make meta-hidden its own group. if element . form . name == 'meta' and element . qualifier == 'hidden' : element_group_dict [ 'hidden' ] = [ element ] # Element is not meta-hidden. else : # Make sure the dictionary key exists. if element . form . 
name not in element_group_dict : element_group_dict [ element . form . name ] = [ ] element_group_dict [ element . form . name ] . append ( element ) # If the hidden meta element doesn't exist, add it to its own group. if 'hidden' not in element_group_dict : hidden_element = PYUNTL_DISPATCH [ 'meta' ] ( qualifier = 'hidden' , content = 'False' ) hidden_element . add_form ( vocabularies = vocabularies , qualifier = hidden_element . qualifier , content = hidden_element . content , superuser = superuser , ) element_group_dict [ 'hidden' ] = [ hidden_element ] # Create a list of group object elements. element_list = self . create_form_groupings ( vocabularies , solr_response , element_group_dict , sort_order , ) # Return the list of UNTL elements with form data added. return element_list
12,266
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L216-L295
[ "def", "set_cache_token", "(", "self", ",", "token_data", ")", ":", "if", "self", ".", "conn", "is", "None", ":", "raise", "CacheException", "(", "'Redis is not connected'", ")", "token", "=", "token_data", "[", "'auth_token'", "]", "token_expires", "=", "token_data", "[", "'expires_at'", "]", "roles", "=", "token_data", "[", "'roles'", "]", "try", ":", "datetime_object", "=", "datetime", ".", "strptime", "(", "token_expires", ",", "'%Y-%m-%dT%H:%M:%S.%fZ'", ")", "except", "ValueError", ":", "datetime_object", "=", "datetime", ".", "strptime", "(", "token_expires", ",", "'%Y-%m-%dT%H:%M:%SZ'", ")", "ttl", "=", "(", "datetime", ".", "utcnow", "(", ")", ".", "now", "(", ")", "-", "datetime_object", ")", "token_data", "=", "json", ".", "dumps", "(", "{", "'expires_at'", ":", "token_expires", ",", "'roles'", ":", "roles", ",", "'user'", ":", "token_data", "[", "'user'", "]", "}", ")", "self", ".", "conn", ".", "set", "(", "token", ",", "token_data", ",", "ex", "=", "ttl", ".", "seconds", ")" ]
Create a group object from groupings of element objects .
def create_form_groupings ( self , vocabularies , solr_response , element_group_dict , sort_order ) : element_list = [ ] # Loop through the group dictionary. for group_name , group_list in element_group_dict . items ( ) : # Create the element group. element_group = UNTL_GROUP_DISPATCH [ group_name ] ( vocabularies = vocabularies , solr_response = solr_response , group_name = group_name , group_list = group_list , ) # Loop through the adjustable forms of the group if they exist. if element_group . adjustable_form is not None : for adj_name , form_dict in element_group . adjustable_form . items ( ) : # If an item has an adjustable form, # append it to the adjustable list. if form_dict [ 'value_py' ] is not None : self . adjustable_items . append ( adj_name ) # Append the group to the element group list. element_list . append ( element_group ) # Sort the elements by the index of sort_order pre-ordered list. element_list . sort ( key = lambda obj : sort_order . index ( obj . group_name ) ) return element_list
12,267
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L297-L324
[ "def", "_api_arguments", "(", "self", ")", ":", "# TC main >= 4.4 token will be passed to jobs.", "self", ".", "add_argument", "(", "'--tc_token'", ",", "default", "=", "None", ",", "help", "=", "'ThreatConnect API Token'", ")", "self", ".", "add_argument", "(", "'--tc_token_expires'", ",", "default", "=", "None", ",", "help", "=", "'ThreatConnect API Token Expiration Time'", ",", "type", "=", "int", ",", ")", "# TC Integrations Server or TC main < 4.4", "self", ".", "add_argument", "(", "'--api_access_id'", ",", "default", "=", "None", ",", "help", "=", "'ThreatConnect API Access ID'", ",", "required", "=", "False", ")", "self", ".", "add_argument", "(", "'--api_secret_key'", ",", "default", "=", "None", ",", "help", "=", "'ThreatConnect API Secret Key'", ",", "required", "=", "False", ")", "# Validate ThreatConnect SSL certificate", "self", ".", "add_argument", "(", "'--tc_verify'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Validate the ThreatConnect SSL Cert'", ")" ]
Get the vocabularies to pull the qualifiers from .
def get_vocabularies ( self ) : # Timeout in seconds. timeout = 15 socket . setdefaulttimeout ( timeout ) # Create the ordered vocabulary URL. vocab_url = VOCABULARIES_URL . replace ( 'all' , 'all-verbose' ) # Request the vocabularies dictionary. try : vocab_dict = eval ( urllib2 . urlopen ( vocab_url ) . read ( ) ) except : raise UNTLStructureException ( 'Could not retrieve the vocabularies' ) return vocab_dict
12,268
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L326-L338
[ "def", "start", "(", "url", ",", "funs", "=", "None", ",", "tags", "=", "None", ")", ":", "if", "__opts__", ".", "get", "(", "'id'", ")", ".", "endswith", "(", "'_master'", ")", ":", "instance", "=", "'master'", "else", ":", "instance", "=", "'minion'", "event_bus", "=", "salt", ".", "utils", ".", "event", ".", "get_event", "(", "instance", ",", "sock_dir", "=", "__opts__", "[", "'sock_dir'", "]", ",", "transport", "=", "__opts__", "[", "'transport'", "]", ",", "opts", "=", "__opts__", ")", "while", "True", ":", "event", "=", "event_bus", ".", "get_event", "(", "full", "=", "True", ")", "if", "event", ":", "publish", "=", "True", "if", "tags", "and", "isinstance", "(", "tags", ",", "list", ")", ":", "found_match", "=", "False", "for", "tag", "in", "tags", ":", "if", "fnmatch", ".", "fnmatch", "(", "event", "[", "'tag'", "]", ",", "tag", ")", ":", "found_match", "=", "True", "publish", "=", "found_match", "if", "funs", "and", "'fun'", "in", "event", "[", "'data'", "]", ":", "if", "not", "event", "[", "'data'", "]", "[", "'fun'", "]", "in", "funs", ":", "publish", "=", "False", "if", "publish", ":", "_logstash", "(", "url", ",", "event", "[", "'data'", "]", ")" ]
Create a UNTL document in a string from a UNTL metadata root object .
def create_xml_string ( self ) : root = self . create_xml ( ) xml = '<?xml version="1.0" encoding="UTF-8"?>\n' + tostring ( root , pretty_print = True ) return xml
12,269
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L358-L369
[ "def", "compare_hives", "(", "fs0", ",", "fs1", ")", ":", "registries", "=", "[", "]", "for", "path", "in", "chain", "(", "registries_path", "(", "fs0", ".", "fsroot", ")", ",", "user_registries", "(", "fs0", ",", "fs1", ")", ")", ":", "if", "fs0", ".", "checksum", "(", "path", ")", "!=", "fs1", ".", "checksum", "(", "path", ")", ":", "registries", ".", "append", "(", "path", ")", "return", "registries" ]
Create an ElementTree representation of the object .
def create_xml ( self , useNamespace = False ) : UNTL_NAMESPACE = 'http://digital2.library.unt.edu/untl/' UNTL = '{%s}' % UNTL_NAMESPACE NSMAP = { 'untl' : UNTL_NAMESPACE } if useNamespace : root = Element ( UNTL + self . tag , nsmap = NSMAP ) else : root = Element ( self . tag ) # Sort the elements by the index of # UNTL_XML_ORDER pre-ordered list. self . sort_untl ( UNTL_XML_ORDER ) # Create an XML structure from field list. for element in self . children : if useNamespace : create_untl_xml_subelement ( root , element , UNTL ) else : create_untl_xml_subelement ( root , element ) return root
12,270
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L371-L392
[ "def", "fetch_csv", "(", "self", ",", "url", ",", "pre_func", "=", "None", ",", "post_func", "=", "None", ",", "date_column", "=", "'date'", ",", "date_format", "=", "None", ",", "timezone", "=", "pytz", ".", "utc", ".", "zone", ",", "symbol", "=", "None", ",", "mask", "=", "True", ",", "symbol_column", "=", "None", ",", "special_params_checker", "=", "None", ",", "country_code", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "country_code", "is", "None", ":", "country_code", "=", "self", ".", "default_fetch_csv_country_code", "(", "self", ".", "trading_calendar", ",", ")", "# Show all the logs every time fetcher is used.", "csv_data_source", "=", "PandasRequestsCSV", "(", "url", ",", "pre_func", ",", "post_func", ",", "self", ".", "asset_finder", ",", "self", ".", "trading_calendar", ".", "day", ",", "self", ".", "sim_params", ".", "start_session", ",", "self", ".", "sim_params", ".", "end_session", ",", "date_column", ",", "date_format", ",", "timezone", ",", "symbol", ",", "mask", ",", "symbol_column", ",", "data_frequency", "=", "self", ".", "data_frequency", ",", "country_code", "=", "country_code", ",", "special_params_checker", "=", "special_params_checker", ",", "*", "*", "kwargs", ")", "# ingest this into dataportal", "self", ".", "data_portal", ".", "handle_extra_source", "(", "csv_data_source", ".", "df", ",", "self", ".", "sim_params", ")", "return", "csv_data_source" ]
Convert a UNTL Python object into a UNTL Python dictionary .
def create_element_dict ( self ) : untl_dict = { } # Loop through all UNTL elements in the Python object. for element in self . children : # If an entry for the element list hasn't been made in the # dictionary, start an empty element list. if element . tag not in untl_dict : untl_dict [ element . tag ] = [ ] # Create a dictionary to put the element into. # Add any qualifier. element_dict = { } if element . qualifier is not None : element_dict [ 'qualifier' ] = element . qualifier # Add any children that have content. if len ( element . contained_children ) > 0 : child_dict = { } for child in element . children : if child . content is not None : child_dict [ child . tag ] = child . content # Set the element's content as the dictionary # of children elements. element_dict [ 'content' ] = child_dict # The element has content, but no children. elif element . content is not None : element_dict [ 'content' ] = element . content # Append the dictionary element to the element list. untl_dict [ element . tag ] . append ( element_dict ) return untl_dict
12,271
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L394-L423
[ "def", "write_bits", "(", "self", ",", "*", "args", ")", ":", "# Would be nice to make this a bit smarter", "if", "len", "(", "args", ")", ">", "8", ":", "raise", "ValueError", "(", "\"Can only write 8 bits at a time\"", ")", "self", ".", "_output_buffer", ".", "append", "(", "chr", "(", "reduce", "(", "lambda", "x", ",", "y", ":", "xor", "(", "x", ",", "args", "[", "y", "]", "<<", "y", ")", ",", "xrange", "(", "len", "(", "args", ")", ")", ",", "0", ")", ")", ")", "return", "self" ]
Create a UNTL file .
def create_xml_file ( self , untl_filename ) : try : f = open ( untl_filename , 'w' ) f . write ( self . create_xml_string ( ) . encode ( 'utf-8' ) ) f . close ( ) except : raise UNTLStructureException ( 'Failed to create UNTL XML file. File: %s' % ( untl_filename ) )
12,272
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L425-L437
[ "def", "_openResources", "(", "self", ")", ":", "try", ":", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "True", ")", "except", "Exception", "as", "ex", ":", "logger", ".", "warning", "(", "ex", ")", "logger", ".", "warning", "(", "\"Unable to read wav with memmory mapping. Trying without now.\"", ")", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "False", ")", "self", ".", "_array", "=", "data", "self", ".", "attributes", "[", "'rate'", "]", "=", "rate" ]
Sort the UNTL Python object by the index of a sort structure pre - ordered list .
def sort_untl ( self , sort_structure ) : self . children . sort ( key = lambda obj : sort_structure . index ( obj . tag ) )
12,273
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L439-L443
[ "def", "removeAllEntitlements", "(", "self", ",", "appId", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"appId\"", ":", "appId", "}", "url", "=", "self", ".", "_url", "+", "\"/licenses/removeAllEntitlements\"", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
Create a form dictionary with the key being the element name and the value being a list of form element objects .
def generate_form_data ( self , * * kwargs ) : # Add elements that are missing from the form. self . children = add_missing_children ( self . contained_children , self . children ) # Add children to the keyword arguments. kwargs [ 'children' ] = self . children # Create the form object. return FormGenerator ( * * kwargs )
12,274
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L449-L461
[ "def", "seconds_left", "(", "self", ")", ":", "return", "int", "(", "(", "self", ".", "_ENDDATE", ".", "datetime", "-", "Date", "(", "self", ")", ".", "datetime", ")", ".", "total_seconds", "(", ")", ")" ]
Define the expanded qualifier name .
def contributor_director ( * * kwargs ) : if kwargs . get ( 'qualifier' ) in ETD_MS_CONTRIBUTOR_EXPANSION : # Return the element object. return ETD_MSContributor ( role = ETD_MS_CONTRIBUTOR_EXPANSION [ kwargs . get ( 'qualifier' ) ] , * * kwargs ) else : return None
12,275
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/etd_ms_structure.py#L211-L220
[ "def", "main", "(", "args", ")", ":", "random", ".", "seed", "(", ")", "temp_dir", "=", "tempfile", ".", "mkdtemp", "(", ")", "logging", ".", "info", "(", "'Created temporary directory: %s'", ",", "temp_dir", ")", "validator", "=", "SubmissionValidator", "(", "source_dir", "=", "args", ".", "source_dir", ",", "target_dir", "=", "args", ".", "target_dir", ",", "temp_dir", "=", "temp_dir", ",", "do_copy", "=", "args", ".", "copy", ",", "use_gpu", "=", "args", ".", "use_gpu", ",", "containers_file", "=", "args", ".", "containers_file", ")", "validator", ".", "run", "(", ")", "logging", ".", "info", "(", "'Deleting temporary directory: %s'", ",", "temp_dir", ")", "subprocess", ".", "call", "(", "[", "'rm'", ",", "'-rf'", ",", "temp_dir", "]", ")" ]
Direct which class should be used based on the date qualifier or if the date should be converted at all .
def date_director ( * * kwargs ) : # If the date is a creation date, return the element object. if kwargs . get ( 'qualifier' ) == 'creation' : return ETD_MSDate ( content = kwargs . get ( 'content' ) . strip ( ) ) elif kwargs . get ( 'qualifier' ) != 'digitized' : # Return the element object. return ETD_MSDate ( content = kwargs . get ( 'content' ) . strip ( ) ) else : return None
12,276
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/etd_ms_structure.py#L234-L245
[ "def", "complexes", "(", "network", ",", "state", ")", ":", "engine", "=", "FindIrreducibleComplexes", "(", "possible_complexes", "(", "network", ",", "state", ")", ")", "return", "engine", ".", "run", "(", "config", ".", "PARALLEL_COMPLEX_EVALUATION", ")" ]
Direct how to handle a subject element .
def subject_director ( * * kwargs ) : if kwargs . get ( 'qualifier' ) not in [ 'KWD' , '' ] : return ETD_MSSubject ( scheme = kwargs . get ( 'qualifier' ) , * * kwargs ) else : return ETD_MSSubject ( content = kwargs . get ( 'content' ) )
12,277
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/etd_ms_structure.py#L262-L267
[ "def", "save", "(", "self", ")", ":", "d", "=", "self", ".", "_to_dict", "(", ")", "if", "len", "(", "d", ".", "get", "(", "'videoIds'", ",", "[", "]", ")", ")", ">", "0", ":", "if", "not", "self", ".", "id", ":", "self", ".", "id", "=", "self", ".", "connection", ".", "post", "(", "'create_playlist'", ",", "playlist", "=", "d", ")", "else", ":", "data", "=", "self", ".", "connection", ".", "post", "(", "'update_playlist'", ",", "playlist", "=", "d", ")", "if", "data", ":", "self", ".", "_load", "(", "data", ")" ]
Get the requested element content from a list of children .
def get_child_content ( self , children , element_name ) : # Loop through the children and get the specified element. for child in children : # If the child is the requested element, return its content. if child . tag == element_name : return child . content return ''
12,278
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/etd_ms_structure.py#L69-L76
[ "def", "update_experiment", "(", ")", ":", "experiment_config", "=", "Experiments", "(", ")", "experiment_dict", "=", "experiment_config", ".", "get_all_experiments", "(", ")", "if", "not", "experiment_dict", ":", "return", "None", "for", "key", "in", "experiment_dict", ".", "keys", "(", ")", ":", "if", "isinstance", "(", "experiment_dict", "[", "key", "]", ",", "dict", ")", ":", "if", "experiment_dict", "[", "key", "]", ".", "get", "(", "'status'", ")", "!=", "'STOPPED'", ":", "nni_config", "=", "Config", "(", "experiment_dict", "[", "key", "]", "[", "'fileName'", "]", ")", "rest_pid", "=", "nni_config", ".", "get_config", "(", "'restServerPid'", ")", "if", "not", "detect_process", "(", "rest_pid", ")", ":", "experiment_config", ".", "update_experiment", "(", "key", ",", "'status'", ",", "'STOPPED'", ")", "continue", "rest_port", "=", "nni_config", ".", "get_config", "(", "'restServerPort'", ")", "startTime", ",", "endTime", "=", "get_experiment_time", "(", "rest_port", ")", "if", "startTime", ":", "experiment_config", ".", "update_experiment", "(", "key", ",", "'startTime'", ",", "startTime", ")", "if", "endTime", ":", "experiment_config", ".", "update_experiment", "(", "key", ",", "'endTime'", ",", "endTime", ")", "status", "=", "get_experiment_status", "(", "rest_port", ")", "if", "status", ":", "experiment_config", ".", "update_experiment", "(", "key", ",", "'status'", ",", "status", ")" ]
Function to offset the center of a colormap . Useful for data with a negative min and positive max and you want the middle of the colormap s dynamic range to be at zero
def shiftedColorMap ( cmap , start = 0 , midpoint = 0.5 , stop = 1.0 , name = 'shiftedcmap' ) : cdict = { 'red' : [ ] , 'green' : [ ] , 'blue' : [ ] , 'alpha' : [ ] } # regular index to compute the colors reg_index = np . linspace ( start , stop , 257 ) # shifted index to match the data shift_index = np . hstack ( [ np . linspace ( 0.0 , midpoint , 128 , endpoint = False ) , np . linspace ( midpoint , 1.0 , 129 , endpoint = True ) ] ) for ri , si in zip ( reg_index , shift_index ) : r , g , b , a = cmap ( ri ) cdict [ 'red' ] . append ( ( si , r , r ) ) cdict [ 'green' ] . append ( ( si , g , g ) ) cdict [ 'blue' ] . append ( ( si , b , b ) ) cdict [ 'alpha' ] . append ( ( si , a , a ) ) newcmap = mpl . colors . LinearSegmentedColormap ( name , cdict ) plt . register_cmap ( cmap = newcmap ) return newcmap
12,279
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/td_residuals.py#L10-L60
[ "def", "getGroups", "(", "self", ",", "proteinId", ")", ":", "return", "[", "self", ".", "groups", "[", "gId", "]", "for", "gId", "in", "self", ".", "_proteinToGroupIds", "[", "proteinId", "]", "]" ]
Return the number of the final inversion result .
def read_lastmodfile ( directory ) : filename = '{0}/exe/inv.lastmod' . format ( directory ) # filename HAS to exist. Otherwise the inversion was not finished if ( not os . path . isfile ( filename ) ) : return None linestring = open ( filename , 'r' ) . readline ( ) . strip ( ) linestring = linestring . replace ( "\n" , '' ) linestring = linestring . replace ( ".mag" , '' ) linestring = linestring . replace ( "../inv/rho" , '' ) return linestring
12,280
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/td_residuals.py#L63-L76
[ "def", "plotDutyCycles", "(", "dutyCycle", ",", "filePath", ")", ":", "_", ",", "entropy", "=", "binaryEntropy", "(", "dutyCycle", ")", "bins", "=", "np", ".", "linspace", "(", "0.0", ",", "0.3", ",", "200", ")", "plt", ".", "hist", "(", "dutyCycle", ",", "bins", ",", "alpha", "=", "0.5", ",", "label", "=", "'All cols'", ")", "plt", ".", "title", "(", "\"Histogram of duty cycles, entropy=\"", "+", "str", "(", "float", "(", "entropy", ")", ")", ")", "plt", ".", "xlabel", "(", "\"Duty cycle\"", ")", "plt", ".", "ylabel", "(", "\"Number of units\"", ")", "plt", ".", "savefig", "(", "filePath", ")", "plt", ".", "close", "(", ")" ]
Set an handler for given event .
def setHandler ( self , event_name , callback ) : if event_name not in self . handlers : raise ValueError ( '{} is not a valid event' . format ( event_name ) ) if callable ( event_name ) : raise TypeError ( '{} is not callable' . format ( callback ) ) self . handlers [ event_name ] = callback
12,281
https://github.com/Riminder/python-riminder-api/blob/01279f0ece08cf3d1dd45f76de6d9edf7fafec90/riminder/webhook.py#L59-L65
[ "def", "save", "(", "self", ",", "create_multiple_renditions", "=", "True", ",", "preserve_source_rendition", "=", "True", ",", "encode_to", "=", "enums", ".", "EncodeToEnum", ".", "FLV", ")", ":", "if", "is_ftp_connection", "(", "self", ".", "connection", ")", "and", "len", "(", "self", ".", "assets", ")", ">", "0", ":", "self", ".", "connection", ".", "post", "(", "xml", "=", "self", ".", "to_xml", "(", ")", ",", "assets", "=", "self", ".", "assets", ")", "elif", "not", "self", ".", "id", "and", "self", ".", "_filename", ":", "self", ".", "id", "=", "self", ".", "connection", ".", "post", "(", "'create_video'", ",", "self", ".", "_filename", ",", "create_multiple_renditions", "=", "create_multiple_renditions", ",", "preserve_source_rendition", "=", "preserve_source_rendition", ",", "encode_to", "=", "encode_to", ",", "video", "=", "self", ".", "_to_dict", "(", ")", ")", "elif", "not", "self", ".", "id", "and", "len", "(", "self", ".", "renditions", ")", ">", "0", ":", "self", ".", "id", "=", "self", ".", "connection", ".", "post", "(", "'create_video'", ",", "video", "=", "self", ".", "_to_dict", "(", ")", ")", "elif", "self", ".", "id", ":", "data", "=", "self", ".", "connection", ".", "post", "(", "'update_video'", ",", "video", "=", "self", ".", "_to_dict", "(", ")", ")", "if", "data", ":", "self", ".", "_load", "(", "data", ")" ]
Check if an event has an handler .
def isHandlerPresent ( self , event_name ) : if event_name not in self . handlers : raise ValueError ( '{} is not a valid event' . format ( event_name ) ) return self . handlers [ event_name ] is not None
12,282
https://github.com/Riminder/python-riminder-api/blob/01279f0ece08cf3d1dd45f76de6d9edf7fafec90/riminder/webhook.py#L67-L71
[ "def", "generate_http_manifest", "(", "self", ")", ":", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "translate_path", "(", "self", ".", "path", ")", ")", "self", ".", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "base_path", ")", "admin_metadata_fpath", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "\".dtool\"", ",", "\"dtool\"", ")", "with", "open", "(", "admin_metadata_fpath", ")", "as", "fh", ":", "admin_metadata", "=", "json", ".", "load", "(", "fh", ")", "http_manifest", "=", "{", "\"admin_metadata\"", ":", "admin_metadata", ",", "\"manifest_url\"", ":", "self", ".", "generate_url", "(", "\".dtool/manifest.json\"", ")", ",", "\"readme_url\"", ":", "self", ".", "generate_url", "(", "\"README.yml\"", ")", ",", "\"overlays\"", ":", "self", ".", "generate_overlay_urls", "(", ")", ",", "\"item_urls\"", ":", "self", ".", "generate_item_urls", "(", ")", "}", "return", "bytes", "(", "json", ".", "dumps", "(", "http_manifest", ")", ",", "\"utf-8\"", ")" ]
Remove handler for given event .
def removeHandler ( self , event_name ) : if event_name not in self . handlers : raise ValueError ( '{} is not a valid event' . format ( event_name ) ) self . handlers [ event_name ] = None
12,283
https://github.com/Riminder/python-riminder-api/blob/01279f0ece08cf3d1dd45f76de6d9edf7fafec90/riminder/webhook.py#L73-L77
[ "def", "save", "(", "self", ",", "create_multiple_renditions", "=", "True", ",", "preserve_source_rendition", "=", "True", ",", "encode_to", "=", "enums", ".", "EncodeToEnum", ".", "FLV", ")", ":", "if", "is_ftp_connection", "(", "self", ".", "connection", ")", "and", "len", "(", "self", ".", "assets", ")", ">", "0", ":", "self", ".", "connection", ".", "post", "(", "xml", "=", "self", ".", "to_xml", "(", ")", ",", "assets", "=", "self", ".", "assets", ")", "elif", "not", "self", ".", "id", "and", "self", ".", "_filename", ":", "self", ".", "id", "=", "self", ".", "connection", ".", "post", "(", "'create_video'", ",", "self", ".", "_filename", ",", "create_multiple_renditions", "=", "create_multiple_renditions", ",", "preserve_source_rendition", "=", "preserve_source_rendition", ",", "encode_to", "=", "encode_to", ",", "video", "=", "self", ".", "_to_dict", "(", ")", ")", "elif", "not", "self", ".", "id", "and", "len", "(", "self", ".", "renditions", ")", ">", "0", ":", "self", ".", "id", "=", "self", ".", "connection", ".", "post", "(", "'create_video'", ",", "video", "=", "self", ".", "_to_dict", "(", ")", ")", "elif", "self", ".", "id", ":", "data", "=", "self", ".", "connection", ".", "post", "(", "'update_video'", ",", "video", "=", "self", ".", "_to_dict", "(", ")", ")", "if", "data", ":", "self", ".", "_load", "(", "data", ")" ]
Get the number of argument of a fuction .
def _get_fct_number_of_arg ( self , fct ) : py_version = sys . version_info [ 0 ] if py_version >= 3 : return len ( inspect . signature ( fct ) . parameters ) return len ( inspect . getargspec ( fct ) [ 0 ] )
12,284
https://github.com/Riminder/python-riminder-api/blob/01279f0ece08cf3d1dd45f76de6d9edf7fafec90/riminder/webhook.py#L95-L100
[ "def", "configure", "(", "self", ",", "organization", ",", "base_url", "=", "''", ",", "ttl", "=", "''", ",", "max_ttl", "=", "''", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'organization'", ":", "organization", ",", "'base_url'", ":", "base_url", ",", "'ttl'", ":", "ttl", ",", "'max_ttl'", ":", "max_ttl", ",", "}", "api_path", "=", "'/v1/auth/{mount_point}/config'", ".", "format", "(", "mount_point", "=", "mount_point", ")", "return", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")" ]
Event tracking handler
def event_tracker ( func ) : @ wraps ( func ) async def wrapper ( * args , * * kwargs ) : """ Wraps function to provide redis tracking """ event = Event ( args [ 0 ] ) session = kwargs [ 'session' ] service_name = session . name await track_event ( event , EventState . started , service_name ) await func ( * args , * * kwargs ) await track_event ( event , EventState . completed , service_name ) return wrapper
12,285
https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/service.py#L19-L35
[ "def", "open_pager", "(", "self", ")", ":", "n_rows", ",", "n_cols", "=", "self", ".", "term", ".", "stdscr", ".", "getmaxyx", "(", ")", "if", "self", ".", "config", "[", "'max_pager_cols'", "]", "is", "not", "None", ":", "n_cols", "=", "min", "(", "n_cols", ",", "self", ".", "config", "[", "'max_pager_cols'", "]", ")", "data", "=", "self", ".", "get_selected_item", "(", ")", "if", "data", "[", "'type'", "]", "==", "'Submission'", ":", "text", "=", "'\\n\\n'", ".", "join", "(", "(", "data", "[", "'permalink'", "]", ",", "data", "[", "'text'", "]", ")", ")", "self", ".", "term", ".", "open_pager", "(", "text", ",", "wrap", "=", "n_cols", ")", "elif", "data", "[", "'type'", "]", "==", "'Comment'", ":", "text", "=", "'\\n\\n'", ".", "join", "(", "(", "data", "[", "'permalink'", "]", ",", "data", "[", "'body'", "]", ")", ")", "self", ".", "term", ".", "open_pager", "(", "text", ",", "wrap", "=", "n_cols", ")", "else", ":", "self", ".", "term", ".", "flash", "(", ")" ]
Intends to ensure the received the request is ajax request and it is included in the valid request methods
def ensure_ajax ( valid_request_methods , error_response_context = None ) : def real_decorator ( view_func ) : def wrap_func ( request , * args , * * kwargs ) : if not isinstance ( request , HttpRequest ) : # make sure the request is a django httprequest return generate_error_json_response ( "Invalid request!" , error_response_context ) elif not request . is_ajax ( ) : # ensure the request is an ajax request return generate_error_json_response ( "Invalid request type!" , error_response_context ) elif request . method not in valid_request_methods : # check if the request method is in allowed request methods return generate_error_json_response ( "Invalid request method!" , error_response_context ) else : return view_func ( request , * args , * * kwargs ) wrap_func . __doc__ = view_func . __doc__ wrap_func . __name__ = view_func . __name__ return wrap_func return real_decorator
12,286
https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/utils/decorator.py#L8-L38
[ "def", "ParentAndBaseName", "(", "path", ")", ":", "dirname", ",", "basename", "=", "os", ".", "path", ".", "split", "(", "path", ")", "dirname", "=", "dirname", ".", "rstrip", "(", "os", ".", "path", ".", "sep", ")", "if", "os", ".", "path", ".", "altsep", ":", "dirname", "=", "dirname", ".", "rstrip", "(", "os", ".", "path", ".", "altsep", ")", "_", ",", "parentname", "=", "os", ".", "path", ".", "split", "(", "dirname", ")", "return", "os", ".", "path", ".", "join", "(", "parentname", ",", "basename", ")" ]
Intends to build an error json response . If the error_response_context is None then we generate this response using data tables format
def generate_error_json_response ( error_dict , error_response_context = None ) : response = error_dict if isinstance ( error_dict , str ) : response = { "error" : response } if error_response_context is None : error_response_context = { 'draw' : 0 , 'recordsTotal' : 0 , 'recordsFiltered' : 0 , 'data' : [ ] } response . update ( error_response_context ) return JsonResponse ( response )
12,287
https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/utils/decorator.py#L41-L59
[ "def", "statistic_recommend", "(", "classes", ",", "P", ")", ":", "if", "imbalance_check", "(", "P", ")", ":", "return", "IMBALANCED_RECOMMEND", "if", "binary_check", "(", "classes", ")", ":", "return", "BINARY_RECOMMEND", "return", "MULTICLASS_RECOMMEND" ]
Exported symbols for a given package does not have any prefix . So I can drop all import paths that are file specific and merge all symbols . Assuming all files in the given package has mutual exclusive symbols .
def _mergeGoSymbols ( self , jsons = [ ] ) : # <siXy> imports are per file, exports are per package # on the highest level we have: pkgname, types, funcs, vars, imports. symbols = { } symbols [ "types" ] = [ ] symbols [ "funcs" ] = [ ] symbols [ "vars" ] = [ ] for file_json in jsons : symbols [ "types" ] += file_json [ "types" ] symbols [ "funcs" ] += file_json [ "funcs" ] symbols [ "vars" ] += file_json [ "vars" ] return symbols
12,288
https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/go/symbolsextractor/extractor.py#L203-L222
[ "def", "rich_presence", "(", "self", ")", ":", "kvs", "=", "self", ".", "get_ps", "(", "'rich_presence'", ")", "data", "=", "{", "}", "if", "kvs", ":", "for", "kv", "in", "kvs", ":", "data", "[", "kv", ".", "key", "]", "=", "kv", ".", "value", "return", "data" ]
Read n bytes from mapped view .
def read ( self , n ) : out = ctypes . create_string_buffer ( n ) ctypes . windll . kernel32 . RtlMoveMemory ( out , self . view + self . pos , n ) self . pos += n return out . raw
12,289
https://github.com/NikolayDachev/jadm/blob/12bb550445edfcd87506f7cba7a6a35d413c5511/lib/paramiko-1.14.1/paramiko/_winapi.py#L145-L152
[ "def", "set_keepalive", "(", "self", ",", "interval", ")", ":", "pinger", "=", "functools", ".", "partial", "(", "self", ".", "ping", ",", "'keep-alive'", ")", "self", ".", "reactor", ".", "scheduler", ".", "execute_every", "(", "period", "=", "interval", ",", "func", "=", "pinger", ")" ]
Converts fluents to tensors with datatype tf . float32 .
def _output ( cls , fluents : Sequence [ FluentPair ] ) -> Sequence [ tf . Tensor ] : output = [ ] for _ , fluent in fluents : tensor = fluent . tensor if tensor . dtype != tf . float32 : tensor = tf . cast ( tensor , tf . float32 ) output . append ( tensor ) return tuple ( output )
12,290
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/transition_simulator.py#L124-L132
[ "def", "getData", "(", "file_id", ",", "ra", ",", "dec", ")", ":", "DATA", "=", "\"www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca\"", "BASE", "=", "\"http://\"", "+", "DATA", "+", "\"/authProxy/getData\"", "archive", "=", "\"CFHT\"", "wcs", "=", "\"corrected\"", "import", "re", "groups", "=", "re", ".", "match", "(", "'^(?P<file_id>\\d{6}).*'", ",", "file_id", ")", "if", "not", "groups", ":", "return", "None", "file_id", "=", "groups", ".", "group", "(", "'file_id'", ")", "file_id", "+=", "\"p\"", "#### THIS IS NOT WORKING YET....", "URL", "=", "BASE", "+", "\"?dataset_name=\"", "+", "file_id", "+", "\"&cutout=circle(\"", "+", "str", "(", "ra", "*", "57.3", ")", "+", "\",\"", "URL", "+=", "str", "(", "dec", "*", "57.3", ")", "+", "\",\"", "+", "str", "(", "5.0", "/", "60.0", ")", "+", "\")\"", "return", "URL" ]
Sets a hyperparameter . Can be used to set an array of hyperparameters .
def set ( self , key , value ) : self . store [ key ] = value return self . store
12,291
https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/hyperchamber/selector.py#L29-L32
[ "def", "remove_expired_multipartobjects", "(", ")", ":", "delta", "=", "current_app", ".", "config", "[", "'FILES_REST_MULTIPART_EXPIRES'", "]", "expired_dt", "=", "datetime", ".", "utcnow", "(", ")", "-", "delta", "file_ids", "=", "[", "]", "for", "mp", "in", "MultipartObject", ".", "query_expired", "(", "expired_dt", ")", ":", "file_ids", ".", "append", "(", "str", "(", "mp", ".", "file_id", ")", ")", "mp", ".", "delete", "(", ")", "for", "fid", "in", "file_ids", ":", "remove_file_data", ".", "delay", "(", "fid", ")" ]
Gets the ith config
def config_at ( self , i ) : selections = { } for key in self . store : value = self . store [ key ] if isinstance ( value , list ) : selected = i % len ( value ) i = i // len ( value ) selections [ key ] = value [ selected ] else : selections [ key ] = value return Config ( selections )
12,292
https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/hyperchamber/selector.py#L77-L89
[ "def", "HandleMessageBundles", "(", "self", ",", "request_comms", ",", "response_comms", ")", ":", "messages", ",", "source", ",", "timestamp", "=", "self", ".", "_communicator", ".", "DecodeMessages", "(", "request_comms", ")", "now", "=", "time", ".", "time", "(", ")", "if", "messages", ":", "# Receive messages in line.", "self", ".", "ReceiveMessages", "(", "source", ",", "messages", ")", "# We send the client a maximum of self.max_queue_size messages", "required_count", "=", "max", "(", "0", ",", "self", ".", "max_queue_size", "-", "request_comms", ".", "queue_size", ")", "tasks", "=", "[", "]", "message_list", "=", "rdf_flows", ".", "MessageList", "(", ")", "# Only give the client messages if we are able to receive them in a", "# reasonable time.", "if", "time", ".", "time", "(", ")", "-", "now", "<", "10", ":", "tasks", "=", "self", ".", "DrainTaskSchedulerQueueForClient", "(", "source", ",", "required_count", ")", "message_list", ".", "job", "=", "tasks", "# Encode the message_list in the response_comms using the same API version", "# the client used.", "self", ".", "_communicator", ".", "EncodeMessages", "(", "message_list", ",", "response_comms", ",", "destination", "=", "source", ",", "timestamp", "=", "timestamp", ",", "api_version", "=", "request_comms", ".", "api_version", ")", "return", "source", ",", "len", "(", "messages", ")" ]
Get the best results according to your custom sort method .
def top ( self , sort_by ) : sort = sorted ( self . results , key = sort_by ) return sort
12,293
https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/hyperchamber/selector.py#L101-L104
[ "def", "get_independencies", "(", "self", ",", "latex", "=", "False", ")", ":", "independencies", "=", "Independencies", "(", ")", "for", "start", "in", "(", "self", ".", "nodes", "(", ")", ")", ":", "rest", "=", "set", "(", "self", ".", "nodes", "(", ")", ")", "-", "{", "start", "}", "for", "r", "in", "range", "(", "len", "(", "rest", ")", ")", ":", "for", "observed", "in", "itertools", ".", "combinations", "(", "rest", ",", "r", ")", ":", "d_seperated_variables", "=", "rest", "-", "set", "(", "observed", ")", "-", "set", "(", "self", ".", "active_trail_nodes", "(", "start", ",", "observed", "=", "observed", ")", "[", "start", "]", ")", "if", "d_seperated_variables", ":", "independencies", ".", "add_assertions", "(", "[", "start", ",", "d_seperated_variables", ",", "observed", "]", ")", "independencies", ".", "reduce", "(", ")", "if", "not", "latex", ":", "return", "independencies", "else", ":", "return", "independencies", ".", "latex_string", "(", ")" ]
Loads a config from disk . Defaults to a random config if none is specified
def load_or_create_config ( self , filename , config = None ) : os . makedirs ( os . path . dirname ( os . path . expanduser ( filename ) ) , exist_ok = True ) if os . path . exists ( filename ) : return self . load ( filename ) if ( config == None ) : config = self . random_config ( ) self . save ( filename , config ) return config
12,294
https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/hyperchamber/selector.py#L115-L125
[ "def", "wnreld", "(", "a", ",", "op", ",", "b", ")", ":", "assert", "isinstance", "(", "a", ",", "stypes", ".", "SpiceCell", ")", "assert", "b", ".", "dtype", "==", "1", "assert", "isinstance", "(", "b", ",", "stypes", ".", "SpiceCell", ")", "assert", "a", ".", "dtype", "==", "1", "assert", "isinstance", "(", "op", ",", "str", ")", "op", "=", "stypes", ".", "stringToCharP", "(", "op", ".", "encode", "(", "encoding", "=", "'UTF-8'", ")", ")", "return", "bool", "(", "libspice", ".", "wnreld_c", "(", "ctypes", ".", "byref", "(", "a", ")", ",", "op", ",", "ctypes", ".", "byref", "(", "b", ")", ")", ")" ]
Prepare the system to be ready for an undercloud installation .
def configure ( self , repositories ) : self . enable_repositories ( repositories ) self . create_stack_user ( ) self . install_base_packages ( ) self . clean_system ( ) self . yum_update ( allow_reboot = True ) self . install_osp ( ) self . set_selinux ( 'permissive' ) self . fix_hostname ( )
12,295
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/undercloud.py#L32-L42
[ "def", "clean_weight_files", "(", "cls", ")", ":", "deleted", "=", "[", "]", "for", "f", "in", "cls", ".", "_files", ":", "try", ":", "os", ".", "remove", "(", "f", ")", "deleted", ".", "append", "(", "f", ")", "except", "FileNotFoundError", ":", "pass", "print", "(", "'Deleted %d weight files'", "%", "len", "(", "deleted", ")", ")", "cls", ".", "_files", "=", "[", "]" ]
Deploy an undercloud on the host .
def openstack_undercloud_install ( self ) : instack_undercloud_ver , _ = self . run ( 'repoquery --whatprovides /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp' ) if instack_undercloud_ver . rstrip ( '\n' ) == 'instack-undercloud-0:2.2.0-1.el7ost.noarch' : LOG . warn ( 'Workaround for BZ1298189' ) self . run ( "sed -i \"s/.*Keystone_domain\['heat_domain'\].*/Service\['keystone'\] -> Class\['::keystone::roles::admin'\] -> Class\['::heat::keystone::domain'\]/\" /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp" ) self . run ( 'OS_PASSWORD=bob openstack undercloud install' , user = 'stack' ) # NOTE(Gonéri): we also need this after the overcloud deployment if self . run ( 'rpm -qa openstack-ironic-api' ) [ 0 ] . rstrip ( '\n' ) == 'openstack-ironic-api-4.2.2-3.el7ost.noarch' : LOG . warn ( 'Workaround for BZ1297796' ) self . run ( 'systemctl start openstack-ironic-api.service' ) self . add_environment_file ( user = 'stack' , filename = 'stackrc' ) self . run ( 'heat stack-list' , user = 'stack' )
12,296
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/undercloud.py#L56-L70
[ "def", "_from_dict", "(", "cls", ",", "_dict", ")", ":", "args", "=", "{", "}", "if", "'request'", "in", "_dict", ":", "args", "[", "'request'", "]", "=", "MessageRequest", ".", "_from_dict", "(", "_dict", ".", "get", "(", "'request'", ")", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'request\\' not present in Log JSON'", ")", "if", "'response'", "in", "_dict", ":", "args", "[", "'response'", "]", "=", "MessageResponse", ".", "_from_dict", "(", "_dict", ".", "get", "(", "'response'", ")", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'response\\' not present in Log JSON'", ")", "if", "'log_id'", "in", "_dict", ":", "args", "[", "'log_id'", "]", "=", "_dict", ".", "get", "(", "'log_id'", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'log_id\\' not present in Log JSON'", ")", "if", "'request_timestamp'", "in", "_dict", ":", "args", "[", "'request_timestamp'", "]", "=", "_dict", ".", "get", "(", "'request_timestamp'", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'request_timestamp\\' not present in Log JSON'", ")", "if", "'response_timestamp'", "in", "_dict", ":", "args", "[", "'response_timestamp'", "]", "=", "_dict", ".", "get", "(", "'response_timestamp'", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'response_timestamp\\' not present in Log JSON'", ")", "if", "'workspace_id'", "in", "_dict", ":", "args", "[", "'workspace_id'", "]", "=", "_dict", ".", "get", "(", "'workspace_id'", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'workspace_id\\' not present in Log JSON'", ")", "if", "'language'", "in", "_dict", ":", "args", "[", "'language'", "]", "=", "_dict", ".", "get", "(", "'language'", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'language\\' not present in Log JSON'", ")", "return", "cls", "(", "*", "*", "args", ")" ]
Create a new baremetal flavor .
def create_flavor ( self , name ) : self . add_environment_file ( user = 'stack' , filename = 'stackrc' ) self . run ( 'openstack flavor create --id auto --ram 4096 --disk 40 --vcpus 1 baremetal' , user = 'stack' , success_status = ( 0 , 1 ) ) self . run ( 'openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" baremetal' , user = 'stack' ) self . run ( 'openstack flavor set --property "capabilities:profile"="baremetal" baremetal' , user = 'stack' )
12,297
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/undercloud.py#L188-L196
[ "def", "smooth_angle_channels", "(", "self", ",", "channels", ")", ":", "for", "vertex", "in", "self", ".", "vertices", ":", "for", "col", "in", "vertex", ".", "meta", "[", "'rot_ind'", "]", ":", "if", "col", ":", "for", "k", "in", "range", "(", "1", ",", "channels", ".", "shape", "[", "0", "]", ")", ":", "diff", "=", "channels", "[", "k", ",", "col", "]", "-", "channels", "[", "k", "-", "1", ",", "col", "]", "if", "abs", "(", "diff", "+", "360.", ")", "<", "abs", "(", "diff", ")", ":", "channels", "[", "k", ":", ",", "col", "]", "=", "channels", "[", "k", ":", ",", "col", "]", "+", "360.", "elif", "abs", "(", "diff", "-", "360.", ")", "<", "abs", "(", "diff", ")", ":", "channels", "[", "k", ":", ",", "col", "]", "=", "channels", "[", "k", ":", ",", "col", "]", "-", "360." ]
List the Ironic nodes UUID .
def list_nodes ( self ) : self . add_environment_file ( user = 'stack' , filename = 'stackrc' ) ret , _ = self . run ( "ironic node-list --fields uuid|awk '/-.*-/ {print $2}'" , user = 'stack' ) # NOTE(Gonéri): the good new is, the order of the nodes is preserved and follow the one from # the instackenv.json, BUT it may be interesting to add a check. return ret . split ( )
12,298
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/undercloud.py#L198-L204
[ "def", "_read", "(", "self", ",", "size", ")", ":", "if", "self", ".", "comptype", "==", "\"tar\"", ":", "return", "self", ".", "__read", "(", "size", ")", "c", "=", "len", "(", "self", ".", "dbuf", ")", "while", "c", "<", "size", ":", "buf", "=", "self", ".", "__read", "(", "self", ".", "bufsize", ")", "if", "not", "buf", ":", "break", "try", ":", "buf", "=", "self", ".", "cmp", ".", "decompress", "(", "buf", ")", "except", "IOError", ":", "raise", "ReadError", "(", "\"invalid compressed data\"", ")", "self", ".", "dbuf", "+=", "buf", "c", "+=", "len", "(", "buf", ")", "buf", "=", "self", ".", "dbuf", "[", ":", "size", "]", "self", ".", "dbuf", "=", "self", ".", "dbuf", "[", "size", ":", "]", "return", "buf" ]
Set a flavor to a given ironic node .
def set_flavor ( self , node , flavor ) : command = ( 'ironic node-update {uuid} add ' 'properties/capabilities=profile:{flavor},boot_option:local' ) . format ( uuid = node . uuid , flavor = flavor ) node . flavor = flavor self . add_environment_file ( user = 'stack' , filename = 'stackrc' ) self . run ( command , user = 'stack' )
12,299
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/undercloud.py#L206-L219
[ "def", "delete", "(", "table", ",", "session", ",", "conds", ")", ":", "with", "session", ".", "begin_nested", "(", ")", ":", "archive_conds_list", "=", "_get_conditions_list", "(", "table", ",", "conds", ")", "session", ".", "execute", "(", "sa", ".", "delete", "(", "table", ".", "ArchiveTable", ",", "whereclause", "=", "_get_conditions", "(", "archive_conds_list", ")", ")", ")", "conds_list", "=", "_get_conditions_list", "(", "table", ",", "conds", ",", "archive", "=", "False", ")", "session", ".", "execute", "(", "sa", ".", "delete", "(", "table", ",", "whereclause", "=", "_get_conditions", "(", "conds_list", ")", ")", ")" ]