query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Set a cell s value with a series of safety checks
def set_cell(self, i, j, value):
    """Assign ``value`` to cell (i, j) after a series of safety checks.

    The value must be a live candidate for the cell, its row, its column
    and its box, and must not already appear in the cell's row, column
    or box.  Raises ``SudokuHasNoSolutionError`` on any violation.
    """
    box_index = (i // self.order) * self.order + (j // self.order)
    checks = (
        value in self._possibles[i][j],
        value in self._poss_rows[i],
        value in self._poss_cols[j],
        value in self._poss_box[box_index],
        value not in self.row(i),
        value not in self.col(j),
        value not in self.box(i, j),
    )
    if not all(checks):
        raise SudokuHasNoSolutionError("This value cannot be set here!")
    self[i][j] = value
7,100
https://github.com/hbldh/dlxsudoku/blob/8d774e0883eb615533d04f07e58a95db716226e0/dlxsudoku/sudoku.py#L177-L202
[ "def", "from_config", "(", "config", ",", "*", "*", "options", ")", ":", "expected_args", "=", "(", "'prefix'", ",", "'realclass'", ")", "for", "arg", "in", "expected_args", ":", "if", "arg", "not", "in", "options", ":", "msg", "=", "\"Required option missing: {0}\"", "raise", "rconfig", ".", "ConfigurationError", "(", "msg", ".", "format", "(", "arg", ")", ")", "# Not logging unrecognized options here, because they might be used", "# by the real event store instantiated below.", "classpath", "=", "options", "[", "'realclass'", "]", "classpath_pieces", "=", "classpath", ".", "split", "(", "'.'", ")", "classname", "=", "classpath_pieces", "[", "-", "1", "]", "modulepath", "=", "'.'", ".", "join", "(", "classpath_pieces", "[", "0", ":", "-", "1", "]", ")", "module", "=", "importlib", ".", "import_module", "(", "modulepath", ")", "estore_class", "=", "getattr", "(", "module", ",", "classname", ")", "return", "RotatedEventStore", "(", "lambda", "fname", ":", "estore_class", "(", "fname", ")", ",", "options", "[", "'path'", "]", ",", "options", "[", "'prefix'", "]", ")" ]
Solve the Sudoku .
def solve(self, verbose=False, allow_brute_force=True):
    """Solve the Sudoku.

    Iterates naked/hidden single elimination until solved; optionally
    falls back to a Dancing Links brute-force search.

    :param verbose: If True, print the number of iterations and the steps taken.
    :param allow_brute_force: If True, fall back to the Dancing Links solver
        when logic alone cannot progress; otherwise raise
        ``SudokuTooDifficultError``.
    :raises SudokuHasNoSolutionError: if no solution exists or brute force fails.
    :raises SudokuHasMultipleSolutionsError: if more than one solution exists.
    :raises SudokuTooDifficultError: if logic stalls and brute force is disallowed.
    """
    while not self.is_solved:
        # Update possibles arrays.
        self._update()
        # See if any position can be singled out.
        singles_found = False or self._fill_naked_singles() or self._fill_hidden_singles()
        # If singles_found is False, then no new uniquely defined cells were found
        # and this solver cannot solve the Sudoku. We either use brute force or throw an error.
        # Else, if singles_found is True, run another iteration to see if new singles have shown up.
        if not singles_found:
            if allow_brute_force:
                solution = None
                try:
                    dlxs = DancingLinksSolver(copy.deepcopy(self._matrix))
                    solutions = dlxs.solve()
                    solution = next(solutions)
                    # Requesting a second solution probes for ambiguity:
                    # StopIteration here means the solution is unique.
                    more_solutions = next(solutions)
                except StopIteration as e:
                    if solution is not None:
                        self._matrix = solution
                    else:
                        raise SudokuHasNoSolutionError("Dancing Links solver could not find any solution.")
                except Exception as e:
                    raise SudokuHasNoSolutionError("Brute Force method failed.")
                else:
                    # We end up here if the second `next(solutions)` works,
                    # i.e. if multiple solutions exist.
                    raise SudokuHasMultipleSolutionsError("This Sudoku has multiple solutions!")
                self.solution_steps.append("BRUTE FORCE - Dancing Links")
                break
            else:
                print(self)
                raise SudokuTooDifficultError("This Sudoku requires more advanced methods!")
    if verbose:
        print("Sudoku solved in {0} iterations!\n{1}".format(len(self.solution_steps), self))
        for step in self.solution_steps:
            print(step)
7,101
https://github.com/hbldh/dlxsudoku/blob/8d774e0883eb615533d04f07e58a95db716226e0/dlxsudoku/sudoku.py#L209-L257
[ "def", "delete", "(", "self", ",", "dataset", ")", ":", "url", "=", "self", ".", "_get_url", "(", "'/api/1.0/meta/dataset/{}/delete'", ".", "format", "(", "dataset", ")", ")", "json_data", "=", "''", "binary_data", "=", "json_data", ".", "encode", "(", ")", "headers", "=", "self", ".", "_get_request_headers", "(", ")", "req", "=", "urllib", ".", "request", ".", "Request", "(", "url", ",", "binary_data", ",", "headers", ")", "resp", "=", "urllib", ".", "request", ".", "urlopen", "(", "req", ")", "str_response", "=", "resp", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "if", "str_response", "!=", "'\"successful\"'", "or", "resp", ".", "status", "<", "200", "or", "resp", ".", "status", ">=", "300", ":", "msg", "=", "'Dataset has not been deleted, because of the following error(s): {}'", ".", "format", "(", "str_response", ")", "raise", "ValueError", "(", "msg", ")" ]
Calculate remaining values for each row column box and finally cell .
def _update(self):
    """Recompute the remaining candidate values for each row, column,
    box and, finally, each cell."""
    # Candidates per row/column/box: all values minus those already placed.
    for idx, (row, col, box) in enumerate(
            zip(self.row_iter(), self.col_iter(), self.box_iter())):
        self._poss_rows[idx] = set(self._values).difference(set(row))
        self._poss_cols[idx] = set(self._values).difference(set(col))
        self._poss_box[idx] = set(self._values).difference(set(box))
    # Combine row/column/box information into a candidate set per cell.
    for i in utils.range_(self.side):
        self._possibles[i] = {}
        for j in utils.range_(self.side):
            self._possibles[i][j] = set()
            if self[i][j] > 0:
                # Cell already determined: no candidates needed.
                continue
            box_index = ((i // self.order) * self.order) + (j // self.order)
            self._possibles[i][j] = (
                self._poss_rows[i]
                & self._poss_cols[j]
                & self._poss_box[box_index]
            )
7,102
https://github.com/hbldh/dlxsudoku/blob/8d774e0883eb615533d04f07e58a95db716226e0/dlxsudoku/sudoku.py#L259-L277
[ "def", "create_ambiente", "(", "self", ")", ":", "return", "Ambiente", "(", "self", ".", "networkapi_url", ",", "self", ".", "user", ",", "self", ".", "password", ",", "self", ".", "user_ldap", ")" ]
Look for naked singles i . e . cells with only one possible value .
def _fill_naked_singles(self):
    """Fill in naked singles, i.e. cells with only one possible value.

    Returns True if at least one cell was filled in.
    Raises ``SudokuHasNoSolutionError`` if a cell has no candidates left.
    """
    found_any = False
    for r in utils.range_(self.side):
        for c in utils.range_(self.side):
            if self[r][c] > 0:
                continue
            candidates = self._possibles[r][c]
            if len(candidates) == 0:
                # A contradiction was introduced earlier: dead end.
                raise SudokuHasNoSolutionError(
                    "Error made! No possible value for ({0},{1})!".format(r + 1, c + 1))
            if len(candidates) == 1:
                self.set_cell(r, c, list(candidates)[0])
                self.solution_steps.append(
                    self._format_step("NAKED", (r, c), self[r][c]))
                found_any = True
    return found_any
7,103
https://github.com/hbldh/dlxsudoku/blob/8d774e0883eb615533d04f07e58a95db716226e0/dlxsudoku/sudoku.py#L279-L299
[ "def", "setOverlayTransformOverlayRelative", "(", "self", ",", "ulOverlayHandle", ",", "ulOverlayHandleParent", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayTransformOverlayRelative", "pmatParentOverlayToOverlayTransform", "=", "HmdMatrix34_t", "(", ")", "result", "=", "fn", "(", "ulOverlayHandle", ",", "ulOverlayHandleParent", ",", "byref", "(", "pmatParentOverlayToOverlayTransform", ")", ")", "return", "result", ",", "pmatParentOverlayToOverlayTransform" ]
Look for hidden singles i . e . cells with only one unique possible value in row column or box .
def _fill_hidden_singles(self):
    """Look for hidden singles, i.e. cells holding the only occurrence of a
    candidate value in their row, column or box.

    Returns True as soon as one hidden single is filled in (the caller
    re-runs the update pass before looking for more); False if none found.
    """
    for i in utils.range_(self.side):
        box_i = (i // self.order) * self.order
        for j in utils.range_(self.side):
            box_j = (j // self.order) * self.order
            # Skip if this cell is determined already.
            if self[i][j] > 0:
                continue
            # Look for hidden single in rows.
            # Subtract every other cell's candidates in this row; whatever
            # survives can only go here.
            p = self._possibles[i][j]
            for k in utils.range_(self.side):
                if k == j:
                    continue
                p = p.difference(self._possibles[i][k])
            if len(p) == 1:
                # Found a hidden single in a row!
                self.set_cell(i, j, p.pop())
                self.solution_steps.append(self._format_step("HIDDEN-ROW", (i, j), self[i][j]))
                return True
            # Look for hidden single in columns
            p = self._possibles[i][j]
            for k in utils.range_(self.side):
                if k == i:
                    continue
                p = p.difference(self._possibles[k][j])
            if len(p) == 1:
                # Found a hidden single in a column!
                self.set_cell(i, j, p.pop())
                self.solution_steps.append(self._format_step("HIDDEN-COL", (i, j), self[i][j]))
                return True
            # Look for hidden single in box
            p = self._possibles[i][j]
            for k in utils.range_(box_i, box_i + self.order):
                for kk in utils.range_(box_j, box_j + self.order):
                    if k == i and kk == j:
                        continue
                    p = p.difference(self._possibles[k][kk])
            if len(p) == 1:
                # Found a hidden single in a box!
                self.set_cell(i, j, p.pop())
                self.solution_steps.append(self._format_step("HIDDEN-BOX", (i, j), self[i][j]))
                return True
    return False
7,104
https://github.com/hbldh/dlxsudoku/blob/8d774e0883eb615533d04f07e58a95db716226e0/dlxsudoku/sudoku.py#L301-L353
[ "def", "_GetDelayImportTimestamps", "(", "self", ",", "pefile_object", ")", ":", "delay_import_timestamps", "=", "[", "]", "if", "not", "hasattr", "(", "pefile_object", ",", "'DIRECTORY_ENTRY_DELAY_IMPORT'", ")", ":", "return", "delay_import_timestamps", "for", "importdata", "in", "pefile_object", ".", "DIRECTORY_ENTRY_DELAY_IMPORT", ":", "dll_name", "=", "importdata", ".", "dll", "try", ":", "dll_name", "=", "dll_name", ".", "decode", "(", "'ascii'", ")", "except", "UnicodeDecodeError", ":", "dll_name", "=", "dll_name", ".", "decode", "(", "'ascii'", ",", "errors", "=", "'replace'", ")", "timestamp", "=", "getattr", "(", "importdata", ".", "struct", ",", "'dwTimeStamp'", ",", "0", ")", "delay_import_timestamps", ".", "append", "(", "[", "dll_name", ",", "timestamp", "]", ")", "return", "delay_import_timestamps" ]
Given a dict representation of a json object generate a DTS Collection
def parse(cls, resource, direction="children", **additional_parameters) -> "DtsCollection":
    """Build a DtsCollection from a dict representation of a JSON(-LD) object.

    :param resource: Dict representation of the JSON object.
    :param direction: Direction in which members are parsed ("children" by default).
    :raises JsonLdCollectionMissing: if JSON-LD expansion yields nothing.
    :return: The parsed collection.
    """
    expanded = jsonld.expand(resource)
    if not expanded:
        raise JsonLdCollectionMissing("Missing collection in JSON")
    body = expanded[0]
    collection = cls(identifier=resource["@id"], **additional_parameters)
    collection._parse_metadata(body)
    collection._parse_members(body, direction=direction, **additional_parameters)
    return collection
7,105
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/collections/dts/_base.py#L72-L95
[ "def", "open", "(", "self", ",", "file_path", ")", ":", "if", "self", ".", "is_opened", "(", ")", "and", "self", ".", "workbook", ".", "file_path", "==", "file_path", ":", "self", ".", "_logger", ".", "logger", ".", "debug", "(", "\"workbook already opened: {}\"", ".", "format", "(", "self", ".", "workbook", ".", "file_path", ")", ")", "return", "self", ".", "close", "(", ")", "self", ".", "_open", "(", "file_path", ")" ]
Copy current_context into lines up until lineno
def _filldown ( self , lineno ) : if self . line > lineno : # XXX decorated functions make us jump backwards. # understand this more return self . lines . extend ( self . current_context for _ in range ( self . line , lineno ) ) self . line = lineno
7,106
https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/python.py#L50-L61
[ "def", "_to_rest", "(", "model", ",", "includes", "=", "None", ")", ":", "includes", "=", "includes", "or", "[", "]", "sparse", "=", "goldman", ".", "sess", ".", "req", ".", "fields", ".", "get", "(", "model", ".", "rtype", ",", "[", "]", ")", "if", "sparse", ":", "sparse", "+=", "[", "model", ".", "rid_field", ",", "model", ".", "rtype_field", "]", "sparse", "+=", "includes", "props", "=", "model", ".", "to_primitive", "(", "load_rels", "=", "includes", ",", "sparse_fields", "=", "sparse", ",", ")", "props", "[", "'rid'", "]", "=", "props", ".", "pop", "(", "model", ".", "rid_field", ")", "props", "[", "'rtype'", "]", "=", "props", ".", "pop", "(", "model", ".", "rtype_field", ")", "_to_rest_hide", "(", "model", ",", "props", ")", "_to_rest_rels", "(", "model", ",", "props", ")", "return", "props" ]
Register the current node as a new context block
def _add_section(self, node):
    """Register the current node as a new context block, visit its
    children, then restore the previous context."""
    self._filldown(node.lineno)
    # Push a new context onto the stack.
    self.context.append(node.name)
    self._update_current_context()
    for child in iter_child_nodes(node):
        self.visit(child)
    # Restore the enclosing context.
    self.context.pop()
    self._update_current_context()
7,107
https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/python.py#L63-L78
[ "def", "remove_stale_javascripts", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing stale javascripts ...\"", ")", "for", "js", "in", "JAVASCRIPTS_TO_REMOVE", ":", "logger", ".", "info", "(", "\"Unregistering JS %s\"", "%", "js", ")", "portal", ".", "portal_javascripts", ".", "unregisterResource", "(", "js", ")" ]
Try to find a module name for a file path by stripping off a prefix found in sys . modules .
def _module_name(filename):
    """Derive a dotted module name for a file path by stripping off a
    prefix found in ``sys.path`` and the file suffix."""
    absfile = os.path.abspath(filename)
    candidate = filename
    # First matching sys.path entry (current dir included) wins.
    for entry in [''] + sys.path:
        prefix = os.path.abspath(entry)
        if absfile.startswith(prefix):
            candidate = absfile[len(prefix):]
            break
    return SUFFIX_RE.sub('', candidate).lstrip('/').replace('/', '.')
7,108
https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/python.py#L140-L155
[ "def", "LoadData", "(", "self", ",", "data", ",", "custom_properties", "=", "None", ")", ":", "self", ".", "__data", "=", "[", "]", "self", ".", "AppendData", "(", "data", ",", "custom_properties", ")" ]
Build a PythonFile given a dotted module name like a . b . c
def from_modulename(cls, module_name):
    """Build a PythonFile given a dotted module name like ``a.b.c``.

    :raises ValueError: if the module cannot be found on disk.
    """
    # XXX make this more robust (pyc files? zip archives? etc)
    slug = module_name.replace('.', '/')
    candidates = (slug + '.py', slug + '/__init__.py')
    # Always search from the current directory first, then sys.path.
    for base in [''] + sys.path:
        for candidate in candidates:
            fullpath = os.path.join(base, candidate)
            if os.path.exists(fullpath):
                return cls(fullpath, prefix=module_name)
    raise ValueError("Module not found: %s" % module_name)
7,109
https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/python.py#L158-L173
[ "def", "pvalues", "(", "self", ")", ":", "self", ".", "compute_statistics", "(", ")", "lml_alts", "=", "self", ".", "alt_lmls", "(", ")", "lml_null", "=", "self", ".", "null_lml", "(", ")", "lrs", "=", "-", "2", "*", "lml_null", "+", "2", "*", "asarray", "(", "lml_alts", ")", "from", "scipy", ".", "stats", "import", "chi2", "chi2", "=", "chi2", "(", "df", "=", "1", ")", "return", "chi2", ".", "sf", "(", "lrs", ")" ]
Return the 1 - offset right - open range of lines spanned by a particular context name .
def context_range(self, context):
    """Return the 1-offset, right-open range of lines spanned by ``context``.

    :raises ValueError: if the context does not appear in the file.
    """
    if not context.startswith(self.prefix):
        context = self.prefix + '.' + context
    lo = hi = None
    for lineno, line_context in enumerate(self.lines, 1):
        # context is hierarchical -- context spans itself
        # and any suffix.
        if line_context.startswith(context):
            if lo is None:
                lo = lineno
            hi = lineno
    if lo is None:
        raise ValueError(
            "Context %s does not exist in file %s" % (context, self.filename))
    return lo, hi + 1
7,110
https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/python.py#L179-L208
[ "def", "dumps", "(", "data", ",", "*", "*", "kwargs", ")", ":", "def", "_encoder", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "datetime", ".", "datetime", ")", ":", "return", "value", ".", "isoformat", "(", ")", "if", "hasattr", "(", "value", ",", "\"_data\"", ")", ":", "return", "value", ".", "_data", "raise", "TypeError", "(", "'Could not encode %r'", "%", "value", ")", "return", "json", ".", "dumps", "(", "data", ",", "default", "=", "_encoder", ",", "*", "*", "kwargs", ")" ]
Return the context for a given 1 - offset line number .
def context(self, line):
    """Return the context for a given 1-offset line number."""
    # XXX due to a limitation in Visitor,
    # non-python code after the last python code
    # in a file is not added to self.lines, so we
    # have to guard against IndexErrors.
    offset = line - 1
    return self.prefix if offset >= len(self.lines) else self.lines[offset]
7,111
https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/python.py#L210-L221
[ "def", "remove_stale_javascripts", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing stale javascripts ...\"", ")", "for", "js", "in", "JAVASCRIPTS_TO_REMOVE", ":", "logger", ".", "info", "(", "\"Unregistering JS %s\"", "%", "js", ")", "portal", ".", "portal_javascripts", ".", "unregisterResource", "(", "js", ")" ]
Writes the given property list to the appropriate file on disk and returns the absolute filename .
def write(label, plist, scope=USER):
    """Write the given property list to the appropriate file on disk.

    :param label: Job label used to compute the target filename.
    :param plist: The property list (dict-like) to serialize.
    :param scope: Launchd scope determining the target directory.
    :return: The absolute filename written to.
    """
    fname = compute_filename(label, scope)
    with open(fname, "wb") as f:
        # plistlib.writePlist was deprecated in Python 3.4 and removed
        # in 3.9; plistlib.dump is the supported replacement.
        plistlib.dump(plist, f)
    return fname
7,112
https://github.com/infothrill/python-launchd/blob/2cd50579e808851b116f5a26f9b871a32b65ce0e/launchd/plist.py#L49-L61
[ "def", "IOR", "(", "classical_reg1", ",", "classical_reg2", ")", ":", "left", ",", "right", "=", "unpack_reg_val_pair", "(", "classical_reg1", ",", "classical_reg2", ")", "return", "ClassicalInclusiveOr", "(", "left", ",", "right", ")" ]
calculate thermal pressure from thermal expansion and bulk modulus
def alphakt_pth(v, temp, v0, alpha0, k0, n, z, t_ref=300.,
                three_r=3. * constants.R):
    """Thermal pressure from thermal expansion and bulk modulus.

    Computes ``alpha0 * K0 * (T - T_ref)``; the remaining parameters are
    accepted for signature compatibility with the other EOS functions.
    """
    delta_t = temp - t_ref
    return alpha0 * k0 * delta_t
7,113
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_therm.py#L5-L21
[ "def", "filename", "(", "self", ")", ":", "client_id", ",", "__", "=", "os", ".", "path", ".", "splitext", "(", "self", ".", "video", ".", "client_video_id", ")", "file_name", "=", "u'{name}-{language}.{format}'", ".", "format", "(", "name", "=", "client_id", ",", "language", "=", "self", ".", "language_code", ",", "format", "=", "self", ".", "file_format", ")", ".", "replace", "(", "'\\n'", ",", "' '", ")", "return", "file_name" ]
Extracts output nodes from the standard output and standard error files
def _get_output_nodes(self, output_path, error_path):
    """Extract output nodes from the standard output and standard error files.

    :return: Tuple ``(success, output_nodes)`` where ``success`` is True
        only when the deposition status is SUCCESS.
    """
    status = cod_deposition_states.UNKNOWN
    messages = []
    if output_path is not None:
        with open(output_path) as handle:
            content = handle.read()
        status, message = CifCodDepositParser._deposit_result(content)
        messages.extend(message.split('\n'))
    if error_path is not None:
        with open(error_path) as handle:
            messages.extend(line.strip('\n') for line in handle.readlines())
    parameters = {'output_messages': messages, 'status': status}
    output_nodes = [('messages', Dict(dict=parameters))]
    return (status == cod_deposition_states.SUCCESS), output_nodes
7,114
https://github.com/aiidateam/aiida-codtools/blob/da5e4259b7a2e86cf0cc3f997e11dd36d445fa94/aiida_codtools/parsers/cif_cod_deposit.py#L35-L63
[ "def", "vapour_pressure", "(", "Temperature", ",", "element", ")", ":", "if", "element", "==", "\"Rb\"", ":", "Tmelt", "=", "39.30", "+", "273.15", "# K.", "if", "Temperature", "<", "Tmelt", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.857", "-", "4215.0", "/", "Temperature", ")", "# Torr.", "else", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.312", "-", "4040.0", "/", "Temperature", ")", "# Torr.", "elif", "element", "==", "\"Cs\"", ":", "Tmelt", "=", "28.5", "+", "273.15", "# K.", "if", "Temperature", "<", "Tmelt", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.711", "-", "3999.0", "/", "Temperature", ")", "# Torr.", "else", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.165", "-", "3830.0", "/", "Temperature", ")", "# Torr.", "else", ":", "s", "=", "str", "(", "element", ")", "s", "+=", "\" is not an element in the database for this function.\"", "raise", "ValueError", "(", "s", ")", "P", "=", "P", "*", "101325.0", "/", "760.0", "# Pascals.", "return", "P" ]
This method filter some of the rows where the value is found in each of the columns .
def filter_(self, columns, value):
    """Filter out rows where ``value`` is found in any of ``columns``.

    :param columns: Column names to filter on.
    :param value: Value whose rows are removed.
    :raises ValueError: if a column is not present in the DataFrame.
    :return: The filtered DataFrame (also stored back on ``self.data``).
    """
    for col in columns:
        if col not in self.data.columns:
            raise ValueError("Column %s not in DataFrame columns: %s" % (col, list(self.data)))
    for col in columns:
        # Filtering on empty data series doesn't make sense at all and also would raise an error
        series = self.data[col]
        if len(series) > 0 and len(series) != series.isnull().sum():
            self.data = self.data[self.data[col] != value]
    return self.data
7,115
https://github.com/chaoss/grimoirelab-cereslib/blob/5110e6ca490a4f24bec3124286ebf51fd4e08bdd/cereslib/dfutils/filter.py#L54-L75
[ "def", "upload", "(", "clk_json", ",", "project", ",", "apikey", ",", "server", ",", "output", ",", "verbose", ")", ":", "if", "verbose", ":", "log", "(", "\"Uploading CLK data from {}\"", ".", "format", "(", "clk_json", ".", "name", ")", ")", "log", "(", "\"To Entity Matching Server: {}\"", ".", "format", "(", "server", ")", ")", "log", "(", "\"Project ID: {}\"", ".", "format", "(", "project", ")", ")", "log", "(", "\"Uploading CLK data to the server\"", ")", "response", "=", "project_upload_clks", "(", "server", ",", "project", ",", "apikey", ",", "clk_json", ")", "if", "verbose", ":", "log", "(", "response", ")", "json", ".", "dump", "(", "response", ",", "output", ")" ]
Raise exception if directory does not exist .
def _check_directory ( directory ) : if directory is not None : if not exists ( directory ) : raise CommandError ( "Cannot run command - directory {0} does not exist" . format ( directory ) ) if not isdir ( directory ) : raise CommandError ( "Cannot run command - specified directory {0} is not a directory." . format ( directory ) )
7,116
https://github.com/crdoconnor/commandlib/blob/b630364fd7b0d189b388e22a7f43235d182e12e4/commandlib/utils.py#L5-L18
[ "def", "_rows_event_to_dict", "(", "e", ",", "stream", ")", ":", "pk_cols", "=", "e", ".", "primary_key", "if", "isinstance", "(", "e", ".", "primary_key", ",", "(", "list", ",", "tuple", ")", ")", "else", "(", "e", ".", "primary_key", ",", ")", "if", "isinstance", "(", "e", ",", "row_event", ".", "UpdateRowsEvent", ")", ":", "sig", "=", "signals", ".", "rows_updated", "action", "=", "'update'", "row_converter", "=", "_convert_update_row", "elif", "isinstance", "(", "e", ",", "row_event", ".", "WriteRowsEvent", ")", ":", "sig", "=", "signals", ".", "rows_inserted", "action", "=", "'insert'", "row_converter", "=", "_convert_write_row", "elif", "isinstance", "(", "e", ",", "row_event", ".", "DeleteRowsEvent", ")", ":", "sig", "=", "signals", ".", "rows_deleted", "action", "=", "'delete'", "row_converter", "=", "_convert_write_row", "else", ":", "assert", "False", ",", "'Invalid binlog event'", "meta", "=", "{", "'time'", ":", "e", ".", "timestamp", ",", "'log_pos'", ":", "stream", ".", "log_pos", ",", "'log_file'", ":", "stream", ".", "log_file", ",", "'schema'", ":", "e", ".", "schema", ",", "'table'", ":", "e", ".", "table", ",", "'action'", ":", "action", ",", "}", "rows", "=", "list", "(", "map", "(", "row_converter", ",", "e", ".", "rows", ")", ")", "for", "row", "in", "rows", ":", "row", "[", "'keys'", "]", "=", "{", "k", ":", "row", "[", "'values'", "]", "[", "k", "]", "for", "k", "in", "pk_cols", "}", "return", "rows", ",", "meta" ]
r Extract the cached tweets database if necessary and load + parse the json .
def load_tweets(filename='tweets.zip'):
    r"""Extract the cached tweets database if necessary and load + parse the json.

    :param filename: Archive name inside DATA_PATH; the matching ``.json``
        is extracted next to it if not already present.
    :return: The deserialized JSON object.
    """
    basename, ext = os.path.splitext(filename)
    json_file = basename + '.json'
    json_path = os.path.join(DATA_PATH, json_file)
    zip_path = os.path.join(DATA_PATH, basename + '.zip')
    if not os.path.isfile(json_path):
        # Use a context manager so the archive handle is closed
        # (the original leaked it).
        with ZipFile(zip_path, 'r') as zf:
            zf.extract(json_file, DATA_PATH)
    # Bug fix: 'rUb' is an invalid mode on Python 3 ('U' cannot be
    # combined with 'b'); json.load accepts a binary file directly.
    with open(json_path, 'rb') as f:
        return json.load(f)
7,117
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/features.py#L21-L249
[ "def", "nl_socket_modify_err_cb", "(", "sk", ",", "kind", ",", "func", ",", "arg", ")", ":", "return", "int", "(", "nl_cb_err", "(", "sk", ".", "s_cb", ",", "kind", ",", "func", ",", "arg", ")", ")" ]
API with args object containing configuration parameters
def main(args):
    """API with args object containing configuration parameters.

    Combines tweets into one file, drops mostly-NaN columns, saves the
    result and sets aside geo-tagged tweets; returns ``(df, geo)``.
    """
    global logging, log
    args = parse_args(args)
    logging.basicConfig(
        format=LOG_FORMAT,
        level=logging.DEBUG if args.verbose else logging.INFO,
        stream=sys.stdout)
    df = cat_tweets(path=args.path, verbosity=args.verbose + 1,
                    numtweets=args.numtweets, ignore_suspicious=False)
    log.info('Combined {} tweets'.format(len(df)))
    df = drop_nan_columns(df)
    save_tweets(df, path=args.path, filename=args.tweetfile)
    geo = get_geo(df, path=args.path, filename=args.geofile)
    log.info("Combined {} tweets into a single file {} and set asside {} geo tweets in {}".format(
        len(df), args.tweetfile, len(geo), args.geofile))
    return df, geo
7,118
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/scripts/cat_tweets.py#L90-L105
[ "def", "display", "(", "self", ")", ":", "self", ".", "compositor", ".", "waitGetPoses", "(", "self", ".", "poses", ",", "openvr", ".", "k_unMaxTrackedDeviceCount", ",", "None", ",", "0", ")", "hmd_pose0", "=", "self", ".", "poses", "[", "openvr", ".", "k_unTrackedDeviceIndex_Hmd", "]", "if", "not", "hmd_pose0", ".", "bPoseIsValid", ":", "return", "# hmd_pose = hmd_pose0.mDeviceToAbsoluteTracking\r", "# 1) On-screen render:\r", "if", "True", ":", "glClearColor", "(", "0.8", ",", "0.4", ",", "0.4", ",", "0", ")", "# Pink background\r", "glClear", "(", "GL_COLOR_BUFFER_BIT", ")", "# glutSwapBuffers()\r", "glFlush", "(", ")", "# Single buffer\r", "# 2) VR render\r", "# TODO: render different things to each eye\r", "glBindFramebuffer", "(", "GL_FRAMEBUFFER", ",", "self", ".", "fb", ")", "glClearColor", "(", "0.8", ",", "0.4", ",", "0.4", ",", "0", ")", "# Pink background\r", "glClear", "(", "GL_COLOR_BUFFER_BIT", ")", "glBindFramebuffer", "(", "GL_FRAMEBUFFER", ",", "0", ")", "#\r", "# TODO: use different textures for each eye\r", "self", ".", "compositor", ".", "submit", "(", "openvr", ".", "Eye_Left", ",", "self", ".", "texture", ")", "self", ".", "compositor", ".", "submit", "(", "openvr", ".", "Eye_Right", ",", "self", ".", "texture", ")", "glBindFramebuffer", "(", "GL_FRAMEBUFFER", ",", "0", ")" ]
Drop columns that are mostly NaNs
def drop_nan_columns(df, thresh=325):
    """Drop columns that are mostly NaNs.

    :param df: pandas.DataFrame to prune.
    :param thresh: Minimum number of non-NaN values a column must have to
        be kept. A value below 1 is interpreted as a fraction of the
        number of rows.
    :return: A new DataFrame; ``df`` itself is not modified.
    """
    if thresh < 1:
        # Bug fix: the fractional threshold must be scaled by the number
        # of rows, not multiplied by the DataFrame object itself.
        thresh = int(thresh * len(df))
    return df.dropna(axis=1, thresh=thresh, inplace=False)
7,119
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/scripts/cat_tweets.py#L184-L191
[ "def", "parse_url", "(", "url", ")", ":", "parsed", "=", "url", "if", "not", "url", ".", "startswith", "(", "\"http://\"", ")", "and", "not", "url", ".", "startswith", "(", "\"https://\"", ")", ":", "# if url is like www.yahoo.com", "parsed", "=", "\"http://\"", "+", "parsed", "elif", "url", ".", "startswith", "(", "\"https://\"", ")", ":", "parsed", "=", "parsed", "[", "8", ":", "]", "parsed", "=", "\"http://\"", "+", "parsed", "index_hash", "=", "parsed", ".", "rfind", "(", "\"#\"", ")", "# remove trailing #", "index_slash", "=", "parsed", ".", "rfind", "(", "\"/\"", ")", "if", "index_hash", ">", "index_slash", ":", "parsed", "=", "parsed", "[", "0", ":", "index_hash", "]", "return", "parsed" ]
We can just link an old backup entry
def fast_deduplication_backup(self, old_backup_entry, process_bar):
    """Back up the current file by hard-linking to an existing backup entry.

    Falls back to a normal content-copy backup (``deduplication_backup``)
    if the hard link cannot be created; in that case the old entry is
    marked with ``no_link_source`` so it is not tried again.

    :param old_backup_entry: Existing BackupEntry with identical content.
    :param process_bar: Progress bar updated with the file size.
    :raises BackupFileError: if the destination directory cannot be created.
    """
    # TODO: merge code with parts from deduplication_backup()
    src_path = self.dir_path.resolved_path
    log.debug("*** fast deduplication backup: '%s'", src_path)
    old_file_path = old_backup_entry.get_backup_path()
    if not self.path_helper.abs_dst_path.is_dir():
        try:
            self.path_helper.abs_dst_path.makedirs(mode=phlb_config.default_new_path_mode)
        except OSError as err:
            raise BackupFileError("Error creating out path: %s" % err)
    else:
        assert not self.path_helper.abs_dst_filepath.is_file(), ("Out file already exists: %r" % self.path_helper.abs_src_filepath)
    with self.path_helper.abs_dst_hash_filepath.open("w") as hash_file:
        try:
            old_file_path.link(self.path_helper.abs_dst_filepath)  # call os.link()
        except OSError as err:
            # Hard link failed (e.g. cross-device); record that and fall
            # back to a full copy backup.
            log.error("Can't link '%s' to '%s': %s" % (old_file_path, self.path_helper.abs_dst_filepath, err))
            log.info("Mark %r with 'no link source'.", old_backup_entry)
            old_backup_entry.no_link_source = True
            old_backup_entry.save()
            # do a normal copy backup
            self.deduplication_backup(process_bar)
            return
        hash_hexdigest = old_backup_entry.content_info.hash_hexdigest
        hash_file.write(hash_hexdigest)
    file_size = self.dir_path.stat.st_size
    if file_size > 0:
        # tqdm will not accept 0 bytes files ;)
        process_bar.update(file_size)
    BackupEntry.objects.create(
        backup_run=self.backup_run,
        backup_entry_path=self.path_helper.abs_dst_filepath,
        hash_hexdigest=hash_hexdigest,
    )
    if self._SIMULATE_SLOW_SPEED:
        log.error("Slow down speed for tests!")
        time.sleep(self._SIMULATE_SLOW_SPEED)
    self.fast_backup = True  # Was a fast backup used?
    self.file_linked = True
7,120
https://github.com/jedie/PyHardLinkBackup/blob/be28666834d2d9e3d8aac1b661cb2d5bd4056c29/PyHardLinkBackup/phlb/phlb_main.py#L100-L154
[ "def", "process_task", "(", "self", ")", ":", "if", "_debug", ":", "ClientSSM", ".", "_debug", "(", "\"process_task\"", ")", "if", "self", ".", "state", "==", "SEGMENTED_REQUEST", ":", "self", ".", "segmented_request_timeout", "(", ")", "elif", "self", ".", "state", "==", "AWAIT_CONFIRMATION", ":", "self", ".", "await_confirmation_timeout", "(", ")", "elif", "self", ".", "state", "==", "SEGMENTED_CONFIRMATION", ":", "self", ".", "segmented_confirmation_timeout", "(", ")", "elif", "self", ".", "state", "==", "COMPLETED", ":", "pass", "elif", "self", ".", "state", "==", "ABORTED", ":", "pass", "else", ":", "e", "=", "RuntimeError", "(", "\"invalid state\"", ")", "ClientSSM", ".", "_exception", "(", "\"exception: %r\"", ",", "e", ")", "raise", "e" ]
Backup the current file and compare the content .
def deduplication_backup ( self , process_bar ) : self . fast_backup = False # Was a fast backup used? src_path = self . dir_path . resolved_path log . debug ( "*** deduplication backup: '%s'" , src_path ) log . debug ( "abs_src_filepath: '%s'" , self . path_helper . abs_src_filepath ) log . debug ( "abs_dst_filepath: '%s'" , self . path_helper . abs_dst_filepath ) log . debug ( "abs_dst_hash_filepath: '%s'" , self . path_helper . abs_dst_hash_filepath ) log . debug ( "abs_dst_dir: '%s'" , self . path_helper . abs_dst_path ) if not self . path_helper . abs_dst_path . is_dir ( ) : try : self . path_helper . abs_dst_path . makedirs ( mode = phlb_config . default_new_path_mode ) except OSError as err : raise BackupFileError ( "Error creating out path: %s" % err ) else : assert not self . path_helper . abs_dst_filepath . is_file ( ) , ( "Out file already exists: %r" % self . path_helper . abs_src_filepath ) try : try : with self . path_helper . abs_src_filepath . open ( "rb" ) as in_file : with self . path_helper . abs_dst_hash_filepath . open ( "w" ) as hash_file : with self . path_helper . abs_dst_filepath . open ( "wb" ) as out_file : hash = self . _deduplication_backup ( self . dir_path , in_file , out_file , process_bar ) hash_hexdigest = hash . hexdigest ( ) hash_file . write ( hash_hexdigest ) except OSError as err : # FIXME: Better error message raise BackupFileError ( "Skip file %s error: %s" % ( self . path_helper . abs_src_filepath , err ) ) except KeyboardInterrupt : # Try to remove created files try : self . path_helper . abs_dst_filepath . unlink ( ) except OSError : pass try : self . path_helper . abs_dst_hash_filepath . unlink ( ) except OSError : pass raise KeyboardInterrupt old_backup_entry = deduplicate ( self . path_helper . abs_dst_filepath , hash_hexdigest ) if old_backup_entry is None : log . debug ( "File is unique." ) self . file_linked = False # Was a hardlink used? else : log . 
debug ( "File was deduplicated via hardlink to: %s" % old_backup_entry ) self . file_linked = True # Was a hardlink used? # set origin access/modified times to the new created backup file atime_ns = self . dir_path . stat . st_atime_ns mtime_ns = self . dir_path . stat . st_mtime_ns self . path_helper . abs_dst_filepath . utime ( ns = ( atime_ns , mtime_ns ) ) # call os.utime() log . debug ( "Set mtime to: %s" % mtime_ns ) BackupEntry . objects . create ( backup_run = self . backup_run , backup_entry_path = self . path_helper . abs_dst_filepath , hash_hexdigest = hash_hexdigest , ) self . fast_backup = False
7,121
https://github.com/jedie/PyHardLinkBackup/blob/be28666834d2d9e3d8aac1b661cb2d5bd4056c29/PyHardLinkBackup/phlb/phlb_main.py#L156-L225
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
Backup one dir item
def _backup_dir_item ( self , dir_path , process_bar ) : self . path_helper . set_src_filepath ( dir_path ) if self . path_helper . abs_src_filepath is None : self . total_errored_items += 1 log . info ( "Can't backup %r" , dir_path ) # self.summary(no, dir_path.stat.st_mtime, end=" ") if dir_path . is_symlink : self . summary ( "TODO Symlink: %s" % dir_path ) return if dir_path . resolve_error is not None : self . summary ( "TODO resolve error: %s" % dir_path . resolve_error ) pprint_path ( dir_path ) return if dir_path . different_path : self . summary ( "TODO different path:" ) pprint_path ( dir_path ) return if dir_path . is_dir : self . summary ( "TODO dir: %s" % dir_path ) elif dir_path . is_file : # self.summary("Normal file: %s", dir_path) file_backup = FileBackup ( dir_path , self . path_helper , self . backup_run ) old_backup_entry = self . fast_compare ( dir_path ) if old_backup_entry is not None : # We can just link the file from a old backup file_backup . fast_deduplication_backup ( old_backup_entry , process_bar ) else : file_backup . deduplication_backup ( process_bar ) assert file_backup . fast_backup is not None , dir_path . path assert file_backup . file_linked is not None , dir_path . path file_size = dir_path . stat . st_size if file_backup . file_linked : # os.link() was used self . total_file_link_count += 1 self . total_stined_bytes += file_size else : self . total_new_file_count += 1 self . total_new_bytes += file_size if file_backup . fast_backup : self . total_fast_backup += 1 else : self . summary ( "TODO:" % dir_path ) pprint_path ( dir_path )
7,122
https://github.com/jedie/PyHardLinkBackup/blob/be28666834d2d9e3d8aac1b661cb2d5bd4056c29/PyHardLinkBackup/phlb/phlb_main.py#L456-L511
[ "def", "_send_to_timeseries", "(", "self", ",", "message", ")", ":", "logging", ".", "debug", "(", "\"MESSAGE=\"", "+", "str", "(", "message", ")", ")", "result", "=", "None", "try", ":", "ws", "=", "self", ".", "_get_websocket", "(", ")", "ws", ".", "send", "(", "json", ".", "dumps", "(", "message", ")", ")", "result", "=", "ws", ".", "recv", "(", ")", "except", "(", "websocket", ".", "WebSocketConnectionClosedException", ",", "Exception", ")", "as", "e", ":", "logging", ".", "debug", "(", "\"Connection failed, will try again.\"", ")", "logging", ".", "debug", "(", "e", ")", "ws", "=", "self", ".", "_get_websocket", "(", "reuse", "=", "False", ")", "ws", ".", "send", "(", "json", ".", "dumps", "(", "message", ")", ")", "result", "=", "ws", ".", "recv", "(", ")", "logging", ".", "debug", "(", "\"RESULT=\"", "+", "str", "(", "result", ")", ")", "return", "result" ]
print some status information in between .
def print_update ( self ) : print ( "\r\n" ) now = datetime . datetime . now ( ) print ( "Update info: (from: %s)" % now . strftime ( "%c" ) ) current_total_size = self . total_stined_bytes + self . total_new_bytes if self . total_errored_items : print ( " * WARNING: %i omitted files!" % self . total_errored_items ) print ( " * fast backup: %i files" % self . total_fast_backup ) print ( " * new content saved: %i files (%s %.1f%%)" % ( self . total_new_file_count , human_filesize ( self . total_new_bytes ) , to_percent ( self . total_new_bytes , current_total_size ) , ) ) print ( " * stint space via hardlinks: %i files (%s %.1f%%)" % ( self . total_file_link_count , human_filesize ( self . total_stined_bytes ) , to_percent ( self . total_stined_bytes , current_total_size ) , ) ) duration = default_timer ( ) - self . start_time performance = current_total_size / duration / 1024.0 / 1024.0 print ( " * present performance: %.1fMB/s\n" % performance )
7,123
https://github.com/jedie/PyHardLinkBackup/blob/be28666834d2d9e3d8aac1b661cb2d5bd4056c29/PyHardLinkBackup/phlb/phlb_main.py#L551-L586
[ "def", "catalogFactory", "(", "name", ",", "*", "*", "kwargs", ")", ":", "fn", "=", "lambda", "member", ":", "inspect", ".", "isclass", "(", "member", ")", "and", "member", ".", "__module__", "==", "__name__", "catalogs", "=", "odict", "(", "inspect", ".", "getmembers", "(", "sys", ".", "modules", "[", "__name__", "]", ",", "fn", ")", ")", "if", "name", "not", "in", "list", "(", "catalogs", ".", "keys", "(", ")", ")", ":", "msg", "=", "\"%s not found in catalogs:\\n %s\"", "%", "(", "name", ",", "list", "(", "kernels", ".", "keys", "(", ")", ")", ")", "logger", ".", "error", "(", "msg", ")", "msg", "=", "\"Unrecognized catalog: %s\"", "%", "name", "raise", "Exception", "(", "msg", ")", "return", "catalogs", "[", "name", "]", "(", "*", "*", "kwargs", ")" ]
Run any cod - tools calculation for the given CifData node .
def cli ( code , cif , parameters , daemon ) : from aiida import orm from aiida . plugins import factories from aiida_codtools . common . cli import CliParameters , CliRunner from aiida_codtools . common . resources import get_default_options process = factories . CalculationFactory ( code . get_attribute ( 'input_plugin' ) ) parameters = CliParameters . from_string ( parameters ) . get_dictionary ( ) inputs = { 'cif' : cif , 'code' : code , 'metadata' : { 'options' : get_default_options ( ) } } if parameters : inputs [ 'parameters' ] = orm . Dict ( dict = parameters ) cli_runner = CliRunner ( process , inputs ) cli_runner . run ( daemon = daemon )
7,124
https://github.com/aiidateam/aiida-codtools/blob/da5e4259b7a2e86cf0cc3f997e11dd36d445fa94/aiida_codtools/cli/calculations/cod_tools.py#L22-L50
[ "def", "_get_fieldsets_post_form_or_formset", "(", "self", ",", "request", ",", "form", ",", "obj", "=", "None", ")", ":", "base_fields", "=", "self", ".", "replace_orig_field", "(", "form", ".", "base_fields", ".", "keys", "(", ")", ")", "fields", "=", "base_fields", "+", "list", "(", "self", ".", "get_readonly_fields", "(", "request", ",", "obj", ")", ")", "return", "[", "(", "None", ",", "{", "'fields'", ":", "self", ".", "replace_orig_field", "(", "fields", ")", "}", ")", "]" ]
DEPRECATED prepare OpenStack basic environment
def make ( parser ) : s = parser . add_subparsers ( title = 'commands' , metavar = 'COMMAND' , help = 'description' , ) def gen_pass_f ( args ) : gen_pass ( ) gen_pass_parser = s . add_parser ( 'gen-pass' , help = 'generate the password' ) gen_pass_parser . set_defaults ( func = gen_pass_f ) def cmd_f ( args ) : cmd ( args . user , args . hosts . split ( ',' ) , args . key_filename , args . password , args . run ) cmd_parser = s . add_parser ( 'cmd' , help = 'run command line on the target host' ) cmd_parser . add_argument ( '--run' , help = 'the command running on the remote node' , action = 'store' , default = None , dest = 'run' ) cmd_parser . set_defaults ( func = cmd_f )
7,125
https://github.com/jiasir/playback/blob/58b2a5d669dcfaa8cad50c544a4b068dcacf9b69/playback/cli/environment.py#L39-L57
[ "def", "read_data", "(", "self", ",", "blocksize", "=", "4096", ")", ":", "frames", "=", "ctypes", ".", "c_uint", "(", "blocksize", "//", "self", ".", "_client_fmt", ".", "mBytesPerFrame", ")", "buf", "=", "ctypes", ".", "create_string_buffer", "(", "blocksize", ")", "buflist", "=", "AudioBufferList", "(", ")", "buflist", ".", "mNumberBuffers", "=", "1", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mNumberChannels", "=", "self", ".", "_client_fmt", ".", "mChannelsPerFrame", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mDataByteSize", "=", "blocksize", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mData", "=", "ctypes", ".", "cast", "(", "buf", ",", "ctypes", ".", "c_void_p", ")", "while", "True", ":", "check", "(", "_coreaudio", ".", "ExtAudioFileRead", "(", "self", ".", "_obj", ",", "ctypes", ".", "byref", "(", "frames", ")", ",", "ctypes", ".", "byref", "(", "buflist", ")", ")", ")", "assert", "buflist", ".", "mNumberBuffers", "==", "1", "size", "=", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mDataByteSize", "if", "not", "size", ":", "break", "data", "=", "ctypes", ".", "cast", "(", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mData", ",", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_char", ")", ")", "blob", "=", "data", "[", ":", "size", "]", "yield", "blob" ]
Convert column name to filter .
def pw_converter ( handler , flt ) : import peewee as pw if isinstance ( flt , Filter ) : return flt model = handler . model field = getattr ( model , flt ) if isinstance ( field , pw . BooleanField ) : return PWBoolFilter ( flt ) if field . choices : choices = [ ( Filter . default , '---' ) ] + list ( field . choices ) return PWChoiceFilter ( flt , choices = choices ) return PWFilter ( flt )
7,126
https://github.com/klen/muffin-admin/blob/404dc8e5107e943b7c42fa21c679c34ddb4de1d5/muffin_admin/peewee.py#L24-L41
[ "def", "run", "(", "self", ")", ":", "port", ",", "tensorboard_process", "=", "self", ".", "create_tensorboard_process", "(", ")", "LOGGER", ".", "info", "(", "'TensorBoard 0.1.7 at http://localhost:{}'", ".", "format", "(", "port", ")", ")", "while", "not", "self", ".", "estimator", ".", "checkpoint_path", ":", "self", ".", "event", ".", "wait", "(", "1", ")", "with", "self", ".", "_temporary_directory", "(", ")", "as", "aws_sync_dir", ":", "while", "not", "self", ".", "event", ".", "is_set", "(", ")", ":", "args", "=", "[", "'aws'", ",", "'s3'", ",", "'sync'", ",", "self", ".", "estimator", ".", "checkpoint_path", ",", "aws_sync_dir", "]", "subprocess", ".", "call", "(", "args", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "self", ".", "_sync_directories", "(", "aws_sync_dir", ",", "self", ".", "logdir", ")", "self", ".", "event", ".", "wait", "(", "10", ")", "tensorboard_process", ".", "terminate", "(", ")" ]
Get a description .
def process ( self , * args , * * kwargs ) : super ( RawIDField , self ) . process ( * args , * * kwargs ) if self . object_data : self . description = self . description or str ( self . object_data )
7,127
https://github.com/klen/muffin-admin/blob/404dc8e5107e943b7c42fa21c679c34ddb4de1d5/muffin_admin/peewee.py#L53-L57
[ "def", "_assign_numbers", "(", "self", ")", ":", "first", "=", "self", ".", "select_related", "(", "'point_of_sales'", ",", "'receipt_type'", ")", ".", "first", "(", ")", "next_num", "=", "Receipt", ".", "objects", ".", "fetch_last_receipt_number", "(", "first", ".", "point_of_sales", ",", "first", ".", "receipt_type", ",", ")", "+", "1", "for", "receipt", "in", "self", ".", "filter", "(", "receipt_number__isnull", "=", "True", ")", ":", "# Atomically update receipt number", "Receipt", ".", "objects", ".", "filter", "(", "pk", "=", "receipt", ".", "id", ",", "receipt_number__isnull", "=", "True", ",", ")", ".", "update", "(", "receipt_number", "=", "next_num", ",", ")", "next_num", "+=", "1" ]
Get field value .
def _value ( self ) : if self . data is not None : value = self . data . _data . get ( self . field . to_field . name ) return str ( value ) return ''
7,128
https://github.com/klen/muffin-admin/blob/404dc8e5107e943b7c42fa21c679c34ddb4de1d5/muffin_admin/peewee.py#L59-L64
[ "def", "returnJobReqs", "(", "self", ",", "jobReqs", ")", ":", "# Since we are only reading this job's specific values from the state file, we don't", "# need a lock", "jobState", "=", "self", ".", "_JobState", "(", "self", ".", "_CacheState", ".", "_load", "(", "self", ".", "cacheStateFile", ")", ".", "jobState", "[", "self", ".", "jobID", "]", ")", "for", "x", "in", "list", "(", "jobState", ".", "jobSpecificFiles", ".", "keys", "(", ")", ")", ":", "self", ".", "deleteLocalFile", "(", "x", ")", "with", "self", ".", "_CacheState", ".", "open", "(", "self", ")", "as", "cacheInfo", ":", "cacheInfo", ".", "sigmaJob", "-=", "jobReqs" ]
Sort current collection .
def sort ( self , request , reverse = False ) : field = self . model . _meta . fields . get ( self . columns_sort ) if not field : return self . collection if reverse : field = field . desc ( ) return self . collection . order_by ( field )
7,129
https://github.com/klen/muffin-admin/blob/404dc8e5107e943b7c42fa21c679c34ddb4de1d5/muffin_admin/peewee.py#L157-L166
[ "def", "apply_spm_config", "(", "overrides", ",", "defaults", ")", ":", "opts", "=", "defaults", ".", "copy", "(", ")", "_adjust_log_file_override", "(", "overrides", ",", "defaults", "[", "'log_file'", "]", ")", "if", "overrides", ":", "opts", ".", "update", "(", "overrides", ")", "# Prepend root_dir to other paths", "prepend_root_dirs", "=", "[", "'formula_path'", ",", "'pillar_path'", ",", "'reactor_path'", ",", "'spm_cache_dir'", ",", "'spm_build_dir'", "]", "# These can be set to syslog, so, not actual paths on the system", "for", "config_key", "in", "(", "'spm_logfile'", ",", ")", ":", "log_setting", "=", "opts", ".", "get", "(", "config_key", ",", "''", ")", "if", "log_setting", "is", "None", ":", "continue", "if", "urlparse", "(", "log_setting", ")", ".", "scheme", "==", "''", ":", "prepend_root_dirs", ".", "append", "(", "config_key", ")", "prepend_root_dir", "(", "opts", ",", "prepend_root_dirs", ")", "return", "opts" ]
Get value from data .
def value ( self , data ) : value = data . get ( self . name ) if value : return int ( value ) return self . default
7,130
https://github.com/klen/muffin-admin/blob/404dc8e5107e943b7c42fa21c679c34ddb4de1d5/muffin_admin/peewee.py#L251-L256
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
Get the field names and their values from a node .
def get_fields ( node , fields_tag = "field_list" ) : fields_nodes = [ c for c in node . children if c . tagname == fields_tag ] if len ( fields_nodes ) == 0 : return { } assert len ( fields_nodes ) == 1 , "multiple nodes with tag " + fields_tag fields_node = fields_nodes [ 0 ] fields = [ { f . tagname : f . rawsource . strip ( ) for f in n . children } for n in fields_node . children if n . tagname == "field" ] return { f [ "field_name" ] : f [ "field_body" ] for f in fields }
7,131
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L81-L99
[ "def", "evolve_pn_spins", "(", "q", ",", "chiA0", ",", "chiB0", ",", "omega0", ",", "omegaTimesM_final", ",", "approximant", "=", "'SpinTaylorT4'", ",", "dt", "=", "0.1", ",", "spinO", "=", "7", ",", "phaseO", "=", "7", ")", ":", "omega", ",", "phi", ",", "chiA", ",", "chiB", ",", "lNhat", ",", "e1", "=", "lal_spin_evloution_wrapper", "(", "approximant", ",", "q", ",", "omega0", ",", "chiA0", ",", "chiB0", ",", "dt", ",", "spinO", ",", "phaseO", ")", "# Compute omega, inertial spins, angular momentum direction and orbital", "# phase when omega = omegaTimesM_final", "end_idx", "=", "np", ".", "argmin", "(", "np", ".", "abs", "(", "omega", "-", "omegaTimesM_final", ")", ")", "omegaTimesM_end", "=", "omega", "[", "end_idx", "]", "chiA_end", "=", "chiA", "[", "end_idx", "]", "chiB_end", "=", "chiB", "[", "end_idx", "]", "lNhat_end", "=", "lNhat", "[", "end_idx", "]", "phi_end", "=", "phi", "[", "end_idx", "]", "# Align the z-direction along orbital angular momentum direction", "# at end_idx. This moves us in to the coprecessing frame.", "q_copr_end", "=", "_utils", ".", "alignVec_quat", "(", "lNhat_end", ")", "chiA_end_copr", "=", "_utils", ".", "transformTimeDependentVector", "(", "np", ".", "array", "(", "[", "q_copr_end", "]", ")", ".", "T", ",", "np", ".", "array", "(", "[", "chiA_end", "]", ")", ".", "T", ",", "inverse", "=", "1", ")", ".", "T", "[", "0", "]", "chiB_end_copr", "=", "_utils", ".", "transformTimeDependentVector", "(", "np", ".", "array", "(", "[", "q_copr_end", "]", ")", ".", "T", ",", "np", ".", "array", "(", "[", "chiB_end", "]", ")", ".", "T", ",", "inverse", "=", "1", ")", ".", "T", "[", "0", "]", "return", "chiA_end_copr", ",", "chiB_end_copr", ",", "q_copr_end", ",", "phi_end", ",", "omegaTimesM_end" ]
Extract the signature from a docstring .
def extract_signature ( docstring ) : root = publish_doctree ( docstring , settings_overrides = { "report_level" : 5 } ) fields = get_fields ( root ) return fields . get ( SIG_FIELD )
7,132
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L102-L111
[ "def", "_write_value_failed", "(", "self", ",", "dbus_error", ")", ":", "error", "=", "_error_from_dbus_error", "(", "dbus_error", ")", "self", ".", "service", ".", "device", ".", "characteristic_write_value_failed", "(", "characteristic", "=", "self", ",", "error", "=", "error", ")" ]
Split a parameter types declaration into individual types .
def split_parameter_types ( parameters ) : if parameters == "" : return [ ] # only consider the top level commas, ignore the ones in [] commas = [ ] bracket_depth = 0 for i , char in enumerate ( parameters ) : if ( char == "," ) and ( bracket_depth == 0 ) : commas . append ( i ) elif char == "[" : bracket_depth += 1 elif char == "]" : bracket_depth -= 1 types = [ ] last_i = 0 for i in commas : types . append ( parameters [ last_i : i ] . strip ( ) ) last_i = i + 1 else : types . append ( parameters [ last_i : ] . strip ( ) ) return types
7,133
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L127-L158
[ "def", "update_experiment", "(", ")", ":", "experiment_config", "=", "Experiments", "(", ")", "experiment_dict", "=", "experiment_config", ".", "get_all_experiments", "(", ")", "if", "not", "experiment_dict", ":", "return", "None", "for", "key", "in", "experiment_dict", ".", "keys", "(", ")", ":", "if", "isinstance", "(", "experiment_dict", "[", "key", "]", ",", "dict", ")", ":", "if", "experiment_dict", "[", "key", "]", ".", "get", "(", "'status'", ")", "!=", "'STOPPED'", ":", "nni_config", "=", "Config", "(", "experiment_dict", "[", "key", "]", "[", "'fileName'", "]", ")", "rest_pid", "=", "nni_config", ".", "get_config", "(", "'restServerPid'", ")", "if", "not", "detect_process", "(", "rest_pid", ")", ":", "experiment_config", ".", "update_experiment", "(", "key", ",", "'status'", ",", "'STOPPED'", ")", "continue", "rest_port", "=", "nni_config", ".", "get_config", "(", "'restServerPort'", ")", "startTime", ",", "endTime", "=", "get_experiment_time", "(", "rest_port", ")", "if", "startTime", ":", "experiment_config", ".", "update_experiment", "(", "key", ",", "'startTime'", ",", "startTime", ")", "if", "endTime", ":", "experiment_config", ".", "update_experiment", "(", "key", ",", "'endTime'", ",", "endTime", ")", "status", "=", "get_experiment_status", "(", "rest_port", ")", "if", "status", ":", "experiment_config", ".", "update_experiment", "(", "key", ",", "'status'", ",", "status", ")" ]
Parse a signature into its input and return parameter types .
def parse_signature ( signature ) : if " -> " not in signature : # signature comment: no parameters, treat variable type as return type param_types , return_type = None , signature . strip ( ) else : lhs , return_type = [ s . strip ( ) for s in signature . split ( " -> " ) ] csv = lhs [ 1 : - 1 ] . strip ( ) # remove the parentheses around the parameter type list param_types = split_parameter_types ( csv ) requires = set ( _RE_QUALIFIED_TYPES . findall ( signature ) ) return param_types , return_type , requires
7,134
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L161-L179
[ "def", "_timeout_exceeded", "(", "self", ",", "start", ",", "msg", "=", "\"Timeout exceeded!\"", ")", ":", "if", "not", "start", ":", "# Must provide a comparison time", "return", "False", "if", "time", ".", "time", "(", ")", "-", "start", ">", "self", ".", "session_timeout", ":", "# session_timeout exceeded", "raise", "NetMikoTimeoutException", "(", "msg", ")", "return", "False" ]
Get the type aliases in the source .
def get_aliases ( lines ) : aliases = { } for line in lines : line = line . strip ( ) if len ( line ) > 0 and line . startswith ( SIG_ALIAS ) : _ , content = line . split ( SIG_ALIAS ) alias , signature = [ t . strip ( ) for t in content . split ( "=" ) ] aliases [ alias ] = signature return aliases
7,135
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L372-L386
[ "def", "prox_unity", "(", "X", ",", "step", ",", "axis", "=", "0", ")", ":", "return", "X", "/", "np", ".", "sum", "(", "X", ",", "axis", "=", "axis", ",", "keepdims", "=", "True", ")" ]
Get the stub code for a source code .
def get_stub ( source , generic = False ) : generator = StubGenerator ( source , generic = generic ) stub = generator . generate_stub ( ) return stub
7,136
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L745-L755
[ "def", "_GetPathSegmentIndexForOccurrenceWeights", "(", "self", ",", "occurrence_weights", ",", "value_weights", ")", ":", "largest_weight", "=", "occurrence_weights", ".", "GetLargestWeight", "(", ")", "if", "largest_weight", ">", "0", ":", "occurrence_weight_indexes", "=", "occurrence_weights", ".", "GetIndexesForWeight", "(", "largest_weight", ")", "number_of_occurrence_indexes", "=", "len", "(", "occurrence_weight_indexes", ")", "else", ":", "number_of_occurrence_indexes", "=", "0", "path_segment_index", "=", "None", "if", "number_of_occurrence_indexes", "==", "0", ":", "path_segment_index", "=", "self", ".", "_GetPathSegmentIndexForValueWeights", "(", "value_weights", ")", "elif", "number_of_occurrence_indexes", "==", "1", ":", "path_segment_index", "=", "occurrence_weight_indexes", "[", "0", "]", "else", ":", "largest_weight", "=", "0", "for", "occurrence_index", "in", "occurrence_weight_indexes", ":", "value_weight", "=", "value_weights", ".", "GetWeightForIndex", "(", "occurrence_index", ")", "if", "not", "path_segment_index", "or", "largest_weight", "<", "value_weight", ":", "largest_weight", "=", "value_weight", "path_segment_index", "=", "occurrence_index", "return", "path_segment_index" ]
Get source and stub paths for a module .
def get_mod_paths ( mod_name , out_dir ) : paths = [ ] try : mod = get_loader ( mod_name ) source = Path ( mod . path ) if source . name . endswith ( ".py" ) : source_rel = Path ( * mod_name . split ( "." ) ) if source . name == "__init__.py" : source_rel = source_rel . joinpath ( "__init__.py" ) destination = Path ( out_dir , source_rel . with_suffix ( ".pyi" ) ) paths . append ( ( source , destination ) ) except Exception as e : _logger . debug ( e ) _logger . warning ( "cannot handle module, skipping: %s" , mod_name ) return paths
7,137
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L758-L773
[ "def", "delete_group", "(", "group_id", ",", "purge_data", ",", "*", "*", "kwargs", ")", ":", "user_id", "=", "kwargs", ".", "get", "(", "'user_id'", ")", "try", ":", "group_i", "=", "db", ".", "DBSession", ".", "query", "(", "ResourceGroup", ")", ".", "filter", "(", "ResourceGroup", ".", "id", "==", "group_id", ")", ".", "one", "(", ")", "except", "NoResultFound", ":", "raise", "ResourceNotFoundError", "(", "\"Group %s not found\"", "%", "(", "group_id", ")", ")", "group_items", "=", "db", ".", "DBSession", ".", "query", "(", "ResourceGroupItem", ")", ".", "filter", "(", "ResourceGroupItem", ".", "group_id", "==", "group_id", ")", ".", "all", "(", ")", "for", "gi", "in", "group_items", ":", "db", ".", "DBSession", ".", "delete", "(", "gi", ")", "if", "purge_data", "==", "'Y'", ":", "_purge_datasets_unique_to_resource", "(", "'GROUP'", ",", "group_id", ")", "log", ".", "info", "(", "\"Deleting group %s, id=%s\"", ",", "group_i", ".", "name", ",", "group_id", ")", "group_i", ".", "network", ".", "check_write_permission", "(", "user_id", ")", "db", ".", "DBSession", ".", "delete", "(", "group_i", ")", "db", ".", "DBSession", ".", "flush", "(", ")" ]
Recursively get all source and stub paths for a package .
def get_pkg_paths ( pkg_name , out_dir ) : paths = [ ] try : pkg = import_module ( pkg_name ) if not hasattr ( pkg , "__path__" ) : return get_mod_paths ( pkg_name , out_dir ) for mod_info in walk_packages ( pkg . __path__ , pkg . __name__ + "." ) : mod_paths = get_mod_paths ( mod_info . name , out_dir ) paths . extend ( mod_paths ) except Exception as e : _logger . debug ( e ) _logger . warning ( "cannot handle package, skipping: %s" , pkg_name ) return paths
7,138
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L776-L789
[ "def", "create_calendar_event", "(", "self", ",", "calendar_event_context_code", ",", "calendar_event_child_event_data_X_context_code", "=", "None", ",", "calendar_event_child_event_data_X_end_at", "=", "None", ",", "calendar_event_child_event_data_X_start_at", "=", "None", ",", "calendar_event_description", "=", "None", ",", "calendar_event_duplicate_append_iterator", "=", "None", ",", "calendar_event_duplicate_count", "=", "None", ",", "calendar_event_duplicate_frequency", "=", "None", ",", "calendar_event_duplicate_interval", "=", "None", ",", "calendar_event_end_at", "=", "None", ",", "calendar_event_location_address", "=", "None", ",", "calendar_event_location_name", "=", "None", ",", "calendar_event_start_at", "=", "None", ",", "calendar_event_time_zone_edited", "=", "None", ",", "calendar_event_title", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - calendar_event[context_code]\r", "\"\"\"Context code of the course/group/user whose calendar this event should be\r\n added to.\"\"\"", "data", "[", "\"calendar_event[context_code]\"", "]", "=", "calendar_event_context_code", "# OPTIONAL - calendar_event[title]\r", "\"\"\"Short title for the calendar event.\"\"\"", "if", "calendar_event_title", "is", "not", "None", ":", "data", "[", "\"calendar_event[title]\"", "]", "=", "calendar_event_title", "# OPTIONAL - calendar_event[description]\r", "\"\"\"Longer HTML description of the event.\"\"\"", "if", "calendar_event_description", "is", "not", "None", ":", "data", "[", "\"calendar_event[description]\"", "]", "=", "calendar_event_description", "# OPTIONAL - calendar_event[start_at]\r", "\"\"\"Start date/time of the event.\"\"\"", "if", "calendar_event_start_at", "is", "not", "None", ":", "data", "[", "\"calendar_event[start_at]\"", "]", "=", "calendar_event_start_at", "# OPTIONAL - calendar_event[end_at]\r", "\"\"\"End date/time of the event.\"\"\"", "if", "calendar_event_end_at", "is", "not", "None", ":", 
"data", "[", "\"calendar_event[end_at]\"", "]", "=", "calendar_event_end_at", "# OPTIONAL - calendar_event[location_name]\r", "\"\"\"Location name of the event.\"\"\"", "if", "calendar_event_location_name", "is", "not", "None", ":", "data", "[", "\"calendar_event[location_name]\"", "]", "=", "calendar_event_location_name", "# OPTIONAL - calendar_event[location_address]\r", "\"\"\"Location address\"\"\"", "if", "calendar_event_location_address", "is", "not", "None", ":", "data", "[", "\"calendar_event[location_address]\"", "]", "=", "calendar_event_location_address", "# OPTIONAL - calendar_event[time_zone_edited]\r", "\"\"\"Time zone of the user editing the event. Allowed time zones are\r\n {http://www.iana.org/time-zones IANA time zones} or friendlier\r\n {http://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html Ruby on Rails time zones}.\"\"\"", "if", "calendar_event_time_zone_edited", "is", "not", "None", ":", "data", "[", "\"calendar_event[time_zone_edited]\"", "]", "=", "calendar_event_time_zone_edited", "# OPTIONAL - calendar_event[child_event_data][X][start_at]\r", "\"\"\"Section-level start time(s) if this is a course event. 
X can be any\r\n identifier, provided that it is consistent across the start_at, end_at\r\n and context_code\"\"\"", "if", "calendar_event_child_event_data_X_start_at", "is", "not", "None", ":", "data", "[", "\"calendar_event[child_event_data][X][start_at]\"", "]", "=", "calendar_event_child_event_data_X_start_at", "# OPTIONAL - calendar_event[child_event_data][X][end_at]\r", "\"\"\"Section-level end time(s) if this is a course event.\"\"\"", "if", "calendar_event_child_event_data_X_end_at", "is", "not", "None", ":", "data", "[", "\"calendar_event[child_event_data][X][end_at]\"", "]", "=", "calendar_event_child_event_data_X_end_at", "# OPTIONAL - calendar_event[child_event_data][X][context_code]\r", "\"\"\"Context code(s) corresponding to the section-level start and end time(s).\"\"\"", "if", "calendar_event_child_event_data_X_context_code", "is", "not", "None", ":", "data", "[", "\"calendar_event[child_event_data][X][context_code]\"", "]", "=", "calendar_event_child_event_data_X_context_code", "# OPTIONAL - calendar_event[duplicate][count]\r", "\"\"\"Number of times to copy/duplicate the event.\"\"\"", "if", "calendar_event_duplicate_count", "is", "not", "None", ":", "data", "[", "\"calendar_event[duplicate][count]\"", "]", "=", "calendar_event_duplicate_count", "# OPTIONAL - calendar_event[duplicate][interval]\r", "\"\"\"Defaults to 1 if duplicate `count` is set. The interval between the duplicated events.\"\"\"", "if", "calendar_event_duplicate_interval", "is", "not", "None", ":", "data", "[", "\"calendar_event[duplicate][interval]\"", "]", "=", "calendar_event_duplicate_interval", "# OPTIONAL - calendar_event[duplicate][frequency]\r", "\"\"\"Defaults to \"weekly\". 
The frequency at which to duplicate the event\"\"\"", "if", "calendar_event_duplicate_frequency", "is", "not", "None", ":", "self", ".", "_validate_enum", "(", "calendar_event_duplicate_frequency", ",", "[", "\"daily\"", ",", "\"weekly\"", ",", "\"monthly\"", "]", ")", "data", "[", "\"calendar_event[duplicate][frequency]\"", "]", "=", "calendar_event_duplicate_frequency", "# OPTIONAL - calendar_event[duplicate][append_iterator]\r", "\"\"\"Defaults to false. If set to `true`, an increasing counter number will be appended to the event title\r\n when the event is duplicated. (e.g. Event 1, Event 2, Event 3, etc)\"\"\"", "if", "calendar_event_duplicate_append_iterator", "is", "not", "None", ":", "data", "[", "\"calendar_event[duplicate][append_iterator]\"", "]", "=", "calendar_event_duplicate_append_iterator", "self", ".", "logger", ".", "debug", "(", "\"POST /api/v1/calendar_events with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"POST\"", ",", "\"/api/v1/calendar_events\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "no_data", "=", "True", ")" ]
Modify the docstring before generating documentation .
def process_docstring ( app , what , name , obj , options , lines ) : aliases = getattr ( app , "_sigaliases" , None ) if aliases is None : if what == "module" : aliases = get_aliases ( inspect . getsource ( obj ) . splitlines ( ) ) app . _sigaliases = aliases sig_marker = ":" + SIG_FIELD + ":" is_class = what in ( "class" , "exception" ) signature = extract_signature ( "\n" . join ( lines ) ) if signature is None : if not is_class : return init_method = getattr ( obj , "__init__" ) init_doc = init_method . __doc__ init_lines = init_doc . splitlines ( ) [ 1 : ] if len ( init_lines ) > 1 : init_doc = textwrap . dedent ( "\n" . join ( init_lines [ 1 : ] ) ) init_lines = init_doc . splitlines ( ) if sig_marker not in init_doc : return sig_started = False for line in init_lines : if line . lstrip ( ) . startswith ( sig_marker ) : sig_started = True if sig_started : lines . append ( line ) signature = extract_signature ( "\n" . join ( lines ) ) if is_class : obj = init_method param_types , rtype , _ = parse_signature ( signature ) param_names = [ p for p in inspect . signature ( obj ) . parameters ] if is_class and ( param_names [ 0 ] == "self" ) : del param_names [ 0 ] # if something goes wrong, don't insert parameter types if len ( param_names ) == len ( param_types ) : for name , type_ in zip ( param_names , param_types ) : find = ":param %(name)s:" % { "name" : name } alias = aliases . get ( type_ ) if alias is not None : type_ = "*%(type)s* :sup:`%(alias)s`" % { "type" : type_ , "alias" : alias } for i , line in enumerate ( lines ) : if line . startswith ( find ) : lines . insert ( i , ":type %(name)s: %(type)s" % { "name" : name , "type" : type_ } ) break if not is_class : for i , line in enumerate ( lines ) : if line . startswith ( ( ":return:" , ":returns:" ) ) : lines . insert ( i , ":rtype: " + rtype ) break # remove the signature field sig_start = 0 while sig_start < len ( lines ) : if lines [ sig_start ] . 
startswith ( sig_marker ) : break sig_start += 1 sig_end = sig_start + 1 while sig_end < len ( lines ) : if ( not lines [ sig_end ] ) or ( lines [ sig_end ] [ 0 ] != " " ) : break sig_end += 1 for i in reversed ( range ( sig_start , sig_end ) ) : del lines [ i ]
7,139
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L797-L874
[ "def", "_mmUpdateDutyCycles", "(", "self", ")", ":", "period", "=", "self", ".", "getDutyCyclePeriod", "(", ")", "unionSDRArray", "=", "numpy", ".", "zeros", "(", "self", ".", "getNumColumns", "(", ")", ")", "unionSDRArray", "[", "list", "(", "self", ".", "_mmTraces", "[", "\"unionSDR\"", "]", ".", "data", "[", "-", "1", "]", ")", "]", "=", "1", "self", ".", "_mmData", "[", "\"unionSDRDutyCycle\"", "]", "=", "UnionTemporalPoolerMonitorMixin", ".", "_mmUpdateDutyCyclesHelper", "(", "self", ".", "_mmData", "[", "\"unionSDRDutyCycle\"", "]", ",", "unionSDRArray", ",", "period", ")", "self", ".", "_mmData", "[", "\"persistenceDutyCycle\"", "]", "=", "UnionTemporalPoolerMonitorMixin", ".", "_mmUpdateDutyCyclesHelper", "(", "self", ".", "_mmData", "[", "\"persistenceDutyCycle\"", "]", ",", "self", ".", "_poolingActivation", ",", "period", ")" ]
Start the command line interface .
def main ( argv = None ) : parser = ArgumentParser ( prog = "pygenstub" ) parser . add_argument ( "--version" , action = "version" , version = "%(prog)s " + __version__ ) parser . add_argument ( "files" , nargs = "*" , help = "generate stubs for given files" ) parser . add_argument ( "-m" , "--module" , action = "append" , metavar = "MODULE" , dest = "modules" , default = [ ] , help = "generate stubs for given modules" , ) parser . add_argument ( "-o" , "--output" , metavar = "PATH" , dest = "out_dir" , help = "change the output directory" ) parser . add_argument ( "--generic" , action = "store_true" , default = False , help = "generate generic stubs" ) parser . add_argument ( "--debug" , action = "store_true" , help = "enable debug messages" ) argv = argv if argv is not None else sys . argv arguments = parser . parse_args ( argv [ 1 : ] ) # set debug mode if arguments . debug : logging . basicConfig ( level = logging . DEBUG ) _logger . debug ( "running in debug mode" ) out_dir = arguments . out_dir if arguments . out_dir is not None else "" if ( out_dir == "" ) and ( len ( arguments . modules ) > 0 ) : print ( "Output directory must be given when generating stubs for modules." ) sys . exit ( 1 ) modules = [ ] for path in arguments . files : paths = Path ( path ) . glob ( "**/*.py" ) if Path ( path ) . is_dir ( ) else [ Path ( path ) ] for source in paths : if str ( source ) . startswith ( os . path . pardir ) : source = source . absolute ( ) . resolve ( ) if ( out_dir != "" ) and source . is_absolute ( ) : source = source . relative_to ( source . root ) destination = Path ( out_dir , source . with_suffix ( ".pyi" ) ) modules . append ( ( source , destination ) ) for mod_name in arguments . modules : modules . extend ( get_pkg_paths ( mod_name , out_dir ) ) for source , destination in modules : _logger . info ( "generating stub for %s to path %s" , source , destination ) with source . open ( ) as f : code = f . 
read ( ) try : stub = get_stub ( code , generic = arguments . generic ) except Exception as e : print ( source , "-" , e , file = sys . stderr ) continue if stub != "" : if not destination . parent . exists ( ) : destination . parent . mkdir ( parents = True ) with destination . open ( "w" ) as f : f . write ( "# " + EDIT_WARNING + "\n\n" + stub )
7,140
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L888-L951
[ "def", "prepare_filename_decorator", "(", "fn", ")", ":", "@", "wraps", "(", "fn", ")", "def", "inner", "(", "self", ",", "normalized_url", ",", "request", ")", ":", "ext", "=", "settings", ".", "ROUGHPAGES_TEMPLATE_FILE_EXT", "if", "not", "normalized_url", ":", "normalized_url", "=", "settings", ".", "ROUGHPAGES_INDEX_FILENAME", "filenames", "=", "fn", "(", "self", ",", "normalized_url", ",", "request", ")", "filenames", "=", "[", "x", "+", "ext", "for", "x", "in", "filenames", "if", "x", "]", "return", "filenames", "return", "inner" ]
Add a variable node to this node .
def add_variable ( self , node ) : if node . name not in self . variable_names : self . variables . append ( node ) self . variable_names . add ( node . name ) node . parent = self
7,141
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L195-L204
[ "def", "parse_manifest", "(", "manifest_path", ")", ":", "samples", "=", "[", "]", "with", "open", "(", "manifest_path", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "if", "not", "line", ".", "isspace", "(", ")", "and", "not", "line", ".", "startswith", "(", "'#'", ")", ":", "sample", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "require", "(", "2", "<=", "len", "(", "sample", ")", "<=", "3", ",", "'Bad manifest format! '", "'Expected UUID\\tURL1\\t[URL2] (tab separated), got: {}'", ".", "format", "(", "sample", ")", ")", "uuid", "=", "sample", "[", "0", "]", "urls", "=", "sample", "[", "1", ":", "]", "for", "url", "in", "urls", ":", "require", "(", "urlparse", "(", "url", ")", ".", "scheme", "and", "urlparse", "(", "url", ")", ",", "'Invalid URL passed for {}'", ".", "format", "(", "url", ")", ")", "samples", ".", "append", "(", "[", "uuid", ",", "urls", "]", ")", "return", "samples" ]
Get the stub code for this node .
def get_code ( self ) : stub = [ ] for child in self . variables : stub . extend ( child . get_code ( ) ) if ( ( len ( self . variables ) > 0 ) and ( len ( self . children ) > 0 ) and ( not isinstance ( self , ClassNode ) ) ) : stub . append ( "" ) for child in self . children : stub . extend ( child . get_code ( ) ) return stub
7,142
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L215-L235
[ "def", "get_vocab", "(", "self", ",", "vocab_name", ",", "*", "*", "kwargs", ")", ":", "vocab_dict", "=", "self", ".", "__get_vocab_dict__", "(", "vocab_name", ",", "*", "*", "kwargs", ")", "filepaths", "=", "list", "(", "set", "(", "[", "os", ".", "path", ".", "join", "(", "self", ".", "cache_dir", ",", "vocab_dict", "[", "'filename'", "]", ")", ",", "os", ".", "path", ".", "join", "(", "self", ".", "vocab_dir", ",", "vocab_dict", "[", "'filename'", "]", ")", "]", ")", ")", "for", "path", "in", "filepaths", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "f_obj", ":", "vocab_dict", ".", "update", "(", "{", "\"name\"", ":", "vocab_name", ",", "\"data\"", ":", "f_obj", ".", "read", "(", ")", ",", "\"modified\"", ":", "os", ".", "path", ".", "getmtime", "(", "path", ")", "}", ")", "return", "vocab_dict", "download_locs", "=", "make_list", "(", "vocab_dict", ".", "get", "(", "'download'", ",", "[", "]", ")", ")", "for", "loc", "in", "download_locs", ":", "loc_web", "=", "urllib", ".", "request", ".", "urlopen", "(", "loc", ")", "# loc_file_date = date_parse(loc_web.info()['Last-Modified'])", "urllib", ".", "request", ".", "urlretrieve", "(", "loc", ",", "filepaths", "[", "0", "]", ")", "with", "open", "(", "filepaths", "[", "0", "]", ",", "'rb'", ")", "as", "f_obj", ":", "vocab_dict", ".", "update", "(", "{", "\"name\"", ":", "vocab_name", ",", "\"data\"", ":", "f_obj", ".", "read", "(", ")", ",", "\"modified\"", ":", "os", ".", "path", ".", "getmtime", "(", "filepaths", "[", "0", "]", ")", "}", ")", "return", "vocab_dict" ]
Get the stub code for this function .
def get_code ( self ) : stub = [ ] for deco in self . decorators : if ( deco in DECORATORS ) or deco . endswith ( ".setter" ) : stub . append ( "@" + deco ) parameters = [ ] for name , type_ , has_default in self . parameters : decl = "%(n)s%(t)s%(d)s" % { "n" : name , "t" : ": " + type_ if type_ else "" , "d" : " = ..." if has_default else "" , } parameters . append ( decl ) slots = { "a" : "async " if self . _async else "" , "n" : self . name , "p" : ", " . join ( parameters ) , "r" : self . rtype , } prototype = "%(a)sdef %(n)s(%(p)s) -> %(r)s: ..." % slots if len ( prototype ) <= LINE_LENGTH_LIMIT : stub . append ( prototype ) elif len ( INDENT + slots [ "p" ] ) <= LINE_LENGTH_LIMIT : stub . append ( "%(a)sdef %(n)s(" % slots ) stub . append ( INDENT + slots [ "p" ] ) stub . append ( ") -> %(r)s: ..." % slots ) else : stub . append ( "%(a)sdef %(n)s(" % slots ) for param in parameters : stub . append ( INDENT + param + "," ) stub . append ( ") -> %(r)s: ..." % slots ) return stub
7,143
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L290-L331
[ "def", "list_container_instance_groups", "(", "access_token", ",", "subscription_id", ",", "resource_group", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/resourcegroups/'", ",", "resource_group", ",", "'/providers/Microsoft.ContainerInstance/ContainerGroups'", ",", "'?api-version='", ",", "CONTAINER_API", "]", ")", "return", "do_get", "(", "endpoint", ",", "access_token", ")" ]
Get the stub code for this class .
def get_code ( self ) : stub = [ ] bases = ( "(" + ", " . join ( self . bases ) + ")" ) if len ( self . bases ) > 0 else "" slots = { "n" : self . name , "b" : bases } if ( len ( self . children ) == 0 ) and ( len ( self . variables ) == 0 ) : stub . append ( "class %(n)s%(b)s: ..." % slots ) else : stub . append ( "class %(n)s%(b)s:" % slots ) super_code = super ( ) . get_code ( ) if PY3 else StubNode . get_code ( self ) for line in super_code : stub . append ( INDENT + line ) return stub
7,144
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L353-L369
[ "def", "write_result_stream", "(", "result_stream", ",", "filename_prefix", "=", "None", ",", "results_per_file", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "result_stream", ",", "types", ".", "GeneratorType", ")", ":", "stream", "=", "result_stream", "else", ":", "stream", "=", "result_stream", ".", "stream", "(", ")", "file_time_formatter", "=", "\"%Y-%m-%dT%H_%M_%S\"", "if", "filename_prefix", "is", "None", ":", "filename_prefix", "=", "\"twitter_search_results\"", "if", "results_per_file", ":", "logger", ".", "info", "(", "\"chunking result stream to files with {} tweets per file\"", ".", "format", "(", "results_per_file", ")", ")", "chunked_stream", "=", "partition", "(", "stream", ",", "results_per_file", ",", "pad_none", "=", "True", ")", "for", "chunk", "in", "chunked_stream", ":", "chunk", "=", "filter", "(", "lambda", "x", ":", "x", "is", "not", "None", ",", "chunk", ")", "curr_datetime", "=", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "file_time_formatter", ")", ")", "_filename", "=", "\"{}_{}.json\"", ".", "format", "(", "filename_prefix", ",", "curr_datetime", ")", "yield", "from", "write_ndjson", "(", "_filename", ",", "chunk", ")", "else", ":", "curr_datetime", "=", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "file_time_formatter", ")", ")", "_filename", "=", "\"{}.json\"", ".", "format", "(", "filename_prefix", ")", "yield", "from", "write_ndjson", "(", "_filename", ",", "stream", ")" ]
Collect the type aliases in the source .
def collect_aliases ( self ) : self . aliases = get_aliases ( self . _code_lines ) for alias , signature in self . aliases . items ( ) : _ , _ , requires = parse_signature ( signature ) self . required_types |= requires self . defined_types |= { alias }
7,145
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L417-L426
[ "def", "prox_unity", "(", "X", ",", "step", ",", "axis", "=", "0", ")", ":", "return", "X", "/", "np", ".", "sum", "(", "X", ",", "axis", "=", "axis", ",", "keepdims", "=", "True", ")" ]
Visit an import node .
def visit_Import ( self , node ) : line = self . _code_lines [ node . lineno - 1 ] module_name = line . split ( "import" ) [ 0 ] . strip ( ) for name in node . names : imported_name = name . name if name . asname : imported_name = name . asname + "::" + imported_name self . imported_namespaces [ imported_name ] = module_name
7,146
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L428-L436
[ "def", "get_form_kwargs", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "super", "(", "ClassRegistrationView", ",", "self", ")", ".", "get_form_kwargs", "(", "*", "*", "kwargs", ")", "kwargs", "[", "'user'", "]", "=", "self", ".", "request", ".", "user", "if", "hasattr", "(", "self", ".", "request", ",", "'user'", ")", "else", "None", "listing", "=", "self", ".", "get_listing", "(", ")", "kwargs", ".", "update", "(", "{", "'openEvents'", ":", "listing", "[", "'openEvents'", "]", ",", "'closedEvents'", ":", "listing", "[", "'closedEvents'", "]", ",", "}", ")", "return", "kwargs" ]
Visit an from - import node .
def visit_ImportFrom ( self , node ) : line = self . _code_lines [ node . lineno - 1 ] module_name = line . split ( "from" ) [ 1 ] . split ( "import" ) [ 0 ] . strip ( ) for name in node . names : imported_name = name . name if name . asname : imported_name = name . asname + "::" + imported_name self . imported_names [ imported_name ] = module_name
7,147
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L438-L446
[ "def", "get_form_kwargs", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "super", "(", "ClassRegistrationView", ",", "self", ")", ".", "get_form_kwargs", "(", "*", "*", "kwargs", ")", "kwargs", "[", "'user'", "]", "=", "self", ".", "request", ".", "user", "if", "hasattr", "(", "self", ".", "request", ",", "'user'", ")", "else", "None", "listing", "=", "self", ".", "get_listing", "(", ")", "kwargs", ".", "update", "(", "{", "'openEvents'", ":", "listing", "[", "'openEvents'", "]", ",", "'closedEvents'", ":", "listing", "[", "'closedEvents'", "]", ",", "}", ")", "return", "kwargs" ]
Visit an assignment node .
def visit_Assign ( self , node ) : line = self . _code_lines [ node . lineno - 1 ] if SIG_COMMENT in line : line = _RE_COMMENT_IN_STRING . sub ( "" , line ) if ( SIG_COMMENT not in line ) and ( not self . generic ) : return if SIG_COMMENT in line : _ , signature = line . split ( SIG_COMMENT ) _ , return_type , requires = parse_signature ( signature ) self . required_types |= requires parent = self . _parents [ - 1 ] for var in node . targets : if isinstance ( var , ast . Name ) : name , p = var . id , parent elif ( isinstance ( var , ast . Attribute ) and isinstance ( var . value , ast . Name ) and ( var . value . id == "self" ) ) : name , p = var . attr , parent . parent else : name , p = None , None if name is not None : if self . generic : return_type = "Any" self . required_types . add ( return_type ) stub_node = VariableNode ( name , return_type ) p . add_variable ( stub_node )
7,148
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L448-L480
[ "def", "_render_jar_tool_args", "(", "self", ",", "options", ")", ":", "args", "=", "[", "]", "with", "temporary_dir", "(", ")", "as", "manifest_stage_dir", ":", "# relativize urls in canonical classpath, this needs to be stable too therefore", "# do not follow the symlinks because symlinks may vary from platform to platform.", "classpath", "=", "relativize_classpath", "(", "self", ".", "classpath", ",", "os", ".", "path", ".", "dirname", "(", "self", ".", "_path", ")", ",", "followlinks", "=", "False", ")", "def", "as_cli_entry", "(", "entry", ")", ":", "src", "=", "entry", ".", "materialize", "(", "manifest_stage_dir", ")", "return", "'{}={}'", ".", "format", "(", "src", ",", "entry", ".", "dest", ")", "if", "entry", ".", "dest", "else", "src", "files", "=", "[", "as_cli_entry", "(", "entry", ")", "for", "entry", "in", "self", ".", "_entries", "]", "if", "self", ".", "_entries", "else", "[", "]", "jars", "=", "self", ".", "_jars", "or", "[", "]", "with", "safe_args", "(", "classpath", ",", "options", ",", "delimiter", "=", "','", ")", "as", "classpath_args", ":", "with", "safe_args", "(", "files", ",", "options", ",", "delimiter", "=", "','", ")", "as", "files_args", ":", "with", "safe_args", "(", "jars", ",", "options", ",", "delimiter", "=", "','", ")", "as", "jars_args", ":", "# If you specify --manifest to jar-tool you cannot specify --main.", "if", "self", ".", "_manifest_entry", ":", "manifest_file", "=", "self", ".", "_manifest_entry", ".", "materialize", "(", "manifest_stage_dir", ")", "else", ":", "manifest_file", "=", "None", "if", "self", ".", "_main", "and", "manifest_file", ":", "main_arg", "=", "None", "with", "open", "(", "manifest_file", ",", "'a'", ")", "as", "f", ":", "f", ".", "write", "(", "\"Main-Class: {}\\n\"", ".", "format", "(", "self", ".", "_main", ")", ")", "else", ":", "main_arg", "=", "self", ".", "_main", "if", "main_arg", ":", "args", ".", "append", "(", "'-main={}'", ".", "format", "(", "self", ".", "_main", ")", ")", "if", 
"classpath_args", ":", "args", ".", "append", "(", "'-classpath={}'", ".", "format", "(", "','", ".", "join", "(", "classpath_args", ")", ")", ")", "if", "manifest_file", ":", "args", ".", "append", "(", "'-manifest={}'", ".", "format", "(", "manifest_file", ")", ")", "if", "files_args", ":", "args", ".", "append", "(", "'-files={}'", ".", "format", "(", "','", ".", "join", "(", "files_args", ")", ")", ")", "if", "jars_args", ":", "args", ".", "append", "(", "'-jars={}'", ".", "format", "(", "','", ".", "join", "(", "jars_args", ")", ")", ")", "yield", "args" ]
Visit a function node .
def visit_FunctionDef ( self , node ) : node = self . get_function_node ( node ) if node is not None : node . _async = False
7,149
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L577-L581
[ "def", "libvlc_log_set_file", "(", "p_instance", ",", "stream", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_log_set_file'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_log_set_file'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ",", ")", ",", "None", ",", "None", ",", "Instance", ",", "FILE_ptr", ")", "return", "f", "(", "p_instance", ",", "stream", ")" ]
Visit an async function node .
def visit_AsyncFunctionDef ( self , node ) : node = self . get_function_node ( node ) if node is not None : node . _async = True
7,150
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L583-L587
[ "def", "_match_directories", "(", "self", ",", "entries", ",", "root", ",", "regex_string", ")", ":", "self", ".", "log", "(", "u\"Matching directory names in paged hierarchy\"", ")", "self", ".", "log", "(", "[", "u\"Matching within '%s'\"", ",", "root", "]", ")", "self", ".", "log", "(", "[", "u\"Matching regex '%s'\"", ",", "regex_string", "]", ")", "regex", "=", "re", ".", "compile", "(", "r\"\"", "+", "regex_string", ")", "directories", "=", "set", "(", ")", "root_len", "=", "len", "(", "root", ")", "for", "entry", "in", "entries", ":", "# look only inside root dir", "if", "entry", ".", "startswith", "(", "root", ")", ":", "self", ".", "log", "(", "[", "u\"Examining '%s'\"", ",", "entry", "]", ")", "# remove common prefix root/", "entry", "=", "entry", "[", "root_len", "+", "1", ":", "]", "# split path", "entry_splitted", "=", "entry", ".", "split", "(", "os", ".", "sep", ")", "# match regex", "if", "(", "(", "len", "(", "entry_splitted", ")", ">=", "2", ")", "and", "(", "re", ".", "match", "(", "regex", ",", "entry_splitted", "[", "0", "]", ")", "is", "not", "None", ")", ")", ":", "directories", ".", "add", "(", "entry_splitted", "[", "0", "]", ")", "self", ".", "log", "(", "[", "u\"Match: '%s'\"", ",", "entry_splitted", "[", "0", "]", "]", ")", "else", ":", "self", ".", "log", "(", "[", "u\"No match: '%s'\"", ",", "entry", "]", ")", "return", "sorted", "(", "directories", ")" ]
Visit a class node .
def visit_ClassDef ( self , node ) : self . defined_types . add ( node . name ) bases = [ ] for n in node . bases : base_parts = [ ] while True : if not isinstance ( n , ast . Attribute ) : base_parts . append ( n . id ) break else : base_parts . append ( n . attr ) n = n . value bases . append ( "." . join ( base_parts [ : : - 1 ] ) ) self . required_types |= set ( bases ) signature = get_signature ( node ) stub_node = ClassNode ( node . name , bases = bases , signature = signature ) self . _parents [ - 1 ] . add_child ( stub_node ) self . _parents . append ( stub_node ) self . generic_visit ( node ) del self . _parents [ - 1 ]
7,151
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L589-L612
[ "def", "render", "(", "self", ",", "data", ",", "accepted_media_type", "=", "None", ",", "renderer_context", "=", "None", ")", ":", "if", "'SWAGGER_JSON_PATH'", "in", "os", ".", "environ", ":", "with", "io", ".", "open", "(", "os", ".", "environ", "[", "'SWAGGER_JSON_PATH'", "]", ",", "'rb'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", "else", ":", "return", "super", "(", "ConditionalOpenAPIRenderer", ",", "self", ")", ".", "render", "(", "data", ",", "accepted_media_type", ",", "renderer_context", ")" ]
Generate an import line .
def generate_import_from ( module_ , names ) : regular_names = [ n for n in names if "::" not in n ] as_names = [ n for n in names if "::" in n ] line = "" if len ( regular_names ) > 0 : slots = { "m" : module_ , "n" : ", " . join ( sorted ( regular_names ) ) } line = "from %(m)s import %(n)s" % slots if len ( line ) > LINE_LENGTH_LIMIT : slots [ "n" ] = INDENT + ( ",\n" + INDENT ) . join ( sorted ( regular_names ) ) + "," line = "from %(m)s import (\n%(n)s\n)" % slots if len ( as_names ) > 0 : line += "\n" for as_name in as_names : a , n = as_name . split ( "::" ) line += "from %(m)s import %(n)s as %(a)s" % { "m" : module_ , "n" : n , "a" : a } return line
7,152
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L615-L639
[ "def", "get_lateration_parameters", "(", "all_points", ",", "indices", ",", "index", ",", "edm", ",", "W", "=", "None", ")", ":", "if", "W", "is", "None", ":", "W", "=", "np", ".", "ones", "(", "edm", ".", "shape", ")", "# delete points that are not considered anchors", "anchors", "=", "np", ".", "delete", "(", "all_points", ",", "indices", ",", "axis", "=", "0", ")", "r2", "=", "np", ".", "delete", "(", "edm", "[", "index", ",", ":", "]", ",", "indices", ")", "w", "=", "np", ".", "delete", "(", "W", "[", "index", ",", ":", "]", ",", "indices", ")", "# set w to zero where measurements are invalid", "if", "np", ".", "isnan", "(", "r2", ")", ".", "any", "(", ")", ":", "nan_measurements", "=", "np", ".", "where", "(", "np", ".", "isnan", "(", "r2", ")", ")", "[", "0", "]", "r2", "[", "nan_measurements", "]", "=", "0.0", "w", "[", "nan_measurements", "]", "=", "0.0", "if", "np", ".", "isnan", "(", "w", ")", ".", "any", "(", ")", ":", "nan_measurements", "=", "np", ".", "where", "(", "np", ".", "isnan", "(", "w", ")", ")", "[", "0", "]", "r2", "[", "nan_measurements", "]", "=", "0.0", "w", "[", "nan_measurements", "]", "=", "0.0", "# delete anchors where weight is zero to avoid ill-conditioning", "missing_anchors", "=", "np", ".", "where", "(", "w", "==", "0.0", ")", "[", "0", "]", "w", "=", "np", ".", "asarray", "(", "np", ".", "delete", "(", "w", ",", "missing_anchors", ")", ")", "r2", "=", "np", ".", "asarray", "(", "np", ".", "delete", "(", "r2", ",", "missing_anchors", ")", ")", "w", ".", "resize", "(", "edm", ".", "shape", "[", "0", "]", "-", "len", "(", "indices", ")", "-", "len", "(", "missing_anchors", ")", ",", "1", ")", "r2", ".", "resize", "(", "edm", ".", "shape", "[", "0", "]", "-", "len", "(", "indices", ")", "-", "len", "(", "missing_anchors", ")", ",", "1", ")", "anchors", "=", "np", ".", "delete", "(", "anchors", ",", "missing_anchors", ",", "axis", "=", "0", ")", "assert", "w", ".", "shape", "[", "0", "]", "==", "anchors", ".", "shape", "[", "0", 
"]", "assert", "np", ".", "isnan", "(", "w", ")", ".", "any", "(", ")", "==", "False", "assert", "np", ".", "isnan", "(", "r2", ")", ".", "any", "(", ")", "==", "False", "return", "anchors", ",", "w", ",", "r2" ]
Returns True if the given request has permission to add an object . Can be overridden by the user in subclasses . By default we assume all staff users can use this action unless DJANGO_EXPORTS_REQUIRE_PERM is set to True in your django settings .
def has_csv_permission ( self , request , obj = None ) : if getattr ( settings , 'DJANGO_EXPORTS_REQUIRE_PERM' , None ) : opts = self . opts codename = '%s_%s' % ( 'csv' , opts . object_name . lower ( ) ) return request . user . has_perm ( "%s.%s" % ( opts . app_label , codename ) ) return True
7,153
https://github.com/rochapps/django-csv-exports/blob/efcdde401d66f38a64b37afa909bfc16a6c21e9e/django_csv_exports/admin.py#L54-L65
[ "def", "ping", "(", "self", ")", ":", "start", "=", "time", ".", "time", "(", ")", "self", ".", "_conn", ".", "ping", "(", ")", "return", "(", "time", ".", "time", "(", ")", "-", "start", ")" ]
Returns a new ImmutableDict instance with value associated with key . The implicit parameter is not modified .
def assoc ( self , key , value ) : copydict = ImmutableDict ( ) copydict . tree = self . tree . assoc ( hash ( key ) , ( key , value ) ) copydict . _length = self . _length + 1 return copydict
7,154
https://github.com/zhemao/funktown/blob/8d5c5a8bdad2b85b33b4cea3febd820c2657c375/funktown/dictionary.py#L22-L28
[ "def", "on_train_end", "(", "self", ",", "logs", ")", ":", "duration", "=", "timeit", ".", "default_timer", "(", ")", "-", "self", ".", "train_start", "print", "(", "'done, took {:.3f} seconds'", ".", "format", "(", "duration", ")", ")" ]
Takes the same arguments as the update method in the builtin dict class . However this version returns a new ImmutableDict instead of modifying in - place .
def update ( self , other = None , * * kwargs ) : copydict = ImmutableDict ( ) if other : vallist = [ ( hash ( key ) , ( key , other [ key ] ) ) for key in other ] else : vallist = [ ] if kwargs : vallist += [ ( hash ( key ) , ( key , kwargs [ key ] ) ) for key in kwargs ] copydict . tree = self . tree . multi_assoc ( vallist ) copydict . _length = iter_length ( copydict . tree ) return copydict
7,155
https://github.com/zhemao/funktown/blob/8d5c5a8bdad2b85b33b4cea3febd820c2657c375/funktown/dictionary.py#L30-L42
[ "def", "guest_start", "(", "self", ",", "userid", ")", ":", "requestData", "=", "\"PowerVM \"", "+", "userid", "+", "\" on\"", "with", "zvmutils", ".", "log_and_reraise_smt_request_failed", "(", ")", ":", "self", ".", "_request", "(", "requestData", ")" ]
Returns a new ImmutableDict with the given key removed .
def remove ( self , key ) : copydict = ImmutableDict ( ) copydict . tree = self . tree . remove ( hash ( key ) ) copydict . _length = self . _length - 1 return copydict
7,156
https://github.com/zhemao/funktown/blob/8d5c5a8bdad2b85b33b4cea3febd820c2657c375/funktown/dictionary.py#L44-L49
[ "def", "read_lease_config", "(", "self", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "api_path", "=", "'/v1/{mount_point}/config/lease'", ".", "format", "(", "mount_point", "=", "mount_point", ")", "response", "=", "self", ".", "_adapter", ".", "get", "(", "url", "=", "api_path", ",", ")", "return", "response", ".", "json", "(", ")" ]
Load and parse config file pass options to livestreamer
def _load_config ( self ) : config = SafeConfigParser ( ) config_file = os . path . join ( self . config_path , 'settings.ini' ) config . read ( config_file ) for option , type in list ( AVAILABLE_OPTIONS . items ( ) ) : if config . has_option ( 'DEFAULT' , option ) : if type == 'int' : value = config . getint ( 'DEFAULT' , option ) if type == 'float' : value = config . getfloat ( 'DEFAULT' , option ) if type == 'bool' : value = config . getboolean ( 'DEFAULT' , option ) if type == 'str' : value = config . get ( 'DEFAULT' , option ) self . livestreamer . set_option ( option , value )
7,157
https://github.com/thiagokokada/livedumper/blob/f6441283269b4a602cafea3be5cda9446fc64005/src/livedumper/dumper.py#L116-L134
[ "def", "local_regon_checksum", "(", "digits", ")", ":", "weights_for_check_digit", "=", "[", "2", ",", "4", ",", "8", ",", "5", ",", "0", ",", "9", ",", "7", ",", "3", ",", "6", ",", "1", ",", "2", ",", "4", ",", "8", "]", "check_digit", "=", "0", "for", "i", "in", "range", "(", "0", ",", "13", ")", ":", "check_digit", "+=", "weights_for_check_digit", "[", "i", "]", "*", "digits", "[", "i", "]", "check_digit", "%=", "11", "if", "check_digit", "==", "10", ":", "check_digit", "=", "0", "return", "check_digit" ]
Set the urn
def urn ( self , value : Union [ URN , str ] ) : if isinstance ( value , str ) : value = URN ( value ) elif not isinstance ( value , URN ) : raise TypeError ( "New urn must be string or {} instead of {}" . format ( type ( URN ) , type ( value ) ) ) self . _urn = value
7,158
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/text.py#L55-L66
[ "def", "run", "(", "self", ")", ":", "self", ".", "log_id", "=", "0", "# all active websockets and their state", "self", ".", "websocks", "=", "{", "}", "# all active python interpreter sessions", "self", ".", "pysessions", "=", "{", "}", "if", "self", ".", "DISABLE_REQUESTS_DEBUG_LOGS", ":", "disable_requests_debug_logs", "(", ")", "self", ".", "threadpool", "=", "ThreadPool", "(", "self", ".", "THREADPOOL_WORKERS", ")", "self", ".", "api", "=", "None", "# tornado app object", "base_handlers", "=", "self", ".", "prepare_base_handlers", "(", ")", "handlers", "=", "self", ".", "prepare_handlers", "(", ")", "self", ".", "template_loader", "=", "TemplateLoader", "(", "[", "resolve_path", "(", "self", ".", "TEMPLATE_PATH", ")", "]", ")", "_", "=", "self", ".", "prepare_template_loader", "(", "self", ".", "template_loader", ")", "if", "_", "is", "not", "None", ":", "self", ".", "template_loader", "=", "_", "shclass", "=", "CustomStaticFileHandler", "shclass", ".", "PATHS", ".", "append", "(", "resolve_path", "(", "self", ".", "STATIC_PATH", ")", ")", "_", "=", "self", ".", "prepare_static_paths", "(", "shclass", ".", "PATHS", ")", "if", "_", "is", "not", "None", ":", "shclass", ".", "PATHS", "=", "_", "self", ".", "static_handler_class", "=", "shclass", "self", ".", "nav_tabs", "=", "[", "(", "'Home'", ",", "'/'", ")", "]", "if", "self", ".", "args", ".", "debug", ":", "self", ".", "nav_tabs", "+=", "[", "(", "'Console'", ",", "'/console'", ")", ",", "(", "'Logs'", ",", "'/logs'", ")", "]", "self", ".", "nav_tabs", "=", "self", ".", "prepare_nav_tabs", "(", "self", ".", "nav_tabs", ")", "settings", "=", "{", "'static_path'", ":", "'<DUMMY-INEXISTENT-PATH>'", ",", "'static_handler_class'", ":", "self", ".", "static_handler_class", ",", "'template_loader'", ":", "self", ".", "template_loader", ",", "'compress_response'", ":", "True", ",", "'debug'", ":", "self", ".", "args", ".", "debug", ",", "}", "all_handlers", "=", "handlers", "+", "base_handlers", "self", ".", 
"app", "=", "self", ".", "APP_CLASS", "(", "*", "*", "settings", ")", "self", ".", "app", ".", "add_handlers", "(", "self", ".", "VIRTUAL_HOST", ",", "all_handlers", ")", "sys", ".", "funcserver", "=", "self", ".", "app", ".", "funcserver", "=", "self", "self", ".", "api", "=", "self", ".", "prepare_api", "(", ")", "if", "self", ".", "api", "is", "not", "None", "and", "not", "hasattr", "(", "self", ".", "api", ",", "'log'", ")", ":", "self", ".", "api", ".", "log", "=", "self", ".", "log", "if", "self", ".", "args", ".", "port", "!=", "0", ":", "self", ".", "app", ".", "listen", "(", "self", ".", "args", ".", "port", ")", "tornado", ".", "ioloop", ".", "IOLoop", ".", "instance", "(", ")", ".", "start", "(", ")" ]
Get easily a metadata from the CTS namespace
def get_cts_metadata ( self , key : str , lang : str = None ) -> Literal : return self . metadata . get_single ( RDF_NAMESPACES . CTS . term ( key ) , lang )
7,159
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/text.py#L68-L75
[ "def", "BuildChecks", "(", "self", ",", "request", ")", ":", "result", "=", "[", "]", "if", "request", ".", "HasField", "(", "\"start_time\"", ")", "or", "request", ".", "HasField", "(", "\"end_time\"", ")", ":", "def", "FilterTimestamp", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "file_stat", ".", "HasField", "(", "\"st_mtime\"", ")", "and", "(", "file_stat", ".", "st_mtime", "<", "request", ".", "start_time", "or", "file_stat", ".", "st_mtime", ">", "request", ".", "end_time", ")", "result", ".", "append", "(", "FilterTimestamp", ")", "if", "request", ".", "HasField", "(", "\"min_file_size\"", ")", "or", "request", ".", "HasField", "(", "\"max_file_size\"", ")", ":", "def", "FilterSize", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "file_stat", ".", "HasField", "(", "\"st_size\"", ")", "and", "(", "file_stat", ".", "st_size", "<", "request", ".", "min_file_size", "or", "file_stat", ".", "st_size", ">", "request", ".", "max_file_size", ")", "result", ".", "append", "(", "FilterSize", ")", "if", "request", ".", "HasField", "(", "\"perm_mode\"", ")", ":", "def", "FilterPerms", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "(", "file_stat", ".", "st_mode", "&", "request", ".", "perm_mask", ")", "!=", "request", ".", "perm_mode", "result", ".", "append", "(", "FilterPerms", ")", "if", "request", ".", "HasField", "(", "\"uid\"", ")", ":", "def", "FilterUID", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "file_stat", ".", "st_uid", "!=", "request", ".", "uid", "result", ".", "append", "(", "FilterUID", ")", "if", "request", ".", "HasField", "(", "\"gid\"", ")", ":", "def", "FilterGID", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "file_stat", ".", "st_gid", "!=", "request", ".", "gid", "result", ".", "append", "(", "FilterGID", ")", "if", "request", ".", "HasField", "(", "\"path_regex\"", ")", ":", "regex", "=", "request", ".", "path_regex", 
"def", "FilterPath", "(", "file_stat", ",", "regex", "=", "regex", ")", ":", "\"\"\"Suppress any filename not matching the regular expression.\"\"\"", "return", "not", "regex", ".", "Search", "(", "file_stat", ".", "pathspec", ".", "Basename", "(", ")", ")", "result", ".", "append", "(", "FilterPath", ")", "if", "request", ".", "HasField", "(", "\"data_regex\"", ")", ":", "def", "FilterData", "(", "file_stat", ",", "*", "*", "_", ")", ":", "\"\"\"Suppress files that do not match the content.\"\"\"", "return", "not", "self", ".", "TestFileContent", "(", "file_stat", ")", "result", ".", "append", "(", "FilterData", ")", "return", "result" ]
Set the object metadata using its collections recursively
def set_metadata_from_collection ( self , text_metadata : CtsTextMetadata ) : edition , work , textgroup = tuple ( ( [ text_metadata ] + text_metadata . parents ) [ : 3 ] ) for node in textgroup . metadata . get ( RDF_NAMESPACES . CTS . groupname ) : lang = node . language self . metadata . add ( RDF_NAMESPACES . CTS . groupname , lang = lang , value = str ( node ) ) self . set_creator ( str ( node ) , lang ) for node in work . metadata . get ( RDF_NAMESPACES . CTS . title ) : lang = node . language self . metadata . add ( RDF_NAMESPACES . CTS . title , lang = lang , value = str ( node ) ) self . set_title ( str ( node ) , lang ) for node in edition . metadata . get ( RDF_NAMESPACES . CTS . label ) : lang = node . language self . metadata . add ( RDF_NAMESPACES . CTS . label , lang = lang , value = str ( node ) ) self . set_subject ( str ( node ) , lang ) for node in edition . metadata . get ( RDF_NAMESPACES . CTS . description ) : lang = node . language self . metadata . add ( RDF_NAMESPACES . CTS . description , lang = lang , value = str ( node ) ) self . set_description ( str ( node ) , lang ) if not self . citation . is_set ( ) and edition . citation . is_set ( ) : self . citation = edition . citation
7,160
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/text.py#L94-L123
[ "def", "_sendStatCmd", "(", "self", ",", "cmd", ")", ":", "try", ":", "self", ".", "_conn", ".", "write", "(", "\"%s\\r\\n\"", "%", "cmd", ")", "regex", "=", "re", ".", "compile", "(", "'^(END|ERROR)\\r\\n'", ",", "re", ".", "MULTILINE", ")", "(", "idx", ",", "mobj", ",", "text", ")", "=", "self", ".", "_conn", ".", "expect", "(", "[", "regex", ",", "]", ",", "self", ".", "_timeout", ")", "#@UnusedVariable", "except", ":", "raise", "Exception", "(", "\"Communication with %s failed\"", "%", "self", ".", "_instanceName", ")", "if", "mobj", "is", "not", "None", ":", "if", "mobj", ".", "group", "(", "1", ")", "==", "'END'", ":", "return", "text", ".", "splitlines", "(", ")", "[", ":", "-", "1", "]", "elif", "mobj", ".", "group", "(", "1", ")", "==", "'ERROR'", ":", "raise", "Exception", "(", "\"Protocol error in communication with %s.\"", "%", "self", ".", "_instanceName", ")", "else", ":", "raise", "Exception", "(", "\"Connection with %s timed out.\"", "%", "self", ".", "_instanceName", ")" ]
Creates a single datapoint dict with a value timestamp and tags .
def create_datapoint ( value , timestamp = None , * * tags ) : if timestamp is None : timestamp = time_millis ( ) if type ( timestamp ) is datetime : timestamp = datetime_to_time_millis ( timestamp ) item = { 'timestamp' : timestamp , 'value' : value } if tags is not None : item [ 'tags' ] = tags return item
7,161
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L362-L382
[ "def", "unshare", "(", "flags", ")", ":", "res", "=", "lib", ".", "unshare", "(", "flags", ")", "if", "res", "!=", "0", ":", "_check_error", "(", "ffi", ".", "errno", ")" ]
Create Hawkular - Metrics submittable structure .
def create_metric ( metric_type , metric_id , data ) : if not isinstance ( data , list ) : data = [ data ] return { 'type' : metric_type , 'id' : metric_id , 'data' : data }
7,162
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L384-L395
[ "def", "update_vpc_entry", "(", "nexus_ips", ",", "vpc_id", ",", "learned", ",", "active", ")", ":", "LOG", ".", "debug", "(", "\"update_vpc_entry called\"", ")", "session", "=", "bc", ".", "get_writer_session", "(", ")", "with", "session", ".", "begin", "(", ")", ":", "for", "n_ip", "in", "nexus_ips", ":", "flipit", "=", "not", "active", "x", "=", "session", ".", "execute", "(", "sa", ".", "update", "(", "nexus_models_v2", ".", "NexusVPCAlloc", ")", ".", "values", "(", "{", "'learned'", ":", "learned", ",", "'active'", ":", "active", "}", ")", ".", "where", "(", "sa", ".", "and_", "(", "nexus_models_v2", ".", "NexusVPCAlloc", ".", "switch_ip", "==", "n_ip", ",", "nexus_models_v2", ".", "NexusVPCAlloc", ".", "vpc_id", "==", "vpc_id", ",", "nexus_models_v2", ".", "NexusVPCAlloc", ".", "active", "==", "flipit", ")", ")", ")", "if", "x", ".", "rowcount", "!=", "1", ":", "raise", "c_exc", ".", "NexusVPCAllocNotFound", "(", "switch_ip", "=", "n_ip", ",", "vpc_id", "=", "vpc_id", ",", "active", "=", "active", ")" ]
Send multiple different metric_ids to the server in a single batch . Metrics can be a mixture of types .
def put ( self , data ) : if not isinstance ( data , list ) : data = [ data ] r = collections . defaultdict ( list ) for d in data : metric_type = d . pop ( 'type' , None ) if metric_type is None : raise HawkularError ( 'Undefined MetricType' ) r [ metric_type ] . append ( d ) # This isn't transactional, but .. ouh well. One can always repost everything. for l in r : self . _post ( self . _get_metrics_raw_url ( self . _get_url ( l ) ) , r [ l ] , parse_json = False )
7,163
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L109-L129
[ "def", "solidity_get_contract_key", "(", "all_contracts", ",", "filepath", ",", "contract_name", ")", ":", "if", "contract_name", "in", "all_contracts", ":", "return", "contract_name", "else", ":", "if", "filepath", "is", "None", ":", "filename", "=", "'<stdin>'", "else", ":", "_", ",", "filename", "=", "os", ".", "path", ".", "split", "(", "filepath", ")", "contract_key", "=", "filename", "+", "\":\"", "+", "contract_name", "return", "contract_key", "if", "contract_key", "in", "all_contracts", "else", "None" ]
Pushes a single metric_id datapoint combination to the server .
def push ( self , metric_type , metric_id , value , timestamp = None ) : if type ( timestamp ) is datetime : timestamp = datetime_to_time_millis ( timestamp ) item = create_metric ( metric_type , metric_id , create_datapoint ( value , timestamp ) ) self . put ( item )
7,164
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L131-L147
[ "def", "connect", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "db", "=", "sqlite3", ".", "connect", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "db", ".", "isolation_level", "=", "None" ]
Query for metrics datapoints from the server .
def query_metric ( self , metric_type , metric_id , start = None , end = None , * * query_options ) : if start is not None : if type ( start ) is datetime : query_options [ 'start' ] = datetime_to_time_millis ( start ) else : query_options [ 'start' ] = start if end is not None : if type ( end ) is datetime : query_options [ 'end' ] = datetime_to_time_millis ( end ) else : query_options [ 'end' ] = end return self . _get ( self . _get_metrics_raw_url ( self . _get_metrics_single_url ( metric_type , metric_id ) ) , * * query_options )
7,165
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L149-L174
[ "def", "symlink_bundles", "(", "self", ",", "app", ",", "bundle_dir", ")", ":", "for", "bundle_counter", ",", "bundle", "in", "enumerate", "(", "app", ".", "bundles", ")", ":", "count", "=", "0", "for", "path", ",", "relpath", "in", "bundle", ".", "filemap", ".", "items", "(", ")", ":", "bundle_path", "=", "os", ".", "path", ".", "join", "(", "bundle_dir", ",", "relpath", ")", "count", "+=", "1", "if", "os", ".", "path", ".", "exists", "(", "bundle_path", ")", ":", "continue", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "safe_mkdir", "(", "os", ".", "path", ".", "dirname", "(", "bundle_path", ")", ")", "os", ".", "symlink", "(", "path", ",", "bundle_path", ")", "elif", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "safe_mkdir", "(", "bundle_path", ")", "if", "count", "==", "0", ":", "raise", "TargetDefinitionException", "(", "app", ".", "target", ",", "'Bundle index {} of \"bundles\" field '", "'does not match any files.'", ".", "format", "(", "bundle_counter", ")", ")" ]
Query for metric aggregates from the server . This is called buckets in the Hawkular - Metrics documentation .
def query_metric_stats ( self , metric_type , metric_id = None , start = None , end = None , bucketDuration = None , * * query_options ) : if start is not None : if type ( start ) is datetime : query_options [ 'start' ] = datetime_to_time_millis ( start ) else : query_options [ 'start' ] = start if end is not None : if type ( end ) is datetime : query_options [ 'end' ] = datetime_to_time_millis ( end ) else : query_options [ 'end' ] = end if bucketDuration is not None : if type ( bucketDuration ) is timedelta : query_options [ 'bucketDuration' ] = timedelta_to_duration ( bucketDuration ) else : query_options [ 'bucketDuration' ] = bucketDuration if metric_id is not None : url = self . _get_metrics_stats_url ( self . _get_metrics_single_url ( metric_type , metric_id ) ) else : if len ( query_options ) < 0 : raise HawkularError ( 'Tags are required when querying without metric_id' ) url = self . _get_metrics_stats_url ( self . _get_url ( metric_type ) ) return self . _get ( url , * * query_options )
7,166
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L176-L212
[ "def", "create_serialization_dir", "(", "params", ":", "Params", ",", "serialization_dir", ":", "str", ",", "recover", ":", "bool", ",", "force", ":", "bool", ")", "->", "None", ":", "if", "recover", "and", "force", ":", "raise", "ConfigurationError", "(", "\"Illegal arguments: both force and recover are true.\"", ")", "if", "os", ".", "path", ".", "exists", "(", "serialization_dir", ")", "and", "force", ":", "shutil", ".", "rmtree", "(", "serialization_dir", ")", "if", "os", ".", "path", ".", "exists", "(", "serialization_dir", ")", "and", "os", ".", "listdir", "(", "serialization_dir", ")", ":", "if", "not", "recover", ":", "raise", "ConfigurationError", "(", "f\"Serialization directory ({serialization_dir}) already exists and is \"", "f\"not empty. Specify --recover to recover training from existing output.\"", ")", "logger", ".", "info", "(", "f\"Recovering from prior training at {serialization_dir}.\"", ")", "recovered_config_file", "=", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "CONFIG_NAME", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "recovered_config_file", ")", ":", "raise", "ConfigurationError", "(", "\"The serialization directory already exists but doesn't \"", "\"contain a config.json. You probably gave the wrong directory.\"", ")", "else", ":", "loaded_params", "=", "Params", ".", "from_file", "(", "recovered_config_file", ")", "# Check whether any of the training configuration differs from the configuration we are", "# resuming. 
If so, warn the user that training may fail.", "fail", "=", "False", "flat_params", "=", "params", ".", "as_flat_dict", "(", ")", "flat_loaded", "=", "loaded_params", ".", "as_flat_dict", "(", ")", "for", "key", "in", "flat_params", ".", "keys", "(", ")", "-", "flat_loaded", ".", "keys", "(", ")", ":", "logger", ".", "error", "(", "f\"Key '{key}' found in training configuration but not in the serialization \"", "f\"directory we're recovering from.\"", ")", "fail", "=", "True", "for", "key", "in", "flat_loaded", ".", "keys", "(", ")", "-", "flat_params", ".", "keys", "(", ")", ":", "logger", ".", "error", "(", "f\"Key '{key}' found in the serialization directory we're recovering from \"", "f\"but not in the training config.\"", ")", "fail", "=", "True", "for", "key", "in", "flat_params", ".", "keys", "(", ")", ":", "if", "flat_params", ".", "get", "(", "key", ",", "None", ")", "!=", "flat_loaded", ".", "get", "(", "key", ",", "None", ")", ":", "logger", ".", "error", "(", "f\"Value for '{key}' in training configuration does not match that the value in \"", "f\"the serialization directory we're recovering from: \"", "f\"{flat_params[key]} != {flat_loaded[key]}\"", ")", "fail", "=", "True", "if", "fail", ":", "raise", "ConfigurationError", "(", "\"Training configuration does not match the configuration we're \"", "\"recovering from.\"", ")", "else", ":", "if", "recover", ":", "raise", "ConfigurationError", "(", "f\"--recover specified but serialization_dir ({serialization_dir}) \"", "\"does not exist. There is nothing to recover from.\"", ")", "os", ".", "makedirs", "(", "serialization_dir", ",", "exist_ok", "=", "True", ")" ]
Query definition of a single metric id .
def query_metric_definition ( self , metric_type , metric_id ) : return self . _get ( self . _get_metrics_single_url ( metric_type , metric_id ) )
7,167
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L214-L221
[ "def", "setup_recovery", "(", "working_dir", ")", ":", "db", "=", "get_db_state", "(", "working_dir", ")", "bitcoind_session", "=", "get_bitcoind", "(", "new", "=", "True", ")", "assert", "bitcoind_session", "is", "not", "None", "_", ",", "current_block", "=", "virtualchain", ".", "get_index_range", "(", "'bitcoin'", ",", "bitcoind_session", ",", "virtualchain_hooks", ",", "working_dir", ")", "assert", "current_block", ",", "'Failed to connect to bitcoind'", "set_recovery_range", "(", "working_dir", ",", "db", ".", "lastblock", ",", "current_block", "-", "NUM_CONFIRMATIONS", ")", "return", "True" ]
Query available metric definitions .
def query_metric_definitions ( self , metric_type = None , id_filter = None , * * tags ) : params = { } if id_filter is not None : params [ 'id' ] = id_filter if metric_type is not None : params [ 'type' ] = MetricType . short ( metric_type ) if len ( tags ) > 0 : params [ 'tags' ] = self . _transform_tags ( * * tags ) return self . _get ( self . _get_url ( ) , * * params )
7,168
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L223-L242
[ "def", "asynchronize", "(", "framework", ",", "sync_method", ",", "doc", "=", "None", ",", "wrap_class", "=", "None", ",", "unwrap_class", "=", "None", ")", ":", "@", "functools", ".", "wraps", "(", "sync_method", ")", "def", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "unwrap_class", "is", "not", "None", ":", "# Don't call isinstance(), not checking subclasses.", "unwrapped_args", "=", "[", "obj", ".", "delegate", "if", "obj", ".", "__class__", ".", "__name__", ".", "endswith", "(", "(", "unwrap_class", ",", "'MotorClientSession'", ")", ")", "else", "obj", "for", "obj", "in", "args", "]", "unwrapped_kwargs", "=", "{", "key", ":", "(", "obj", ".", "delegate", "if", "obj", ".", "__class__", ".", "__name__", ".", "endswith", "(", "(", "unwrap_class", ",", "'MotorClientSession'", ")", ")", "else", "obj", ")", "for", "key", ",", "obj", "in", "kwargs", ".", "items", "(", ")", "}", "else", ":", "# For speed, don't call unwrap_args_session/unwrap_kwargs_session.", "unwrapped_args", "=", "[", "obj", ".", "delegate", "if", "obj", ".", "__class__", ".", "__name__", ".", "endswith", "(", "'MotorClientSession'", ")", "else", "obj", "for", "obj", "in", "args", "]", "unwrapped_kwargs", "=", "{", "key", ":", "(", "obj", ".", "delegate", "if", "obj", ".", "__class__", ".", "__name__", ".", "endswith", "(", "'MotorClientSession'", ")", "else", "obj", ")", "for", "key", ",", "obj", "in", "kwargs", ".", "items", "(", ")", "}", "loop", "=", "self", ".", "get_io_loop", "(", ")", "return", "framework", ".", "run_on_executor", "(", "loop", ",", "sync_method", ",", "self", ".", "delegate", ",", "*", "unwrapped_args", ",", "*", "*", "unwrapped_kwargs", ")", "if", "wrap_class", "is", "not", "None", ":", "method", "=", "framework", ".", "pymongo_class_wrapper", "(", "method", ",", "wrap_class", ")", "method", ".", "is_wrap_method", "=", "True", "# For Synchro.", "# This is for the benefit of motor_extensions.py, which needs this info to", "# generate 
documentation with Sphinx.", "method", ".", "is_async_method", "=", "True", "name", "=", "sync_method", ".", "__name__", "method", ".", "pymongo_method_name", "=", "name", "if", "doc", "is", "not", "None", ":", "method", ".", "__doc__", "=", "doc", "return", "method" ]
Query for possible tag values .
def query_tag_values ( self , metric_type = None , * * tags ) : tagql = self . _transform_tags ( * * tags ) return self . _get ( self . _get_metrics_tags_url ( self . _get_url ( metric_type ) ) + '/{}' . format ( tagql ) )
7,169
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L244-L253
[ "def", "feed", "(", "self", ")", ":", "line", "=", "self", ".", "testload", ".", "sentences", "[", "self", ".", "index", "%", "len", "(", "self", ".", "testload", ".", "sentences", ")", "]", "if", "\"%Delay:\"", "in", "line", ":", "# Delay specified number of seconds", "delay", "=", "line", ".", "split", "(", ")", "[", "1", "]", "time", ".", "sleep", "(", "int", "(", "delay", ")", ")", "# self.write has to be set by the derived class", "self", ".", "write", "(", "line", ")", "if", "self", ".", "progress", ":", "self", ".", "progress", "(", "\"gpsfake: %s feeds %d=%s\\n\"", "%", "(", "self", ".", "testload", ".", "name", ",", "len", "(", "line", ")", ",", "repr", "(", "line", ")", ")", ")", "time", ".", "sleep", "(", "WRITE_PAD", ")", "self", ".", "index", "+=", "1" ]
Returns a list of tags in the metric definition .
def query_metric_tags ( self , metric_type , metric_id ) : definition = self . _get ( self . _get_metrics_tags_url ( self . _get_metrics_single_url ( metric_type , metric_id ) ) ) return definition
7,170
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L284-L292
[ "def", "wrap_conn", "(", "conn_func", ")", ":", "def", "call", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "conn", "=", "conn_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "cursor_func", "=", "getattr", "(", "conn", ",", "CURSOR_WRAP_METHOD", ")", "wrapped", "=", "wrap_cursor", "(", "cursor_func", ")", "setattr", "(", "conn", ",", "cursor_func", ".", "__name__", ",", "wrapped", ")", "return", "conn", "except", "Exception", ":", "# pragma: NO COVER", "logging", ".", "warning", "(", "'Fail to wrap conn, mysql not traced.'", ")", "return", "conn_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "call" ]
Delete one or more tags from the metric definition .
def delete_metric_tags ( self , metric_type , metric_id , * * deleted_tags ) : tags = self . _transform_tags ( * * deleted_tags ) tags_url = self . _get_metrics_tags_url ( self . _get_metrics_single_url ( metric_type , metric_id ) ) + '/{0}' . format ( tags ) self . _delete ( tags_url )
7,171
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L304-L315
[ "def", "get_index_range", "(", "working_dir", ")", ":", "bitcoind_session", "=", "get_bitcoind", "(", "new", "=", "True", ")", "assert", "bitcoind_session", "is", "not", "None", "first_block", "=", "None", "last_block", "=", "None", "wait", "=", "1.0", "while", "last_block", "is", "None", "and", "is_running", "(", ")", ":", "first_block", ",", "last_block", "=", "virtualchain", ".", "get_index_range", "(", "'bitcoin'", ",", "bitcoind_session", ",", "virtualchain_hooks", ",", "working_dir", ")", "if", "first_block", "is", "None", "or", "last_block", "is", "None", ":", "# try to reconnnect", "log", ".", "error", "(", "\"Reconnect to bitcoind in {} seconds\"", ".", "format", "(", "wait", ")", ")", "time", ".", "sleep", "(", "wait", ")", "wait", "=", "min", "(", "wait", "*", "2.0", "+", "random", ".", "random", "(", ")", "*", "wait", ",", "60", ")", "bitcoind_session", "=", "get_bitcoind", "(", "new", "=", "True", ")", "continue", "else", ":", "return", "first_block", ",", "last_block", "-", "NUM_CONFIRMATIONS", "return", "None", ",", "None" ]
Create a tenant . Currently nothing can be set ( to be fixed after the master version of Hawkular - Metrics has fixed implementation .
def create_tenant ( self , tenant_id , retentions = None ) : item = { 'id' : tenant_id } if retentions is not None : item [ 'retentions' ] = retentions self . _post ( self . _get_tenants_url ( ) , json . dumps ( item , indent = 2 ) )
7,172
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L327-L338
[ "def", "files_comments_delete", "(", "self", ",", "*", ",", "file", ":", "str", ",", "id", ":", "str", ",", "*", "*", "kwargs", ")", "->", "SlackResponse", ":", "kwargs", ".", "update", "(", "{", "\"file\"", ":", "file", ",", "\"id\"", ":", "id", "}", ")", "return", "self", ".", "api_call", "(", "\"files.comments.delete\"", ",", "json", "=", "kwargs", ")" ]
Return an instance of the options dictionary with the minimally required parameters for a JobCalculation and set to default values unless overriden
def get_default_options ( num_machines = 1 , max_wallclock_seconds = 1800 , withmpi = False ) : return { 'resources' : { 'num_machines' : int ( num_machines ) } , 'max_wallclock_seconds' : int ( max_wallclock_seconds ) , 'withmpi' : withmpi , }
7,173
https://github.com/aiidateam/aiida-codtools/blob/da5e4259b7a2e86cf0cc3f997e11dd36d445fa94/aiida_codtools/common/resources.py#L5-L20
[ "def", "wait", "(", "self", ")", ":", "ret", "=", "os", ".", "read", "(", "self", ".", "event_fd", ",", "64", "/", "8", ")", "return", "struct", ".", "unpack", "(", "'Q'", ",", "ret", ")" ]
Take a screenshot after a failed step .
def take_screenshot ( self ) : if not self . failed : return browser = getattr ( world , 'browser' , None ) if not browser : return try : scenario_name = self . scenario . name scenario_index = self . scenario . feature . scenarios . index ( self . scenario ) + 1 except AttributeError : scenario_name = self . background . keyword scenario_index = 0 if self . outline is None : outline_index_str = '' else : outline_index = self . scenario . outlines . index ( self . outline ) + 1 outline_index_str = '_{}' . format ( outline_index ) base_name = FORMAT . format ( feature_file = os . path . relpath ( self . feature . filename ) , scenario_index = scenario_index , scenario_name = scenario_name , outline_index = outline_index_str , ) base_name = re . sub ( r'\W' , '_' , base_name , flags = re . UNICODE ) base_name = os . path . join ( DIRECTORY , base_name ) world . browser . save_screenshot ( '{}.png' . format ( base_name ) ) with open ( '{}.html' . format ( base_name ) , 'w' ) as page_source_file : page_source_file . write ( world . browser . page_source )
7,174
https://github.com/aloetesting/aloe_webdriver/blob/65d847da4bdc63f9c015cb19d4efdee87df8ffad/aloe_webdriver/screenshot_failed.py#L64-L100
[ "def", "load_from_package", "(", ")", ":", "try", ":", "import", "pkg_resources", "f", "=", "pkg_resources", ".", "resource_stream", "(", "meta", ".", "__app__", ",", "'cache/unicategories.cache'", ")", "dversion", ",", "mversion", ",", "data", "=", "pickle", ".", "load", "(", "f", ")", "if", "dversion", "==", "data_version", "and", "mversion", "==", "module_version", ":", "return", "data", "warnings", ".", "warn", "(", "'Unicode unicategories database is outdated. '", "'Please reinstall unicategories module to regenerate it.'", "if", "dversion", "<", "data_version", "else", "'Incompatible unicategories database. '", "'Please reinstall unicategories module to regenerate it.'", ")", "except", "(", "ValueError", ",", "EOFError", ")", ":", "warnings", ".", "warn", "(", "'Incompatible unicategories database. '", "'Please reinstall unicategories module to regenerate it.'", ")", "except", "(", "ImportError", ",", "FileNotFoundError", ")", ":", "pass" ]
calculate Kunc EOS see Dorogokupets 2015 for detail
def kunc_p ( v , v0 , k0 , k0p , order = 5 ) : return cal_p_kunc ( v , [ v0 , k0 , k0p ] , order = order , uncertainties = isuncertainties ( [ v , v0 , k0 , k0p ] ) )
7,175
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_kunc.py#L13-L26
[ "def", "update_config", "(", "updated_project", ")", ":", "home", "=", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "home", ",", "'.transfer'", ",", "'config.yaml'", ")", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "home", ",", "'.transfer'", ",", "'config.yaml'", ")", ",", "'r'", ")", "as", "fp", ":", "projects", "=", "yaml", ".", "load", "(", "fp", ".", "read", "(", ")", ")", "replace_index", "=", "-", "1", "for", "i", ",", "project", "in", "enumerate", "(", "projects", ")", ":", "if", "project", "[", "'name'", "]", "==", "updated_project", "[", "'name'", "]", ":", "replace_index", "=", "i", "if", "replace_index", ">", "-", "1", ":", "projects", "[", "replace_index", "]", "=", "updated_project", "store_config", "(", "projects", ")", "else", ":", "print", "(", "'Not saving configuration'", ")", "print", "(", "colored", "(", "'Project: '", "+", "updated_project", "[", "'name'", "]", "+", "' was not found in configured projects!'", ",", "'red'", ")", ")", "else", ":", "print", "(", "'Transfer is not configured.'", ")", "print", "(", "'Please run:'", ")", "print", "(", "''", ")", "print", "(", "colored", "(", "' transfer --configure'", ",", "'cyan'", ")", ")", "return" ]
calculate Kunc EOS see Dorogokupets2015 for functional form
def cal_p_kunc ( v , k , order = 5 , uncertainties = True ) : v0 = k [ 0 ] k0 = k [ 1 ] k0p = k [ 2 ] x = np . power ( v / v0 , 1. / 3. ) f1 = ( 1. - x ) / ( np . power ( x , order ) ) if uncertainties : f2 = unp . exp ( ( 1.5 * k0p - order + 0.5 ) * ( 1. - x ) ) else : f2 = np . exp ( ( 1.5 * k0p - order + 0.5 ) * ( 1. - x ) ) p = 3. * k0 * f1 * f2 return p
7,176
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_kunc.py#L29-L51
[ "def", "update_config", "(", "updated_project", ")", ":", "home", "=", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "home", ",", "'.transfer'", ",", "'config.yaml'", ")", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "home", ",", "'.transfer'", ",", "'config.yaml'", ")", ",", "'r'", ")", "as", "fp", ":", "projects", "=", "yaml", ".", "load", "(", "fp", ".", "read", "(", ")", ")", "replace_index", "=", "-", "1", "for", "i", ",", "project", "in", "enumerate", "(", "projects", ")", ":", "if", "project", "[", "'name'", "]", "==", "updated_project", "[", "'name'", "]", ":", "replace_index", "=", "i", "if", "replace_index", ">", "-", "1", ":", "projects", "[", "replace_index", "]", "=", "updated_project", "store_config", "(", "projects", ")", "else", ":", "print", "(", "'Not saving configuration'", ")", "print", "(", "colored", "(", "'Project: '", "+", "updated_project", "[", "'name'", "]", "+", "' was not found in configured projects!'", ",", "'red'", ")", ")", "else", ":", "print", "(", "'Transfer is not configured.'", ")", "print", "(", "'Please run:'", ")", "print", "(", "''", ")", "print", "(", "colored", "(", "' transfer --configure'", ",", "'cyan'", ")", ")", "return" ]
Recursively find all files in the indicated directory
def find_files ( path = '' , ext = '' , level = None , typ = list , dirs = False , files = True , verbosity = 0 ) : gen = generate_files ( path , ext = ext , level = level , dirs = dirs , files = files , verbosity = verbosity ) if isinstance ( typ ( ) , collections . Mapping ) : return typ ( ( ff [ 'path' ] , ff ) for ff in gen ) elif typ is not None : return typ ( gen ) else : return gen
7,177
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/futil.py#L111-L163
[ "def", "from_Composition", "(", "composition", ",", "width", "=", "80", ")", ":", "# Collect tunings", "instr_tunings", "=", "[", "]", "for", "track", "in", "composition", ":", "tun", "=", "track", ".", "get_tuning", "(", ")", "if", "tun", ":", "instr_tunings", ".", "append", "(", "tun", ")", "else", ":", "instr_tunings", ".", "append", "(", "default_tuning", ")", "result", "=", "add_headers", "(", "width", ",", "composition", ".", "title", ",", "composition", ".", "subtitle", ",", "composition", ".", "author", ",", "composition", ".", "email", ",", "composition", ".", "description", ",", "instr_tunings", ",", ")", "# Some variables", "w", "=", "_get_width", "(", "width", ")", "barindex", "=", "0", "bars", "=", "width", "/", "w", "lastlen", "=", "0", "maxlen", "=", "max", "(", "[", "len", "(", "x", ")", "for", "x", "in", "composition", ".", "tracks", "]", ")", "while", "barindex", "<", "maxlen", ":", "notfirst", "=", "False", "for", "tracks", "in", "composition", ":", "tuning", "=", "tracks", ".", "get_tuning", "(", ")", "ascii", "=", "[", "]", "for", "x", "in", "xrange", "(", "bars", ")", ":", "if", "barindex", "+", "x", "<", "len", "(", "tracks", ")", ":", "bar", "=", "tracks", "[", "barindex", "+", "x", "]", "r", "=", "from_Bar", "(", "bar", ",", "w", ",", "tuning", ",", "collapse", "=", "False", ")", "barstart", "=", "r", "[", "1", "]", ".", "find", "(", "'||'", ")", "+", "2", "# Add extra '||' to quarter note marks to connect tracks.", "if", "notfirst", ":", "r", "[", "0", "]", "=", "(", "r", "[", "0", "]", ")", "[", ":", "barstart", "-", "2", "]", "+", "'||'", "+", "(", "r", "[", "0", "]", ")", "[", "barstart", ":", "]", "# Add bar to ascii", "if", "ascii", "!=", "[", "]", ":", "for", "i", "in", "range", "(", "1", ",", "len", "(", "r", ")", "+", "1", ")", ":", "item", "=", "r", "[", "len", "(", "r", ")", "-", "i", "]", "ascii", "[", "-", "i", "]", "+=", "item", "[", "barstart", ":", "]", "else", ":", "ascii", "+=", "r", "# Add extra '||' to connect tracks", 
"if", "notfirst", "and", "ascii", "!=", "[", "]", ":", "pad", "=", "ascii", "[", "-", "1", "]", ".", "find", "(", "'||'", ")", "result", "+=", "[", "' '", "*", "pad", "+", "'||'", ",", "' '", "*", "pad", "+", "'||'", "]", "else", ":", "notfirst", "=", "True", "# Finally, add ascii to result", "result", "+=", "ascii", "result", "+=", "[", "''", ",", "''", ",", "''", "]", "barindex", "+=", "bars", "return", "os", ".", "linesep", ".", "join", "(", "result", ")" ]
Serialize the token and return it as bytes .
def serialize ( self ) : if type ( self . value ) == int : return "i{:X}s" . format ( self . value ) . encode ( 'ascii' ) if type ( self . value ) == str : value = self . value . encode ( 'utf-8' ) return "{:X}:" . format ( len ( value ) ) . encode ( 'ascii' ) + value if type ( self . value ) == bytes : value = base64 . standard_b64encode ( self . value ) return "u{:X}:" . format ( len ( value ) ) . encode ( 'ascii' ) + value if type ( self . value ) == list : items = [ LiveMessageToken ( m ) . serialize ( ) for m in self . value ] return b'l' + b'' . join ( items ) + b's' if type ( self . value ) == dict : items = [ ] for key , value in self . value . items ( ) : items . append ( LiveMessageToken ( str ( key ) ) . serialize ( ) ) items . append ( LiveMessageToken ( value ) . serialize ( ) ) return b'h' + b'' . join ( items ) + b's' raise RuntimeError ( "Unknown type %s" % type ( self . value ) )
7,178
https://github.com/erijo/tellive-py/blob/a84ebb1eb29ee4c69a085e55e523ac5fff0087fc/tellive/livemessage.py#L31-L55
[ "async", "def", "get", "(", "cls", ",", "websession", ",", "lat", ",", "lon", ")", ":", "self", "=", "Station", "(", "websession", ")", "stations", "=", "await", "self", ".", "api", ".", "stations", "(", ")", "self", ".", "station", "=", "self", ".", "_filter_closest", "(", "lat", ",", "lon", ",", "stations", ")", "logger", ".", "info", "(", "\"Using %s as weather station\"", ",", "self", ".", "station", ".", "local", ")", "return", "self" ]
Write Smother results to a file .
def write ( self , file_or_path , append = False , timeout = 10 ) : if isinstance ( file_or_path , six . string_types ) : if self . coverage : file_or_path = get_smother_filename ( file_or_path , self . coverage . config . parallel ) outfile = Lock ( file_or_path , mode = 'a+' , timeout = timeout , fail_when_locked = False ) else : outfile = noclose ( file_or_path ) with outfile as fh : if append : fh . seek ( 0 ) try : other = Smother . load ( fh ) except ValueError : # no smother data pass else : self |= other fh . seek ( 0 ) fh . truncate ( ) # required to overwrite data in a+ mode json . dump ( self . data , fh )
7,179
https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/control.py#L87-L137
[ "def", "rpc_start", "(", "working_dir", ",", "port", ",", "subdomain_index", "=", "None", ",", "thread", "=", "True", ")", ":", "rpc_srv", "=", "BlockstackdRPCServer", "(", "working_dir", ",", "port", ",", "subdomain_index", "=", "subdomain_index", ")", "log", ".", "debug", "(", "\"Starting RPC on port {}\"", ".", "format", "(", "port", ")", ")", "if", "thread", ":", "rpc_srv", ".", "start", "(", ")", "return", "rpc_srv" ]
Return which set of test contexts intersect a set of code regions .
def query_context ( self , regions , file_factory = PythonFile ) : result = set ( ) for region in regions : try : pf = file_factory ( region . filename ) except InvalidPythonFile : continue # region and/or coverage report may use paths # relative to this directory. Ensure we find a match # if they use different conventions. paths = { os . path . abspath ( region . filename ) , os . path . relpath ( region . filename ) } for test_context , hits in six . iteritems ( self . data ) : if test_context in result : continue for path in paths : if region . intersects ( pf , hits . get ( path , [ ] ) ) : result . add ( test_context ) return QueryResult ( result )
7,180
https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/control.py#L174-L214
[ "def", "promote_owner", "(", "self", ",", "stream_id", ",", "user_id", ")", ":", "req_hook", "=", "'pod/v1/room/'", "+", "stream_id", "+", "'/membership/promoteOwner'", "req_args", "=", "'{ \"id\": %s }'", "%", "user_id", "status_code", ",", "response", "=", "self", ".", "__rest__", ".", "POST_query", "(", "req_hook", ",", "req_args", ")", "self", ".", "logger", ".", "debug", "(", "'%s: %s'", "%", "(", "status_code", ",", "response", ")", ")", "return", "status_code", ",", "response" ]
Adds a child to the CitationSet
def add_child ( self , child ) : if isinstance ( child , BaseCitation ) : self . _children . append ( child )
7,181
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/reference/_base.py#L57-L64
[ "def", "pop", "(", "self", ",", "symbol", ")", ":", "last_metadata", "=", "self", ".", "find_one", "(", "{", "'symbol'", ":", "symbol", "}", ",", "sort", "=", "[", "(", "'start_time'", ",", "pymongo", ".", "DESCENDING", ")", "]", ")", "if", "last_metadata", "is", "None", ":", "raise", "NoDataFoundException", "(", "'No metadata found for symbol {}'", ".", "format", "(", "symbol", ")", ")", "self", ".", "find_one_and_delete", "(", "{", "'symbol'", ":", "symbol", "}", ",", "sort", "=", "[", "(", "'start_time'", ",", "pymongo", ".", "DESCENDING", ")", "]", ")", "mongo_retry", "(", "self", ".", "find_one_and_update", ")", "(", "{", "'symbol'", ":", "symbol", "}", ",", "{", "'$unset'", ":", "{", "'end_time'", ":", "''", "}", "}", ",", "sort", "=", "[", "(", "'start_time'", ",", "pymongo", ".", "DESCENDING", ")", "]", ")", "return", "last_metadata" ]
Depth of the citation scheme
def depth ( self ) -> int : if len ( self . children ) : return 1 + max ( [ child . depth for child in self . children ] ) else : return 1
7,182
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/reference/_base.py#L298-L310
[ "def", "RepackAllTemplates", "(", "self", ",", "upload", "=", "False", ",", "token", "=", "None", ")", ":", "for", "template", "in", "os", ".", "listdir", "(", "config", ".", "CONFIG", "[", "\"ClientBuilder.template_dir\"", "]", ")", ":", "template_path", "=", "os", ".", "path", ".", "join", "(", "config", ".", "CONFIG", "[", "\"ClientBuilder.template_dir\"", "]", ",", "template", ")", "self", ".", "RepackTemplate", "(", "template_path", ",", "os", ".", "path", ".", "join", "(", "config", ".", "CONFIG", "[", "\"ClientBuilder.executables_dir\"", "]", ",", "\"installers\"", ")", ",", "upload", "=", "upload", ",", "token", "=", "token", ")", "# If it's windows also repack a debug version.", "if", "template_path", ".", "endswith", "(", "\".exe.zip\"", ")", ":", "print", "(", "\"Repacking as debug installer: %s.\"", "%", "template_path", ")", "self", ".", "RepackTemplate", "(", "template_path", ",", "os", ".", "path", ".", "join", "(", "config", ".", "CONFIG", "[", "\"ClientBuilder.executables_dir\"", "]", ",", "\"installers\"", ")", ",", "upload", "=", "upload", ",", "token", "=", "token", ",", "context", "=", "[", "\"DebugClientBuild Context\"", "]", ")" ]
Set given link in CTS Namespace
def set_link ( self , prop , value ) : # https://rdflib.readthedocs.io/en/stable/ # URIRef == identifiers (urn, http, URI in general) # Literal == String or Number (can have a language) # BNode == Anonymous nodes (So no specific identifier) # eg. BNode : Edition(MartialEpigrams:URIRef) ---has_metadata--> Metadata(BNode) if not isinstance ( value , URIRef ) : value = URIRef ( value ) self . metadata . add ( prop , value )
7,183
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L118-L135
[ "def", "Descargar", "(", "self", ",", "url", "=", "URL", ",", "filename", "=", "\"padron.txt\"", ",", "proxy", "=", "None", ")", ":", "proxies", "=", "{", "}", "if", "proxy", ":", "proxies", "[", "'http'", "]", "=", "proxy", "proxies", "[", "'https'", "]", "=", "proxy", "proxy_handler", "=", "urllib2", ".", "ProxyHandler", "(", "proxies", ")", "print", "\"Abriendo URL %s ...\"", "%", "url", "req", "=", "urllib2", ".", "Request", "(", "url", ")", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "http_date", "=", "formatdate", "(", "timeval", "=", "os", ".", "path", ".", "getmtime", "(", "filename", ")", ",", "localtime", "=", "False", ",", "usegmt", "=", "True", ")", "req", ".", "add_header", "(", "'If-Modified-Since'", ",", "http_date", ")", "try", ":", "web", "=", "urllib2", ".", "urlopen", "(", "req", ")", "except", "urllib2", ".", "HTTPError", ",", "e", ":", "if", "e", ".", "code", "==", "304", ":", "print", "\"No modificado desde\"", ",", "http_date", "return", "304", "else", ":", "raise", "# leer info del request:", "meta", "=", "web", ".", "info", "(", ")", "lenght", "=", "float", "(", "meta", "[", "'Content-Length'", "]", ")", "date", "=", "meta", "[", "'Last-Modified'", "]", "tmp", "=", "open", "(", "filename", "+", "\".zip\"", ",", "\"wb\"", ")", "print", "\"Guardando\"", "size", "=", "0", "p0", "=", "None", "while", "True", ":", "p", "=", "int", "(", "size", "/", "lenght", "*", "100", ")", "if", "p0", "is", "None", "or", "p", ">", "p0", ":", "print", "\"Leyendo ... 
%0d %%\"", "%", "p", "p0", "=", "p", "data", "=", "web", ".", "read", "(", "1024", "*", "100", ")", "size", "=", "size", "+", "len", "(", "data", ")", "if", "not", "data", ":", "print", "\"Descarga Terminada!\"", "break", "tmp", ".", "write", "(", "data", ")", "print", "\"Abriendo ZIP...\"", "tmp", ".", "close", "(", ")", "web", ".", "close", "(", ")", "uf", "=", "open", "(", "filename", "+", "\".zip\"", ",", "\"rb\"", ")", "zf", "=", "zipfile", ".", "ZipFile", "(", "uf", ")", "for", "fn", "in", "zf", ".", "namelist", "(", ")", ":", "print", "\"descomprimiendo\"", ",", "fn", "tf", "=", "open", "(", "filename", ",", "\"wb\"", ")", "tf", ".", "write", "(", "zf", ".", "read", "(", "fn", ")", ")", "tf", ".", "close", "(", ")", "return", "200" ]
Get all editions of the texts
def editions ( self ) : return [ item for urn , item in self . parent . children . items ( ) if isinstance ( item , CtsEditionMetadata ) ]
7,184
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L263-L273
[ "def", "make_client", "(", "zhmc", ",", "userid", "=", "None", ",", "password", "=", "None", ")", ":", "global", "USERID", ",", "PASSWORD", "# pylint: disable=global-statement", "USERID", "=", "userid", "or", "USERID", "or", "six", ".", "input", "(", "'Enter userid for HMC {}: '", ".", "format", "(", "zhmc", ")", ")", "PASSWORD", "=", "password", "or", "PASSWORD", "or", "getpass", ".", "getpass", "(", "'Enter password for {}: '", ".", "format", "(", "USERID", ")", ")", "session", "=", "zhmcclient", ".", "Session", "(", "zhmc", ",", "USERID", ",", "PASSWORD", ")", "session", ".", "logon", "(", ")", "client", "=", "zhmcclient", ".", "Client", "(", "session", ")", "print", "(", "'Established logged-on session with HMC {} using userid {}'", ".", "format", "(", "zhmc", ",", "USERID", ")", ")", "return", "client" ]
Get the DC description of the object
def get_description ( self , lang = None ) : return self . metadata . get_single ( key = RDF_NAMESPACES . CTS . description , lang = lang )
7,185
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L360-L367
[ "def", "paintGL", "(", "self", ")", ":", "if", "self", ".", "post_processing", ":", "# Render to the first framebuffer", "glBindFramebuffer", "(", "GL_FRAMEBUFFER", ",", "self", ".", "fb0", ")", "glViewport", "(", "0", ",", "0", ",", "self", ".", "width", "(", ")", ",", "self", ".", "height", "(", ")", ")", "status", "=", "glCheckFramebufferStatus", "(", "GL_FRAMEBUFFER", ")", "if", "(", "status", "!=", "GL_FRAMEBUFFER_COMPLETE", ")", ":", "reason", "=", "dict", "(", "GL_FRAMEBUFFER_UNDEFINED", "=", "'UNDEFINED'", ",", "GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT", "=", "'INCOMPLETE_ATTACHMENT'", ",", "GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT", "=", "'INCOMPLETE_MISSING_ATTACHMENT'", ",", "GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER", "=", "'INCOMPLETE_DRAW_BUFFER'", ",", "GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER", "=", "'INCOMPLETE_READ_BUFFER'", ",", "GL_FRAMEBUFFER_UNSUPPORTED", "=", "'UNSUPPORTED'", ",", ")", "[", "status", "]", "raise", "Exception", "(", "'Framebuffer is not complete: {}'", ".", "format", "(", "reason", ")", ")", "else", ":", "glBindFramebuffer", "(", "GL_FRAMEBUFFER", ",", "DEFAULT_FRAMEBUFFER", ")", "# Clear color take floats", "bg_r", ",", "bg_g", ",", "bg_b", ",", "bg_a", "=", "self", ".", "background_color", "glClearColor", "(", "bg_r", "/", "255", ",", "bg_g", "/", "255", ",", "bg_b", "/", "255", ",", "bg_a", "/", "255", ")", "glClear", "(", "GL_COLOR_BUFFER_BIT", "|", "GL_DEPTH_BUFFER_BIT", ")", "proj", "=", "self", ".", "camera", ".", "projection", "cam", "=", "self", ".", "camera", ".", "matrix", "self", ".", "mvproj", "=", "np", ".", "dot", "(", "proj", ",", "cam", ")", "self", ".", "ldir", "=", "cam", "[", ":", "3", ",", ":", "3", "]", ".", "T", ".", "dot", "(", "self", ".", "light_dir", ")", "# Draw World", "self", ".", "on_draw_world", "(", ")", "# Iterate over all of the post processing effects", "if", "self", ".", "post_processing", ":", "if", "len", "(", "self", ".", "post_processing", ")", ">", "1", ":", "newarg", "=", "self", ".", 
"textures", ".", "copy", "(", ")", "# Ping-pong framebuffer rendering", "for", "i", ",", "pp", "in", "enumerate", "(", "self", ".", "post_processing", "[", ":", "-", "1", "]", ")", ":", "if", "i", "%", "2", ":", "outfb", "=", "self", ".", "fb1", "outtex", "=", "self", ".", "_extra_textures", "[", "'fb1'", "]", "else", ":", "outfb", "=", "self", ".", "fb2", "outtex", "=", "self", ".", "_extra_textures", "[", "'fb2'", "]", "pp", ".", "render", "(", "outfb", ",", "newarg", ")", "newarg", "[", "'color'", "]", "=", "outtex", "self", ".", "post_processing", "[", "-", "1", "]", ".", "render", "(", "DEFAULT_FRAMEBUFFER", ",", "newarg", ")", "else", ":", "self", ".", "post_processing", "[", "0", "]", ".", "render", "(", "DEFAULT_FRAMEBUFFER", ",", "self", ".", "textures", ")", "# Draw the UI at the very last step", "self", ".", "on_draw_ui", "(", ")" ]
Languages this text is in
def lang ( self ) : return str ( self . graph . value ( self . asNode ( ) , DC . language ) )
7,186
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L461-L466
[ "def", "catalogFactory", "(", "name", ",", "*", "*", "kwargs", ")", ":", "fn", "=", "lambda", "member", ":", "inspect", ".", "isclass", "(", "member", ")", "and", "member", ".", "__module__", "==", "__name__", "catalogs", "=", "odict", "(", "inspect", ".", "getmembers", "(", "sys", ".", "modules", "[", "__name__", "]", ",", "fn", ")", ")", "if", "name", "not", "in", "list", "(", "catalogs", ".", "keys", "(", ")", ")", ":", "msg", "=", "\"%s not found in catalogs:\\n %s\"", "%", "(", "name", ",", "list", "(", "kernels", ".", "keys", "(", ")", ")", ")", "logger", ".", "error", "(", "msg", ")", "msg", "=", "\"Unrecognized catalog: %s\"", "%", "name", "raise", "Exception", "(", "msg", ")", "return", "catalogs", "[", "name", "]", "(", "*", "*", "kwargs", ")" ]
Language this text is available in
def lang ( self , lang ) : self . graph . set ( ( self . asNode ( ) , DC . language , Literal ( lang ) ) )
7,187
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L469-L475
[ "def", "increase_frequency", "(", "self", ",", "frequency", "=", "None", ")", ":", "if", "frequency", "is", "None", ":", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"increaseFrequency\"", ",", "\"()V\"", ")", "else", ":", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"increaseFrequency\"", ",", "\"(I)V\"", ",", "frequency", ")" ]
Merge two XmlCtsWorkMetadata Objects .
def update ( self , other ) : if not isinstance ( other , CtsWorkMetadata ) : raise TypeError ( "Cannot add %s to CtsWorkMetadata" % type ( other ) ) elif self . urn != other . urn : raise InvalidURN ( "Cannot add CtsWorkMetadata %s to CtsWorkMetadata %s " % ( self . urn , other . urn ) ) for urn , text in other . children . items ( ) : self . texts [ urn ] = text self . texts [ urn ] . parent = self self . texts [ urn ] . resource = None return self
7,188
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L477-L498
[ "def", "_update_netrc", "(", "self", ",", "netrc_path", ",", "auth_token", ",", "account_email", ")", ":", "# define patterns\r", "import", "re", "record_end", "=", "'(\\n\\n|\\n\\w|$)'", "heroku_regex", "=", "re", ".", "compile", "(", "'(machine\\sapi\\.heroku\\.com.*?\\nmachine\\sgit\\.heroku\\.com.*?)%s'", "%", "record_end", ",", "re", ".", "S", ")", "# retrieve netrc text\r", "netrc_text", "=", "open", "(", "netrc_path", ")", ".", "read", "(", ")", ".", "strip", "(", ")", "# replace text with new password and login\r", "new_heroku", "=", "'machine api.heroku.com\\n password %s\\n login %s\\n'", "%", "(", "auth_token", ",", "account_email", ")", "new_heroku", "+=", "'machine git.heroku.com\\n password %s\\n login %s\\n\\n'", "%", "(", "auth_token", ",", "account_email", ")", "heroku_search", "=", "heroku_regex", ".", "findall", "(", "netrc_text", ")", "if", "heroku_search", ":", "if", "re", ".", "match", "(", "'\\n\\w'", ",", "heroku_search", "[", "0", "]", "[", "1", "]", ")", ":", "new_heroku", "=", "new_heroku", "[", ":", "-", "1", "]", "new_heroku", "+=", "heroku_search", "[", "0", "]", "[", "1", "]", "netrc_text", "=", "heroku_regex", ".", "sub", "(", "new_heroku", ",", "netrc_text", ")", "else", ":", "netrc_text", "+=", "'\\n\\n'", "+", "new_heroku", "# save netrc\r", "with", "open", "(", "netrc_path", ",", "'wt'", ")", "as", "f", ":", "f", ".", "write", "(", "netrc_text", ")", "f", ".", "close", "(", ")", "return", "netrc_text" ]
Find a translation with given language
def get_translation_in ( self , key = None ) : if key is not None : return [ item for item in self . texts . values ( ) if isinstance ( item , CtsTranslationMetadata ) and item . lang == key ] else : return [ item for item in self . texts . values ( ) if isinstance ( item , CtsTranslationMetadata ) ]
7,189
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L500-L519
[ "def", "wncomd", "(", "left", ",", "right", ",", "window", ")", ":", "assert", "isinstance", "(", "window", ",", "stypes", ".", "SpiceCell", ")", "assert", "window", ".", "dtype", "==", "1", "left", "=", "ctypes", ".", "c_double", "(", "left", ")", "right", "=", "ctypes", ".", "c_double", "(", "right", ")", "result", "=", "stypes", ".", "SpiceCell", ".", "double", "(", "window", ".", "size", ")", "libspice", ".", "wncomd_c", "(", "left", ",", "right", ",", "ctypes", ".", "byref", "(", "window", ")", ",", "result", ")", "return", "result" ]
Merge two Textgroup Objects .
def update ( self , other ) : if not isinstance ( other , CtsTextgroupMetadata ) : raise TypeError ( "Cannot add %s to CtsTextgroupMetadata" % type ( other ) ) elif str ( self . urn ) != str ( other . urn ) : raise InvalidURN ( "Cannot add CtsTextgroupMetadata %s to CtsTextgroupMetadata %s " % ( self . urn , other . urn ) ) for urn , work in other . works . items ( ) : if urn in self . works : self . works [ urn ] . update ( deepcopy ( work ) ) else : self . works [ urn ] = deepcopy ( work ) self . works [ urn ] . parent = self self . works [ urn ] . resource = None return self
7,190
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L581-L605
[ "def", "returnJobReqs", "(", "self", ",", "jobReqs", ")", ":", "# Since we are only reading this job's specific values from the state file, we don't", "# need a lock", "jobState", "=", "self", ".", "_JobState", "(", "self", ".", "_CacheState", ".", "_load", "(", "self", ".", "cacheStateFile", ")", ".", "jobState", "[", "self", ".", "jobID", "]", ")", "for", "x", "in", "list", "(", "jobState", ".", "jobSpecificFiles", ".", "keys", "(", ")", ")", ":", "self", ".", "deleteLocalFile", "(", "x", ")", "with", "self", ".", "_CacheState", ".", "open", "(", "self", ")", "as", "cacheInfo", ":", "cacheInfo", ".", "sigmaJob", "-=", "jobReqs" ]
Get triggers with optional filtering . Querying without parameters returns all the trigger definitions .
def get ( self , tags = [ ] , trigger_ids = [ ] ) : params = { } if len ( tags ) > 0 : params [ 'tags' ] = ',' . join ( tags ) if len ( trigger_ids ) > 0 : params [ 'triggerIds' ] = ',' . join ( trigger_ids ) url = self . _service_url ( 'triggers' , params = params ) triggers_dict = self . _get ( url ) return Trigger . list_to_object_list ( triggers_dict )
7,191
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L132-L148
[ "def", "login", "(", "self", ",", "session", "=", "None", ")", ":", "session", "=", "session", "or", "self", ".", "client", ".", "session", "try", ":", "response", "=", "session", ".", "get", "(", "self", ".", "login_url", ")", "except", "ConnectionError", ":", "raise", "APIError", "(", "None", ",", "self", ".", "login_url", ",", "None", ",", "'ConnectionError'", ")", "except", "Exception", "as", "e", ":", "raise", "APIError", "(", "None", ",", "self", ".", "login_url", ",", "None", ",", "e", ")", "app_key", "=", "re", ".", "findall", "(", "r'''\"appKey\":\\s\"(.*?)\"'''", ",", "response", ".", "text", ")", "if", "app_key", ":", "self", ".", "app_key", "=", "app_key", "[", "0", "]", "else", ":", "raise", "RaceCardError", "(", "\"Unable to find appKey\"", ")" ]
Create a new trigger .
def create ( self , trigger ) : data = self . _serialize_object ( trigger ) if isinstance ( trigger , FullTrigger ) : returned_dict = self . _post ( self . _service_url ( [ 'triggers' , 'trigger' ] ) , data ) return FullTrigger ( returned_dict ) else : returned_dict = self . _post ( self . _service_url ( 'triggers' ) , data ) return Trigger ( returned_dict )
7,192
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L150-L163
[ "async", "def", "close_async", "(", "self", ")", ":", "if", "self", ".", "message_handler", ":", "await", "self", ".", "message_handler", ".", "destroy_async", "(", ")", "self", ".", "message_handler", "=", "None", "self", ".", "_shutdown", "=", "True", "if", "self", ".", "_keep_alive_thread", ":", "await", "self", ".", "_keep_alive_thread", "self", ".", "_keep_alive_thread", "=", "None", "if", "not", "self", ".", "_session", ":", "return", "# already closed.", "if", "not", "self", ".", "_connection", ".", "cbs", ":", "_logger", ".", "info", "(", "\"Closing non-CBS session.\"", ")", "await", "asyncio", ".", "shield", "(", "self", ".", "_session", ".", "destroy_async", "(", ")", ")", "else", ":", "_logger", ".", "info", "(", "\"CBS session pending %r.\"", ",", "self", ".", "_connection", ".", "container_id", ")", "self", ".", "_session", "=", "None", "if", "not", "self", ".", "_ext_connection", ":", "_logger", ".", "info", "(", "\"Closing exclusive connection %r.\"", ",", "self", ".", "_connection", ".", "container_id", ")", "await", "asyncio", ".", "shield", "(", "self", ".", "_connection", ".", "destroy_async", "(", ")", ")", "else", ":", "_logger", ".", "info", "(", "\"Shared connection remaining open.\"", ")", "self", ".", "_connection", "=", "None" ]
Update an existing full trigger .
def update ( self , trigger_id , full_trigger ) : data = self . _serialize_object ( full_trigger ) rdict = self . _put ( self . _service_url ( [ 'triggers' , 'trigger' , trigger_id ] ) , data ) return FullTrigger ( rdict )
7,193
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L165-L175
[ "def", "_clone", "(", "self", ",", "cid", ")", ":", "try", ":", "iid", "=", "self", ".", "client", ".", "commit", "(", "container", "=", "cid", ",", "conf", "=", "{", "'Labels'", ":", "{", "'io.projectatomic.Temporary'", ":", "'true'", "}", "}", ")", "[", "'Id'", "]", "except", "docker", ".", "errors", ".", "APIError", "as", "ex", ":", "raise", "MountError", "(", "str", "(", "ex", ")", ")", "self", ".", "tmp_image", "=", "iid", "return", "self", ".", "_create_temp_container", "(", "iid", ")" ]
Create a new group trigger .
def create_group ( self , trigger ) : data = self . _serialize_object ( trigger ) return Trigger ( self . _post ( self . _service_url ( [ 'triggers' , 'groups' ] ) , data ) )
7,194
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L202-L210
[ "async", "def", "close_async", "(", "self", ")", ":", "if", "self", ".", "message_handler", ":", "await", "self", ".", "message_handler", ".", "destroy_async", "(", ")", "self", ".", "message_handler", "=", "None", "self", ".", "_shutdown", "=", "True", "if", "self", ".", "_keep_alive_thread", ":", "await", "self", ".", "_keep_alive_thread", "self", ".", "_keep_alive_thread", "=", "None", "if", "not", "self", ".", "_session", ":", "return", "# already closed.", "if", "not", "self", ".", "_connection", ".", "cbs", ":", "_logger", ".", "info", "(", "\"Closing non-CBS session.\"", ")", "await", "asyncio", ".", "shield", "(", "self", ".", "_session", ".", "destroy_async", "(", ")", ")", "else", ":", "_logger", ".", "info", "(", "\"CBS session pending %r.\"", ",", "self", ".", "_connection", ".", "container_id", ")", "self", ".", "_session", "=", "None", "if", "not", "self", ".", "_ext_connection", ":", "_logger", ".", "info", "(", "\"Closing exclusive connection %r.\"", ",", "self", ".", "_connection", ".", "container_id", ")", "await", "asyncio", ".", "shield", "(", "self", ".", "_connection", ".", "destroy_async", "(", ")", ")", "else", ":", "_logger", ".", "info", "(", "\"Shared connection remaining open.\"", ")", "self", ".", "_connection", "=", "None" ]
Find all group member trigger definitions
def group_members ( self , group_id , include_orphans = False ) : params = { 'includeOrphans' : str ( include_orphans ) . lower ( ) } url = self . _service_url ( [ 'triggers' , 'groups' , group_id , 'members' ] , params = params ) return Trigger . list_to_object_list ( self . _get ( url ) )
7,195
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L212-L222
[ "def", "addNoiseToVector", "(", "inputVector", ",", "noiseLevel", ",", "vectorType", ")", ":", "if", "vectorType", "==", "'sparse'", ":", "corruptSparseVector", "(", "inputVector", ",", "noiseLevel", ")", "elif", "vectorType", "==", "'dense'", ":", "corruptDenseVector", "(", "inputVector", ",", "noiseLevel", ")", "else", ":", "raise", "ValueError", "(", "\"vectorType must be 'sparse' or 'dense' \"", ")" ]
Update an existing group trigger definition and its member definitions .
def update_group ( self , group_id , trigger ) : data = self . _serialize_object ( trigger ) self . _put ( self . _service_url ( [ 'triggers' , 'groups' , group_id ] ) , data , parse_json = False )
7,196
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L224-L232
[ "def", "addNoiseToVector", "(", "inputVector", ",", "noiseLevel", ",", "vectorType", ")", ":", "if", "vectorType", "==", "'sparse'", ":", "corruptSparseVector", "(", "inputVector", ",", "noiseLevel", ")", "elif", "vectorType", "==", "'dense'", ":", "corruptDenseVector", "(", "inputVector", ",", "noiseLevel", ")", "else", ":", "raise", "ValueError", "(", "\"vectorType must be 'sparse' or 'dense' \"", ")" ]
Delete a group trigger
def delete_group ( self , group_id , keep_non_orphans = False , keep_orphans = False ) : params = { 'keepNonOrphans' : str ( keep_non_orphans ) . lower ( ) , 'keepOrphans' : str ( keep_orphans ) . lower ( ) } self . _delete ( self . _service_url ( [ 'triggers' , 'groups' , group_id ] , params = params ) )
7,197
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L234-L243
[ "def", "create_session", "(", "self", ",", "kind", ":", "SessionKind", ",", "proxy_user", ":", "str", "=", "None", ",", "jars", ":", "List", "[", "str", "]", "=", "None", ",", "py_files", ":", "List", "[", "str", "]", "=", "None", ",", "files", ":", "List", "[", "str", "]", "=", "None", ",", "driver_memory", ":", "str", "=", "None", ",", "driver_cores", ":", "int", "=", "None", ",", "executor_memory", ":", "str", "=", "None", ",", "executor_cores", ":", "int", "=", "None", ",", "num_executors", ":", "int", "=", "None", ",", "archives", ":", "List", "[", "str", "]", "=", "None", ",", "queue", ":", "str", "=", "None", ",", "name", ":", "str", "=", "None", ",", "spark_conf", ":", "Dict", "[", "str", ",", "Any", "]", "=", "None", ",", ")", "->", "Session", ":", "if", "self", ".", "legacy_server", "(", ")", ":", "valid_kinds", "=", "VALID_LEGACY_SESSION_KINDS", "else", ":", "valid_kinds", "=", "VALID_SESSION_KINDS", "if", "kind", "not", "in", "valid_kinds", ":", "raise", "ValueError", "(", "f\"{kind} is not a valid session kind for a Livy server of \"", "f\"this version (should be one of {valid_kinds})\"", ")", "body", "=", "{", "\"kind\"", ":", "kind", ".", "value", "}", "if", "proxy_user", "is", "not", "None", ":", "body", "[", "\"proxyUser\"", "]", "=", "proxy_user", "if", "jars", "is", "not", "None", ":", "body", "[", "\"jars\"", "]", "=", "jars", "if", "py_files", "is", "not", "None", ":", "body", "[", "\"pyFiles\"", "]", "=", "py_files", "if", "files", "is", "not", "None", ":", "body", "[", "\"files\"", "]", "=", "files", "if", "driver_memory", "is", "not", "None", ":", "body", "[", "\"driverMemory\"", "]", "=", "driver_memory", "if", "driver_cores", "is", "not", "None", ":", "body", "[", "\"driverCores\"", "]", "=", "driver_cores", "if", "executor_memory", "is", "not", "None", ":", "body", "[", "\"executorMemory\"", "]", "=", "executor_memory", "if", "executor_cores", "is", "not", "None", ":", "body", "[", "\"executorCores\"", "]", "=", "executor_cores", "if", 
"num_executors", "is", "not", "None", ":", "body", "[", "\"numExecutors\"", "]", "=", "num_executors", "if", "archives", "is", "not", "None", ":", "body", "[", "\"archives\"", "]", "=", "archives", "if", "queue", "is", "not", "None", ":", "body", "[", "\"queue\"", "]", "=", "queue", "if", "name", "is", "not", "None", ":", "body", "[", "\"name\"", "]", "=", "name", "if", "spark_conf", "is", "not", "None", ":", "body", "[", "\"conf\"", "]", "=", "spark_conf", "data", "=", "self", ".", "_client", ".", "post", "(", "\"/sessions\"", ",", "data", "=", "body", ")", "return", "Session", ".", "from_json", "(", "data", ")" ]
Create a new member trigger for a parent trigger .
def create_group_member ( self , member ) : data = self . _serialize_object ( member ) return Trigger ( self . _post ( self . _service_url ( [ 'triggers' , 'groups' , 'members' ] ) , data ) )
7,198
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L245-L254
[ "def", "read_stb", "(", "library", ",", "session", ")", ":", "status", "=", "ViUInt16", "(", ")", "ret", "=", "library", ".", "viReadSTB", "(", "session", ",", "byref", "(", "status", ")", ")", "return", "status", ".", "value", ",", "ret" ]
Set the group conditions .
def set_group_conditions ( self , group_id , conditions , trigger_mode = None ) : data = self . _serialize_object ( conditions ) if trigger_mode is not None : url = self . _service_url ( [ 'triggers' , 'groups' , group_id , 'conditions' , trigger_mode ] ) else : url = self . _service_url ( [ 'triggers' , 'groups' , group_id , 'conditions' ] ) response = self . _put ( url , data ) return Condition . list_to_object_list ( response )
7,199
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L256-L277
[ "def", "send_zipfile", "(", "request", ",", "fileList", ")", ":", "temp", "=", "tempfile", ".", "TemporaryFile", "(", ")", "archive", "=", "zipfile", ".", "ZipFile", "(", "temp", ",", "'w'", ",", "zipfile", ".", "ZIP_DEFLATED", ")", "for", "artist", ",", "files", "in", "fileList", ".", "iteritems", "(", ")", ":", "for", "f", "in", "files", ":", "archive", ".", "write", "(", "f", "[", "0", "]", ",", "'%s/%s'", "%", "(", "artist", ",", "f", "[", "1", "]", ")", ")", "archive", ".", "close", "(", ")", "wrapper", "=", "FixedFileWrapper", "(", "temp", ")", "response", "=", "HttpResponse", "(", "wrapper", ",", "content_type", "=", "'application/zip'", ")", "response", "[", "'Content-Disposition'", "]", "=", "'attachment; filename=FrogSources.zip'", "response", "[", "'Content-Length'", "]", "=", "temp", ".", "tell", "(", ")", "temp", ".", "seek", "(", "0", ")", "return", "response" ]