query
stringlengths 5
1.23k
| positive
stringlengths 53
15.2k
| id_
int64 0
252k
| task_name
stringlengths 87
242
| negative
listlengths 20
553
|
|---|---|---|---|---|
Orographic Gage File Read from File Method
|
def _read ( self , directory , filename , session , path , name , extension , spatial , spatialReferenceID , replaceParamFile ) : # Set file extension property self . fileExtension = extension # Open file and parse into HmetRecords with open ( path , 'r' ) as orthoFile : for line in orthoFile : sline = line . strip ( ) . split ( ) # Cases if sline [ 0 ] . lower ( ) == 'num_sites:' : self . numSites = sline [ 1 ] elif sline [ 0 ] . lower ( ) == 'elev_base' : self . elevBase = sline [ 1 ] elif sline [ 0 ] . lower ( ) == 'elev_2' : self . elev2 = sline [ 1 ] elif sline [ 0 ] . lower ( ) == 'year' : """DO NOTHING""" else : # Create datetime object dateTime = datetime ( year = int ( sline [ 0 ] ) , month = int ( sline [ 1 ] ) , day = int ( sline [ 2 ] ) , hour = int ( sline [ 3 ] ) ) # Create GSSHAPY OrthoMeasurement object measurement = OrographicMeasurement ( dateTime = dateTime , temp2 = sline [ 4 ] ) # Associate OrthoMeasurement with OrthographicGageFile self . orographicMeasurements . append ( measurement )
| 4,800
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/snw.py#L211-L244
|
[
"async",
"def",
"description",
"(",
"self",
")",
":",
"resp",
"=",
"await",
"self",
".",
"_call_web",
"(",
"f'nation={self.id}'",
")",
"return",
"html",
".",
"unescape",
"(",
"re",
".",
"search",
"(",
"'<div class=\"nationsummary\">(.+?)<p class=\"nationranktext\">'",
",",
"resp",
".",
"text",
",",
"flags",
"=",
"re",
".",
"DOTALL",
")",
".",
"group",
"(",
"1",
")",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
".",
"replace",
"(",
"'</p>'",
",",
"''",
")",
".",
"replace",
"(",
"'<p>'",
",",
"'\\n\\n'",
")",
".",
"strip",
"(",
")",
")"
] |
Orographic Gage File Write to File Method
|
def _write ( self , session , openFile , replaceParamFile ) : # Write lines openFile . write ( 'Num_Sites: %s\n' % self . numSites ) openFile . write ( 'Elev_Base %s\n' % self . elevBase ) openFile . write ( 'Elev_2 %s\n' % self . elev2 ) openFile . write ( 'Year Month Day Hour Temp_2\n' ) # Retrieve OrographicMeasurements measurements = self . orographicMeasurements for measurement in measurements : dateTime = measurement . dateTime openFile . write ( '%s%s%s%s%s%s%s%s%.3f\n' % ( dateTime . year , ' ' , dateTime . month , ' ' * ( 8 - len ( str ( dateTime . month ) ) ) , dateTime . day , ' ' * ( 8 - len ( str ( dateTime . day ) ) ) , dateTime . hour , ' ' * ( 8 - len ( str ( dateTime . hour ) ) ) , measurement . temp2 ) )
| 4,801
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/snw.py#L246-L270
|
[
"def",
"devices",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw",
"# from Android system/core/adb/transport.c statename()",
"re_device_info",
"=",
"re",
".",
"compile",
"(",
"r'([^\\s]+)\\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)'",
")",
"devices",
"=",
"[",
"]",
"lines",
"=",
"self",
".",
"command_output",
"(",
"[",
"\"devices\"",
",",
"\"-l\"",
"]",
",",
"timeout",
"=",
"timeout",
")",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"line",
"==",
"'List of devices attached '",
":",
"continue",
"match",
"=",
"re_device_info",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"device",
"=",
"{",
"'device_serial'",
":",
"match",
".",
"group",
"(",
"1",
")",
",",
"'state'",
":",
"match",
".",
"group",
"(",
"2",
")",
"}",
"remainder",
"=",
"line",
"[",
"match",
".",
"end",
"(",
"2",
")",
":",
"]",
".",
"strip",
"(",
")",
"if",
"remainder",
":",
"try",
":",
"device",
".",
"update",
"(",
"dict",
"(",
"[",
"j",
".",
"split",
"(",
"':'",
")",
"for",
"j",
"in",
"remainder",
".",
"split",
"(",
"' '",
")",
"]",
")",
")",
"except",
"ValueError",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"'devices: Unable to parse '",
"'remainder for device %s'",
"%",
"line",
")",
"devices",
".",
"append",
"(",
"device",
")",
"return",
"devices"
] |
Retrieve only the links that represent fluvial portions of the stream . Returns a list of StreamLink instances .
|
def getFluvialLinks ( self ) : # Define fluvial types fluvialTypeKeywords = ( 'TRAPEZOID' , 'TRAP' , 'BREAKPOINT' , 'ERODE' , 'SUBSURFACE' ) fluvialLinks = [ ] for link in self . streamLinks : for fluvialTypeKeyword in fluvialTypeKeywords : if fluvialTypeKeyword in link . type : fluvialLinks . append ( link ) break return fluvialLinks
| 4,802
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L93-L111
|
[
"def",
"execute",
"(",
"self",
",",
"driver_command",
",",
"params",
"=",
"None",
")",
":",
"if",
"self",
".",
"session_id",
"is",
"not",
"None",
":",
"if",
"not",
"params",
":",
"params",
"=",
"{",
"'sessionId'",
":",
"self",
".",
"session_id",
"}",
"elif",
"'sessionId'",
"not",
"in",
"params",
":",
"params",
"[",
"'sessionId'",
"]",
"=",
"self",
".",
"session_id",
"params",
"=",
"self",
".",
"_wrap_value",
"(",
"params",
")",
"response",
"=",
"self",
".",
"command_executor",
".",
"execute",
"(",
"driver_command",
",",
"params",
")",
"if",
"response",
":",
"self",
".",
"error_handler",
".",
"check_response",
"(",
"response",
")",
"response",
"[",
"'value'",
"]",
"=",
"self",
".",
"_unwrap_value",
"(",
"response",
".",
"get",
"(",
"'value'",
",",
"None",
")",
")",
"return",
"response",
"# If the server doesn't send a response, assume the command was",
"# a success",
"return",
"{",
"'success'",
":",
"0",
",",
"'value'",
":",
"None",
",",
"'sessionId'",
":",
"self",
".",
"session_id",
"}"
] |
Retrieve the links in the order of the link number .
|
def getOrderedLinks ( self , session ) : streamLinks = session . query ( StreamLink ) . filter ( StreamLink . channelInputFile == self ) . order_by ( StreamLink . linkNumber ) . all ( ) return streamLinks
| 4,803
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L113-L128
|
[
"def",
"win32_refresh_window",
"(",
"cls",
")",
":",
"# Get console handle",
"handle",
"=",
"windll",
".",
"kernel32",
".",
"GetConsoleWindow",
"(",
")",
"RDW_INVALIDATE",
"=",
"0x0001",
"windll",
".",
"user32",
".",
"RedrawWindow",
"(",
"handle",
",",
"None",
",",
"None",
",",
"c_uint",
"(",
"RDW_INVALIDATE",
")",
")"
] |
Retrieve the stream network geometry in Well Known Text format .
|
def getStreamNetworkAsWkt ( self , session , withNodes = True ) : wkt_list = [ ] for link in self . streamLinks : wkt_link = link . getAsWkt ( session ) if wkt_link : wkt_list . append ( wkt_link ) if withNodes : for node in link . nodes : wkt_node = node . getAsWkt ( session ) if wkt_node : wkt_list . append ( wkt_node ) return 'GEOMCOLLECTION ({0})' . format ( ', ' . join ( wkt_list ) )
| 4,804
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L303-L329
|
[
"def",
"add",
"(",
"self",
",",
"resource",
")",
":",
"if",
"isinstance",
"(",
"resource",
",",
"Resource",
")",
":",
"if",
"isinstance",
"(",
"resource",
",",
"Secret",
")",
"and",
"resource",
".",
"mount",
"!=",
"'cubbyhole'",
":",
"ensure_backend",
"(",
"resource",
",",
"SecretBackend",
",",
"self",
".",
"_mounts",
",",
"self",
".",
"opt",
",",
"False",
")",
"elif",
"isinstance",
"(",
"resource",
",",
"Mount",
")",
":",
"ensure_backend",
"(",
"resource",
",",
"SecretBackend",
",",
"self",
".",
"_mounts",
",",
"self",
".",
"opt",
")",
"elif",
"isinstance",
"(",
"resource",
",",
"Auth",
")",
":",
"ensure_backend",
"(",
"resource",
",",
"AuthBackend",
",",
"self",
".",
"_auths",
",",
"self",
".",
"opt",
")",
"elif",
"isinstance",
"(",
"resource",
",",
"AuditLog",
")",
":",
"ensure_backend",
"(",
"resource",
",",
"LogBackend",
",",
"self",
".",
"_logs",
",",
"self",
".",
"opt",
")",
"self",
".",
"_resources",
".",
"append",
"(",
"resource",
")",
"else",
":",
"msg",
"=",
"\"Unknown resource %s being \"",
"\"added to context\"",
"%",
"resource",
".",
"__class__",
"raise",
"aomi_excep",
".",
"AomiError",
"(",
"msg",
")"
] |
Retrieve the stream network geometry in GeoJSON format .
|
def getStreamNetworkAsGeoJson ( self , session , withNodes = True ) : features_list = [ ] # Assemble link features for link in self . streamLinks : link_geoJson = link . getAsGeoJson ( session ) if link_geoJson : link_geometry = json . loads ( link . getAsGeoJson ( session ) ) link_properties = { "link_number" : link . linkNumber , "type" : link . type , "num_elements" : link . numElements , "dx" : link . dx , "erode" : link . erode , "subsurface" : link . subsurface } link_feature = { "type" : "Feature" , "geometry" : link_geometry , "properties" : link_properties , "id" : link . id } features_list . append ( link_feature ) # Assemble node features if withNodes : for node in link . nodes : node_geoJson = node . getAsGeoJson ( session ) if node_geoJson : node_geometry = json . loads ( node_geoJson ) node_properties = { "link_number" : link . linkNumber , "node_number" : node . nodeNumber , "elevation" : node . elevation } node_feature = { "type" : "Feature" , "geometry" : node_geometry , "properties" : node_properties , "id" : node . id } features_list . append ( node_feature ) feature_collection = { "type" : "FeatureCollection" , "features" : features_list } return json . dumps ( feature_collection )
| 4,805
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L331-L387
|
[
"def",
"add",
"(",
"self",
",",
"resource",
")",
":",
"if",
"isinstance",
"(",
"resource",
",",
"Resource",
")",
":",
"if",
"isinstance",
"(",
"resource",
",",
"Secret",
")",
"and",
"resource",
".",
"mount",
"!=",
"'cubbyhole'",
":",
"ensure_backend",
"(",
"resource",
",",
"SecretBackend",
",",
"self",
".",
"_mounts",
",",
"self",
".",
"opt",
",",
"False",
")",
"elif",
"isinstance",
"(",
"resource",
",",
"Mount",
")",
":",
"ensure_backend",
"(",
"resource",
",",
"SecretBackend",
",",
"self",
".",
"_mounts",
",",
"self",
".",
"opt",
")",
"elif",
"isinstance",
"(",
"resource",
",",
"Auth",
")",
":",
"ensure_backend",
"(",
"resource",
",",
"AuthBackend",
",",
"self",
".",
"_auths",
",",
"self",
".",
"opt",
")",
"elif",
"isinstance",
"(",
"resource",
",",
"AuditLog",
")",
":",
"ensure_backend",
"(",
"resource",
",",
"LogBackend",
",",
"self",
".",
"_logs",
",",
"self",
".",
"opt",
")",
"self",
".",
"_resources",
".",
"append",
"(",
"resource",
")",
"else",
":",
"msg",
"=",
"\"Unknown resource %s being \"",
"\"added to context\"",
"%",
"resource",
".",
"__class__",
"raise",
"aomi_excep",
".",
"AomiError",
"(",
"msg",
")"
] |
Channel Input File Read from File Method
|
def _read ( self , directory , filename , session , path , name , extension , spatial , spatialReferenceID , replaceParamFile ) : # Set file extension property self . fileExtension = extension # Dictionary of keywords/cards and parse function names KEYWORDS = { 'ALPHA' : cic . cardChunk , 'BETA' : cic . cardChunk , 'THETA' : cic . cardChunk , 'LINKS' : cic . cardChunk , 'MAXNODES' : cic . cardChunk , 'CONNECT' : cic . connectChunk , 'LINK' : cic . linkChunk } links = [ ] connectivity = [ ] # Parse file into chunks associated with keywords/cards with open ( path , 'r' ) as f : chunks = pt . chunk ( KEYWORDS , f ) # Parse chunks associated with each key for key , chunkList in iteritems ( chunks ) : # Parse each chunk in the chunk list for chunk in chunkList : # Call chunk specific parsers for each chunk result = KEYWORDS [ key ] ( key , chunk ) # Cases if key == 'LINK' : # Link handler links . append ( self . _createLink ( result , replaceParamFile ) ) elif key == 'CONNECT' : # Connectivity handler connectivity . append ( result ) else : # Global variable handler card = result [ 'card' ] value = result [ 'values' ] [ 0 ] # Cases if card == 'LINKS' : self . links = int ( value ) elif card == 'MAXNODES' : self . maxNodes = int ( value ) elif card == 'ALPHA' : self . alpha = float ( vrp ( value , replaceParamFile ) ) elif card == 'BETA' : self . beta = float ( vrp ( value , replaceParamFile ) ) elif card == 'THETA' : self . theta = float ( vrp ( value , replaceParamFile ) ) self . _createConnectivity ( linkList = links , connectList = connectivity ) if spatial : self . _createGeometry ( session , spatialReferenceID )
| 4,806
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L389-L447
|
[
"async",
"def",
"description",
"(",
"self",
")",
":",
"resp",
"=",
"await",
"self",
".",
"_call_web",
"(",
"f'nation={self.id}'",
")",
"return",
"html",
".",
"unescape",
"(",
"re",
".",
"search",
"(",
"'<div class=\"nationsummary\">(.+?)<p class=\"nationranktext\">'",
",",
"resp",
".",
"text",
",",
"flags",
"=",
"re",
".",
"DOTALL",
")",
".",
"group",
"(",
"1",
")",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
".",
"replace",
"(",
"'</p>'",
",",
"''",
")",
".",
"replace",
"(",
"'<p>'",
",",
"'\\n\\n'",
")",
".",
"strip",
"(",
")",
")"
] |
Channel Input File Write to File Method
|
def _write ( self , session , openFile , replaceParamFile ) : # Write lines openFile . write ( 'GSSHA_CHAN\n' ) alpha = vwp ( self . alpha , replaceParamFile ) try : openFile . write ( 'ALPHA%s%.6f\n' % ( ' ' * 7 , alpha ) ) except : openFile . write ( 'ALPHA%s%s\n' % ( ' ' * 7 , alpha ) ) beta = vwp ( self . beta , replaceParamFile ) try : openFile . write ( 'BETA%s%.6f\n' % ( ' ' * 8 , beta ) ) except : openFile . write ( 'BETA%s%s\n' % ( ' ' * 8 , beta ) ) theta = vwp ( self . theta , replaceParamFile ) try : openFile . write ( 'THETA%s%.6f\n' % ( ' ' * 7 , theta ) ) except : openFile . write ( 'THETA%s%s\n' % ( ' ' * 7 , theta ) ) openFile . write ( 'LINKS%s%s\n' % ( ' ' * 7 , self . links ) ) openFile . write ( 'MAXNODES%s%s\n' % ( ' ' * 4 , self . maxNodes ) ) # Retrieve StreamLinks links = self . getOrderedLinks ( session ) self . _writeConnectivity ( links = links , fileObject = openFile ) self . _writeLinks ( links = links , fileObject = openFile , replaceParamFile = replaceParamFile )
| 4,807
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L449-L484
|
[
"def",
"devices",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw",
"# from Android system/core/adb/transport.c statename()",
"re_device_info",
"=",
"re",
".",
"compile",
"(",
"r'([^\\s]+)\\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)'",
")",
"devices",
"=",
"[",
"]",
"lines",
"=",
"self",
".",
"command_output",
"(",
"[",
"\"devices\"",
",",
"\"-l\"",
"]",
",",
"timeout",
"=",
"timeout",
")",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"line",
"==",
"'List of devices attached '",
":",
"continue",
"match",
"=",
"re_device_info",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"device",
"=",
"{",
"'device_serial'",
":",
"match",
".",
"group",
"(",
"1",
")",
",",
"'state'",
":",
"match",
".",
"group",
"(",
"2",
")",
"}",
"remainder",
"=",
"line",
"[",
"match",
".",
"end",
"(",
"2",
")",
":",
"]",
".",
"strip",
"(",
")",
"if",
"remainder",
":",
"try",
":",
"device",
".",
"update",
"(",
"dict",
"(",
"[",
"j",
".",
"split",
"(",
"':'",
")",
"for",
"j",
"in",
"remainder",
".",
"split",
"(",
"' '",
")",
"]",
")",
")",
"except",
"ValueError",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"'devices: Unable to parse '",
"'remainder for device %s'",
"%",
"line",
")",
"devices",
".",
"append",
"(",
"device",
")",
"return",
"devices"
] |
Create GSSHAPY Link Object Method
|
def _createLink ( self , linkResult , replaceParamFile ) : link = None # Cases if linkResult [ 'type' ] == 'XSEC' : # Cross section link handler link = self . _createCrossSection ( linkResult , replaceParamFile ) elif linkResult [ 'type' ] == 'STRUCTURE' : # Structure link handler link = self . _createStructure ( linkResult , replaceParamFile ) elif linkResult [ 'type' ] in ( 'RESERVOIR' , 'LAKE' ) : # Reservoir/lake handler link = self . _createReservoir ( linkResult , replaceParamFile ) return link
| 4,808
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L486-L505
|
[
"def",
"flush",
"(",
"self",
")",
":",
"writer",
"=",
"self",
".",
"writer",
"if",
"writer",
"is",
"None",
":",
"raise",
"GaugedUseAfterFreeError",
"self",
".",
"flush_writer_position",
"(",
")",
"keys",
"=",
"self",
".",
"translate_keys",
"(",
")",
"blocks",
"=",
"[",
"]",
"current_block",
"=",
"self",
".",
"current_block",
"statistics",
"=",
"self",
".",
"statistics",
"driver",
"=",
"self",
".",
"driver",
"flags",
"=",
"0",
"# for future extensions, e.g. block compression",
"for",
"namespace",
",",
"key",
",",
"block",
"in",
"self",
".",
"pending_blocks",
"(",
")",
":",
"length",
"=",
"block",
".",
"byte_length",
"(",
")",
"if",
"not",
"length",
":",
"continue",
"key_id",
"=",
"keys",
"[",
"(",
"namespace",
",",
"key",
")",
"]",
"statistics",
"[",
"namespace",
"]",
".",
"byte_count",
"+=",
"length",
"blocks",
".",
"append",
"(",
"(",
"namespace",
",",
"current_block",
",",
"key_id",
",",
"block",
".",
"buffer",
"(",
")",
",",
"flags",
")",
")",
"if",
"self",
".",
"config",
".",
"overwrite_blocks",
":",
"driver",
".",
"replace_blocks",
"(",
"blocks",
")",
"else",
":",
"driver",
".",
"insert_or_append_blocks",
"(",
"blocks",
")",
"if",
"not",
"Gauged",
".",
"writer_flush_maps",
"(",
"writer",
",",
"True",
")",
":",
"raise",
"MemoryError",
"update_namespace",
"=",
"driver",
".",
"add_namespace_statistics",
"for",
"namespace",
",",
"stats",
"in",
"statistics",
".",
"iteritems",
"(",
")",
":",
"update_namespace",
"(",
"namespace",
",",
"self",
".",
"current_block",
",",
"stats",
".",
"data_points",
",",
"stats",
".",
"byte_count",
")",
"statistics",
".",
"clear",
"(",
")",
"driver",
".",
"commit",
"(",
")",
"self",
".",
"flush_now",
"=",
"False"
] |
Create GSSHAPY Connect Object Method
|
def _createConnectivity ( self , linkList , connectList ) : # Create StreamLink-Connectivity Pairs for idx , link in enumerate ( linkList ) : connectivity = connectList [ idx ] # Initialize GSSHAPY UpstreamLink objects for upLink in connectivity [ 'upLinks' ] : upstreamLink = UpstreamLink ( upstreamLinkID = int ( upLink ) ) upstreamLink . streamLink = link link . downstreamLinkID = int ( connectivity [ 'downLink' ] ) link . numUpstreamLinks = int ( connectivity [ 'numUpLinks' ] )
| 4,809
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L508-L524
|
[
"def",
"label_to_latex",
"(",
"text",
")",
":",
"# pylint: disable=anomalous-backslash-in-string",
"if",
"text",
"is",
"None",
":",
"return",
"''",
"out",
"=",
"[",
"]",
"x",
"=",
"None",
"# loop over matches in reverse order and replace",
"for",
"m",
"in",
"re_latex_control",
".",
"finditer",
"(",
"text",
")",
":",
"a",
",",
"b",
"=",
"m",
".",
"span",
"(",
")",
"char",
"=",
"m",
".",
"group",
"(",
")",
"[",
"0",
"]",
"out",
".",
"append",
"(",
"text",
"[",
"x",
":",
"a",
"]",
")",
"out",
".",
"append",
"(",
"r'\\%s'",
"%",
"char",
")",
"x",
"=",
"b",
"if",
"not",
"x",
":",
"# no match",
"return",
"text",
"# append prefix and return joined components",
"out",
".",
"append",
"(",
"text",
"[",
"b",
":",
"]",
")",
"return",
"''",
".",
"join",
"(",
"out",
")"
] |
Create GSSHAPY Cross Section Objects Method
|
def _createCrossSection ( self , linkResult , replaceParamFile ) : # Extract header variables from link result object header = linkResult [ 'header' ] # Initialize GSSHAPY StreamLink object link = StreamLink ( linkNumber = int ( header [ 'link' ] ) , type = header [ 'xSecType' ] , numElements = header [ 'nodes' ] , dx = vrp ( header [ 'dx' ] , replaceParamFile ) , erode = header [ 'erode' ] , subsurface = header [ 'subsurface' ] ) # Associate StreamLink with ChannelInputFile link . channelInputFile = self # Initialize GSSHAPY TrapezoidalCS or BreakpointCS objects xSection = linkResult [ 'xSection' ] # Cases if 'TRAPEZOID' in link . type or 'TRAP' in link . type : # Trapezoid cross section handler # Initialize GSSHPY TrapeziodalCS object trapezoidCS = TrapezoidalCS ( mannings_n = vrp ( xSection [ 'mannings_n' ] , replaceParamFile ) , bottomWidth = vrp ( xSection [ 'bottom_width' ] , replaceParamFile ) , bankfullDepth = vrp ( xSection [ 'bankfull_depth' ] , replaceParamFile ) , sideSlope = vrp ( xSection [ 'side_slope' ] , replaceParamFile ) , mRiver = vrp ( xSection [ 'm_river' ] , replaceParamFile ) , kRiver = vrp ( xSection [ 'k_river' ] , replaceParamFile ) , erode = xSection [ 'erode' ] , subsurface = xSection [ 'subsurface' ] , maxErosion = vrp ( xSection [ 'max_erosion' ] , replaceParamFile ) ) # Associate TrapezoidalCS with StreamLink trapezoidCS . streamLink = link elif 'BREAKPOINT' in link . 
type : # Breakpoint cross section handler # Initialize GSSHAPY BreakpointCS objects breakpointCS = BreakpointCS ( mannings_n = vrp ( xSection [ 'mannings_n' ] , replaceParamFile ) , numPairs = xSection [ 'npairs' ] , numInterp = vrp ( xSection [ 'num_interp' ] , replaceParamFile ) , mRiver = vrp ( xSection [ 'm_river' ] , replaceParamFile ) , kRiver = vrp ( xSection [ 'k_river' ] , replaceParamFile ) , erode = xSection [ 'erode' ] , subsurface = xSection [ 'subsurface' ] , maxErosion = vrp ( xSection [ 'max_erosion' ] , replaceParamFile ) ) # Associate BreakpointCS with StreamLink breakpointCS . streamLink = link # Create GSSHAPY Breakpoint objects for b in xSection [ 'breakpoints' ] : breakpoint = Breakpoint ( x = b [ 'x' ] , y = b [ 'y' ] ) # Associate Breakpoint with BreakpointCS breakpoint . crossSection = breakpointCS # Initialize GSSHAPY StreamNode objects for n in linkResult [ 'nodes' ] : # Initialize GSSHAPY StreamNode object node = StreamNode ( nodeNumber = int ( n [ 'node' ] ) , x = n [ 'x' ] , y = n [ 'y' ] , elevation = n [ 'elev' ] ) # Associate StreamNode with StreamLink node . streamLink = link return link
| 4,810
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L527-L599
|
[
"def",
"compute_q",
"(",
"self",
",",
"query_antecedent",
")",
":",
"ret",
"=",
"mtf",
".",
"einsum",
"(",
"[",
"query_antecedent",
",",
"self",
".",
"wq",
"]",
",",
"reduced_dims",
"=",
"[",
"self",
".",
"query_input_dim",
"]",
")",
"if",
"self",
".",
"combine_dims",
":",
"ret",
"=",
"mtf",
".",
"replace_dimensions",
"(",
"ret",
",",
"ret",
".",
"shape",
".",
"dims",
"[",
"-",
"1",
"]",
",",
"self",
".",
"q_dims",
")",
"return",
"ret"
] |
Create GSSHAPY Structure Objects Method
|
def _createStructure ( self , linkResult , replaceParamFile ) : # Constants WEIRS = ( 'WEIR' , 'SAG_WEIR' ) CULVERTS = ( 'ROUND_CULVERT' , 'RECT_CULVERT' ) CURVES = ( 'RATING_CURVE' , 'SCHEDULED_RELEASE' , 'RULE_CURVE' ) header = linkResult [ 'header' ] # Initialize GSSHAPY StreamLink object link = StreamLink ( linkNumber = header [ 'link' ] , type = linkResult [ 'type' ] , numElements = header [ 'numstructs' ] ) # Associate StreamLink with ChannelInputFile link . channelInputFile = self # Create Structure objects for s in linkResult [ 'structures' ] : structType = s [ 'structtype' ] # Cases if structType in WEIRS : # Weir type handler # Initialize GSSHAPY Weir object weir = Weir ( type = structType , crestLength = vrp ( s [ 'crest_length' ] , replaceParamFile ) , crestLowElevation = vrp ( s [ 'crest_low_elev' ] , replaceParamFile ) , dischargeCoeffForward = vrp ( s [ 'discharge_coeff_forward' ] , replaceParamFile ) , dischargeCoeffReverse = vrp ( s [ 'discharge_coeff_reverse' ] , replaceParamFile ) , crestLowLocation = vrp ( s [ 'crest_low_loc' ] , replaceParamFile ) , steepSlope = vrp ( s [ 'steep_slope' ] , replaceParamFile ) , shallowSlope = vrp ( s [ 'shallow_slope' ] , replaceParamFile ) ) # Associate Weir with StreamLink weir . 
streamLink = link elif structType in CULVERTS : # Culvert type handler # Initialize GSSHAPY Culvert object culvert = Culvert ( type = structType , upstreamInvert = vrp ( s [ 'upinvert' ] , replaceParamFile ) , downstreamInvert = vrp ( s [ 'downinvert' ] , replaceParamFile ) , inletDischargeCoeff = vrp ( s [ 'inlet_disch_coeff' ] , replaceParamFile ) , reverseFlowDischargeCoeff = vrp ( s [ 'rev_flow_disch_coeff' ] , replaceParamFile ) , slope = vrp ( s [ 'slope' ] , replaceParamFile ) , length = vrp ( s [ 'length' ] , replaceParamFile ) , roughness = vrp ( s [ 'rough_coeff' ] , replaceParamFile ) , diameter = vrp ( s [ 'diameter' ] , replaceParamFile ) , width = vrp ( s [ 'width' ] , replaceParamFile ) , height = vrp ( s [ 'height' ] , replaceParamFile ) ) # Associate Culvert with StreamLink culvert . streamLink = link elif structType in CURVES : # Curve type handler pass return link
| 4,811
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L601-L664
|
[
"def",
"levinson_durbin",
"(",
"acdata",
",",
"order",
"=",
"None",
")",
":",
"if",
"order",
"is",
"None",
":",
"order",
"=",
"len",
"(",
"acdata",
")",
"-",
"1",
"elif",
"order",
">=",
"len",
"(",
"acdata",
")",
":",
"acdata",
"=",
"Stream",
"(",
"acdata",
")",
".",
"append",
"(",
"0",
")",
".",
"take",
"(",
"order",
"+",
"1",
")",
"# Inner product for filters based on above statistics",
"def",
"inner",
"(",
"a",
",",
"b",
")",
":",
"# Be careful, this depends on acdata !!!",
"return",
"sum",
"(",
"acdata",
"[",
"abs",
"(",
"i",
"-",
"j",
")",
"]",
"*",
"ai",
"*",
"bj",
"for",
"i",
",",
"ai",
"in",
"enumerate",
"(",
"a",
".",
"numlist",
")",
"for",
"j",
",",
"bj",
"in",
"enumerate",
"(",
"b",
".",
"numlist",
")",
")",
"try",
":",
"A",
"=",
"ZFilter",
"(",
"1",
")",
"for",
"m",
"in",
"xrange",
"(",
"1",
",",
"order",
"+",
"1",
")",
":",
"B",
"=",
"A",
"(",
"1",
"/",
"z",
")",
"*",
"z",
"**",
"-",
"m",
"A",
"-=",
"inner",
"(",
"A",
",",
"z",
"**",
"-",
"m",
")",
"/",
"inner",
"(",
"B",
",",
"B",
")",
"*",
"B",
"except",
"ZeroDivisionError",
":",
"raise",
"ParCorError",
"(",
"\"Can't find next PARCOR coefficient\"",
")",
"A",
".",
"error",
"=",
"inner",
"(",
"A",
",",
"A",
")",
"return",
"A"
] |
Create GSSHAPY Reservoir Objects Method
|
def _createReservoir ( self , linkResult , replaceParamFile ) : # Extract header variables from link result object header = linkResult [ 'header' ] # Cases if linkResult [ 'type' ] == 'LAKE' : # Lake handler initWSE = vrp ( header [ 'initwse' ] , replaceParamFile ) minWSE = vrp ( header [ 'minwse' ] , replaceParamFile ) maxWSE = vrp ( header [ 'maxwse' ] , replaceParamFile ) numPts = header [ 'numpts' ] elif linkResult [ 'type' ] == 'RESERVOIR' : # Reservoir handler initWSE = vrp ( header [ 'res_initwse' ] , replaceParamFile ) minWSE = vrp ( header [ 'res_minwse' ] , replaceParamFile ) maxWSE = vrp ( header [ 'res_maxwse' ] , replaceParamFile ) numPts = header [ 'res_numpts' ] # Initialize GSSHAPY Reservoir object reservoir = Reservoir ( initWSE = initWSE , minWSE = minWSE , maxWSE = maxWSE ) # Initialize GSSHAPY StreamLink object link = StreamLink ( linkNumber = int ( header [ 'link' ] ) , type = linkResult [ 'type' ] , numElements = numPts ) # Associate StreamLink with ChannelInputFile link . channelInputFile = self # Associate Reservoir with StreamLink reservoir . streamLink = link # Create ReservoirPoint objects for p in linkResult [ 'points' ] : # Initialize GSSHAPY ReservoirPoint object resPoint = ReservoirPoint ( i = p [ 'i' ] , j = p [ 'j' ] ) # Associate ReservoirPoint with Reservoir resPoint . reservoir = reservoir return link
| 4,812
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L666-L713
|
[
"def",
"get_edit_url",
"(",
"self",
",",
"md_id",
":",
"str",
"=",
"None",
",",
"md_type",
":",
"str",
"=",
"None",
",",
"owner_id",
":",
"str",
"=",
"None",
",",
"tab",
":",
"str",
"=",
"\"identification\"",
",",
")",
":",
"# checks inputs",
"if",
"not",
"checker",
".",
"check_is_uuid",
"(",
"md_id",
")",
"or",
"not",
"checker",
".",
"check_is_uuid",
"(",
"owner_id",
")",
":",
"raise",
"ValueError",
"(",
"\"One of md_id or owner_id is not a correct UUID.\"",
")",
"else",
":",
"pass",
"if",
"checker",
".",
"check_edit_tab",
"(",
"tab",
",",
"md_type",
"=",
"md_type",
")",
":",
"pass",
"# construct URL",
"return",
"(",
"\"{}\"",
"\"/groups/{}\"",
"\"/resources/{}\"",
"\"/{}\"",
".",
"format",
"(",
"self",
".",
"APP_URLS",
".",
"get",
"(",
"self",
".",
"platform",
")",
",",
"owner_id",
",",
"md_id",
",",
"tab",
")",
")"
] |
Create PostGIS geometric objects
|
def _createGeometry ( self , session , spatialReferenceID ) : # Flush the current session session . flush ( ) # Create geometry for each fluvial link for link in self . getFluvialLinks ( ) : # Retrieve the nodes for each link nodes = link . nodes nodeCoordinates = [ ] # Create geometry for each node for node in nodes : # Assemble coordinates in well known text format coordinates = '{0} {1} {2}' . format ( node . x , node . y , node . elevation ) nodeCoordinates . append ( coordinates ) # Create well known text string for point with z coordinate wktPoint = 'POINT Z ({0})' . format ( coordinates ) # Write SQL statement statement = self . _getUpdateGeometrySqlString ( geometryID = node . id , tableName = node . tableName , spatialReferenceID = spatialReferenceID , wktString = wktPoint ) session . execute ( statement ) # Assemble line string in well known text format wktLineString = 'LINESTRING Z ({0})' . format ( ', ' . join ( nodeCoordinates ) ) # Write SQL statement statement = self . _getUpdateGeometrySqlString ( geometryID = link . id , tableName = link . tableName , spatialReferenceID = spatialReferenceID , wktString = wktLineString ) session . execute ( statement )
| 4,813
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L715-L755
|
[
"def",
"_ar_matrix",
"(",
"self",
")",
":",
"X",
"=",
"np",
".",
"ones",
"(",
"self",
".",
"data_length",
"-",
"self",
".",
"max_lag",
")",
"if",
"self",
".",
"ar",
"!=",
"0",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"self",
".",
"ar",
")",
":",
"X",
"=",
"np",
".",
"vstack",
"(",
"(",
"X",
",",
"self",
".",
"data",
"[",
"(",
"self",
".",
"max_lag",
"-",
"i",
"-",
"1",
")",
":",
"-",
"i",
"-",
"1",
"]",
")",
")",
"return",
"X"
] |
Write Connectivity Lines to File Method
|
def _writeConnectivity ( self , links , fileObject ) : for link in links : linkNum = link . linkNumber downLink = link . downstreamLinkID numUpLinks = link . numUpstreamLinks upLinks = '' for upLink in link . upstreamLinks : upLinks = '{}{:>5}' . format ( upLinks , str ( upLink . upstreamLinkID ) ) line = 'CONNECT{:>5}{:>5}{:>5}{}\n' . format ( linkNum , downLink , numUpLinks , upLinks ) fileObject . write ( line ) fileObject . write ( '\n' )
| 4,814
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L757-L771
|
[
"def",
"extract_secrets_from_android_rooted",
"(",
"adb_path",
"=",
"'adb'",
")",
":",
"data",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"adb_path",
",",
"'shell'",
",",
"'su'",
",",
"'-c'",
",",
"\"'cat /data/data/com.valvesoftware.android.steam.community/files/Steamguard*'\"",
"]",
")",
"# When adb daemon is not running, `adb` will print a couple of lines before our data.",
"# The data doesn't have new lines and its always on the last line.",
"data",
"=",
"data",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"split",
"(",
"'\\n'",
")",
"[",
"-",
"1",
"]",
"if",
"data",
"[",
"0",
"]",
"!=",
"\"{\"",
":",
"raise",
"RuntimeError",
"(",
"\"Got invalid data: %s\"",
"%",
"repr",
"(",
"data",
")",
")",
"return",
"{",
"int",
"(",
"x",
"[",
"'steamid'",
"]",
")",
":",
"x",
"for",
"x",
"in",
"map",
"(",
"json",
".",
"loads",
",",
"data",
".",
"replace",
"(",
"\"}{\"",
",",
"'}|||||{'",
")",
".",
"split",
"(",
"'|||||'",
")",
")",
"}"
] |
Write Link Lines to File Method
|
def _writeLinks ( self , links , fileObject , replaceParamFile ) : for link in links : linkType = link . type fileObject . write ( 'LINK %s\n' % link . linkNumber ) # Cases if 'TRAP' in linkType or 'TRAPEZOID' in linkType or 'BREAKPOINT' in linkType : self . _writeCrossSectionLink ( link , fileObject , replaceParamFile ) elif linkType == 'STRUCTURE' : self . _writeStructureLink ( link , fileObject , replaceParamFile ) elif linkType in ( 'RESERVOIR' , 'LAKE' ) : self . _writeReservoirLink ( link , fileObject , replaceParamFile ) else : log . error ( 'OOPS: CIF LINE 417' ) # THIS SHOULDN'T HAPPEN fileObject . write ( '\n' )
| 4,815
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L773-L794
|
[
"def",
"devices",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw",
"# from Android system/core/adb/transport.c statename()",
"re_device_info",
"=",
"re",
".",
"compile",
"(",
"r'([^\\s]+)\\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)'",
")",
"devices",
"=",
"[",
"]",
"lines",
"=",
"self",
".",
"command_output",
"(",
"[",
"\"devices\"",
",",
"\"-l\"",
"]",
",",
"timeout",
"=",
"timeout",
")",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"line",
"==",
"'List of devices attached '",
":",
"continue",
"match",
"=",
"re_device_info",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"device",
"=",
"{",
"'device_serial'",
":",
"match",
".",
"group",
"(",
"1",
")",
",",
"'state'",
":",
"match",
".",
"group",
"(",
"2",
")",
"}",
"remainder",
"=",
"line",
"[",
"match",
".",
"end",
"(",
"2",
")",
":",
"]",
".",
"strip",
"(",
")",
"if",
"remainder",
":",
"try",
":",
"device",
".",
"update",
"(",
"dict",
"(",
"[",
"j",
".",
"split",
"(",
"':'",
")",
"for",
"j",
"in",
"remainder",
".",
"split",
"(",
"' '",
")",
"]",
")",
")",
"except",
"ValueError",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"'devices: Unable to parse '",
"'remainder for device %s'",
"%",
"line",
")",
"devices",
".",
"append",
"(",
"device",
")",
"return",
"devices"
] |
Write Cross Section Link to File Method
|
def _writeCrossSectionLink ( self , link , fileObject , replaceParamFile ) : linkType = link . type # Write cross section link header dx = vwp ( link . dx , replaceParamFile ) try : fileObject . write ( 'DX %.6f\n' % dx ) except : fileObject . write ( 'DX %s\n' % dx ) fileObject . write ( '%s\n' % linkType ) fileObject . write ( 'NODES %s\n' % link . numElements ) for node in link . nodes : # Write node information fileObject . write ( 'NODE %s\n' % node . nodeNumber ) fileObject . write ( 'X_Y %.6f %.6f\n' % ( node . x , node . y ) ) fileObject . write ( 'ELEV %.6f\n' % node . elevation ) if node . nodeNumber == 1 : # Write cross section information after first node fileObject . write ( 'XSEC\n' ) # Cases if 'TRAPEZOID' in linkType or 'TRAP' in linkType : # Retrieve cross section xSec = link . trapezoidalCS # Write cross section properties mannings_n = vwp ( xSec . mannings_n , replaceParamFile ) bottomWidth = vwp ( xSec . bottomWidth , replaceParamFile ) bankfullDepth = vwp ( xSec . bankfullDepth , replaceParamFile ) sideSlope = vwp ( xSec . sideSlope , replaceParamFile ) try : fileObject . write ( 'MANNINGS_N %.6f\n' % mannings_n ) except : fileObject . write ( 'MANNINGS_N %s\n' % mannings_n ) try : fileObject . write ( 'BOTTOM_WIDTH %.6f\n' % bottomWidth ) except : fileObject . write ( 'BOTTOM_WIDTH %s\n' % bottomWidth ) try : fileObject . write ( 'BANKFULL_DEPTH %.6f\n' % bankfullDepth ) except : fileObject . write ( 'BANKFULL_DEPTH %s\n' % bankfullDepth ) try : fileObject . write ( 'SIDE_SLOPE %.6f\n' % sideSlope ) except : fileObject . write ( 'SIDE_SLOPE %s\n' % sideSlope ) # Write optional cross section properties self . _writeOptionalXsecCards ( fileObject = fileObject , xSec = xSec , replaceParamFile = replaceParamFile ) elif 'BREAKPOINT' in linkType : # Retrieve cross section xSec = link . breakpointCS # Write cross section properties mannings_n = vwp ( xSec . mannings_n , replaceParamFile ) try : fileObject . 
write ( 'MANNINGS_N %.6f\n' % mannings_n ) except : fileObject . write ( 'MANNINGS_N %s\n' % mannings_n ) fileObject . write ( 'NPAIRS %s\n' % xSec . numPairs ) fileObject . write ( 'NUM_INTERP %s\n' % vwp ( xSec . numInterp , replaceParamFile ) ) # Write optional cross section properties self . _writeOptionalXsecCards ( fileObject = fileObject , xSec = xSec , replaceParamFile = replaceParamFile ) # Write breakpoint lines for bp in xSec . breakpoints : fileObject . write ( 'X1 %.6f %.6f\n' % ( bp . x , bp . y ) ) else : log . error ( 'OOPS: MISSED A CROSS SECTION TYPE. CIF LINE 580. {0}' . format ( linkType ) )
| 4,816
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L1003-L1085
|
[
"def",
"get_changed_devices",
"(",
"self",
",",
"timestamp",
")",
":",
"if",
"timestamp",
"is",
"None",
":",
"payload",
"=",
"{",
"}",
"else",
":",
"payload",
"=",
"{",
"'timeout'",
":",
"SUBSCRIPTION_WAIT",
",",
"'minimumdelay'",
":",
"SUBSCRIPTION_MIN_WAIT",
"}",
"payload",
".",
"update",
"(",
"timestamp",
")",
"# double the timeout here so requests doesn't timeout before vera",
"payload",
".",
"update",
"(",
"{",
"'id'",
":",
"'lu_sdata'",
",",
"}",
")",
"logger",
".",
"debug",
"(",
"\"get_changed_devices() requesting payload %s\"",
",",
"str",
"(",
"payload",
")",
")",
"r",
"=",
"self",
".",
"data_request",
"(",
"payload",
",",
"TIMEOUT",
"*",
"2",
")",
"r",
".",
"raise_for_status",
"(",
")",
"# If the Vera disconnects before writing a full response (as lu_sdata",
"# will do when interrupted by a Luup reload), the requests module will",
"# happily return 200 with an empty string. So, test for empty response,",
"# so we don't rely on the JSON parser to throw an exception.",
"if",
"r",
".",
"text",
"==",
"\"\"",
":",
"raise",
"PyveraError",
"(",
"\"Empty response from Vera\"",
")",
"# Catch a wide swath of what the JSON parser might throw, within",
"# reason. Unfortunately, some parsers don't specifically return",
"# json.decode.JSONDecodeError, but so far most seem to derive what",
"# they do throw from ValueError, so that's helpful.",
"try",
":",
"result",
"=",
"r",
".",
"json",
"(",
")",
"except",
"ValueError",
"as",
"ex",
":",
"raise",
"PyveraError",
"(",
"\"JSON decode error: \"",
"+",
"str",
"(",
"ex",
")",
")",
"if",
"not",
"(",
"type",
"(",
"result",
")",
"is",
"dict",
"and",
"'loadtime'",
"in",
"result",
"and",
"'dataversion'",
"in",
"result",
")",
":",
"raise",
"PyveraError",
"(",
"\"Unexpected/garbled response from Vera\"",
")",
"# At this point, all good. Update timestamp and return change data.",
"device_data",
"=",
"result",
".",
"get",
"(",
"'devices'",
")",
"timestamp",
"=",
"{",
"'loadtime'",
":",
"result",
".",
"get",
"(",
"'loadtime'",
")",
",",
"'dataversion'",
":",
"result",
".",
"get",
"(",
"'dataversion'",
")",
"}",
"return",
"[",
"device_data",
",",
"timestamp",
"]"
] |
Write Optional Cross Section Cards to File Method
|
def _writeOptionalXsecCards ( self , fileObject , xSec , replaceParamFile ) : if xSec . erode : fileObject . write ( 'ERODE\n' ) if xSec . maxErosion != None : fileObject . write ( 'MAX_EROSION %.6f\n' % xSec . maxErosion ) if xSec . subsurface : fileObject . write ( 'SUBSURFACE\n' ) if xSec . mRiver != None : mRiver = vwp ( xSec . mRiver , replaceParamFile ) try : fileObject . write ( 'M_RIVER %.6f\n' % mRiver ) except : fileObject . write ( 'M_RIVER %s\n' % mRiver ) if xSec . kRiver != None : kRiver = vwp ( xSec . kRiver , replaceParamFile ) try : fileObject . write ( 'K_RIVER %.6f\n' % kRiver ) except : fileObject . write ( 'K_RIVER %s\n' % kRiver )
| 4,817
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L1087-L1112
|
[
"def",
"define_all_protein_rings",
"(",
"self",
")",
":",
"self",
".",
"protein_rings",
"=",
"{",
"}",
"i",
"=",
"0",
"for",
"residue",
"in",
"self",
".",
"topology_data",
".",
"dict_of_plotted_res",
":",
"for",
"ring",
"in",
"self",
".",
"rings",
":",
"if",
"ring",
"[",
"0",
"]",
"==",
"residue",
"[",
"0",
"]",
":",
"atom_names",
"=",
"\"\"",
"for",
"atom",
"in",
"self",
".",
"rings",
"[",
"ring",
"]",
":",
"atom_names",
"=",
"atom_names",
"+",
"\" \"",
"+",
"atom",
"self",
".",
"protein_rings",
"[",
"i",
"]",
"=",
"self",
".",
"topology_data",
".",
"universe",
".",
"select_atoms",
"(",
"\"resname \"",
"+",
"residue",
"[",
"0",
"]",
"+",
"\" and resid \"",
"+",
"residue",
"[",
"1",
"]",
"+",
"\" and segid \"",
"+",
"residue",
"[",
"2",
"]",
"+",
"\" and name \"",
"+",
"atom_names",
")",
"i",
"+=",
"1"
] |
Replaces to_file with from_file
|
def replace_file ( from_file , to_file ) : try : os . remove ( to_file ) except OSError : pass copy ( from_file , to_file )
| 4,818
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L29-L37
|
[
"def",
"setGroups",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"requests",
"=",
"0",
"groups",
"=",
"[",
"]",
"try",
":",
"for",
"gk",
"in",
"self",
"[",
"'groupKeys'",
"]",
":",
"try",
":",
"g",
"=",
"self",
".",
"mambugroupclass",
"(",
"entid",
"=",
"gk",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"AttributeError",
"as",
"ae",
":",
"from",
".",
"mambugroup",
"import",
"MambuGroup",
"self",
".",
"mambugroupclass",
"=",
"MambuGroup",
"g",
"=",
"self",
".",
"mambugroupclass",
"(",
"entid",
"=",
"gk",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"requests",
"+=",
"1",
"groups",
".",
"append",
"(",
"g",
")",
"except",
"KeyError",
":",
"pass",
"self",
"[",
"'groups'",
"]",
"=",
"groups",
"return",
"requests"
] |
Determines whether to prepare gage data from LSM
|
def _prepare_lsm_gag ( self ) : lsm_required_vars = ( self . lsm_precip_data_var , self . lsm_precip_type ) return self . lsm_input_valid and ( None not in lsm_required_vars )
| 4,819
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L338-L345
|
[
"async",
"def",
"open_search",
"(",
"type_",
":",
"str",
",",
"query",
":",
"dict",
",",
"options",
":",
"dict",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"if",
"not",
"hasattr",
"(",
"Wallet",
".",
"open_search",
",",
"\"cb\"",
")",
":",
"logger",
".",
"debug",
"(",
"\"vcx_wallet_open_search: Creating callback\"",
")",
"Wallet",
".",
"open_search",
".",
"cb",
"=",
"create_cb",
"(",
"CFUNCTYPE",
"(",
"None",
",",
"c_uint32",
",",
"c_uint32",
",",
"c_uint32",
")",
")",
"c_type_",
"=",
"c_char_p",
"(",
"type_",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"c_query",
"=",
"c_char_p",
"(",
"json",
".",
"dumps",
"(",
"query",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"c_options",
"=",
"c_char_p",
"(",
"json",
".",
"dumps",
"(",
"options",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"if",
"options",
"else",
"None",
"data",
"=",
"await",
"do_call",
"(",
"'vcx_wallet_open_search'",
",",
"c_type_",
",",
"c_query",
",",
"c_options",
",",
"Wallet",
".",
"open_search",
".",
"cb",
")",
"logger",
".",
"debug",
"(",
"\"vcx_wallet_open_search completed\"",
")",
"return",
"data"
] |
Moves card to new gssha working directory
|
def _update_card_file_location ( self , card_name , new_directory ) : with tmp_chdir ( self . gssha_directory ) : file_card = self . project_manager . getCard ( card_name ) if file_card : if file_card . value : original_location = file_card . value . strip ( "'" ) . strip ( '"' ) new_location = os . path . join ( new_directory , os . path . basename ( original_location ) ) file_card . value = '"{0}"' . format ( os . path . basename ( original_location ) ) try : move ( original_location , new_location ) except OSError as ex : log . warning ( ex ) pass
| 4,820
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L366-L382
|
[
"def",
"list_tables",
"(",
"self",
")",
":",
"# Server does not do pagination on listings of this resource.",
"# Return an iterator anyway for similarity with other API methods",
"path",
"=",
"'/archive/{}/tables'",
".",
"format",
"(",
"self",
".",
"_instance",
")",
"response",
"=",
"self",
".",
"_client",
".",
"get_proto",
"(",
"path",
"=",
"path",
")",
"message",
"=",
"rest_pb2",
".",
"ListTablesResponse",
"(",
")",
"message",
".",
"ParseFromString",
"(",
"response",
".",
"content",
")",
"tables",
"=",
"getattr",
"(",
"message",
",",
"'table'",
")",
"return",
"iter",
"(",
"[",
"Table",
"(",
"table",
")",
"for",
"table",
"in",
"tables",
"]",
")"
] |
Downloads Streamflow Prediction Tool forecast data
|
def download_spt_forecast ( self , extract_directory ) : needed_vars = ( self . spt_watershed_name , self . spt_subbasin_name , self . spt_forecast_date_string , self . ckan_engine_url , self . ckan_api_key , self . ckan_owner_organization ) if None not in needed_vars : er_manager = ECMWFRAPIDDatasetManager ( self . ckan_engine_url , self . ckan_api_key , self . ckan_owner_organization ) # TODO: Modify to only download one of the forecasts in the ensemble er_manager . download_prediction_dataset ( watershed = self . spt_watershed_name , subbasin = self . spt_subbasin_name , date_string = self . spt_forecast_date_string , # '20160711.1200' extract_directory = extract_directory ) return glob ( os . path . join ( extract_directory , self . spt_forecast_date_string , "Qout*52.nc" ) ) [ 0 ] elif needed_vars . count ( None ) == len ( needed_vars ) : log . info ( "Skipping streamflow forecast download ..." ) return None else : raise ValueError ( "To download the forecasts, you need to set: \n" "spt_watershed_name, spt_subbasin_name, spt_forecast_date_string \n" "ckan_engine_url, ckan_api_key, and ckan_owner_organization." )
| 4,821
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L384-L414
|
[
"def",
"matched",
"(",
"self",
",",
"other",
")",
":",
"mods",
"=",
"[",
"\"allocatable\"",
",",
"\"pointer\"",
"]",
"return",
"(",
"self",
".",
"kind",
".",
"lower",
"(",
")",
"==",
"other",
".",
"kind",
".",
"lower",
"(",
")",
"and",
"self",
".",
"dtype",
".",
"lower",
"(",
")",
"==",
"other",
".",
"dtype",
".",
"lower",
"(",
")",
"and",
"self",
".",
"D",
"==",
"other",
".",
"D",
"and",
"all",
"(",
"[",
"m",
"in",
"other",
".",
"modifiers",
"for",
"m",
"in",
"self",
".",
"modifiers",
"if",
"m",
"in",
"mods",
"]",
")",
")"
] |
Prepare HMET data for simulation
|
def prepare_hmet ( self ) : if self . _prepare_lsm_hmet : netcdf_file_path = None hmet_ascii_output_folder = None if self . output_netcdf : netcdf_file_path = '{0}_hmet.nc' . format ( self . project_manager . name ) if self . hotstart_minimal_mode : netcdf_file_path = '{0}_hmet_hotstart.nc' . format ( self . project_manager . name ) else : hmet_ascii_output_folder = 'hmet_data_{0}to{1}' if self . hotstart_minimal_mode : hmet_ascii_output_folder += "_hotstart" self . event_manager . prepare_hmet_lsm ( self . lsm_data_var_map_array , hmet_ascii_output_folder , netcdf_file_path ) self . simulation_modified_input_cards += [ "HMET_NETCDF" , "HMET_ASCII" ] else : log . info ( "HMET preparation skipped due to missing parameters ..." )
| 4,822
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L416-L438
|
[
"def",
"write",
"(",
"self",
",",
"originalPrefix",
",",
"newPrefix",
"=",
"None",
")",
":",
"# Determine number of spaces between card and value for nice alignment",
"numSpaces",
"=",
"max",
"(",
"2",
",",
"25",
"-",
"len",
"(",
"self",
".",
"name",
")",
")",
"# Handle special case of booleans",
"if",
"self",
".",
"value",
"is",
"None",
":",
"line",
"=",
"'%s\\n'",
"%",
"self",
".",
"name",
"else",
":",
"if",
"self",
".",
"name",
"==",
"'WMS'",
":",
"line",
"=",
"'%s %s\\n'",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"value",
")",
"elif",
"newPrefix",
"is",
"None",
":",
"line",
"=",
"'%s%s%s\\n'",
"%",
"(",
"self",
".",
"name",
",",
"' '",
"*",
"numSpaces",
",",
"self",
".",
"value",
")",
"elif",
"originalPrefix",
"in",
"self",
".",
"value",
":",
"line",
"=",
"'%s%s%s\\n'",
"%",
"(",
"self",
".",
"name",
",",
"' '",
"*",
"numSpaces",
",",
"self",
".",
"value",
".",
"replace",
"(",
"originalPrefix",
",",
"newPrefix",
")",
")",
"else",
":",
"line",
"=",
"'%s%s%s\\n'",
"%",
"(",
"self",
".",
"name",
",",
"' '",
"*",
"numSpaces",
",",
"self",
".",
"value",
")",
"return",
"line"
] |
Prepare gage data for simulation
|
def prepare_gag ( self ) : if self . _prepare_lsm_gag : self . event_manager . prepare_gag_lsm ( self . lsm_precip_data_var , self . lsm_precip_type , self . precip_interpolation_type ) self . simulation_modified_input_cards . append ( "PRECIP_FILE" ) else : log . info ( "Gage file preparation skipped due to missing parameters ..." )
| 4,823
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L440-L450
|
[
"def",
"data2md",
"(",
"table",
")",
":",
"table",
"=",
"copy",
".",
"deepcopy",
"(",
"table",
")",
"table",
"=",
"ensure_table_strings",
"(",
"table",
")",
"table",
"=",
"multis_2_mono",
"(",
"table",
")",
"table",
"=",
"add_cushions",
"(",
"table",
")",
"widths",
"=",
"[",
"]",
"for",
"column",
"in",
"range",
"(",
"len",
"(",
"table",
"[",
"0",
"]",
")",
")",
":",
"widths",
".",
"append",
"(",
"get_column_width",
"(",
"column",
",",
"table",
")",
")",
"output",
"=",
"'|'",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"table",
"[",
"0",
"]",
")",
")",
":",
"output",
"=",
"''",
".",
"join",
"(",
"[",
"output",
",",
"center_line",
"(",
"widths",
"[",
"i",
"]",
",",
"table",
"[",
"0",
"]",
"[",
"i",
"]",
")",
",",
"'|'",
"]",
")",
"output",
"=",
"output",
"+",
"'\\n|'",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"table",
"[",
"0",
"]",
")",
")",
":",
"output",
"=",
"''",
".",
"join",
"(",
"[",
"output",
",",
"center_line",
"(",
"widths",
"[",
"i",
"]",
",",
"\"-\"",
"*",
"widths",
"[",
"i",
"]",
")",
",",
"'|'",
"]",
")",
"output",
"=",
"output",
"+",
"'\\n|'",
"for",
"row",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"table",
")",
")",
":",
"for",
"column",
"in",
"range",
"(",
"len",
"(",
"table",
"[",
"row",
"]",
")",
")",
":",
"output",
"=",
"''",
".",
"join",
"(",
"[",
"output",
",",
"center_line",
"(",
"widths",
"[",
"column",
"]",
",",
"table",
"[",
"row",
"]",
"[",
"column",
"]",
")",
",",
"'|'",
"]",
")",
"output",
"=",
"output",
"+",
"'\\n|'",
"split",
"=",
"output",
".",
"split",
"(",
"'\\n'",
")",
"split",
".",
"pop",
"(",
")",
"table_string",
"=",
"'\\n'",
".",
"join",
"(",
"split",
")",
"return",
"table_string"
] |
Prepare RAPID data for simulation
|
def rapid_to_gssha ( self ) : # if no streamflow given, download forecast if self . path_to_rapid_qout is None and self . connection_list_file : rapid_qout_directory = os . path . join ( self . gssha_directory , 'rapid_streamflow' ) try : os . mkdir ( rapid_qout_directory ) except OSError : pass self . path_to_rapid_qout = self . download_spt_forecast ( rapid_qout_directory ) # prepare input for GSSHA if user wants if self . path_to_rapid_qout is not None and self . connection_list_file : self . event_manager . prepare_rapid_streamflow ( self . path_to_rapid_qout , self . connection_list_file ) self . simulation_modified_input_cards . append ( 'CHAN_POINT_INPUT' )
| 4,824
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L452-L469
|
[
"def",
"delete",
"(",
"self",
",",
"file_id",
")",
":",
"res",
"=",
"self",
".",
"_files",
".",
"delete_one",
"(",
"{",
"\"_id\"",
":",
"file_id",
"}",
")",
"self",
".",
"_chunks",
".",
"delete_many",
"(",
"{",
"\"files_id\"",
":",
"file_id",
"}",
")",
"if",
"not",
"res",
".",
"deleted_count",
":",
"raise",
"NoFile",
"(",
"\"no file could be deleted because none matched %s\"",
"%",
"file_id",
")"
] |
Prepare simulation hotstart info
|
def hotstart ( self ) : if self . write_hotstart : hotstart_time_str = self . event_manager . simulation_end . strftime ( "%Y%m%d_%H%M" ) try : os . mkdir ( 'hotstart' ) except OSError : pass ov_hotstart_path = os . path . join ( '..' , 'hotstart' , '{0}_ov_hotstart_{1}.ovh' . format ( self . project_manager . name , hotstart_time_str ) ) self . _update_card ( "WRITE_OV_HOTSTART" , ov_hotstart_path , True ) chan_hotstart_path = os . path . join ( '..' , 'hotstart' , '{0}_chan_hotstart_{1}' . format ( self . project_manager . name , hotstart_time_str ) ) self . _update_card ( "WRITE_CHAN_HOTSTART" , chan_hotstart_path , True ) sm_hotstart_path = os . path . join ( '..' , 'hotstart' , '{0}_sm_hotstart_{1}.smh' . format ( self . project_manager . name , hotstart_time_str ) ) self . _update_card ( "WRITE_SM_HOTSTART" , sm_hotstart_path , True ) else : self . _delete_card ( "WRITE_OV_HOTSTART" ) self . _delete_card ( "WRITE_CHAN_HOTSTART" ) self . _delete_card ( "WRITE_SM_HOTSTART" ) if self . read_hotstart : hotstart_time_str = self . event_manager . simulation_start . strftime ( "%Y%m%d_%H%M" ) # OVERLAND expected_ov_hotstart = os . path . join ( 'hotstart' , '{0}_ov_hotstart_{1}.ovh' . format ( self . project_manager . name , hotstart_time_str ) ) if os . path . exists ( expected_ov_hotstart ) : self . _update_card ( "READ_OV_HOTSTART" , os . path . join ( ".." , expected_ov_hotstart ) , True ) else : self . _delete_card ( "READ_OV_HOTSTART" ) log . warning ( "READ_OV_HOTSTART not included as " "{0} does not exist ..." . format ( expected_ov_hotstart ) ) # CHANNEL expected_chan_hotstart = os . path . join ( 'hotstart' , '{0}_chan_hotstart_{1}' . format ( self . project_manager . name , hotstart_time_str ) ) if os . path . exists ( "{0}.qht" . format ( expected_chan_hotstart ) ) and os . path . exists ( "{0}.dht" . format ( expected_chan_hotstart ) ) : self . _update_card ( "READ_CHAN_HOTSTART" , os . path . join ( ".." 
, expected_chan_hotstart ) , True ) else : self . _delete_card ( "READ_CHAN_HOTSTART" ) log . warning ( "READ_CHAN_HOTSTART not included as " "{0}.qht and/or {0}.dht does not exist ..." . format ( expected_chan_hotstart ) ) # INFILTRATION expected_sm_hotstart = os . path . join ( 'hotstart' , '{0}_sm_hotstart_{1}.smh' . format ( self . project_manager . name , hotstart_time_str ) ) if os . path . exists ( expected_sm_hotstart ) : self . _update_card ( "READ_SM_HOTSTART" , os . path . join ( ".." , expected_sm_hotstart ) , True ) else : self . _delete_card ( "READ_SM_HOTSTART" ) log . warning ( "READ_SM_HOTSTART not included as" " {0} does not exist ..." . format ( expected_sm_hotstart ) )
| 4,825
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L471-L533
|
[
"def",
"_format_response",
"(",
"rows",
",",
"fields",
",",
"unique_col_names",
")",
":",
"output",
"=",
"[",
"]",
"old_id",
"=",
"None",
"for",
"row",
"in",
"rows",
":",
"id_",
"=",
"{",
"k",
":",
"row",
"[",
"k",
"]",
"for",
"k",
"in",
"unique_col_names",
"}",
"formatted",
"=",
"{",
"k",
":",
"row",
"[",
"k",
"]",
"for",
"k",
"in",
"row",
"if",
"k",
"!=",
"'data'",
"}",
"if",
"id_",
"!=",
"old_id",
":",
"# new unique versioned row",
"data",
"=",
"row",
"[",
"'data'",
"]",
"formatted",
"[",
"'data'",
"]",
"=",
"{",
"k",
":",
"data",
".",
"get",
"(",
"k",
")",
"for",
"k",
"in",
"fields",
"}",
"output",
".",
"append",
"(",
"formatted",
")",
"else",
":",
"data",
"=",
"row",
"[",
"'data'",
"]",
"pruned_data",
"=",
"{",
"k",
":",
"data",
".",
"get",
"(",
"k",
")",
"for",
"k",
"in",
"fields",
"}",
"if",
"(",
"pruned_data",
"!=",
"output",
"[",
"-",
"1",
"]",
"[",
"'data'",
"]",
"or",
"row",
"[",
"'deleted'",
"]",
"!=",
"output",
"[",
"-",
"1",
"]",
"[",
"'deleted'",
"]",
")",
":",
"formatted",
"[",
"'data'",
"]",
"=",
"pruned_data",
"output",
".",
"append",
"(",
"formatted",
")",
"old_id",
"=",
"id_",
"return",
"output"
] |
Updates card & runs for RAPID to GSSHA & LSM to GSSHA
|
def run_forecast ( self ) : # ---------------------------------------------------------------------- # LSM to GSSHA # ---------------------------------------------------------------------- self . prepare_hmet ( ) self . prepare_gag ( ) # ---------------------------------------------------------------------- # RAPID to GSSHA # ---------------------------------------------------------------------- self . rapid_to_gssha ( ) # ---------------------------------------------------------------------- # HOTSTART # ---------------------------------------------------------------------- self . hotstart ( ) # ---------------------------------------------------------------------- # Run GSSHA # ---------------------------------------------------------------------- return self . run ( )
| 4,826
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L676-L700
|
[
"def",
"_update_offset_file",
"(",
"self",
")",
":",
"if",
"self",
".",
"on_update",
":",
"self",
".",
"on_update",
"(",
")",
"offset",
"=",
"self",
".",
"_filehandle",
"(",
")",
".",
"tell",
"(",
")",
"inode",
"=",
"stat",
"(",
"self",
".",
"filename",
")",
".",
"st_ino",
"fh",
"=",
"open",
"(",
"self",
".",
"_offset_file",
",",
"\"w\"",
")",
"fh",
".",
"write",
"(",
"\"%s\\n%s\\n\"",
"%",
"(",
"inode",
",",
"offset",
")",
")",
"fh",
".",
"close",
"(",
")",
"self",
".",
"_since_update",
"=",
"0"
] |
Create the cache key for the current page and tag type
|
def get_cache_key ( request , page , lang , site_id , title ) : from cms . cache import _get_cache_key from cms . templatetags . cms_tags import _get_page_by_untyped_arg from cms . models import Page if not isinstance ( page , Page ) : page = _get_page_by_untyped_arg ( page , request , site_id ) if not site_id : try : site_id = page . node . site_id except AttributeError : # CMS_3_4 site_id = page . site_id if not title : return _get_cache_key ( 'page_tags' , page , '' , site_id ) + '_type:tags_list' else : return _get_cache_key ( 'title_tags' , page , lang , site_id ) + '_type:tags_list'
| 4,827
|
https://github.com/nephila/djangocms-page-tags/blob/602c9d74456d689f46ddb8d67cd64d1a42747359/djangocms_page_tags/utils.py#L5-L23
|
[
"def",
"__normalize",
"(",
"self",
")",
":",
"# Don't normalize if we're already normalizing or intializing",
"if",
"self",
".",
"__normalizing",
"is",
"True",
"or",
"self",
".",
"__initialized",
"is",
"False",
":",
"return",
"self",
".",
"__normalizing",
"=",
"True",
"self",
".",
"__normalize_grades",
"(",
")",
"self",
".",
"__normalize_progress",
"(",
")",
"self",
".",
"__normalizing",
"=",
"False"
] |
Retrieves all the tags for a Page instance .
|
def get_page_tags ( page ) : from . models import PageTags try : return page . pagetags . tags . all ( ) except PageTags . DoesNotExist : return [ ]
| 4,828
|
https://github.com/nephila/djangocms-page-tags/blob/602c9d74456d689f46ddb8d67cd64d1a42747359/djangocms_page_tags/utils.py#L26-L39
|
[
"def",
"restore",
"(",
"archive",
",",
"oqdata",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"oqdata",
")",
":",
"sys",
".",
"exit",
"(",
"'%s exists already'",
"%",
"oqdata",
")",
"if",
"'://'",
"in",
"archive",
":",
"# get the zip archive from an URL",
"resp",
"=",
"requests",
".",
"get",
"(",
"archive",
")",
"_",
",",
"archive",
"=",
"archive",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"with",
"open",
"(",
"archive",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"resp",
".",
"content",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"archive",
")",
":",
"sys",
".",
"exit",
"(",
"'%s does not exist'",
"%",
"archive",
")",
"t0",
"=",
"time",
".",
"time",
"(",
")",
"oqdata",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"oqdata",
")",
"assert",
"archive",
".",
"endswith",
"(",
"'.zip'",
")",
",",
"archive",
"os",
".",
"mkdir",
"(",
"oqdata",
")",
"zipfile",
".",
"ZipFile",
"(",
"archive",
")",
".",
"extractall",
"(",
"oqdata",
")",
"dbpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"oqdata",
",",
"'db.sqlite3'",
")",
"db",
"=",
"Db",
"(",
"sqlite3",
".",
"connect",
",",
"dbpath",
",",
"isolation_level",
"=",
"None",
",",
"detect_types",
"=",
"sqlite3",
".",
"PARSE_DECLTYPES",
")",
"n",
"=",
"0",
"for",
"fname",
"in",
"os",
".",
"listdir",
"(",
"oqdata",
")",
":",
"mo",
"=",
"re",
".",
"match",
"(",
"'calc_(\\d+)\\.hdf5'",
",",
"fname",
")",
"if",
"mo",
":",
"job_id",
"=",
"int",
"(",
"mo",
".",
"group",
"(",
"1",
")",
")",
"fullname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"oqdata",
",",
"fname",
")",
"[",
":",
"-",
"5",
"]",
"# strip .hdf5",
"db",
"(",
"\"UPDATE job SET user_name=?x, ds_calc_dir=?x WHERE id=?x\"",
",",
"getpass",
".",
"getuser",
"(",
")",
",",
"fullname",
",",
"job_id",
")",
"safeprint",
"(",
"'Restoring '",
"+",
"fname",
")",
"n",
"+=",
"1",
"dt",
"=",
"time",
".",
"time",
"(",
")",
"-",
"t0",
"safeprint",
"(",
"'Extracted %d calculations into %s in %d seconds'",
"%",
"(",
"n",
",",
"oqdata",
",",
"dt",
")",
")"
] |
Check if a Page object is associated with the given tag .
|
def page_has_tag ( page , tag ) : from . models import PageTags if hasattr ( tag , 'slug' ) : slug = tag . slug else : slug = tag try : return page . pagetags . tags . filter ( slug = slug ) . exists ( ) except PageTags . DoesNotExist : return False
| 4,829
|
https://github.com/nephila/djangocms-page-tags/blob/602c9d74456d689f46ddb8d67cd64d1a42747359/djangocms_page_tags/utils.py#L42-L61
|
[
"def",
"__normalize",
"(",
"self",
")",
":",
"# Don't normalize if we're already normalizing or intializing",
"if",
"self",
".",
"__normalizing",
"is",
"True",
"or",
"self",
".",
"__initialized",
"is",
"False",
":",
"return",
"self",
".",
"__normalizing",
"=",
"True",
"self",
".",
"__normalize_grades",
"(",
")",
"self",
".",
"__normalize_progress",
"(",
")",
"self",
".",
"__normalizing",
"=",
"False"
] |
Check if a Title object is associated with the given tag . This function does not use fallbacks to retrieve title object .
|
def title_has_tag ( page , lang , tag ) : from . models import TitleTags if hasattr ( tag , 'slug' ) : slug = tag . slug else : slug = tag try : return page . get_title_obj ( language = lang , fallback = False ) . titletags . tags . filter ( slug = slug ) . exists ( ) except TitleTags . DoesNotExist : return False
| 4,830
|
https://github.com/nephila/djangocms-page-tags/blob/602c9d74456d689f46ddb8d67cd64d1a42747359/djangocms_page_tags/utils.py#L82-L105
|
[
"def",
"unreduce_array",
"(",
"array",
",",
"shape",
",",
"axis",
",",
"keepdims",
")",
":",
"# NumPy uses a special default value for keepdims, which is equivalent to",
"# False.",
"if",
"axis",
"is",
"not",
"None",
"and",
"(",
"not",
"keepdims",
"or",
"keepdims",
"is",
"numpy",
".",
"_NoValue",
")",
":",
"# pylint: disable=protected-access",
"if",
"isinstance",
"(",
"axis",
",",
"int",
")",
":",
"axis",
"=",
"axis",
",",
"for",
"ax",
"in",
"sorted",
"(",
"axis",
")",
":",
"array",
"=",
"numpy",
".",
"expand_dims",
"(",
"array",
",",
"ax",
")",
"return",
"numpy",
".",
"broadcast_to",
"(",
"array",
",",
"shape",
")"
] |
Get the list of tags attached to a Page or a Title from a request from usual page_lookup parameters .
|
def get_page_tags_from_request(request, page_lookup, lang, site, title=False):
    """
    Return the tags attached to a Page (or to its Title when ``title``
    is True), resolved from the usual ``page_lookup`` parameters.

    Results are cached via django's cache framework for the duration of
    the CMS 'content' cache setting; a miss is normalized to an empty
    tuple (which is itself cached on the next lookup attempt).
    """
    from cms.templatetags.cms_tags import _get_page_by_untyped_arg
    from cms.utils import get_language_from_request, get_site_id
    from django.core.cache import cache
    try:
        from cms.utils import get_cms_setting
    except ImportError:
        # django CMS moved this helper between versions
        from cms.utils.conf import get_cms_setting
    site_id = get_site_id(site)
    if lang is None:
        lang = get_language_from_request(request)
    cache_key = get_cache_key(request, page_lookup, lang, site, title)
    tags_list = cache.get(cache_key)
    if not tags_list:
        page = _get_page_by_untyped_arg(page_lookup, request, site_id)
        if page:
            if title:
                tags_list = get_title_tags(page, lang)
            else:
                tags_list = get_page_tags(page)
            cache.set(cache_key, tags_list,
                      timeout=get_cms_setting('CACHE_DURATIONS')['content'])
    if not tags_list:
        # normalize a cache/page miss to an empty tuple
        tags_list = ()
    return tags_list
| 4,831
|
https://github.com/nephila/djangocms-page-tags/blob/602c9d74456d689f46ddb8d67cd64d1a42747359/djangocms_page_tags/utils.py#L108-L146
|
[
"def",
"unique",
"(",
"self",
")",
":",
"from",
".",
"sframe",
"import",
"SFrame",
"as",
"_SFrame",
"tmp_sf",
"=",
"_SFrame",
"(",
")",
"tmp_sf",
".",
"add_column",
"(",
"self",
",",
"'X1'",
",",
"inplace",
"=",
"True",
")",
"res",
"=",
"tmp_sf",
".",
"groupby",
"(",
"'X1'",
",",
"{",
"}",
")",
"return",
"SArray",
"(",
"_proxy",
"=",
"res",
"[",
"'X1'",
"]",
".",
"__proxy__",
")"
] |
Get the list of tags attached to a Title from a request from usual page_lookup parameters .
|
def get_title_tags_from_request(request, page_lookup, lang, site):
    """
    Get the list of tags attached to a Title, resolved from the usual
    ``page_lookup`` parameters.

    Thin wrapper around :func:`get_page_tags_from_request` with
    ``title=True``.
    """
    return get_page_tags_from_request(request, page_lookup, lang, site,
                                      title=True)
| 4,832
|
https://github.com/nephila/djangocms-page-tags/blob/602c9d74456d689f46ddb8d67cd64d1a42747359/djangocms_page_tags/utils.py#L149-L162
|
[
"def",
"_update_flags",
"(",
"compiler_flags",
",",
"remove_flags",
"=",
"(",
")",
")",
":",
"for",
"flag",
"in",
"GFORTRAN_SHARED_FLAGS",
":",
"if",
"flag",
"not",
"in",
"compiler_flags",
":",
"compiler_flags",
".",
"append",
"(",
"flag",
")",
"if",
"DEBUG_ENV",
"in",
"os",
".",
"environ",
":",
"to_add",
"=",
"GFORTRAN_DEBUG_FLAGS",
"to_remove",
"=",
"GFORTRAN_OPTIMIZE_FLAGS",
"else",
":",
"to_add",
"=",
"GFORTRAN_OPTIMIZE_FLAGS",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"WHEEL_ENV",
")",
"is",
"None",
":",
"to_add",
"+=",
"(",
"GFORTRAN_NATIVE_FLAG",
",",
")",
"to_remove",
"=",
"GFORTRAN_DEBUG_FLAGS",
"for",
"flag",
"in",
"to_add",
":",
"if",
"flag",
"not",
"in",
"compiler_flags",
":",
"compiler_flags",
".",
"append",
"(",
"flag",
")",
"return",
"[",
"flag",
"for",
"flag",
"in",
"compiler_flags",
"if",
"not",
"(",
"flag",
"in",
"to_remove",
"or",
"flag",
"in",
"remove_flags",
")",
"]"
] |
Generate a watershed mask raster from a watershed boundary shapefile
|
def generateFromWatershedShapefile(self, shapefile_path, cell_size,
                                   out_raster_path=None, load_raster_to_db=True):
    """
    Generate a watershed mask raster from a watershed boundary shapefile.

    Rasterizes the (validated) boundary polygon onto a grid, writes it
    out as a GRASS ASCII raster inside the project directory, and
    updates the project file cards accordingly.

    :param shapefile_path: path to the watershed boundary shapefile.
    :param cell_size: cell size for the generated grid (both axes).
    :param out_raster_path: output raster path; defaults to
        ``<project name>.<extension>``.
    :param load_raster_to_db: when True, also load the raster text into
        this object (and hence the database).
    """
    if not self.projectFile:
        raise ValueError("Must be connected to project file ...")
    # match elevation grid if exists
    match_grid = None
    try:
        match_grid = self.projectFile.getGrid(use_mask=False)
    except ValueError:
        pass
    # match projection if exists
    wkt_projection = None
    try:
        wkt_projection = self.projectFile.getWkt()
    except ValueError:
        pass
    if out_raster_path is None:
        out_raster_path = '{0}.{1}'.format(self.projectFile.name, self.extension)
    # make sure paths are absolute as the working directory changes
    shapefile_path = os.path.abspath(shapefile_path)
    # make sure the polygon is valid
    check_watershed_boundary_geometry(shapefile_path)
    gr = rasterize_shapefile(shapefile_path,
                             x_cell_size=cell_size,
                             y_cell_size=cell_size,
                             match_grid=match_grid,
                             raster_nodata=0,
                             as_gdal_grid=True,
                             raster_wkt_proj=wkt_projection,
                             convert_to_utm=True)
    # NOTE(review): the remaining steps use paths relative to the project
    # directory, so they appear to belong inside this context manager —
    # confirm against the upstream source.
    with tmp_chdir(self.projectFile.project_directory):
        gr.to_grass_ascii(out_raster_path, print_nodata=False)
        self.filename = out_raster_path
        # update project file cards
        self.projectFile.setCard('WATERSHED_MASK', out_raster_path, add_quotes=True)
        self.projectFile.setCard('GRIDSIZE', str((gr.geotransform[1] - gr.geotransform[-1]) / 2.0))
        self.projectFile.setCard('ROWS', str(gr.y_size))
        self.projectFile.setCard('COLS', str(gr.x_size))
        # write projection file if does not exist
        if wkt_projection is None:
            proj_file = ProjectionFile()
            proj_file.projection = gr.wkt
            proj_file.projectFile = self.projectFile
            proj_path = "{0}_prj.pro".format(os.path.splitext(out_raster_path)[0])
            gr.write_prj(proj_path)
            self.projectFile.setCard('#PROJECTION_FILE', proj_path, add_quotes=True)
        # read raster into object
        if load_raster_to_db:
            self._load_raster_text(out_raster_path)
| 4,833
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/msk.py#L45-L143
|
[
"def",
"libvlc_media_list_player_is_playing",
"(",
"p_mlp",
")",
":",
"f",
"=",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_media_list_player_is_playing'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_media_list_player_is_playing'",
",",
"(",
"(",
"1",
",",
")",
",",
")",
",",
"None",
",",
"ctypes",
".",
"c_int",
",",
"MediaListPlayer",
")",
"return",
"f",
"(",
"p_mlp",
")"
] |
Change directory temporarily and return when done .
|
def tmp_chdir(new_path):
    """
    Change directory temporarily and return when done.

    Generator-based context helper: switches the working directory to
    ``new_path``, yields control, and always restores the original
    directory — even when the body raises.
    """
    original_dir = os.getcwd()
    os.chdir(new_path)
    try:
        yield
    finally:
        os.chdir(original_dir)
| 4,834
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/util/context.py#L13-L20
|
[
"def",
"_concrete_acl",
"(",
"self",
",",
"acl_doc",
")",
":",
"if",
"not",
"isinstance",
"(",
"acl_doc",
",",
"dict",
")",
":",
"return",
"None",
"# Attempt to instantiate an Acl object with the given dict.",
"try",
":",
"return",
"Acl",
"(",
"document",
"=",
"acl_doc",
",",
"acls",
"=",
"self",
")",
"# If construction fails, log the exception and return None.",
"except",
"Exception",
"as",
"ex",
":",
"logger",
".",
"exception",
"(",
"ex",
")",
"logger",
".",
"error",
"(",
"'Could not instantiate ACL document. You probably need to upgrade to a '",
"'recent version of the client. Document which caused this error: {}'",
".",
"format",
"(",
"acl_doc",
")",
")",
"return",
"None"
] |
download ERA5 data for GSSHA domain
|
def _download(self):
    """
    Download ERA5 (or ERA Interim) data covering the GSSHA domain.

    The geographic bounds of the GSSHA grid are padded (0.5 degrees for
    ERA5, 1 degree for ERA Interim) so the download fully covers the
    model domain.
    """
    # reproject GSSHA grid and get geographic bounds
    min_x, max_x, min_y, max_y = self.gssha_grid.bounds(as_geographic=True)
    if self.era_download_data == 'era5':
        log.info("Downloading ERA5 data ...")
        pad = 0.5
        download_era5_for_gssha(self.lsm_input_folder_path,
                                self.download_start_datetime,
                                self.download_end_datetime,
                                leftlon=min_x - pad,
                                rightlon=max_x + pad,
                                toplat=max_y + pad,
                                bottomlat=min_y - pad)
    else:
        log.info("Downloading ERA Interim data ...")
        pad = 1
        download_interim_for_gssha(self.lsm_input_folder_path,
                                   self.download_start_datetime,
                                   self.download_end_datetime,
                                   leftlon=min_x - pad,
                                   rightlon=max_x + pad,
                                   toplat=max_y + pad,
                                   bottomlat=min_y - pad)
| 4,835
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/era_to_gssha.py#L381-L402
|
[
"def",
"files_comments_delete",
"(",
"self",
",",
"*",
",",
"file",
":",
"str",
",",
"id",
":",
"str",
",",
"*",
"*",
"kwargs",
")",
"->",
"SlackResponse",
":",
"kwargs",
".",
"update",
"(",
"{",
"\"file\"",
":",
"file",
",",
"\"id\"",
":",
"id",
"}",
")",
"return",
"self",
".",
"api_call",
"(",
"\"files.comments.delete\"",
",",
"json",
"=",
"kwargs",
")"
] |
Dispatch the request . Its the actual view flask will use .
|
def dispatch_request(self, *args, **kwargs):
    """
    Dispatch the request; this is the actual view flask will use.

    POST/PUT requests go through ``self.post`` and may redirect;
    GET/HEAD requests go through ``self.get``.  Any non-redirect
    outcome is rendered via ``self.render_response``.
    """
    method = request.method
    if method in ('POST', 'PUT'):
        return_url, context = self.post(*args, **kwargs)
        if return_url is not None:
            return redirect(return_url)
    elif method in ('GET', 'HEAD'):
        context = self.get(*args, **kwargs)
    return self.render_response(self.context(context))
| 4,836
|
https://github.com/hellupline/flask-manager/blob/70e48309f73aacf55f5c37b43165791ae1cf6861/flask_manager/views.py#L21-L31
|
[
"def",
"create",
"(",
"cls",
",",
"name",
",",
"port",
"=",
"179",
",",
"external_distance",
"=",
"20",
",",
"internal_distance",
"=",
"200",
",",
"local_distance",
"=",
"200",
",",
"subnet_distance",
"=",
"None",
")",
":",
"json",
"=",
"{",
"'name'",
":",
"name",
",",
"'external'",
":",
"external_distance",
",",
"'internal'",
":",
"internal_distance",
",",
"'local'",
":",
"local_distance",
",",
"'port'",
":",
"port",
"}",
"if",
"subnet_distance",
":",
"d",
"=",
"[",
"{",
"'distance'",
":",
"distance",
",",
"'subnet'",
":",
"subnet",
".",
"href",
"}",
"for",
"subnet",
",",
"distance",
"in",
"subnet_distance",
"]",
"json",
".",
"update",
"(",
"distance_entry",
"=",
"d",
")",
"return",
"ElementCreator",
"(",
"cls",
",",
"json",
")"
] |
Runs a shell command returns console output .
|
def _run_cmd_get_output(cmd):
    """
    Run a shell command and return its console output.

    Only stdout is piped; ``communicate()``'s stderr slot is therefore
    None here, so the fallback effectively returns stdout or None.
    """
    proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    return stdout or stderr
| 4,837
|
https://github.com/bayesimpact/fex/blob/2d9b4e9be2bf98847a36055b907411fd5557eb77/fex/runner.py#L15-L22
|
[
"def",
"percentOverlap",
"(",
"x1",
",",
"x2",
")",
":",
"nonZeroX1",
"=",
"np",
".",
"count_nonzero",
"(",
"x1",
")",
"nonZeroX2",
"=",
"np",
".",
"count_nonzero",
"(",
"x2",
")",
"percentOverlap",
"=",
"0",
"minX1X2",
"=",
"min",
"(",
"nonZeroX1",
",",
"nonZeroX2",
")",
"if",
"minX1X2",
">",
"0",
":",
"overlap",
"=",
"float",
"(",
"np",
".",
"dot",
"(",
"x1",
".",
"T",
",",
"x2",
")",
")",
"percentOverlap",
"=",
"overlap",
"/",
"minX1X2",
"return",
"percentOverlap"
] |
Parse out the repository identifier from a github URL .
|
def _remote_github_url_to_string(remote_url):
    """
    Parse out the repository identifier from a github remote URL.

    Only SSH-style remotes (``git@github.com:owner/repo.git``) are
    recognised.  Non-word characters in the identifier are replaced
    with ':' (e.g. ``owner:repo``).

    :param remote_url: git remote URL to parse.
    :returns: sanitized identifier string.
    :raises EnvironmentError: if the remote is not a github SSH URL.
    """
    # TODO: make this work with https URLs
    # Raw strings: '\.' and '\W' are invalid escape sequences in plain
    # string literals (DeprecationWarning since Python 3.6).
    match = re.search(r'git@github\.com:(.*)\.git', remote_url)
    if not match:
        raise EnvironmentError('Remote is not a valid github URL')
    identifier = match.group(1)
    return re.sub(r'\W', ':', identifier)
| 4,838
|
https://github.com/bayesimpact/fex/blob/2d9b4e9be2bf98847a36055b907411fd5557eb77/fex/runner.py#L25-L32
|
[
"def",
"_AlignDecryptedDataOffset",
"(",
"self",
",",
"decrypted_data_offset",
")",
":",
"self",
".",
"_file_object",
".",
"seek",
"(",
"0",
",",
"os",
".",
"SEEK_SET",
")",
"self",
".",
"_decrypter",
"=",
"self",
".",
"_GetDecrypter",
"(",
")",
"self",
".",
"_decrypted_data",
"=",
"b''",
"encrypted_data_offset",
"=",
"0",
"encrypted_data_size",
"=",
"self",
".",
"_file_object",
".",
"get_size",
"(",
")",
"while",
"encrypted_data_offset",
"<",
"encrypted_data_size",
":",
"read_count",
"=",
"self",
".",
"_ReadEncryptedData",
"(",
"self",
".",
"_ENCRYPTED_DATA_BUFFER_SIZE",
")",
"if",
"read_count",
"==",
"0",
":",
"break",
"encrypted_data_offset",
"+=",
"read_count",
"if",
"decrypted_data_offset",
"<",
"self",
".",
"_decrypted_data_size",
":",
"self",
".",
"_decrypted_data_offset",
"=",
"decrypted_data_offset",
"break",
"decrypted_data_offset",
"-=",
"self",
".",
"_decrypted_data_size"
] |
Argparse logic lives here .
|
def _get_args(args):
    """
    Argparse logic lives here.

    :param args: argument strings to parse (``None`` means sys.argv).
    :returns: parsed namespace; ``cache_path`` is forced to None when
        ``--no-cache`` was given.
    """
    parser = argparse.ArgumentParser(
        description='A tool to extract features into a simple format.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('--no-cache', action='store_true')
    parser.add_argument('--deploy', action='store_true')
    parser.add_argument('--cache-path', type=str, default='fex-cache.pckl',
                        help='Path for cache file')
    parser.add_argument('--path', type=str, default='features.csv',
                        help='Path to write the dataset to')
    parsed = parser.parse_args(args)
    if parsed.no_cache:
        parsed.cache_path = None
    return parsed
| 4,839
|
https://github.com/bayesimpact/fex/blob/2d9b4e9be2bf98847a36055b907411fd5557eb77/fex/runner.py#L75-L93
|
[
"def",
"read_avro",
"(",
"file_path_or_buffer",
",",
"schema",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"file_path_or_buffer",
",",
"six",
".",
"string_types",
")",
":",
"with",
"open",
"(",
"file_path_or_buffer",
",",
"'rb'",
")",
"as",
"f",
":",
"return",
"__file_to_dataframe",
"(",
"f",
",",
"schema",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"__file_to_dataframe",
"(",
"file_path_or_buffer",
",",
"schema",
",",
"*",
"*",
"kwargs",
")"
] |
Parse arguments provided on the commandline and execute extractors .
|
def run(*extractor_list, **kwargs):
    """
    Parse arguments provided on the commandline and execute extractors.

    Each extractor is registered on a fresh Collection; when
    ``--deploy`` was given the output path is prefixed with the current
    git hash before the collection runs.
    """
    options = _get_args(kwargs.get('args'))
    log.info('Going to run list of {} FeatureExtractors'.format(len(extractor_list)))
    collection = fex.Collection(cache_path=options.cache_path)
    for extractor in extractor_list:
        collection.add_feature_extractor(extractor)
    out_path = options.path
    if options.deploy:
        out_path = _prefix_git_hash(out_path)
    collection.run(out_path)
| 4,840
|
https://github.com/bayesimpact/fex/blob/2d9b4e9be2bf98847a36055b907411fd5557eb77/fex/runner.py#L110-L122
|
[
"def",
"ensure_compatible_admin",
"(",
"view",
")",
":",
"def",
"wrapper",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"user_roles",
"=",
"request",
".",
"user",
".",
"user_data",
".",
"get",
"(",
"'roles'",
",",
"[",
"]",
")",
"if",
"len",
"(",
"user_roles",
")",
"!=",
"1",
":",
"context",
"=",
"{",
"'message'",
":",
"'I need to be able to manage user accounts. '",
"'My username is %s'",
"%",
"request",
".",
"user",
".",
"username",
"}",
"return",
"render",
"(",
"request",
",",
"'mtp_common/user_admin/incompatible-admin.html'",
",",
"context",
"=",
"context",
")",
"return",
"view",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] |
This will delete existing instances with the same extension
|
def _delete_existing(self, project_file, session):
    """
    Delete existing raster instances with the same file extension.

    :param project_file: ProjectFile the rasters belong to.
    :param session: SQLAlchemy session used for the query and delete.
    """
    existing_rasters = session.query(RasterMapFile) \
        .filter(RasterMapFile.projectFile == project_file) \
        .filter(RasterMapFile.fileExtension == self.fileExtension) \
        .all()
    if existing_rasters:
        # Session.delete() accepts a single mapped instance, not the
        # list returned by .all(); deleting the list directly raises.
        for existing_raster in existing_rasters:
            session.delete(existing_raster)
        session.commit()
| 4,841
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/map.py#L81-L92
|
[
"def",
"get_time_offset",
"(",
")",
":",
"try",
":",
"resp",
"=",
"webapi",
".",
"post",
"(",
"'ITwoFactorService'",
",",
"'QueryTime'",
",",
"1",
",",
"params",
"=",
"{",
"'http_timeout'",
":",
"10",
"}",
")",
"except",
":",
"return",
"None",
"ts",
"=",
"int",
"(",
"time",
"(",
")",
")",
"return",
"int",
"(",
"resp",
".",
"get",
"(",
"'response'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'server_time'",
",",
"ts",
")",
")",
"-",
"ts"
] |
Loads grass ASCII to object
|
def _load_raster_text(self, raster_path):
    """
    Load a GRASS ASCII raster into this object.

    Reads the entire file into ``self.rasterText`` and parses the six
    header lines (north/south/east/west bounds plus rows/cols) into
    the corresponding attributes.
    """
    # Read the whole raster file into the text field
    with open(raster_path, 'r') as raster_file:
        self.rasterText = raster_file.read()
    # Retrieve metadata from the header (first six lines)
    for header_line in self.rasterText.split('\n')[0:6]:
        tokens = header_line.split()
        keyword = tokens[0].lower()
        if 'north' in keyword:
            self.north = float(tokens[1])
        elif 'south' in keyword:
            self.south = float(tokens[1])
        elif 'east' in keyword:
            self.east = float(tokens[1])
        elif 'west' in keyword:
            self.west = float(tokens[1])
        elif 'rows' in keyword:
            self.rows = int(tokens[1])
        elif 'cols' in keyword:
            self.columns = int(tokens[1])
| 4,842
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/map.py#L94-L118
|
[
"def",
"variable_state",
"(",
"cls",
",",
"scripts",
",",
"variables",
")",
":",
"def",
"conditionally_set_not_modified",
"(",
")",
":",
"\"\"\"Set the variable to modified if it hasn't been altered.\"\"\"",
"state",
"=",
"variables",
".",
"get",
"(",
"block",
".",
"args",
"[",
"0",
"]",
",",
"None",
")",
"if",
"state",
"==",
"cls",
".",
"STATE_NOT_MODIFIED",
":",
"variables",
"[",
"block",
".",
"args",
"[",
"0",
"]",
"]",
"=",
"cls",
".",
"STATE_MODIFIED",
"green_flag",
",",
"other",
"=",
"partition_scripts",
"(",
"scripts",
",",
"cls",
".",
"HAT_GREEN_FLAG",
")",
"variables",
"=",
"dict",
"(",
"(",
"x",
",",
"cls",
".",
"STATE_NOT_MODIFIED",
")",
"for",
"x",
"in",
"variables",
")",
"for",
"script",
"in",
"green_flag",
":",
"in_zone",
"=",
"True",
"for",
"name",
",",
"level",
",",
"block",
"in",
"cls",
".",
"iter_blocks",
"(",
"script",
".",
"blocks",
")",
":",
"if",
"name",
"==",
"'broadcast %s and wait'",
":",
"in_zone",
"=",
"False",
"if",
"name",
"==",
"'set %s effect to %s'",
":",
"state",
"=",
"variables",
".",
"get",
"(",
"block",
".",
"args",
"[",
"0",
"]",
",",
"None",
")",
"if",
"state",
"is",
"None",
":",
"continue",
"# Not a variable we care about",
"if",
"in_zone",
"and",
"level",
"==",
"0",
":",
"# Success!",
"if",
"state",
"==",
"cls",
".",
"STATE_NOT_MODIFIED",
":",
"state",
"=",
"cls",
".",
"STATE_INITIALIZED",
"else",
":",
"# Multiple when green flag clicked conflict",
"# TODO: Need to allow multiple sets of a variable",
"# within the same script",
"# print 'CONFLICT', script",
"state",
"=",
"cls",
".",
"STATE_MODIFIED",
"elif",
"in_zone",
":",
"continue",
"# Conservative ignore for nested absolutes",
"elif",
"state",
"==",
"cls",
".",
"STATE_NOT_MODIFIED",
":",
"state",
"=",
"cls",
".",
"STATE_MODIFIED",
"variables",
"[",
"block",
".",
"args",
"[",
"0",
"]",
"]",
"=",
"state",
"elif",
"name",
"==",
"'change %s effect by %s'",
":",
"conditionally_set_not_modified",
"(",
")",
"for",
"script",
"in",
"other",
":",
"for",
"name",
",",
"_",
",",
"block",
"in",
"cls",
".",
"iter_blocks",
"(",
"script",
".",
"blocks",
")",
":",
"if",
"name",
"in",
"(",
"'change %s effect by %s'",
",",
"'set %s effect to %s'",
")",
":",
"conditionally_set_not_modified",
"(",
")",
"return",
"variables"
] |
Raster Map File Read from File Method
|
def _read(self, directory, filename, session, path, name, extension,
          spatial, spatialReferenceID, replaceParamFile):
    """
    Raster Map File read-from-file method.

    Stores the file extension and name, loads the GRASS ASCII text into
    the object, and — when ``spatial`` is enabled — also loads the
    raster as WKB via the MapKit RasterLoader.
    """
    # Assign file metadata attributes
    self.fileExtension = extension
    self.filename = filename
    self._load_raster_text(path)
    if spatial:
        # Get well known binary from the raster file using MapKit
        self.raster = RasterLoader.grassAsciiRasterToWKB(
            session=session,
            grassRasterPath=path,
            srid=str(spatialReferenceID),
            noData='0')
| 4,843
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/map.py#L120-L136
|
[
"async",
"def",
"description",
"(",
"self",
")",
":",
"resp",
"=",
"await",
"self",
".",
"_call_web",
"(",
"f'nation={self.id}'",
")",
"return",
"html",
".",
"unescape",
"(",
"re",
".",
"search",
"(",
"'<div class=\"nationsummary\">(.+?)<p class=\"nationranktext\">'",
",",
"resp",
".",
"text",
",",
"flags",
"=",
"re",
".",
"DOTALL",
")",
".",
"group",
"(",
"1",
")",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
".",
"replace",
"(",
"'</p>'",
",",
"''",
")",
".",
"replace",
"(",
"'<p>'",
",",
"'\\n\\n'",
")",
".",
"strip",
"(",
")",
")"
] |
Raster Map File Write to File Method
|
def _write(self, session, openFile, replaceParamFile):
    """
    Raster Map File write-to-file method.

    Prefers the spatial ``raster`` column (exported as a GRASS ASCII
    grid via MapKit); falls back to the plain ``rasterText`` field.
    Writes nothing when both are empty.
    """
    if self.raster is not None:
        # Use MapKit RasterConverter to export the raster as GRASS ASCII
        converter = RasterConverter(session)
        grass_ascii = converter.getAsGrassAsciiRaster(
            rasterFieldName='raster',
            tableName=self.__tablename__,
            rasterIdFieldName='id',
            rasterId=self.id)
        openFile.write(grass_ascii)
    elif self.rasterText is not None:
        openFile.write(self.rasterText)
| 4,844
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/map.py#L138-L157
|
[
"def",
"devices",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw",
"# from Android system/core/adb/transport.c statename()",
"re_device_info",
"=",
"re",
".",
"compile",
"(",
"r'([^\\s]+)\\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)'",
")",
"devices",
"=",
"[",
"]",
"lines",
"=",
"self",
".",
"command_output",
"(",
"[",
"\"devices\"",
",",
"\"-l\"",
"]",
",",
"timeout",
"=",
"timeout",
")",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"line",
"==",
"'List of devices attached '",
":",
"continue",
"match",
"=",
"re_device_info",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"device",
"=",
"{",
"'device_serial'",
":",
"match",
".",
"group",
"(",
"1",
")",
",",
"'state'",
":",
"match",
".",
"group",
"(",
"2",
")",
"}",
"remainder",
"=",
"line",
"[",
"match",
".",
"end",
"(",
"2",
")",
":",
"]",
".",
"strip",
"(",
")",
"if",
"remainder",
":",
"try",
":",
"device",
".",
"update",
"(",
"dict",
"(",
"[",
"j",
".",
"split",
"(",
"':'",
")",
"for",
"j",
"in",
"remainder",
".",
"split",
"(",
"' '",
")",
"]",
")",
")",
"except",
"ValueError",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"'devices: Unable to parse '",
"'remainder for device %s'",
"%",
"line",
")",
"devices",
".",
"append",
"(",
"device",
")",
"return",
"devices"
] |
Wrapper for GsshaPyFileObjectBase write method
|
def write(self, session, directory, name, replaceParamFile=None, **kwargs):
    """
    Wrapper for the GsshaPyFileObjectBase write method.

    Only delegates to the base-class write when the object actually
    holds raster data (spatial ``raster`` or plain ``rasterText``);
    otherwise nothing is written.
    """
    if self.raster is not None or self.rasterText is not None:
        super(RasterMapFile, self).write(session, directory, name, replaceParamFile, **kwargs)
| 4,845
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/map.py#L159-L164
|
[
"def",
"indication",
"(",
"self",
",",
"apdu",
")",
":",
"if",
"_debug",
":",
"ServerSSM",
".",
"_debug",
"(",
"\"indication %r\"",
",",
"apdu",
")",
"if",
"self",
".",
"state",
"==",
"IDLE",
":",
"self",
".",
"idle",
"(",
"apdu",
")",
"elif",
"self",
".",
"state",
"==",
"SEGMENTED_REQUEST",
":",
"self",
".",
"segmented_request",
"(",
"apdu",
")",
"elif",
"self",
".",
"state",
"==",
"AWAIT_RESPONSE",
":",
"self",
".",
"await_response",
"(",
"apdu",
")",
"elif",
"self",
".",
"state",
"==",
"SEGMENTED_RESPONSE",
":",
"self",
".",
"segmented_response",
"(",
"apdu",
")",
"else",
":",
"if",
"_debug",
":",
"ServerSSM",
".",
"_debug",
"(",
"\" - invalid state\"",
")"
] |
Simple Slugify .
|
def slugify(value):
    """
    Simple slugify: convert CamelCase / spaced text to snake_case.

    Case boundaries get underscores inserted (via the module-level
    ``first_cap_re``/``all_cap_re`` patterns), then the result is
    lowercased and spaces are collapsed into underscores.
    """
    with_breaks = all_cap_re.sub(r'\1_\2', first_cap_re.sub(r'\1_\2', value))
    return with_breaks.lower().replace(' _', '_').replace(' ', '_')
| 4,846
|
https://github.com/hellupline/flask-manager/blob/70e48309f73aacf55f5c37b43165791ae1cf6861/flask_manager/utils.py#L23-L27
|
[
"def",
"_generate_noise_system",
"(",
"dimensions_tr",
",",
"spatial_sd",
",",
"temporal_sd",
",",
"spatial_noise_type",
"=",
"'gaussian'",
",",
"temporal_noise_type",
"=",
"'gaussian'",
",",
")",
":",
"def",
"noise_volume",
"(",
"dimensions",
",",
"noise_type",
",",
")",
":",
"if",
"noise_type",
"==",
"'rician'",
":",
"# Generate the Rician noise (has an SD of 1)",
"noise",
"=",
"stats",
".",
"rice",
".",
"rvs",
"(",
"b",
"=",
"0",
",",
"loc",
"=",
"0",
",",
"scale",
"=",
"1.527",
",",
"size",
"=",
"dimensions",
")",
"elif",
"noise_type",
"==",
"'exponential'",
":",
"# Make an exponential distribution (has an SD of 1)",
"noise",
"=",
"stats",
".",
"expon",
".",
"rvs",
"(",
"0",
",",
"scale",
"=",
"1",
",",
"size",
"=",
"dimensions",
")",
"elif",
"noise_type",
"==",
"'gaussian'",
":",
"noise",
"=",
"np",
".",
"random",
".",
"randn",
"(",
"np",
".",
"prod",
"(",
"dimensions",
")",
")",
".",
"reshape",
"(",
"dimensions",
")",
"# Return the noise",
"return",
"noise",
"# Get just the xyz coordinates",
"dimensions",
"=",
"np",
".",
"asarray",
"(",
"[",
"dimensions_tr",
"[",
"0",
"]",
",",
"dimensions_tr",
"[",
"1",
"]",
",",
"dimensions_tr",
"[",
"2",
"]",
",",
"1",
"]",
")",
"# Generate noise",
"spatial_noise",
"=",
"noise_volume",
"(",
"dimensions",
",",
"spatial_noise_type",
")",
"temporal_noise",
"=",
"noise_volume",
"(",
"dimensions_tr",
",",
"temporal_noise_type",
")",
"# Make the system noise have a specific spatial variability",
"spatial_noise",
"*=",
"spatial_sd",
"# Set the size of the noise",
"temporal_noise",
"*=",
"temporal_sd",
"# The mean in time of system noise needs to be zero, so subtract the",
"# means of the temporal noise in time",
"temporal_noise_mean",
"=",
"np",
".",
"mean",
"(",
"temporal_noise",
",",
"3",
")",
".",
"reshape",
"(",
"dimensions",
"[",
"0",
"]",
",",
"dimensions",
"[",
"1",
"]",
",",
"dimensions",
"[",
"2",
"]",
",",
"1",
")",
"temporal_noise",
"=",
"temporal_noise",
"-",
"temporal_noise_mean",
"# Save the combination",
"system_noise",
"=",
"spatial_noise",
"+",
"temporal_noise",
"return",
"system_noise"
] |
Mark the decorated command as the intended entrypoint of the command module .
|
def entrypoint(cls):
    """
    Mark the decorated command as the intended entrypoint of the
    command module.

    Only Command subclasses may be marked; anything else raises.
    """
    is_command_class = isinstance(cls, type) and issubclass(cls, Command)
    if not is_command_class:
        raise TypeError(f"inappropriate entrypoint instance of type {cls.__class__}")
    cls._argcmdr_entrypoint_ = True
    return cls
| 4,847
|
https://github.com/dssg/argcmdr/blob/346b6158987464c3d3a32d315f3800a4807744b4/src/argcmdr.py#L767-L775
|
[
"def",
"same_types",
"(",
"self",
",",
"index1",
",",
"index2",
")",
":",
"try",
":",
"same",
"=",
"self",
".",
"table",
"[",
"index1",
"]",
".",
"type",
"==",
"self",
".",
"table",
"[",
"index2",
"]",
".",
"type",
"!=",
"SharedData",
".",
"TYPES",
".",
"NO_TYPE",
"except",
"Exception",
":",
"self",
".",
"error",
"(",
")",
"return",
"same"
] |
Construct an argparse action which stores the value of a command line option to override a corresponding value in the process environment .
|
def store_env_override(option_strings,
                       dest,
                       envvar,
                       nargs=None,
                       default=None,
                       type=None,
                       choices=None,
                       description=None,
                       help=None,
                       metavar=None):
    """
    Construct an argparse action which stores the value of a command
    line option, with its default taken from (or derived from) a
    process environment variable.

    When ``default`` is callable it is invoked with the envvar's value
    to derive the effective default; otherwise a non-empty envvar value
    wins over ``default``.  The option becomes *required* only when no
    effective default could be determined.  ``help`` text may be
    generated from ``description``; supplying both is an error.
    """
    if envvar == '':
        raise ValueError("unsupported environment variable name", envvar)

    env_value = os.getenv(envvar)

    # Resolve the effective default: derived, straight from the
    # environment, or the plain fallback.
    if callable(default):
        effective_default = default(env_value)
    elif env_value:
        effective_default = env_value
    else:
        effective_default = default

    if description and help:
        raise ValueError("only specify help to override its optional generation from "
                         "description -- not both")
    elif description:
        if effective_default:
            help = '{} (default {} envvar {}: {})'.format(
                description,
                'provided by' if default is None else 'derived from',
                envvar,
                effective_default,
            )
        else:
            help = (f'{description} (required because '
                    f'envvar {envvar} is empty)')

    return argparse._StoreAction(
        option_strings=option_strings,
        dest=dest,
        nargs=nargs,
        const=None,
        default=effective_default,
        type=type,
        choices=choices,
        required=(not effective_default),
        help=help,
        metavar=metavar,
    )
| 4,848
|
https://github.com/dssg/argcmdr/blob/346b6158987464c3d3a32d315f3800a4807744b4/src/argcmdr.py#L785-L852
|
[
"def",
"check_keypoints",
"(",
"keypoints",
",",
"rows",
",",
"cols",
")",
":",
"for",
"kp",
"in",
"keypoints",
":",
"check_keypoint",
"(",
"kp",
",",
"rows",
",",
"cols",
")"
] |
Return a dict with ind_id as key and Individual objects as values .
|
def individual_dict(self, ind_ids):
    """Return a dict with ind_id as key and Individual as value."""
    return {individual.ind_id: individual
            for individual in self.individuals(ind_ids=ind_ids)}
| 4,849
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/base.py#L35-L38
|
[
"def",
"run",
"(",
"self",
")",
":",
"elapsed",
"=",
"0",
"run_time",
"=",
"self",
".",
"config",
"[",
"'run_time'",
"]",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"t",
"=",
"time",
".",
"time",
"self",
".",
"turrets_manager",
".",
"start",
"(",
"self",
".",
"transaction_context",
")",
"self",
".",
"started",
"=",
"True",
"while",
"elapsed",
"<=",
"run_time",
":",
"try",
":",
"self",
".",
"_run_loop_action",
"(",
")",
"self",
".",
"_print_status",
"(",
"elapsed",
")",
"elapsed",
"=",
"t",
"(",
")",
"-",
"start_time",
"except",
"(",
"Exception",
",",
"KeyboardInterrupt",
")",
":",
"print",
"(",
"\"\\nStopping test, sending stop command to turrets\"",
")",
"self",
".",
"turrets_manager",
".",
"stop",
"(",
")",
"self",
".",
"stats_handler",
".",
"write_remaining",
"(",
")",
"traceback",
".",
"print_exc",
"(",
")",
"break",
"self",
".",
"turrets_manager",
".",
"stop",
"(",
")",
"print",
"(",
"\"\\n\\nProcessing all remaining messages... This could take time depending on message volume\"",
")",
"t",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"result_collector",
".",
"unbind",
"(",
"self",
".",
"result_collector",
".",
"LAST_ENDPOINT",
")",
"self",
".",
"_clean_queue",
"(",
")",
"print",
"(",
"\"took %s\"",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"t",
")",
")"
] |
clean - remove build artifacts .
|
def clean():
    """clean - remove build artifacts."""
    # remove packaging output, then sweep compiled/backup files
    for command in (
            'rm -rf build/',
            'rm -rf dist/',
            'rm -rf puzzle.egg-info',
            'find . -name __pycache__ -delete',
            'find . -name *.pyc -delete',
            'find . -name *.pyo -delete',
            'find . -name *~ -delete',
    ):
        run(command)
    log.info('cleaned up')
| 4,850
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/tasks.py#L10-L20
|
[
"def",
"account_groups_and_extra_data",
"(",
"account",
",",
"resource",
",",
"refresh_timedelta",
"=",
"None",
")",
":",
"updated",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"modified_since",
"=",
"updated",
"if",
"refresh_timedelta",
"is",
"not",
"None",
":",
"modified_since",
"+=",
"refresh_timedelta",
"modified_since",
"=",
"modified_since",
".",
"isoformat",
"(",
")",
"last_update",
"=",
"account",
".",
"extra_data",
".",
"get",
"(",
"'updated'",
",",
"modified_since",
")",
"if",
"last_update",
">",
"modified_since",
":",
"return",
"account",
".",
"extra_data",
".",
"get",
"(",
"'groups'",
",",
"[",
"]",
")",
"groups",
"=",
"fetch_groups",
"(",
"resource",
"[",
"'Group'",
"]",
")",
"extra_data",
"=",
"current_app",
".",
"config",
".",
"get",
"(",
"'OAUTHCLIENT_CERN_EXTRA_DATA_SERIALIZER'",
",",
"fetch_extra_data",
")",
"(",
"resource",
")",
"account",
".",
"extra_data",
".",
"update",
"(",
"groups",
"=",
"groups",
",",
"updated",
"=",
"updated",
".",
"isoformat",
"(",
")",
",",
"*",
"*",
"extra_data",
")",
"return",
"groups"
] |
0mq fanout sub .
|
def zmq_sub(bind, tables, forwarder=False, green=False):
    """
    0mq fanout sub.

    Subscribes to the blinker write/update/delete signals for the given
    tables and re-publishes each event ("<event> <pk>") on a zmq PUB
    socket.  The socket binds to ``bind`` directly, or connects to it
    when relaying through a forwarder device.

    :returns: the zmq PUB socket.
    """
    logger = logging.getLogger("meepo.sub.zmq_sub")
    if not isinstance(tables, (list, set)):
        raise ValueError("tables should be list or set")
    # allow gevent-friendly operation via zmq.green
    if not green:
        import zmq
    else:
        import zmq.green as zmq
    ctx = zmq.Context()
    socket = ctx.socket(zmq.PUB)
    if forwarder:
        # connect (not bind) when a forwarder device owns the endpoint
        socket.connect(bind)
    else:
        socket.bind(bind)
    events = ("%s_%s" % (tb, action) for tb, action in
              itertools.product(*[tables, ["write", "update", "delete"]]))
    for event in events:
        # bind the current event name as a default argument so each
        # closure publishes its own event (avoids late binding)
        def _sub(pk, event=event):
            msg = "%s %s" % (event, pk)
            socket.send_string(msg)
            logger.debug("pub msg: %s" % msg)
        signal(event).connect(_sub, weak=False)
    return socket
| 4,851
|
https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/sub/zmq.py#L11-L48
|
[
"def",
"save_and_validate_logo",
"(",
"logo_stream",
",",
"logo_filename",
",",
"community_id",
")",
":",
"cfg",
"=",
"current_app",
".",
"config",
"logos_bucket_id",
"=",
"cfg",
"[",
"'COMMUNITIES_BUCKET_UUID'",
"]",
"logo_max_size",
"=",
"cfg",
"[",
"'COMMUNITIES_LOGO_MAX_SIZE'",
"]",
"logos_bucket",
"=",
"Bucket",
".",
"query",
".",
"get",
"(",
"logos_bucket_id",
")",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"logo_filename",
")",
"[",
"1",
"]",
"ext",
"=",
"ext",
"[",
"1",
":",
"]",
"if",
"ext",
".",
"startswith",
"(",
"'.'",
")",
"else",
"ext",
"logo_stream",
".",
"seek",
"(",
"SEEK_SET",
",",
"SEEK_END",
")",
"# Seek from beginning to end",
"logo_size",
"=",
"logo_stream",
".",
"tell",
"(",
")",
"if",
"logo_size",
">",
"logo_max_size",
":",
"return",
"None",
"if",
"ext",
"in",
"cfg",
"[",
"'COMMUNITIES_LOGO_EXTENSIONS'",
"]",
":",
"key",
"=",
"\"{0}/logo.{1}\"",
".",
"format",
"(",
"community_id",
",",
"ext",
")",
"logo_stream",
".",
"seek",
"(",
"0",
")",
"# Rewind the stream to the beginning",
"ObjectVersion",
".",
"create",
"(",
"logos_bucket",
",",
"key",
",",
"stream",
"=",
"logo_stream",
",",
"size",
"=",
"logo_size",
")",
"return",
"ext",
"else",
":",
"return",
"None"
] |
Load a case with individuals .
|
def add_case ( self , case_obj , vtype = 'snv' , mode = 'vcf' , ped_svg = None ) : new_case = Case ( case_id = case_obj . case_id , name = case_obj . name , variant_source = case_obj . variant_source , variant_type = vtype , variant_mode = mode , pedigree = ped_svg , compressed = case_obj . compressed , tabix_index = case_obj . tabix_index ) # build individuals inds = [ Individual ( ind_id = ind . ind_id , name = ind . name , mother = ind . mother , father = ind . father , sex = ind . sex , phenotype = ind . phenotype , ind_index = ind . ind_index , variant_source = ind . variant_source , bam_path = ind . bam_path , ) for ind in case_obj . individuals ] new_case . individuals = inds if self . case ( new_case . case_id ) : logger . warning ( "Case already exists in database!" ) else : self . session . add ( new_case ) self . save ( ) return new_case
| 4,852
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/case.py#L15-L49
|
[
"def",
"check_user_token",
"(",
"self",
",",
"request",
",",
"user",
")",
":",
"if",
"not",
"app_settings",
".",
"REST_USER_TOKEN_ENABLED",
":",
"return",
"False",
"try",
":",
"token",
"=",
"Token",
".",
"objects",
".",
"get",
"(",
"user",
"=",
"user",
",",
"key",
"=",
"request",
".",
"data",
".",
"get",
"(",
"'password'",
")",
")",
"except",
"Token",
".",
"DoesNotExist",
":",
"token",
"=",
"None",
"else",
":",
"if",
"app_settings",
".",
"DISPOSABLE_USER_TOKEN",
":",
"token",
".",
"delete",
"(",
")",
"finally",
":",
"return",
"token",
"is",
"not",
"None"
] |
Fetch all individuals from the database .
|
def individuals ( self , ind_ids = None ) : query = self . query ( Individual ) if ind_ids : query = query . filter ( Individual . ind_id . in_ ( ind_ids ) ) return query
| 4,853
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/case.py#L92-L97
|
[
"def",
"start_vm",
"(",
"access_token",
",",
"subscription_id",
",",
"resource_group",
",",
"vm_name",
")",
":",
"endpoint",
"=",
"''",
".",
"join",
"(",
"[",
"get_rm_endpoint",
"(",
")",
",",
"'/subscriptions/'",
",",
"subscription_id",
",",
"'/resourceGroups/'",
",",
"resource_group",
",",
"'/providers/Microsoft.Compute/virtualMachines/'",
",",
"vm_name",
",",
"'/start'",
",",
"'?api-version='",
",",
"COMP_API",
"]",
")",
"return",
"do_post",
"(",
"endpoint",
",",
"''",
",",
"access_token",
")"
] |
Return only comments made on the case .
|
def case_comments ( self ) : comments = ( comment for comment in self . comments if comment . variant_id is None ) return comments
| 4,854
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/sql/models.py#L55-L59
|
[
"def",
"ttl",
"(",
"self",
",",
"value",
")",
":",
"# get timer",
"timer",
"=",
"getattr",
"(",
"self",
",",
"Annotation",
".",
"__TIMER",
",",
"None",
")",
"# if timer is running, stop the timer",
"if",
"timer",
"is",
"not",
"None",
":",
"timer",
".",
"cancel",
"(",
")",
"# initialize timestamp",
"timestamp",
"=",
"None",
"# if value is None",
"if",
"value",
"is",
"None",
":",
"# nonify timer",
"timer",
"=",
"None",
"else",
":",
"# else, renew a timer",
"# get timestamp",
"timestamp",
"=",
"time",
"(",
")",
"+",
"value",
"# start a new timer",
"timer",
"=",
"Timer",
"(",
"value",
",",
"self",
".",
"__del__",
")",
"timer",
".",
"start",
"(",
")",
"# set/update attributes",
"setattr",
"(",
"self",
",",
"Annotation",
".",
"__TIMER",
",",
"timer",
")",
"setattr",
"(",
"self",
",",
"Annotation",
".",
"__TS",
",",
"timestamp",
")"
] |
Send a PUT request .
|
def put ( self , url , body = None , * * kwargs ) : return self . request ( 'put' , url , body = body , * * kwargs )
| 4,855
|
https://github.com/basecrm/basecrm-python/blob/7c1cf97dbaba8aeb9ff89f8a54f945a8702349f6/basecrm/http_client.py#L62-L73
|
[
"def",
"_generate_examples_validation",
"(",
"self",
",",
"archive",
",",
"labels",
")",
":",
"# Get the current random seeds.",
"numpy_st0",
"=",
"np",
".",
"random",
".",
"get_state",
"(",
")",
"# Set new random seeds.",
"np",
".",
"random",
".",
"seed",
"(",
"135",
")",
"logging",
".",
"warning",
"(",
"'Overwriting cv2 RNG seed.'",
")",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"cv2",
".",
"setRNGSeed",
"(",
"357",
")",
"for",
"example",
"in",
"super",
"(",
"Imagenet2012Corrupted",
",",
"self",
")",
".",
"_generate_examples_validation",
"(",
"archive",
",",
"labels",
")",
":",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
":",
"tf_img",
"=",
"tf",
".",
"image",
".",
"decode_jpeg",
"(",
"example",
"[",
"'image'",
"]",
".",
"read",
"(",
")",
",",
"channels",
"=",
"3",
")",
"image_np",
"=",
"tfds",
".",
"as_numpy",
"(",
"tf_img",
")",
"example",
"[",
"'image'",
"]",
"=",
"self",
".",
"_get_corrupted_example",
"(",
"image_np",
")",
"yield",
"example",
"# Reset the seeds back to their original values.",
"np",
".",
"random",
".",
"set_state",
"(",
"numpy_st0",
")"
] |
Topic callback registry .
|
def event ( self , * topics , * * kwargs ) : workers = kwargs . pop ( "workers" , 1 ) multi = kwargs . pop ( "multi" , False ) queue_limit = kwargs . pop ( "queue_limit" , 10000 ) def wrapper ( func ) : for topic in topics : queues = [ Queue ( ) for _ in range ( workers ) ] hash_ring = ketama . Continuum ( ) for q in queues : hash_ring [ str ( hash ( q ) ) ] = q self . worker_queues [ topic ] = hash_ring self . workers [ topic ] = WorkerPool ( queues , topic , func , multi = multi , queue_limit = queue_limit , logger_name = "%s.%s" % ( self . name , topic ) ) self . socket . setsockopt ( zmq . SUBSCRIBE , asbytes ( topic ) ) return func return wrapper
| 4,856
|
https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/replicator/queue.py#L31-L63
|
[
"def",
"delete_secret_versions",
"(",
"self",
",",
"path",
",",
"versions",
",",
"mount_point",
"=",
"DEFAULT_MOUNT_POINT",
")",
":",
"if",
"not",
"isinstance",
"(",
"versions",
",",
"list",
")",
"or",
"len",
"(",
"versions",
")",
"==",
"0",
":",
"error_msg",
"=",
"'argument to \"versions\" must be a list containing one or more integers, \"{versions}\" provided.'",
".",
"format",
"(",
"versions",
"=",
"versions",
")",
"raise",
"exceptions",
".",
"ParamValidationError",
"(",
"error_msg",
")",
"params",
"=",
"{",
"'versions'",
":",
"versions",
",",
"}",
"api_path",
"=",
"'/v1/{mount_point}/delete/{path}'",
".",
"format",
"(",
"mount_point",
"=",
"mount_point",
",",
"path",
"=",
"path",
")",
"return",
"self",
".",
"_adapter",
".",
"post",
"(",
"url",
"=",
"api_path",
",",
"json",
"=",
"params",
",",
")"
] |
Run the replicator .
|
def run ( self ) : for worker_pool in self . workers . values ( ) : worker_pool . start ( ) if isinstance ( self . listen , list ) : for i in self . listen : self . socket . connect ( i ) else : self . socket . connect ( self . listen ) try : while True : msg = self . socket . recv_string ( ) lst = msg . split ( ) if len ( lst ) == 2 : topic , pks = lst [ 0 ] , [ lst [ 1 ] , ] elif len ( lst ) > 2 : topic , pks = lst [ 0 ] , lst [ 1 : ] else : self . logger . error ( "msg corrupt -> %s" % msg ) continue self . logger . debug ( "replicator: {0} -> {1}" . format ( topic , pks ) ) for pk in pks : self . worker_queues [ topic ] [ str ( hash ( pk ) ) ] . put ( pk ) except Exception as e : self . logger . exception ( e ) finally : for worker_pool in self . workers . values ( ) : worker_pool . terminate ( )
| 4,857
|
https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/replicator/queue.py#L65-L98
|
[
"def",
"defBoundary",
"(",
"self",
")",
":",
"self",
".",
"BoroCnstNatAll",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"StateCount",
")",
"+",
"np",
".",
"nan",
"# Find the natural borrowing constraint conditional on next period's state",
"for",
"j",
"in",
"range",
"(",
"self",
".",
"StateCount",
")",
":",
"PermShkMinNext",
"=",
"np",
".",
"min",
"(",
"self",
".",
"IncomeDstn_list",
"[",
"j",
"]",
"[",
"1",
"]",
")",
"TranShkMinNext",
"=",
"np",
".",
"min",
"(",
"self",
".",
"IncomeDstn_list",
"[",
"j",
"]",
"[",
"2",
"]",
")",
"self",
".",
"BoroCnstNatAll",
"[",
"j",
"]",
"=",
"(",
"self",
".",
"solution_next",
".",
"mNrmMin",
"[",
"j",
"]",
"-",
"TranShkMinNext",
")",
"*",
"(",
"self",
".",
"PermGroFac_list",
"[",
"j",
"]",
"*",
"PermShkMinNext",
")",
"/",
"self",
".",
"Rfree_list",
"[",
"j",
"]",
"self",
".",
"BoroCnstNat_list",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"StateCount",
")",
"+",
"np",
".",
"nan",
"self",
".",
"mNrmMin_list",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"StateCount",
")",
"+",
"np",
".",
"nan",
"self",
".",
"BoroCnstDependency",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"StateCount",
",",
"self",
".",
"StateCount",
")",
")",
"+",
"np",
".",
"nan",
"# The natural borrowing constraint in each current state is the *highest*",
"# among next-state-conditional natural borrowing constraints that could",
"# occur from this current state.",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"StateCount",
")",
":",
"possible_next_states",
"=",
"self",
".",
"MrkvArray",
"[",
"i",
",",
":",
"]",
">",
"0",
"self",
".",
"BoroCnstNat_list",
"[",
"i",
"]",
"=",
"np",
".",
"max",
"(",
"self",
".",
"BoroCnstNatAll",
"[",
"possible_next_states",
"]",
")",
"# Explicitly handle the \"None\" case: ",
"if",
"self",
".",
"BoroCnstArt",
"is",
"None",
":",
"self",
".",
"mNrmMin_list",
"[",
"i",
"]",
"=",
"self",
".",
"BoroCnstNat_list",
"[",
"i",
"]",
"else",
":",
"self",
".",
"mNrmMin_list",
"[",
"i",
"]",
"=",
"np",
".",
"max",
"(",
"[",
"self",
".",
"BoroCnstNat_list",
"[",
"i",
"]",
",",
"self",
".",
"BoroCnstArt",
"]",
")",
"self",
".",
"BoroCnstDependency",
"[",
"i",
",",
":",
"]",
"=",
"self",
".",
"BoroCnstNat_list",
"[",
"i",
"]",
"==",
"self",
".",
"BoroCnstNatAll"
] |
Get pk values from object
|
def _pk ( self , obj ) : pk_values = tuple ( getattr ( obj , c . name ) for c in obj . __mapper__ . primary_key ) if len ( pk_values ) == 1 : return pk_values [ 0 ] return pk_values
| 4,858
|
https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/pub/sqlalchemy.py#L135-L144
|
[
"def",
"beacon",
"(",
"config",
")",
":",
"parts",
"=",
"psutil",
".",
"disk_partitions",
"(",
"all",
"=",
"True",
")",
"ret",
"=",
"[",
"]",
"for",
"mounts",
"in",
"config",
":",
"mount",
"=",
"next",
"(",
"iter",
"(",
"mounts",
")",
")",
"# Because we're using regular expressions",
"# if our mount doesn't end with a $, insert one.",
"mount_re",
"=",
"mount",
"if",
"not",
"mount",
".",
"endswith",
"(",
"'$'",
")",
":",
"mount_re",
"=",
"'{0}$'",
".",
"format",
"(",
"mount",
")",
"if",
"salt",
".",
"utils",
".",
"platform",
".",
"is_windows",
"(",
")",
":",
"# mount_re comes in formatted with a $ at the end",
"# can be `C:\\\\$` or `C:\\\\\\\\$`",
"# re string must be like `C:\\\\\\\\` regardless of \\\\ or \\\\\\\\",
"# also, psutil returns uppercase",
"mount_re",
"=",
"re",
".",
"sub",
"(",
"r':\\\\\\$'",
",",
"r':\\\\\\\\'",
",",
"mount_re",
")",
"mount_re",
"=",
"re",
".",
"sub",
"(",
"r':\\\\\\\\\\$'",
",",
"r':\\\\\\\\'",
",",
"mount_re",
")",
"mount_re",
"=",
"mount_re",
".",
"upper",
"(",
")",
"for",
"part",
"in",
"parts",
":",
"if",
"re",
".",
"match",
"(",
"mount_re",
",",
"part",
".",
"mountpoint",
")",
":",
"_mount",
"=",
"part",
".",
"mountpoint",
"try",
":",
"_current_usage",
"=",
"psutil",
".",
"disk_usage",
"(",
"_mount",
")",
"except",
"OSError",
":",
"log",
".",
"warning",
"(",
"'%s is not a valid mount point.'",
",",
"_mount",
")",
"continue",
"current_usage",
"=",
"_current_usage",
".",
"percent",
"monitor_usage",
"=",
"mounts",
"[",
"mount",
"]",
"if",
"'%'",
"in",
"monitor_usage",
":",
"monitor_usage",
"=",
"re",
".",
"sub",
"(",
"'%'",
",",
"''",
",",
"monitor_usage",
")",
"monitor_usage",
"=",
"float",
"(",
"monitor_usage",
")",
"if",
"current_usage",
">=",
"monitor_usage",
":",
"ret",
".",
"append",
"(",
"{",
"'diskusage'",
":",
"current_usage",
",",
"'mount'",
":",
"_mount",
"}",
")",
"return",
"ret"
] |
Record the sqlalchemy object states in the middle of session prepare the events for the final pub in session_commit .
|
def session_update ( self , session , * _ ) : self . _session_init ( session ) session . pending_write |= set ( session . new ) session . pending_update |= set ( session . dirty ) session . pending_delete |= set ( session . deleted ) self . logger . debug ( "%s - session_update" % session . meepo_unique_id )
| 4,859
|
https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/pub/sqlalchemy.py#L201-L209
|
[
"def",
"_ReadLayoutDataTypeDefinition",
"(",
"self",
",",
"definitions_registry",
",",
"definition_values",
",",
"data_type_definition_class",
",",
"definition_name",
",",
"supported_definition_values",
")",
":",
"return",
"self",
".",
"_ReadDataTypeDefinition",
"(",
"definitions_registry",
",",
"definition_values",
",",
"data_type_definition_class",
",",
"definition_name",
",",
"supported_definition_values",
")"
] |
Pub the events after the session committed .
|
def session_commit ( self , session ) : # this may happen when there's nothing to commit if not hasattr ( session , 'meepo_unique_id' ) : self . logger . debug ( "skipped - session_commit" ) return self . _session_pub ( session ) self . _session_del ( session )
| 4,860
|
https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/pub/sqlalchemy.py#L211-L222
|
[
"def",
"_encode_filename",
"(",
"filename",
")",
":",
"# pragma: no cover",
"# Don't replace unknown characters as '?' is forbidden in Windows filenames",
"errors",
"=",
"'ignore'",
"if",
"os",
".",
"name",
"==",
"'nt'",
"else",
"'replace'",
"if",
"not",
"isinstance",
"(",
"filename",
",",
"bytes",
")",
":",
"if",
"os",
".",
"name",
"==",
"'nt'",
"and",
"cairo",
".",
"cairo_version",
"(",
")",
">=",
"11510",
":",
"# Since 1.15.10, cairo uses utf-8 filenames on Windows",
"filename",
"=",
"filename",
".",
"encode",
"(",
"'utf-8'",
",",
"errors",
"=",
"errors",
")",
"else",
":",
"try",
":",
"filename",
"=",
"filename",
".",
"encode",
"(",
"sys",
".",
"getfilesystemencoding",
"(",
")",
")",
"except",
"UnicodeEncodeError",
":",
"# Use plain ASCII filenames as fallback",
"filename",
"=",
"filename",
".",
"encode",
"(",
"'ascii'",
",",
"errors",
"=",
"errors",
")",
"# TODO: avoid characters forbidden in filenames?",
"return",
"ffi",
".",
"new",
"(",
"'char[]'",
",",
"filename",
")"
] |
Send basic auth username and password .
|
def add_basic_auth ( dolt , username , password ) : return dolt . with_headers ( Authorization = 'Basic %s' % base64 . b64encode ( '%s:%s' % ( username , password ) ) . strip ( ) )
| 4,861
|
https://github.com/tswicegood/Dolt/blob/e0da1918b7db18f885734a89f824b9e173cc30a5/dolt/helpers.py#L3-L22
|
[
"def",
"setOverlayTextureColorSpace",
"(",
"self",
",",
"ulOverlayHandle",
",",
"eTextureColorSpace",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"setOverlayTextureColorSpace",
"result",
"=",
"fn",
"(",
"ulOverlayHandle",
",",
"eTextureColorSpace",
")",
"return",
"result"
] |
Add the genotypes for a variant for all individuals
|
def _add_genotypes ( self , variant_obj , gemini_variant , case_id , individual_objs ) : for ind in individual_objs : index = ind . ind_index variant_obj . add_individual ( Genotype ( sample_id = ind . ind_id , genotype = gemini_variant [ 'gts' ] [ index ] , case_id = case_id , phenotype = ind . phenotype , ref_depth = gemini_variant [ 'gt_ref_depths' ] [ index ] , alt_depth = gemini_variant [ 'gt_alt_depths' ] [ index ] , depth = gemini_variant [ 'gt_depths' ] [ index ] , genotype_quality = gemini_variant [ 'gt_quals' ] [ index ] ) )
| 4,862
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/gemini/mixins/variant_extras/genotypes.py#L11-L33
|
[
"def",
"handle",
"(",
"self",
",",
"error",
",",
"connection",
")",
":",
"error_class",
"=",
"error",
".",
"__class__",
"if",
"error_class",
"in",
"(",
"ConnectionExpired",
",",
"ServiceUnavailable",
",",
"DatabaseUnavailableError",
")",
":",
"self",
".",
"deactivate",
"(",
"connection",
".",
"address",
")",
"elif",
"error_class",
"in",
"(",
"NotALeaderError",
",",
"ForbiddenOnReadOnlyDatabaseError",
")",
":",
"self",
".",
"remove_writer",
"(",
"connection",
".",
"address",
")"
] |
Check and process frames argument into a proper iterable for an animation object
|
def process_frames_argument ( frames ) : result = None if np . iterable ( frames ) : try : frames_arr = np . array ( frames ) except : raise TypeError ( "'frames' should be convertable to numpy.array" ) for idx in range ( len ( frames_arr ) ) : frame_idx = frames_arr [ idx ] assert is_real_number ( frame_idx ) assert int ( frame_idx ) == frame_idx frames_arr [ idx ] = int ( frame_idx ) #self.frames = frames_arr result = frames_arr elif is_real_number ( frames ) : assert int ( frames ) == frames frames = int ( frames ) #self.frames = range(frames) result = range ( frames ) return result
| 4,863
|
https://github.com/jam31118/vis/blob/965ebec102c539b323d5756fef04153ac71e50d9/vis/ani.py#L86-L120
|
[
"def",
"_diversity_metric",
"(",
"solution",
",",
"population",
")",
":",
"# Edge case for empty population",
"# If there are no other solutions, the given solution has maximum diversity",
"if",
"population",
"==",
"[",
"]",
":",
"return",
"1.0",
"return",
"(",
"sum",
"(",
"[",
"_manhattan_distance",
"(",
"solution",
",",
"other",
")",
"for",
"other",
"in",
"population",
"]",
")",
"# Normalize (assuming each value in solution is in range [0.0, 1.0])",
"# NOTE: len(solution) is maximum manhattan distance",
"/",
"(",
"len",
"(",
"population",
")",
"*",
"len",
"(",
"solution",
")",
")",
")"
] |
Initialize a database that store metadata
|
def init ( ctx , reset , root , phenomizer ) : configs = { } if root is None : root = ctx . obj . get ( 'root' ) or os . path . expanduser ( "~/.puzzle" ) configs [ 'root' ] = root if os . path . isfile ( root ) : logger . error ( "'root' can't be a file" ) ctx . abort ( ) logger . info ( "Root directory is: {}" . format ( root ) ) db_path = os . path . join ( root , 'puzzle_db.sqlite3' ) logger . info ( "db path is: {}" . format ( db_path ) ) resource_dir = os . path . join ( root , 'resources' ) logger . info ( "resource dir is: {}" . format ( resource_dir ) ) if os . path . exists ( resource_dir ) : logger . debug ( "Found puzzle directory: {0}" . format ( root ) ) if os . path . exists ( resource_dir ) and not reset : logger . warning ( "Puzzle db already in place" ) ctx . abort ( ) else : logger . info ( "Create directory: {0}" . format ( resource_dir ) ) os . makedirs ( resource_dir ) logger . debug ( 'Directory created' ) logger . debug ( 'Connect to database and create tables' ) store = SqlStore ( db_path ) store . set_up ( reset = reset ) if phenomizer : phenomizer = [ str ( term ) for term in phenomizer ] configs [ 'phenomizer_auth' ] = phenomizer if not ctx . obj . get ( 'config_path' ) : logger . info ( "Creating puzzle config file in {0}" . format ( PUZZLE_CONFIG_PATH ) ) with codecs . open ( PUZZLE_CONFIG_PATH , 'w' , encoding = 'utf-8' ) as f : f . write ( yaml . dump ( configs ) ) logger . debug ( "Config created" )
| 4,864
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/cli/init.py#L23-L69
|
[
"def",
"analyze",
"(",
"self",
",",
"itemId",
"=",
"None",
",",
"filePath",
"=",
"None",
",",
"text",
"=",
"None",
",",
"fileType",
"=",
"\"csv\"",
",",
"analyzeParameters",
"=",
"None",
")",
":",
"files",
"=",
"[",
"]",
"url",
"=",
"self",
".",
"_url",
"+",
"\"/analyze\"",
"params",
"=",
"{",
"\"f\"",
":",
"\"json\"",
"}",
"fileType",
"=",
"\"csv\"",
"params",
"[",
"\"fileType\"",
"]",
"=",
"fileType",
"if",
"analyzeParameters",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"analyzeParameters",
",",
"AnalyzeParameters",
")",
":",
"params",
"[",
"'analyzeParameters'",
"]",
"=",
"analyzeParameters",
".",
"value",
"if",
"not",
"(",
"filePath",
"is",
"None",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"filePath",
")",
":",
"params",
"[",
"'text'",
"]",
"=",
"open",
"(",
"filePath",
",",
"'rb'",
")",
".",
"read",
"(",
")",
"return",
"self",
".",
"_post",
"(",
"url",
"=",
"url",
",",
"param_dict",
"=",
"params",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")",
"elif",
"itemId",
"is",
"not",
"None",
":",
"params",
"[",
"\"fileType\"",
"]",
"=",
"fileType",
"params",
"[",
"'itemId'",
"]",
"=",
"itemId",
"return",
"self",
".",
"_post",
"(",
"url",
"=",
"url",
",",
"param_dict",
"=",
"params",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")",
"else",
":",
"raise",
"AttributeError",
"(",
"\"either an Item ID or a file path must be given.\"",
")"
] |
Return a bytestring representation of the value .
|
def encode ( value , encoding = 'utf-8' , encoding_errors = 'strict' ) : if isinstance ( value , bytes ) : return value if not isinstance ( value , basestring ) : value = str ( value ) if isinstance ( value , unicode ) : value = value . encode ( encoding , encoding_errors ) return value
| 4,865
|
https://github.com/seb-m/tss/blob/ab45176b8585ba6bbbcaeffd21ec0c63f615dce0/tss.py#L53-L63
|
[
"def",
"remove_stale_javascripts",
"(",
"portal",
")",
":",
"logger",
".",
"info",
"(",
"\"Removing stale javascripts ...\"",
")",
"for",
"js",
"in",
"JAVASCRIPTS_TO_REMOVE",
":",
"logger",
".",
"info",
"(",
"\"Unregistering JS %s\"",
"%",
"js",
")",
"portal",
".",
"portal_javascripts",
".",
"unregisterResource",
"(",
"js",
")"
] |
Create nshares of the secret . threshold specifies the number of shares needed for reconstructing the secret value . A 0 - 16 bytes identifier must be provided . Optionally the secret is hashed with the algorithm specified by hash_id a class attribute of Hash . This function must return a list of formatted shares or raises a TSSError exception if anything went wrong .
|
def share_secret ( threshold , nshares , secret , identifier , hash_id = Hash . SHA256 ) : if identifier is None : raise TSSError ( 'an identifier must be provided' ) if not Hash . is_valid ( hash_id ) : raise TSSError ( 'invalid hash algorithm %s' % hash_id ) secret = encode ( secret ) identifier = encode ( identifier ) if hash_id != Hash . NONE : secret += Hash . to_func ( hash_id ) ( secret ) . digest ( ) shares = generate_shares ( threshold , nshares , secret ) header = format_header ( identifier , hash_id , threshold , len ( secret ) + 1 ) return [ format_share ( header , share ) for share in shares ]
| 4,866
|
https://github.com/seb-m/tss/blob/ab45176b8585ba6bbbcaeffd21ec0c63f615dce0/tss.py#L212-L231
|
[
"def",
"close",
"(",
"self",
",",
"end_time",
"=",
"None",
")",
":",
"self",
".",
"_check_ended",
"(",
")",
"if",
"end_time",
":",
"self",
".",
"end_time",
"=",
"end_time",
"else",
":",
"self",
".",
"end_time",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"in_progress",
"=",
"False"
] |
Get the gene symbols that a interval overlaps
|
def get_gene_symbols ( chrom , start , stop ) : gene_symbols = query_gene_symbol ( chrom , start , stop ) logger . debug ( "Found gene symbols: {0}" . format ( ', ' . join ( gene_symbols ) ) ) return gene_symbols
| 4,867
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/utils/get_info.py#L23-L27
|
[
"def",
"volumes_delete",
"(",
"storage_pool",
",",
"logger",
")",
":",
"try",
":",
"for",
"vol_name",
"in",
"storage_pool",
".",
"listVolumes",
"(",
")",
":",
"try",
":",
"vol",
"=",
"storage_pool",
".",
"storageVolLookupByName",
"(",
"vol_name",
")",
"vol",
".",
"delete",
"(",
"0",
")",
"except",
"libvirt",
".",
"libvirtError",
":",
"logger",
".",
"exception",
"(",
"\"Unable to delete storage volume %s.\"",
",",
"vol_name",
")",
"except",
"libvirt",
".",
"libvirtError",
":",
"logger",
".",
"exception",
"(",
"\"Unable to delete storage volumes.\"",
")"
] |
Return the genes info based on the transcripts found
|
def get_gene_info ( ensembl_ids = None , hgnc_symbols = None ) : uniq_ensembl_ids = set ( ensembl_id for ensembl_id in ( ensembl_ids or [ ] ) ) uniq_hgnc_symbols = set ( hgnc_symbol for hgnc_symbol in ( hgnc_symbols or [ ] ) ) genes = [ ] gene_data = [ ] if uniq_ensembl_ids : for ensembl_id in uniq_ensembl_ids : for res in query_gene ( ensembl_id = ensembl_id ) : gene_data . append ( res ) elif uniq_hgnc_symbols : for hgnc_symbol in uniq_hgnc_symbols : query_res = query_gene ( hgnc_symbol = hgnc_symbol ) if query_res : for res in query_res : gene_data . append ( res ) else : # If no result we add just the symbol gene_data . append ( { 'hgnc_symbol' : hgnc_symbol , 'hgnc_id' : None , 'ensembl_id' : None , 'description' : None , 'chrom' : 'unknown' , 'start' : 0 , 'stop' : 0 , 'hi_score' : None , 'constraint_score' : None , } ) for gene in gene_data : genes . append ( Gene ( symbol = gene [ 'hgnc_symbol' ] , hgnc_id = gene [ 'hgnc_id' ] , ensembl_id = gene [ 'ensembl_id' ] , description = gene [ 'description' ] , chrom = gene [ 'chrom' ] , start = gene [ 'start' ] , stop = gene [ 'stop' ] , location = get_cytoband_coord ( gene [ 'chrom' ] , gene [ 'start' ] ) , hi_score = gene [ 'hi_score' ] , constraint_score = gene [ 'constraint_score' ] , omim_number = get_omim_number ( gene [ 'hgnc_symbol' ] ) ) ) return genes
| 4,868
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/utils/get_info.py#L29-L83
|
[
"def",
"pull_session",
"(",
"session_id",
"=",
"None",
",",
"url",
"=",
"'default'",
",",
"io_loop",
"=",
"None",
",",
"arguments",
"=",
"None",
")",
":",
"coords",
"=",
"_SessionCoordinates",
"(",
"session_id",
"=",
"session_id",
",",
"url",
"=",
"url",
")",
"session",
"=",
"ClientSession",
"(",
"session_id",
"=",
"session_id",
",",
"websocket_url",
"=",
"websocket_url_for_server_url",
"(",
"coords",
".",
"url",
")",
",",
"io_loop",
"=",
"io_loop",
",",
"arguments",
"=",
"arguments",
")",
"session",
".",
"pull",
"(",
")",
"return",
"session"
] |
Get the most severe consequence
|
def get_most_severe_consequence ( transcripts ) : most_severe_consequence = None most_severe_score = None for transcript in transcripts : for consequence in transcript [ 'consequence' ] . split ( '&' ) : logger . debug ( "Checking severity score for consequence: {0}" . format ( consequence ) ) severity_score = SEVERITY_DICT . get ( consequence ) logger . debug ( "Severity score found: {0}" . format ( severity_score ) ) if severity_score != None : if most_severe_score : if severity_score < most_severe_score : most_severe_consequence = consequence most_severe_score = severity_score else : most_severe_consequence = consequence most_severe_score = severity_score return most_severe_consequence
| 4,869
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/utils/get_info.py#L85-L119
|
[
"def",
"running_state",
"(",
"self",
",",
"running_state",
")",
":",
"allowed_values",
"=",
"[",
"\"ONGOING\"",
",",
"\"PENDING\"",
",",
"\"ENDED\"",
"]",
"# noqa: E501",
"if",
"running_state",
"not",
"in",
"allowed_values",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `running_state` ({0}), must be one of {1}\"",
"# noqa: E501",
".",
"format",
"(",
"running_state",
",",
"allowed_values",
")",
")",
"self",
".",
"_running_state",
"=",
"running_state"
] |
Get the cytoband coordinate for a position
|
def get_cytoband_coord ( chrom , pos ) : chrom = chrom . strip ( 'chr' ) pos = int ( pos ) result = None logger . debug ( "Finding Cytoband for chrom:{0} pos:{1}" . format ( chrom , pos ) ) if chrom in CYTOBANDS : for interval in CYTOBANDS [ chrom ] [ pos ] : result = "{0}{1}" . format ( chrom , interval . data ) return result
| 4,870
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/utils/get_info.py#L121-L139
|
[
"def",
"_prt_py_sections",
"(",
"self",
",",
"sec2d_nt",
",",
"prt",
"=",
"sys",
".",
"stdout",
",",
"doc",
"=",
"None",
")",
":",
"if",
"doc",
"is",
"None",
":",
"doc",
"=",
"'Sections variable'",
"prt",
".",
"write",
"(",
"'\"\"\"{DOC}\"\"\"\\n\\n'",
".",
"format",
"(",
"DOC",
"=",
"doc",
")",
")",
"self",
".",
"prt_ver",
"(",
"prt",
")",
"prt",
".",
"write",
"(",
"\"# pylint: disable=line-too-long\\n\"",
")",
"strcnt",
"=",
"self",
".",
"get_summary_str",
"(",
"sec2d_nt",
")",
"prt",
".",
"write",
"(",
"\"SECTIONS = [ # {CNTS}\\n\"",
".",
"format",
"(",
"CNTS",
"=",
"strcnt",
")",
")",
"prt",
".",
"write",
"(",
"' # (\"New Section\", [\\n'",
")",
"prt",
".",
"write",
"(",
"' # ]),\\n'",
")",
"for",
"section_name",
",",
"nthdrgos",
"in",
"sec2d_nt",
":",
"self",
".",
"_prt_py_section",
"(",
"prt",
",",
"section_name",
",",
"nthdrgos",
")",
"prt",
".",
"write",
"(",
"\"]\\n\"",
")"
] |
Do a simple parse of the dotfile mapping using semicolons to separate source file name from the target file paths .
|
def parse_mapping ( self , map_path , source = None , dotfiles = None ) : include_re = r"""^\s*#include\s+(".+"|'.+')""" include_re = re . compile ( include_re , re . I ) mapping_re = r"""^("[^"]+"|\'[^\']+\'|[^\'":]+)\s*(?::\s*(.*)\s*)?$""" mapping_re = re . compile ( mapping_re ) filename = None map_path = path . realpath ( path . expanduser ( map_path ) ) if path . isfile ( map_path ) : filename = map_path elif path . isdir ( map_path ) : # try finding a mapping in the target directory for map_name in '.dotfiles' , 'dotfiles' : candidate = path . join ( map_path , map_name ) if path . isfile ( candidate ) : filename = candidate break if filename is None : raise ValueError ( 'No dotfile mapping found in %s' % map_path ) if source is None : source = path . dirname ( map_path ) if dotfiles is None : dotfiles = OrderedDict ( ) lineno = 0 with open ( filename ) as fh : for line in fh : lineno += 1 content = line . strip ( ) match = include_re . match ( content ) if match : include_path = match . group ( 1 ) . strip ( '\'"' ) if ( include_path . startswith ( '/' ) or include_path . startswith ( '~' ) ) : include_path = path . realpath ( path . expanduser ( include_path ) ) else : include_path = path . join ( path . dirname ( filename ) , include_path ) if path . exists ( include_path ) : self . log . debug ( 'Recursively parsing mapping in %s' , include_path ) dotfiles = self . parse_mapping ( include_path , dotfiles = dotfiles ) else : self . log . warning ( 'Include command points to file or ' 'directory that does not exist, "%s",' ' on line %d' , include_path , lineno ) if not content or content . startswith ( '#' ) : # comment line or empty line continue match = mapping_re . match ( content ) if match : source_path , target_path = match . groups ( ) source_path = path . join ( source , source_path . strip ( '\'"' ) ) if source_path in dotfiles : self . log . 
warning ( 'Duplicate dotfile source "%s" ' 'on line #%d' , lineno ) continue if target_path is None : target_path = source_path dotfiles [ source_path ] = target_path else : self . log . warning ( 'Dotfile mapping regex failed on line ' '#%d' , lineno ) return dotfiles
| 4,871
|
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L129-L211
|
[
"def",
"LogAccessWrapper",
"(",
"func",
")",
":",
"def",
"Wrapper",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Wrapping function.\"\"\"",
"try",
":",
"response",
"=",
"func",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"server_logging",
".",
"LOGGER",
".",
"LogHttpAdminUIAccess",
"(",
"request",
",",
"response",
")",
"except",
"Exception",
":",
"# pylint: disable=g-broad-except",
"# This should never happen: wrapped function is supposed to handle",
"# all possible exceptions and generate a proper Response object.",
"# Still, handling exceptions here to guarantee that the access is logged",
"# no matter what.",
"response",
"=",
"werkzeug_wrappers",
".",
"Response",
"(",
"\"\"",
",",
"status",
"=",
"500",
")",
"server_logging",
".",
"LOGGER",
".",
"LogHttpAdminUIAccess",
"(",
"request",
",",
"response",
")",
"raise",
"return",
"response",
"return",
"Wrapper"
] |
Run a shell command with the given arguments .
|
def sh ( self , * command , * * kwargs ) : self . log . debug ( 'shell: %s' , ' ' . join ( command ) ) return subprocess . check_call ( ' ' . join ( command ) , stdout = sys . stdout , stderr = sys . stderr , stdin = sys . stdin , shell = True , * * kwargs )
| 4,872
|
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L213-L220
|
[
"def",
"save_to_file",
"(",
"self",
",",
"filename",
",",
"remap_dim0",
"=",
"None",
",",
"remap_dim1",
"=",
"None",
")",
":",
"# rows - first index",
"# columns - second index",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"fobj",
":",
"columns",
"=",
"list",
"(",
"sorted",
"(",
"self",
".",
"_dim1",
")",
")",
"for",
"col",
"in",
"columns",
":",
"fobj",
".",
"write",
"(",
"','",
")",
"fobj",
".",
"write",
"(",
"str",
"(",
"remap_dim1",
"[",
"col",
"]",
"if",
"remap_dim1",
"else",
"col",
")",
")",
"fobj",
".",
"write",
"(",
"'\\n'",
")",
"for",
"row",
"in",
"sorted",
"(",
"self",
".",
"_dim0",
")",
":",
"fobj",
".",
"write",
"(",
"str",
"(",
"remap_dim0",
"[",
"row",
"]",
"if",
"remap_dim0",
"else",
"row",
")",
")",
"for",
"col",
"in",
"columns",
":",
"fobj",
".",
"write",
"(",
"','",
")",
"fobj",
".",
"write",
"(",
"str",
"(",
"self",
"[",
"row",
",",
"col",
"]",
")",
")",
"fobj",
".",
"write",
"(",
"'\\n'",
")"
] |
Copy a local file to the given remote path .
|
def scp ( self , local_file , remote_path = '' ) : if self . args . user : upload_spec = '{0}@{1}:{2}' . format ( self . args . user , self . args . server , remote_path ) else : upload_spec = '{0}:{1}' . format ( self . args . server , remote_path ) return self . sh ( 'scp' , local_file , upload_spec )
| 4,873
|
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L231-L240
|
[
"def",
"get_rng",
"(",
"obj",
"=",
"None",
")",
":",
"seed",
"=",
"(",
"id",
"(",
"obj",
")",
"+",
"os",
".",
"getpid",
"(",
")",
"+",
"int",
"(",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%Y%m%d%H%M%S%f\"",
")",
")",
")",
"%",
"4294967295",
"if",
"_RNG_SEED",
"is",
"not",
"None",
":",
"seed",
"=",
"_RNG_SEED",
"return",
"np",
".",
"random",
".",
"RandomState",
"(",
"seed",
")"
] |
Start the dotfile deployment process .
|
def run ( self ) : script = path . realpath ( __file__ ) self . log . debug ( 'Running from %s with arguments: %s' , script , self . args ) if self . args . source : self . source = self . args . source else : # hardcoding as the parent-parent of the script for now self . source = path . dirname ( path . dirname ( script ) ) self . log . debug ( 'Sourcing dotfiles from %s' , self . source ) try : if self . args . repo : self . clone_repo ( ) self . deploy_dotfiles ( self . load_dotfiles ( ) ) except : self . log . exception ( 'Profile deploy failed' ) finally : if self . args . repo : self . cleanup_repo ( )
| 4,874
|
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L246-L269
|
[
"def",
"user_deleted_from_site_event",
"(",
"event",
")",
":",
"userid",
"=",
"event",
".",
"principal",
"catalog",
"=",
"api",
".",
"portal",
".",
"get_tool",
"(",
"'portal_catalog'",
")",
"query",
"=",
"{",
"'object_provides'",
":",
"WORKSPACE_INTERFACE",
"}",
"query",
"[",
"'workspace_members'",
"]",
"=",
"userid",
"workspaces",
"=",
"[",
"IWorkspace",
"(",
"b",
".",
"_unrestrictedGetObject",
"(",
")",
")",
"for",
"b",
"in",
"catalog",
".",
"unrestrictedSearchResults",
"(",
"query",
")",
"]",
"for",
"workspace",
"in",
"workspaces",
":",
"workspace",
".",
"remove_from_team",
"(",
"userid",
")"
] |
Read in the dotfile mapping as a dictionary .
|
def load_dotfiles ( self ) : if self . args . map and path . exists ( self . args . map ) : dotfiles_path = self . args . map else : dotfiles_path = self . source self . log . debug ( 'Loading dotfile mapping from %s' , dotfiles_path ) return self . parse_mapping ( dotfiles_path , source = self . source )
| 4,875
|
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L271-L280
|
[
"def",
"make_random_models_table",
"(",
"n_sources",
",",
"param_ranges",
",",
"random_state",
"=",
"None",
")",
":",
"prng",
"=",
"check_random_state",
"(",
"random_state",
")",
"sources",
"=",
"Table",
"(",
")",
"for",
"param_name",
",",
"(",
"lower",
",",
"upper",
")",
"in",
"param_ranges",
".",
"items",
"(",
")",
":",
"# Generate a column for every item in param_ranges, even if it",
"# is not in the model (e.g. flux). However, such columns will",
"# be ignored when rendering the image.",
"sources",
"[",
"param_name",
"]",
"=",
"prng",
".",
"uniform",
"(",
"lower",
",",
"upper",
",",
"n_sources",
")",
"return",
"sources"
] |
Clone a repository containing the dotfiles source .
|
def clone_repo ( self ) : tempdir_path = tempfile . mkdtemp ( ) if self . args . git : self . log . debug ( 'Cloning git source repository from %s to %s' , self . source , tempdir_path ) self . sh ( 'git clone' , self . source , tempdir_path ) else : raise NotImplementedError ( 'Unknown repo type' ) self . source = tempdir_path
| 4,876
|
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L282-L294
|
[
"def",
"derivative_extraction",
"(",
"feat",
",",
"DeltaWindows",
")",
":",
"# Getting the shape of the vector.",
"rows",
",",
"cols",
"=",
"feat",
".",
"shape",
"# Difining the vector of differences.",
"DIF",
"=",
"np",
".",
"zeros",
"(",
"feat",
".",
"shape",
",",
"dtype",
"=",
"feat",
".",
"dtype",
")",
"Scale",
"=",
"0",
"# Pad only along features in the vector.",
"FEAT",
"=",
"np",
".",
"lib",
".",
"pad",
"(",
"feat",
",",
"(",
"(",
"0",
",",
"0",
")",
",",
"(",
"DeltaWindows",
",",
"DeltaWindows",
")",
")",
",",
"'edge'",
")",
"for",
"i",
"in",
"range",
"(",
"DeltaWindows",
")",
":",
"# Start index",
"offset",
"=",
"DeltaWindows",
"# The dynamic range",
"Range",
"=",
"i",
"+",
"1",
"dif",
"=",
"Range",
"*",
"FEAT",
"[",
":",
",",
"offset",
"+",
"Range",
":",
"offset",
"+",
"Range",
"+",
"cols",
"]",
"-",
"FEAT",
"[",
":",
",",
"offset",
"-",
"Range",
":",
"offset",
"-",
"Range",
"+",
"cols",
"]",
"Scale",
"+=",
"2",
"*",
"np",
".",
"power",
"(",
"Range",
",",
"2",
")",
"DIF",
"+=",
"dif",
"return",
"DIF",
"/",
"Scale"
] |
Cleanup the temporary directory containing the dotfiles repo .
|
def cleanup_repo ( self ) : if self . source and path . isdir ( self . source ) : self . log . debug ( 'Cleaning up source repo from %s' , self . source ) shutil . rmtree ( self . source )
| 4,877
|
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L296-L300
|
[
"def",
"derivative_extraction",
"(",
"feat",
",",
"DeltaWindows",
")",
":",
"# Getting the shape of the vector.",
"rows",
",",
"cols",
"=",
"feat",
".",
"shape",
"# Difining the vector of differences.",
"DIF",
"=",
"np",
".",
"zeros",
"(",
"feat",
".",
"shape",
",",
"dtype",
"=",
"feat",
".",
"dtype",
")",
"Scale",
"=",
"0",
"# Pad only along features in the vector.",
"FEAT",
"=",
"np",
".",
"lib",
".",
"pad",
"(",
"feat",
",",
"(",
"(",
"0",
",",
"0",
")",
",",
"(",
"DeltaWindows",
",",
"DeltaWindows",
")",
")",
",",
"'edge'",
")",
"for",
"i",
"in",
"range",
"(",
"DeltaWindows",
")",
":",
"# Start index",
"offset",
"=",
"DeltaWindows",
"# The dynamic range",
"Range",
"=",
"i",
"+",
"1",
"dif",
"=",
"Range",
"*",
"FEAT",
"[",
":",
",",
"offset",
"+",
"Range",
":",
"offset",
"+",
"Range",
"+",
"cols",
"]",
"-",
"FEAT",
"[",
":",
",",
"offset",
"-",
"Range",
":",
"offset",
"-",
"Range",
"+",
"cols",
"]",
"Scale",
"+=",
"2",
"*",
"np",
".",
"power",
"(",
"Range",
",",
"2",
")",
"DIF",
"+=",
"dif",
"return",
"DIF",
"/",
"Scale"
] |
Deploy dotfiles using the appropriate method .
|
def deploy_dotfiles ( self , dotfiles ) : if self . args . server : return self . deploy_remote ( dotfiles ) else : return self . deploy_local ( dotfiles )
| 4,878
|
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L302-L307
|
[
"def",
"get_active_token",
"(",
"self",
")",
":",
"expire_time",
"=",
"self",
".",
"store_handler",
".",
"has_value",
"(",
"\"expires\"",
")",
"access_token",
"=",
"self",
".",
"store_handler",
".",
"has_value",
"(",
"\"access_token\"",
")",
"if",
"expire_time",
"and",
"access_token",
":",
"expire_time",
"=",
"self",
".",
"store_handler",
".",
"get_value",
"(",
"\"expires\"",
")",
"if",
"not",
"datetime",
".",
"now",
"(",
")",
"<",
"datetime",
".",
"fromtimestamp",
"(",
"float",
"(",
"expire_time",
")",
")",
":",
"self",
".",
"store_handler",
".",
"delete_value",
"(",
"\"access_token\"",
")",
"self",
".",
"store_handler",
".",
"delete_value",
"(",
"\"expires\"",
")",
"logger",
".",
"info",
"(",
"'Access token expired, going to get new token'",
")",
"self",
".",
"auth",
"(",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'Access token noy expired yet'",
")",
"else",
":",
"self",
".",
"auth",
"(",
")",
"return",
"self",
".",
"store_handler",
".",
"get_value",
"(",
"\"access_token\"",
")"
] |
Deploy dotfiles to a remote server .
|
def deploy_remote ( self , dotfiles ) : tempfile_path = None tempdir_path = None try : tempdir_path = tempfile . mkdtemp ( ) self . log . debug ( 'Deploying to temp dir %s' , tempdir_path ) self . deploy_local ( dotfiles , target_root = tempdir_path ) if self . args . rsync : local_spec = tempdir_path . rstrip ( '/' ) + '/' remote_spec = self . args . path . rstrip ( '/' ) + '/' if self . args . user : remote_spec = "{0}@{1}:{2}" . format ( self . args . user , self . args . server , remote_spec ) else : remote_spec = "{0}:{1}" . format ( self . args . server , remote_spec ) self . log . debug ( 'Using rsync to sync dotfiles to %s' , remote_spec ) self . sh ( 'rsync' , '-az' , local_spec , remote_spec ) else : fh , tempfile_path = tempfile . mkstemp ( suffix = '.tar.gz' ) os . close ( fh ) self . log . debug ( 'Creating tar file %s' , tempfile_path ) shutil . make_archive ( tempfile_path . replace ( '.tar.gz' , '' ) , 'gztar' , tempdir_path ) upload_path = '_profile_upload.tgz' self . log . debug ( 'Uploading tarball to %s' , upload_path ) self . scp ( tempfile_path , upload_path ) if self . args . path : ssh_command = "'mkdir -p {0} && " "tar xf _profile_upload.tgz -C {0}; " "rm -f _profile_upload.tgz'" "" . format ( self . args . path ) else : ssh_command = "tar xf _profile_upload.tgz; " "rm -f _profile_upload.tgz" self . log . debug ( 'Using ssh to unpack tarball and clean up' ) self . ssh ( ssh_command ) finally : if tempdir_path and path . isdir ( tempdir_path ) : self . log . debug ( 'Removing temp dir %s' , tempdir_path ) shutil . rmtree ( tempdir_path ) if tempfile_path and path . isfile ( tempfile_path ) : self . log . debug ( 'Removing temp file %s' , tempfile_path ) os . unlink ( tempfile_path )
| 4,879
|
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L309-L364
|
[
"def",
"update",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"augment_args",
"(",
"args",
",",
"kwargs",
")",
"kwargs",
"[",
"'log_action'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'log_action'",
",",
"'update'",
")",
"if",
"not",
"self",
".",
"rec",
":",
"return",
"self",
".",
"add",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"# Don't update object; use whatever was set in the original record",
"if",
"k",
"not",
"in",
"(",
"'source'",
",",
"'s_vid'",
",",
"'table'",
",",
"'t_vid'",
",",
"'partition'",
",",
"'p_vid'",
")",
":",
"setattr",
"(",
"self",
".",
"rec",
",",
"k",
",",
"v",
")",
"self",
".",
"_session",
".",
"merge",
"(",
"self",
".",
"rec",
")",
"if",
"self",
".",
"_logger",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"self",
".",
"rec",
".",
"log_str",
")",
"self",
".",
"_session",
".",
"commit",
"(",
")",
"self",
".",
"_ai_rec_id",
"=",
"None",
"return",
"self",
".",
"rec",
".",
"id"
] |
Deploy dotfiles to a local path .
|
def deploy_local ( self , dotfiles , target_root = None ) : if target_root is None : target_root = self . args . path for source_path , target_path in dotfiles . items ( ) : source_path = path . join ( self . source , source_path ) target_path = path . join ( target_root , target_path ) if path . isfile ( target_path ) or path . islink ( target_path ) : self . log . debug ( 'Removing existing file at %s' , target_path ) os . unlink ( target_path ) elif path . isdir ( target_path ) : self . log . debug ( 'Removing existing dir at %s' , target_path ) shutil . rmtree ( target_path ) parent_dir = path . dirname ( target_path ) if not path . isdir ( parent_dir ) : self . log . debug ( 'Creating parent dir %s' , parent_dir ) os . makedirs ( parent_dir ) if self . args . copy : if path . isdir ( source_path ) : self . log . debug ( 'Copying file %s to %s' , source_path , target_path ) shutil . copytree ( source_path , target_path ) else : self . log . debug ( 'Copying dir %s to %s' , source_path , target_path ) shutil . copy ( source_path , target_path ) else : self . log . debug ( 'Symlinking %s -> %s' , target_path , source_path ) os . symlink ( source_path , target_path )
| 4,880
|
https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L366-L400
|
[
"def",
"get_queryset",
"(",
"self",
")",
":",
"return",
"Event",
".",
"objects",
".",
"filter",
"(",
"Q",
"(",
"startTime__gte",
"=",
"timezone",
".",
"now",
"(",
")",
"-",
"timedelta",
"(",
"days",
"=",
"90",
")",
")",
"&",
"(",
"Q",
"(",
"series__isnull",
"=",
"False",
")",
"|",
"Q",
"(",
"publicevent__isnull",
"=",
"False",
")",
")",
")",
".",
"annotate",
"(",
"count",
"=",
"Count",
"(",
"'eventregistration'",
")",
")",
".",
"annotate",
"(",
"*",
"*",
"self",
".",
"get_annotations",
"(",
")",
")",
".",
"exclude",
"(",
"Q",
"(",
"count",
"=",
"0",
")",
"&",
"Q",
"(",
"status__in",
"=",
"[",
"Event",
".",
"RegStatus",
".",
"hidden",
",",
"Event",
".",
"RegStatus",
".",
"regHidden",
",",
"Event",
".",
"RegStatus",
".",
"disabled",
"]",
")",
")"
] |
Remove duplicates from a list preserving the order .
|
def dedupe_list ( l ) : result = [ ] for el in l : if el not in result : result . append ( el ) return result
| 4,881
|
https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/utils.py#L107-L120
|
[
"def",
"init",
"(",
"cls",
",",
"conn_string",
"=",
"None",
")",
":",
"if",
"conn_string",
":",
"_update_meta",
"(",
"conn_string",
")",
"# We initialize the engine within the models module because models'",
"# schema can depend on which data types are supported by the engine",
"Meta",
".",
"Session",
"=",
"new_sessionmaker",
"(",
")",
"Meta",
".",
"engine",
"=",
"Meta",
".",
"Session",
".",
"kw",
"[",
"\"bind\"",
"]",
"logger",
".",
"info",
"(",
"f\"Connecting user:{Meta.DBUSER} \"",
"f\"to {Meta.DBHOST}:{Meta.DBPORT}/{Meta.DBNAME}\"",
")",
"Meta",
".",
"_init_db",
"(",
")",
"if",
"not",
"Meta",
".",
"log_path",
":",
"init_logging",
"(",
")",
"return",
"cls"
] |
Plotting of amino diagrams - circles with residue name and id colored according to the residue type . If the protein has more than one chain chain identity is also included in the plot . The plot is saved as svg file with residue id and chain id as filename for more certain identification .
|
def plot_amino_diagrams ( self ) : for res in self . topology_data . dict_of_plotted_res : try : color = [ self . colors_amino_acids [ self . amino_acids [ res [ 0 ] ] ] , 'white' ] except KeyError : color = [ "pink" , 'white' ] plt . figure ( figsize = ( 2.5 , 2.5 ) ) ring1 , _ = plt . pie ( [ 1 ] , radius = 1 , startangle = 90 , colors = color , counterclock = False ) plt . axis ( 'equal' ) plt . setp ( ring1 , width = 1 , edgecolor = color [ 0 ] ) if len ( self . topology_data . universe . protein . segments ) <= 1 : #Parameters for amino diagrams without segids plt . text ( 0 , - 0.45 , res [ 0 ] + "\n" + res [ 1 ] , ha = 'center' , size = 36 , fontweight = "bold" ) else : #Parameters for amino diagrams with segids plt . text ( 0 , - 0.37 , res [ 0 ] + "\n" + res [ 1 ] + " " + res [ 2 ] , ha = 'center' , size = 30 , fontweight = "bold" ) #play with the dpi pylab . savefig ( str ( res [ 1 ] ) + res [ 2 ] + ".svg" , dpi = 300 , transparent = True )
| 4,882
|
https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/plots.py#L47-L71
|
[
"def",
"clear",
"(",
"self",
")",
":",
"for",
"key",
"in",
"list",
"(",
"self",
".",
"queue",
".",
"keys",
"(",
")",
")",
":",
"if",
"self",
".",
"queue",
"[",
"key",
"]",
"[",
"'status'",
"]",
"in",
"[",
"'done'",
",",
"'failed'",
"]",
":",
"del",
"self",
".",
"queue",
"[",
"key",
"]",
"self",
".",
"write",
"(",
")"
] |
Create a cases and populate it with individuals
|
def get_cases ( variant_source , case_lines = None , case_type = 'ped' , variant_type = 'snv' , variant_mode = 'vcf' ) : individuals = get_individuals ( variant_source = variant_source , case_lines = case_lines , case_type = case_type , variant_mode = variant_mode ) case_objs = [ ] case_ids = set ( ) compressed = False tabix_index = False #If no individuals we still need to have a case id if variant_source . endswith ( '.gz' ) : logger . debug ( "Found compressed variant source" ) compressed = True tabix_file = '.' . join ( [ variant_source , 'tbi' ] ) if os . path . exists ( tabix_file ) : logger . debug ( "Found index file" ) tabix_index = True if len ( individuals ) > 0 : for individual in individuals : case_ids . add ( individual . case_id ) else : case_ids = [ os . path . basename ( variant_source ) ] for case_id in case_ids : logger . info ( "Found case {0}" . format ( case_id ) ) case = Case ( case_id = case_id , name = case_id , variant_source = variant_source , variant_type = variant_type , variant_mode = variant_mode , compressed = compressed , tabix_index = tabix_index ) # Add the individuals to the correct case for individual in individuals : if individual . case_id == case_id : logger . info ( "Adding ind {0} to case {1}" . format ( individual . name , individual . case_id ) ) case . add_individual ( individual ) case_objs . append ( case ) return case_objs
| 4,883
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/utils/ped.py#L20-L80
|
[
"def",
"refresh_table_metadata",
"(",
"self",
",",
"keyspace",
",",
"table",
",",
"max_schema_agreement_wait",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"control_connection",
".",
"refresh_schema",
"(",
"target_type",
"=",
"SchemaTargetType",
".",
"TABLE",
",",
"keyspace",
"=",
"keyspace",
",",
"table",
"=",
"table",
",",
"schema_agreement_wait",
"=",
"max_schema_agreement_wait",
",",
"force",
"=",
"True",
")",
":",
"raise",
"DriverException",
"(",
"\"Table metadata was not refreshed. See log for details.\"",
")"
] |
Process UDP messages
|
def handle_message ( self , message ) : if self . _yamaha : if 'power' in message : _LOGGER . debug ( "Power: %s" , message . get ( 'power' ) ) self . _yamaha . power = ( STATE_ON if message . get ( 'power' ) == "on" else STATE_OFF ) if 'input' in message : _LOGGER . debug ( "Input: %s" , message . get ( 'input' ) ) self . _yamaha . _source = message . get ( 'input' ) if 'volume' in message : volume = message . get ( 'volume' ) if 'max_volume' in message : volume_max = message . get ( 'max_volume' ) else : volume_max = self . _yamaha . volume_max _LOGGER . debug ( "Volume: %d / Max: %d" , volume , volume_max ) self . _yamaha . volume = volume / volume_max self . _yamaha . volume_max = volume_max if 'mute' in message : _LOGGER . debug ( "Mute: %s" , message . get ( 'mute' ) ) self . _yamaha . mute = message . get ( 'mute' , False ) else : _LOGGER . debug ( "No yamaha-obj found" )
| 4,884
|
https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/zone.py#L54-L80
|
[
"def",
"returnJobReqs",
"(",
"self",
",",
"jobReqs",
")",
":",
"# Since we are only reading this job's specific values from the state file, we don't",
"# need a lock",
"jobState",
"=",
"self",
".",
"_JobState",
"(",
"self",
".",
"_CacheState",
".",
"_load",
"(",
"self",
".",
"cacheStateFile",
")",
".",
"jobState",
"[",
"self",
".",
"jobID",
"]",
")",
"for",
"x",
"in",
"list",
"(",
"jobState",
".",
"jobSpecificFiles",
".",
"keys",
"(",
")",
")",
":",
"self",
".",
"deleteLocalFile",
"(",
"x",
")",
"with",
"self",
".",
"_CacheState",
".",
"open",
"(",
"self",
")",
"as",
"cacheInfo",
":",
"cacheInfo",
".",
"sigmaJob",
"-=",
"jobReqs"
] |
Updates the zone status .
|
def update_status ( self , new_status = None ) : _LOGGER . debug ( "update_status: Zone %s" , self . zone_id ) if self . status and new_status is None : _LOGGER . debug ( "Zone: healthy." ) else : old_status = self . status or { } if new_status : # merge new_status with existing for comparison _LOGGER . debug ( "Set status: provided" ) # make a copy of the old_status status = old_status . copy ( ) # merge updated items into status status . update ( new_status ) # promote merged_status to new_status new_status = status else : _LOGGER . debug ( "Set status: own" ) new_status = self . get_status ( ) _LOGGER . debug ( "old_status: %s" , old_status ) _LOGGER . debug ( "new_status: %s" , new_status ) _LOGGER . debug ( "is_equal: %s" , old_status == new_status ) if new_status != old_status : self . handle_message ( new_status ) self . _status_sent = False self . status = new_status if not self . _status_sent : self . _status_sent = self . update_hass ( )
| 4,885
|
https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/zone.py#L82-L117
|
[
"def",
"on_response",
"(",
"self",
",",
"ch",
",",
"method_frame",
",",
"props",
",",
"body",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"rabbitmq.Requester.on_response\"",
")",
"if",
"self",
".",
"corr_id",
"==",
"props",
".",
"correlation_id",
":",
"self",
".",
"response",
"=",
"{",
"'props'",
":",
"props",
",",
"'body'",
":",
"body",
"}",
"else",
":",
"LOGGER",
".",
"warn",
"(",
"\"rabbitmq.Requester.on_response - discarded response : \"",
"+",
"str",
"(",
"props",
".",
"correlation_id",
")",
")",
"LOGGER",
".",
"debug",
"(",
"\"natsd.Requester.on_response - discarded response : \"",
"+",
"str",
"(",
"{",
"'properties'",
":",
"props",
",",
"'body'",
":",
"body",
"}",
")",
")"
] |
Send Power command .
|
def set_power ( self , power ) : req_url = ENDPOINTS [ "setPower" ] . format ( self . ip_address , self . zone_id ) params = { "power" : "on" if power else "standby" } return request ( req_url , params = params )
| 4,886
|
https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/zone.py#L133-L137
|
[
"def",
"_adjust_n_years",
"(",
"other",
",",
"n",
",",
"month",
",",
"reference_day",
")",
":",
"if",
"n",
">",
"0",
":",
"if",
"other",
".",
"month",
"<",
"month",
"or",
"(",
"other",
".",
"month",
"==",
"month",
"and",
"other",
".",
"day",
"<",
"reference_day",
")",
":",
"n",
"-=",
"1",
"else",
":",
"if",
"other",
".",
"month",
">",
"month",
"or",
"(",
"other",
".",
"month",
"==",
"month",
"and",
"other",
".",
"day",
">",
"reference_day",
")",
":",
"n",
"+=",
"1",
"return",
"n"
] |
Send mute command .
|
def set_mute ( self , mute ) : req_url = ENDPOINTS [ "setMute" ] . format ( self . ip_address , self . zone_id ) params = { "enable" : "true" if mute else "false" } return request ( req_url , params = params )
| 4,887
|
https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/zone.py#L139-L143
|
[
"def",
"_adjust_n_years",
"(",
"other",
",",
"n",
",",
"month",
",",
"reference_day",
")",
":",
"if",
"n",
">",
"0",
":",
"if",
"other",
".",
"month",
"<",
"month",
"or",
"(",
"other",
".",
"month",
"==",
"month",
"and",
"other",
".",
"day",
"<",
"reference_day",
")",
":",
"n",
"-=",
"1",
"else",
":",
"if",
"other",
".",
"month",
">",
"month",
"or",
"(",
"other",
".",
"month",
"==",
"month",
"and",
"other",
".",
"day",
">",
"reference_day",
")",
":",
"n",
"+=",
"1",
"return",
"n"
] |
Send Volume command .
|
def set_volume ( self , volume ) : req_url = ENDPOINTS [ "setVolume" ] . format ( self . ip_address , self . zone_id ) params = { "volume" : int ( volume ) } return request ( req_url , params = params )
| 4,888
|
https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/zone.py#L145-L149
|
[
"def",
"_adjust_n_years",
"(",
"other",
",",
"n",
",",
"month",
",",
"reference_day",
")",
":",
"if",
"n",
">",
"0",
":",
"if",
"other",
".",
"month",
"<",
"month",
"or",
"(",
"other",
".",
"month",
"==",
"month",
"and",
"other",
".",
"day",
"<",
"reference_day",
")",
":",
"n",
"-=",
"1",
"else",
":",
"if",
"other",
".",
"month",
">",
"month",
"or",
"(",
"other",
".",
"month",
"==",
"month",
"and",
"other",
".",
"day",
">",
"reference_day",
")",
":",
"n",
"+=",
"1",
"return",
"n"
] |
Send Input command .
|
def set_input ( self , input_id ) : req_url = ENDPOINTS [ "setInput" ] . format ( self . ip_address , self . zone_id ) params = { "input" : input_id } return request ( req_url , params = params )
| 4,889
|
https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/zone.py#L151-L155
|
[
"def",
"_adjust_n_years",
"(",
"other",
",",
"n",
",",
"month",
",",
"reference_day",
")",
":",
"if",
"n",
">",
"0",
":",
"if",
"other",
".",
"month",
"<",
"month",
"or",
"(",
"other",
".",
"month",
"==",
"month",
"and",
"other",
".",
"day",
"<",
"reference_day",
")",
":",
"n",
"-=",
"1",
"else",
":",
"if",
"other",
".",
"month",
">",
"month",
"or",
"(",
"other",
".",
"month",
"==",
"month",
"and",
"other",
".",
"day",
">",
"reference_day",
")",
":",
"n",
"+=",
"1",
"return",
"n"
] |
Check if there are any compounds and add them to the variant The compounds that are added should be sorted on rank score
|
def _add_compounds ( self , variant_obj , info_dict ) : compound_list = [ ] compound_entry = info_dict . get ( 'Compounds' ) if compound_entry : for family_annotation in compound_entry . split ( ',' ) : compounds = family_annotation . split ( ':' ) [ - 1 ] . split ( '|' ) for compound in compounds : splitted_compound = compound . split ( '>' ) compound_score = None if len ( splitted_compound ) > 1 : compound_id = splitted_compound [ 0 ] compound_score = int ( splitted_compound [ - 1 ] ) compound_list . append ( Compound ( variant_id = compound_id , combined_score = compound_score ) ) #Sort the compounds based on rank score compound_list . sort ( key = operator . attrgetter ( 'combined_score' ) , reverse = True ) for compound in compound_list : variant_obj . add_compound ( compound )
| 4,890
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/vcf/mixins/variant_extras/annotations.py#L11-L39
|
[
"def",
"serverinfo",
"(",
"url",
"=",
"'http://localhost:8080/manager'",
",",
"timeout",
"=",
"180",
")",
":",
"data",
"=",
"_wget",
"(",
"'serverinfo'",
",",
"{",
"}",
",",
"url",
",",
"timeout",
"=",
"timeout",
")",
"if",
"data",
"[",
"'res'",
"]",
"is",
"False",
":",
"return",
"{",
"'error'",
":",
"data",
"[",
"'msg'",
"]",
"}",
"ret",
"=",
"{",
"}",
"data",
"[",
"'msg'",
"]",
".",
"pop",
"(",
"0",
")",
"for",
"line",
"in",
"data",
"[",
"'msg'",
"]",
":",
"tmp",
"=",
"line",
".",
"split",
"(",
"':'",
")",
"ret",
"[",
"tmp",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"]",
"=",
"tmp",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"return",
"ret"
] |
r Parse the contents of the file - like object fp as an XML properties file and return a dict of the key - value pairs .
|
def load_xml ( fp , object_pairs_hook = dict ) : tree = ET . parse ( fp ) return object_pairs_hook ( _fromXML ( tree . getroot ( ) ) )
| 4,891
|
https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/xmlprops.py#L7-L41
|
[
"def",
"fail_run_group",
"(",
"group",
",",
"session",
")",
":",
"from",
"datetime",
"import",
"datetime",
"group",
".",
"end",
"=",
"datetime",
".",
"now",
"(",
")",
"group",
".",
"status",
"=",
"'failed'",
"session",
".",
"commit",
"(",
")"
] |
r Parse the contents of the string s as an XML properties document and return a dict of the key - value pairs .
|
def loads_xml ( s , object_pairs_hook = dict ) : elem = ET . fromstring ( s ) return object_pairs_hook ( _fromXML ( elem ) )
| 4,892
|
https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/xmlprops.py#L43-L77
|
[
"def",
"ITRF_position_velocity_error",
"(",
"self",
",",
"t",
")",
":",
"rTEME",
",",
"vTEME",
",",
"error",
"=",
"self",
".",
"_position_and_velocity_TEME_km",
"(",
"t",
")",
"rTEME",
"/=",
"AU_KM",
"vTEME",
"/=",
"AU_KM",
"vTEME",
"*=",
"DAY_S",
"rITRF",
",",
"vITRF",
"=",
"TEME_to_ITRF",
"(",
"t",
".",
"ut1",
",",
"rTEME",
",",
"vTEME",
")",
"return",
"rITRF",
",",
"vITRF",
",",
"error"
] |
Write a series props of key - value pairs to a binary filehandle fp in the format of an XML properties file . The file will include both an XML declaration and a doctype declaration .
|
def dump_xml ( props , fp , comment = None , encoding = 'UTF-8' , sort_keys = False ) : fp = codecs . lookup ( encoding ) . streamwriter ( fp , errors = 'xmlcharrefreplace' ) print ( '<?xml version="1.0" encoding={0} standalone="no"?>' . format ( quoteattr ( encoding ) ) , file = fp ) for s in _stream_xml ( props , comment , sort_keys ) : print ( s , file = fp )
| 4,893
|
https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/xmlprops.py#L88-L112
|
[
"def",
"subsystem",
"(",
"s",
")",
":",
"node_states",
"(",
"s",
".",
"state",
")",
"cut",
"(",
"s",
".",
"cut",
",",
"s",
".",
"cut_indices",
")",
"if",
"config",
".",
"VALIDATE_SUBSYSTEM_STATES",
":",
"state_reachable",
"(",
"s",
")",
"return",
"True"
] |
Convert a series props of key - value pairs to a text string containing an XML properties document . The document will include a doctype declaration but not an XML declaration .
|
def dumps_xml ( props , comment = None , sort_keys = False ) : return '' . join ( s + '\n' for s in _stream_xml ( props , comment , sort_keys ) )
| 4,894
|
https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/xmlprops.py#L114-L130
|
[
"def",
"get_max_age",
"(",
"self",
")",
":",
"default_ttl",
"=",
"self",
".",
"context",
".",
"config",
".",
"RESULT_STORAGE_EXPIRATION_SECONDS",
"if",
"self",
".",
"context",
".",
"request",
".",
"max_age",
"==",
"0",
":",
"return",
"self",
".",
"context",
".",
"request",
".",
"max_age",
"return",
"default_ttl"
] |
Configure connection to a SQL database .
|
def connect ( self , db_uri , debug = False ) : kwargs = { 'echo' : debug , 'convert_unicode' : True } # connect to the SQL database if 'mysql' in db_uri : kwargs [ 'pool_recycle' ] = 3600 elif '://' not in db_uri : logger . debug ( "detected sqlite path URI: {}" . format ( db_uri ) ) db_path = os . path . abspath ( os . path . expanduser ( db_uri ) ) db_uri = "sqlite:///{}" . format ( db_path ) self . engine = create_engine ( db_uri , * * kwargs ) logger . debug ( 'connection established successfully' ) # make sure the same engine is propagated to the BASE classes BASE . metadata . bind = self . engine # start a session self . session = scoped_session ( sessionmaker ( bind = self . engine ) ) # shortcut to query method self . query = self . session . query return self
| 4,895
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/store.py#L62-L86
|
[
"def",
"remove_armor",
"(",
"armored_data",
")",
":",
"stream",
"=",
"io",
".",
"BytesIO",
"(",
"armored_data",
")",
"lines",
"=",
"stream",
".",
"readlines",
"(",
")",
"[",
"3",
":",
"-",
"1",
"]",
"data",
"=",
"base64",
".",
"b64decode",
"(",
"b''",
".",
"join",
"(",
"lines",
")",
")",
"payload",
",",
"checksum",
"=",
"data",
"[",
":",
"-",
"3",
"]",
",",
"data",
"[",
"-",
"3",
":",
"]",
"assert",
"util",
".",
"crc24",
"(",
"payload",
")",
"==",
"checksum",
"return",
"payload"
] |
Select and initialize the correct plugin for the case .
|
def select_plugin ( self , case_obj ) : if case_obj . variant_mode == 'vcf' : logger . debug ( "Using vcf plugin" ) plugin = VcfPlugin ( case_obj . variant_type ) elif case_obj . variant_mode == 'gemini' : logger . debug ( "Using gemini plugin" ) plugin = GeminiPlugin ( case_obj . variant_type ) #Add case to plugin plugin . add_case ( case_obj ) self . variant_type = case_obj . variant_type case_id = case_obj . case_id return plugin , case_id
| 4,896
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/store.py#L131-L146
|
[
"def",
"add_item",
"(",
"self",
",",
"item_url",
",",
"item_metadata",
")",
":",
"c",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"c",
".",
"execute",
"(",
"\"DELETE FROM items WHERE url=?\"",
",",
"(",
"str",
"(",
"item_url",
")",
",",
")",
")",
"self",
".",
"conn",
".",
"commit",
"(",
")",
"c",
".",
"execute",
"(",
"\"INSERT INTO items VALUES (?, ?, ?)\"",
",",
"(",
"str",
"(",
"item_url",
")",
",",
"item_metadata",
",",
"self",
".",
"__now_iso_8601",
"(",
")",
")",
")",
"self",
".",
"conn",
".",
"commit",
"(",
")",
"c",
".",
"close",
"(",
")"
] |
Show the landing page .
|
def index ( ) : gene_lists = app . db . gene_lists ( ) if app . config [ 'STORE_ENABLED' ] else [ ] queries = app . db . gemini_queries ( ) if app . config [ 'STORE_ENABLED' ] else [ ] case_groups = { } for case in app . db . cases ( ) : key = ( case . variant_source , case . variant_type , case . variant_mode ) if key not in case_groups : case_groups [ key ] = [ ] case_groups [ key ] . append ( case ) return render_template ( 'index.html' , case_groups = case_groups , gene_lists = gene_lists , queries = queries )
| 4,897
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/server/blueprints/public/views.py#L18-L31
|
[
"def",
"describe_api_models",
"(",
"restApiId",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"try",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"models",
"=",
"_multi_call",
"(",
"conn",
".",
"get_models",
",",
"'items'",
",",
"restApiId",
"=",
"restApiId",
")",
"return",
"{",
"'models'",
":",
"[",
"_convert_datetime_str",
"(",
"model",
")",
"for",
"model",
"in",
"models",
"]",
"}",
"except",
"ClientError",
"as",
"e",
":",
"return",
"{",
"'error'",
":",
"__utils__",
"[",
"'boto3.get_error'",
"]",
"(",
"e",
")",
"}"
] |
Show the overview for a case .
|
def case ( case_id ) : case_obj = app . db . case ( case_id ) return render_template ( 'case.html' , case = case_obj , case_id = case_id )
| 4,898
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/server/blueprints/public/views.py#L35-L38
|
[
"def",
"libvlc_video_set_crop_geometry",
"(",
"p_mi",
",",
"psz_geometry",
")",
":",
"f",
"=",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_video_set_crop_geometry'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_video_set_crop_geometry'",
",",
"(",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
",",
")",
",",
"None",
",",
"None",
",",
"MediaPlayer",
",",
"ctypes",
".",
"c_char_p",
")",
"return",
"f",
"(",
"p_mi",
",",
"psz_geometry",
")"
] |
Delete phenotype from an individual .
|
def delete_phenotype ( phenotype_id ) : ind_id = request . form [ 'ind_id' ] ind_obj = app . db . individual ( ind_id ) try : app . db . remove_phenotype ( ind_obj , phenotype_id ) except RuntimeError as error : return abort ( 500 , error . message ) return redirect ( request . referrer )
| 4,899
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/server/blueprints/public/views.py#L66-L74
|
[
"def",
"share",
"(",
"self",
",",
"group_id",
",",
"group_access",
",",
"expires_at",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"path",
"=",
"'/projects/%s/share'",
"%",
"self",
".",
"get_id",
"(",
")",
"data",
"=",
"{",
"'group_id'",
":",
"group_id",
",",
"'group_access'",
":",
"group_access",
",",
"'expires_at'",
":",
"expires_at",
"}",
"self",
".",
"manager",
".",
"gitlab",
".",
"http_post",
"(",
"path",
",",
"post_data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.