query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Run the forward modeling for all frequencies .
def model ( self , * * kwargs ) : for key , td in self . tds . items ( ) : td . model ( * * kwargs )
12,400
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/eitManager.py#L474-L488
[ "def", "close_authenticator", "(", "self", ")", ":", "_logger", ".", "info", "(", "\"Shutting down CBS session on connection: %r.\"", ",", "self", ".", "_connection", ".", "container_id", ")", "try", ":", "_logger", ".", "debug", "(", "\"Unlocked CBS to close on connection: %r.\"", ",", "self", ".", "_connection", ".", "container_id", ")", "self", ".", "_cbs_auth", ".", "destroy", "(", ")", "_logger", ".", "info", "(", "\"Auth closed, destroying session on connection: %r.\"", ",", "self", ".", "_connection", ".", "container_id", ")", "self", ".", "_session", ".", "destroy", "(", ")", "finally", ":", "_logger", ".", "info", "(", "\"Finished shutting down CBS session on connection: %r.\"", ",", "self", ".", "_connection", ".", "container_id", ")" ]
Return modeled measurements
def measurements ( self ) : m_all = np . array ( [ self . tds [ key ] . measurements ( ) for key in sorted ( self . tds . keys ( ) ) ] ) return m_all
12,401
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/eitManager.py#L490-L500
[ "def", "__getBio", "(", "self", ",", "web", ")", ":", "bio", "=", "web", ".", "find_all", "(", "\"div\"", ",", "{", "\"class\"", ":", "\"user-profile-bio\"", "}", ")", "if", "bio", ":", "try", ":", "bio", "=", "bio", "[", "0", "]", ".", "text", "if", "bio", "and", "GitHubUser", ".", "isASCII", "(", "bio", ")", ":", "bioText", "=", "bio", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", "bioText", "=", "bioText", ".", "replace", "(", "\"\\t\"", ",", "\" \"", ")", ".", "replace", "(", "\"\\\"\"", ",", "\"\"", ")", "bioText", "=", "bioText", ".", "replace", "(", "\"\\'\"", ",", "\"\"", ")", ".", "replace", "(", "\"\\\\\"", ",", "\"\"", ")", "self", ".", "bio", "=", "bioText", "else", ":", "self", ".", "bio", "=", "\"\"", "except", "IndexError", "as", "error", ":", "print", "(", "\"There was an error with the user \"", "+", "self", ".", "name", ")", "print", "(", "error", ")", "except", "AttributeError", "as", "error", ":", "print", "(", "\"There was an error with the user \"", "+", "self", ".", "name", ")", "print", "(", "error", ")" ]
Return a dictionary of sip_responses for the modeled SIP spectra
def get_measurement_responses ( self ) : # take configurations from first tomodir configs = self . tds [ sorted ( self . tds . keys ( ) ) [ 0 ] ] . configs . configs measurements = self . measurements ( ) responses = { } for config , sip_measurement in zip ( configs , np . rollaxis ( measurements , 1 ) ) : sip = sip_response ( frequencies = self . frequencies , rmag = sip_measurement [ : , 0 ] , rpha = sip_measurement [ : , 1 ] ) responses [ tuple ( config ) ] = sip return responses
12,402
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/eitManager.py#L502-L527
[ "def", "inspect", "(", "item", ",", "maxchar", "=", "80", ")", ":", "for", "i", "in", "dir", "(", "item", ")", ":", "try", ":", "member", "=", "str", "(", "getattr", "(", "item", ",", "i", ")", ")", "if", "maxchar", "and", "len", "(", "member", ")", ">", "maxchar", ":", "member", "=", "member", "[", ":", "maxchar", "]", "+", "\"...\"", "except", ":", "member", "=", "\"[ERROR]\"", "print", "(", "\"{}: {}\"", ".", "format", "(", "i", ",", "member", ")", ",", "file", "=", "sys", ".", "stderr", ")" ]
Command to create a database
def create_database ( name , number = 1 , force_clear = False ) : print 'Got:' print 'name' , name , type ( name ) print 'number' , number , type ( number ) print 'force_clear' , force_clear , type ( force_clear )
12,403
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/examples/formal_project/manage.py#L10-L16
[ "def", "indication", "(", "self", ",", "apdu", ")", ":", "if", "_debug", ":", "ServerSSM", ".", "_debug", "(", "\"indication %r\"", ",", "apdu", ")", "if", "self", ".", "state", "==", "IDLE", ":", "self", ".", "idle", "(", "apdu", ")", "elif", "self", ".", "state", "==", "SEGMENTED_REQUEST", ":", "self", ".", "segmented_request", "(", "apdu", ")", "elif", "self", ".", "state", "==", "AWAIT_RESPONSE", ":", "self", ".", "await_response", "(", "apdu", ")", "elif", "self", ".", "state", "==", "SEGMENTED_RESPONSE", ":", "self", ".", "segmented_response", "(", "apdu", ")", "else", ":", "if", "_debug", ":", "ServerSSM", ".", "_debug", "(", "\" - invalid state\"", ")" ]
Returns the long path name for a Windows path i . e . the properly cased path of an existing file or directory .
def _get_long_path_name ( path ) : # Thanks to http://stackoverflow.com/a/3694799/791713 buf = ctypes . create_unicode_buffer ( len ( path ) + 1 ) GetLongPathNameW = ctypes . windll . kernel32 . GetLongPathNameW res = GetLongPathNameW ( path , buf , len ( path ) + 1 ) if res == 0 or res > 260 : return path else : return buf . value
12,404
https://github.com/NiklasRosenstein/py-bundler/blob/80dd6dc971667ba015f7f67481417c45cc757231/bundler/nativedeps/windll.py#L44-L57
[ "def", "merge", "(", "obj_a", ",", "obj_b", ",", "strategy", "=", "'smart'", ",", "renderer", "=", "'yaml'", ",", "merge_lists", "=", "False", ")", ":", "return", "salt", ".", "utils", ".", "dictupdate", ".", "merge", "(", "obj_a", ",", "obj_b", ",", "strategy", ",", "renderer", ",", "merge_lists", ")" ]
Checks if depends . exe is in the system PATH . If not it will be downloaded and extracted to a temporary directory . Note that the file will not be deleted afterwards .
def get_dependency_walker ( ) : for dirname in os . getenv ( 'PATH' , '' ) . split ( os . pathsep ) : filename = os . path . join ( dirname , 'depends.exe' ) if os . path . isfile ( filename ) : logger . info ( 'Dependency Walker found at "{}"' . format ( filename ) ) return filename temp_exe = os . path . join ( tempfile . gettempdir ( ) , 'depends.exe' ) temp_dll = os . path . join ( tempfile . gettempdir ( ) , 'depends.dll' ) if os . path . isfile ( temp_exe ) : logger . info ( 'Dependency Walker found at "{}"' . format ( temp_exe ) ) return temp_exe logger . info ( 'Dependency Walker not found. Downloading ...' ) with urlopen ( 'http://dependencywalker.com/depends22_x64.zip' ) as fp : data = fp . read ( ) logger . info ( 'Extracting Dependency Walker to "{}"' . format ( temp_exe ) ) with zipfile . ZipFile ( io . BytesIO ( data ) ) as fp : with fp . open ( 'depends.exe' ) as src : with open ( temp_exe , 'wb' ) as dst : shutil . copyfileobj ( src , dst ) with fp . open ( 'depends.dll' ) as src : with open ( temp_dll , 'wb' ) as dst : shutil . copyfileobj ( src , dst ) return temp_exe
12,405
https://github.com/NiklasRosenstein/py-bundler/blob/80dd6dc971667ba015f7f67481417c45cc757231/bundler/nativedeps/windll.py#L60-L94
[ "def", "get_first_mapping", "(", "cls", ")", ":", "from", ".", "models", "import", "Indexable", "if", "issubclass", "(", "cls", ",", "Indexable", ")", "and", "hasattr", "(", "cls", ",", "\"Mapping\"", ")", ":", "return", "cls", ".", "Mapping", "for", "base", "in", "cls", ".", "__bases__", ":", "mapping", "=", "get_first_mapping", "(", "base", ")", "if", "mapping", ":", "return", "mapping", "return", "None" ]
This decorator wrap a function which setup a environment before running a command
def prepare ( self , setup_func ) : assert inspect . isfunction ( setup_func ) argsspec = inspect . getargspec ( setup_func ) if argsspec . args : raise ValueError ( "prepare function shouldn't have any arguments" ) def decorator ( command_func ) : @ functools . wraps ( command_func ) def wrapper ( * args , * * kwgs ) : # Run setup_func before command_func setup_func ( ) return command_func ( * args , * * kwgs ) return wrapper return decorator
12,406
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/script.py#L220-L239
[ "def", "get_datasource_info", "(", "datasource_id", ",", "datasource_type", ",", "form_data", ")", ":", "datasource", "=", "form_data", ".", "get", "(", "'datasource'", ",", "''", ")", "if", "'__'", "in", "datasource", ":", "datasource_id", ",", "datasource_type", "=", "datasource", ".", "split", "(", "'__'", ")", "# The case where the datasource has been deleted", "datasource_id", "=", "None", "if", "datasource_id", "==", "'None'", "else", "datasource_id", "if", "not", "datasource_id", ":", "raise", "Exception", "(", "'The datasource associated with this chart no longer exists'", ")", "datasource_id", "=", "int", "(", "datasource_id", ")", "return", "datasource_id", ",", "datasource_type" ]
Add LayoutExternalPort for interface
def addPort ( n : LNode , intf : Interface ) : d = PortTypeFromDir ( intf . _direction ) ext_p = LayoutExternalPort ( n , name = intf . _name , direction = d , node2lnode = n . _node2lnode ) ext_p . originObj = originObjOfPort ( intf ) n . children . append ( ext_p ) addPortToLNode ( ext_p , intf , reverseDirection = True ) return ext_p
12,407
https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/utils.py#L231-L241
[ "def", "sql_like_fragments", "(", "self", ")", "->", "List", "[", "str", "]", ":", "if", "self", ".", "_sql_like_fragments", "is", "None", ":", "self", ".", "_sql_like_fragments", "=", "[", "]", "for", "p", "in", "list", "(", "set", "(", "self", ".", "all_generics", "+", "self", ".", "alternatives", ")", ")", ":", "self", ".", "_sql_like_fragments", ".", "extend", "(", "self", ".", "regex_to_sql_like", "(", "p", ")", ")", "return", "self", ".", "_sql_like_fragments" ]
Loop over the object process path attribute sets and drawlines based on their current contents .
def drawtree ( self ) : self . win . erase ( ) self . line = 0 for child , depth in self . traverse ( ) : child . curline = self . curline child . picked = self . picked child . expanded = self . expanded child . sized = self . sized if depth == 0 : continue if self . line == self . curline : self . color . curline ( child . name , child . picked ) children = child . children name = child . name else : self . color . default ( child . name , child . picked ) if child . name in self . sized and not self . sized [ child . name ] : self . sized [ child . name ] = " [" + du ( child . name ) + "]" child . drawline ( depth , self . line , self . win ) self . line += 1 self . win . refresh ( ) self . mkheader ( name ) self . mkfooter ( name , children )
12,408
https://github.com/tslight/treepick/blob/7adf838900f11e8845e17d8c79bb2b23617aec2c/treepick/draw.py#L64-L90
[ "def", "header", "(", "cls", ",", "name", ",", "type_", "=", "Type", ".", "String", ",", "description", "=", "None", ",", "default", "=", "None", ",", "required", "=", "None", ",", "*", "*", "options", ")", ":", "return", "cls", "(", "name", ",", "In", ".", "Header", ",", "type_", ",", "None", ",", "description", ",", "required", "=", "required", ",", "default", "=", "default", ",", "*", "*", "options", ")" ]
Import a Config from a given path relative to the current directory .
def import_config ( config_path ) : if not os . path . isfile ( config_path ) : raise ConfigBuilderError ( 'Could not find config file: ' + config_path ) loader = importlib . machinery . SourceFileLoader ( config_path , config_path ) module = loader . load_module ( ) if not hasattr ( module , 'config' ) or not isinstance ( module . config , Config ) : raise ConfigBuilderError ( 'Could not load config file "{}": config files must contain ' 'a variable called "config" that is ' 'assigned to a Config object.' . format ( config_path ) ) return module . config
12,409
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/api/config.py#L216-L233
[ "def", "get_timestamps", "(", "cols", ",", "created_name", ",", "updated_name", ")", ":", "has_created", "=", "created_name", "in", "cols", "has_updated", "=", "updated_name", "in", "cols", "return", "(", "created_name", "if", "has_created", "else", "None", ",", "updated_name", "if", "has_updated", "else", "None", ")" ]
Grid search using a fitness function over a given number of dimensions and a given step size between inclusive limits of 0 and 1 .
def grid ( fitness_function , no_dimensions , step_size ) : best_fitness = float ( "-inf" ) best_arguments = None for arguments in make_lists ( no_dimensions , step_size ) : fitness = fitness_function ( tuple ( arguments ) ) if fitness > best_fitness : best_fitness = fitness best_arguments = tuple ( arguments ) return best_arguments
12,410
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/optimize/optimizer.py#L1-L29
[ "def", "alias_comment", "(", "self", ",", "comment_id", ",", "alias_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceAdminSession.alias_resources_template", "self", ".", "_alias_id", "(", "primary_id", "=", "comment_id", ",", "equivalent_id", "=", "alias_id", ")" ]
Create a list of lists of floats covering every combination across no_dimensions of points of integer step size between 0 and 1 inclusive .
def make_lists ( no_dimensions , step_size , centre_steps = True ) : if no_dimensions == 0 : return [ [ ] ] sub_lists = make_lists ( no_dimensions - 1 , step_size , centre_steps = centre_steps ) return [ [ step_size * value + ( 0.5 * step_size if centre_steps else 0 ) ] + sub_list for value in range ( 0 , int ( ( 1 / step_size ) ) ) for sub_list in sub_lists ]
12,411
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/optimize/optimizer.py#L32-L55
[ "def", "process_seq", "(", "seq", ",", "material", ")", ":", "check_alphabet", "(", "seq", ",", "material", ")", "seq", "=", "seq", ".", "upper", "(", ")", "return", "seq" ]
recursively count number of ports without children
def portCnt ( port ) : if port . children : return sum ( map ( lambda p : portCnt ( p ) , port . children ) ) else : return 1
12,412
https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/mergeSplitsOnInterfaces.py#L43-L50
[ "def", "_handle_fetch_response", "(", "self", ",", "request", ",", "send_time", ",", "response", ")", ":", "fetch_offsets", "=", "{", "}", "for", "topic", ",", "partitions", "in", "request", ".", "topics", ":", "for", "partition_data", "in", "partitions", ":", "partition", ",", "offset", "=", "partition_data", "[", ":", "2", "]", "fetch_offsets", "[", "TopicPartition", "(", "topic", ",", "partition", ")", "]", "=", "offset", "partitions", "=", "set", "(", "[", "TopicPartition", "(", "topic", ",", "partition_data", "[", "0", "]", ")", "for", "topic", ",", "partitions", "in", "response", ".", "topics", "for", "partition_data", "in", "partitions", "]", ")", "metric_aggregator", "=", "FetchResponseMetricAggregator", "(", "self", ".", "_sensors", ",", "partitions", ")", "# randomized ordering should improve balance for short-lived consumers", "random", ".", "shuffle", "(", "response", ".", "topics", ")", "for", "topic", ",", "partitions", "in", "response", ".", "topics", ":", "random", ".", "shuffle", "(", "partitions", ")", "for", "partition_data", "in", "partitions", ":", "tp", "=", "TopicPartition", "(", "topic", ",", "partition_data", "[", "0", "]", ")", "completed_fetch", "=", "CompletedFetch", "(", "tp", ",", "fetch_offsets", "[", "tp", "]", ",", "response", ".", "API_VERSION", ",", "partition_data", "[", "1", ":", "]", ",", "metric_aggregator", ")", "self", ".", "_completed_fetches", ".", "append", "(", "completed_fetch", ")", "if", "response", ".", "API_VERSION", ">=", "1", ":", "self", ".", "_sensors", ".", "fetch_throttle_time_sensor", ".", "record", "(", "response", ".", "throttle_time_ms", ")", "self", ".", "_sensors", ".", "fetch_latency", ".", "record", "(", "(", "time", ".", "time", "(", ")", "-", "send_time", ")", "*", "1000", ")" ]
Create identical port on targetNode
def copyPort ( port , targetLNode , reverseDir , topPortName = None ) : newP = _copyPort ( port , targetLNode , reverseDir ) if topPortName is not None : newP . name = topPortName return newP
12,413
https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/mergeSplitsOnInterfaces.py#L76-L85
[ "def", "schemas_access_for_csv_upload", "(", "self", ")", ":", "if", "not", "request", ".", "args", ".", "get", "(", "'db_id'", ")", ":", "return", "json_error_response", "(", "'No database is allowed for your csv upload'", ")", "db_id", "=", "int", "(", "request", ".", "args", ".", "get", "(", "'db_id'", ")", ")", "database", "=", "(", "db", ".", "session", ".", "query", "(", "models", ".", "Database", ")", ".", "filter_by", "(", "id", "=", "db_id", ")", ".", "one", "(", ")", ")", "try", ":", "schemas_allowed", "=", "database", ".", "get_schema_access_for_csv_upload", "(", ")", "if", "(", "security_manager", ".", "database_access", "(", "database", ")", "or", "security_manager", ".", "all_datasource_access", "(", ")", ")", ":", "return", "self", ".", "json_response", "(", "schemas_allowed", ")", "# the list schemas_allowed should not be empty here", "# and the list schemas_allowed_processed returned from security_manager", "# should not be empty either,", "# otherwise the database should have been filtered out", "# in CsvToDatabaseForm", "schemas_allowed_processed", "=", "security_manager", ".", "schemas_accessible_by_user", "(", "database", ",", "schemas_allowed", ",", "False", ")", "return", "self", ".", "json_response", "(", "schemas_allowed_processed", ")", "except", "Exception", ":", "return", "json_error_response", "(", "(", "'Failed to fetch schemas allowed for csv upload in this database! '", "'Please contact Superset Admin!\\n\\n'", "'The error message returned was:\\n{}'", ")", ".", "format", "(", "traceback", ".", "format_exc", "(", ")", ")", ")" ]
recursively walk ports without any children
def walkSignalPorts ( rootPort : LPort ) : if rootPort . children : for ch in rootPort . children : yield from walkSignalPorts ( ch ) else : yield rootPort
12,414
https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/mergeSplitsOnInterfaces.py#L88-L96
[ "async", "def", "jsk_curl", "(", "self", ",", "ctx", ":", "commands", ".", "Context", ",", "url", ":", "str", ")", ":", "# remove embed maskers if present", "url", "=", "url", ".", "lstrip", "(", "\"<\"", ")", ".", "rstrip", "(", "\">\"", ")", "async", "with", "ReplResponseReactor", "(", "ctx", ".", "message", ")", ":", "async", "with", "aiohttp", ".", "ClientSession", "(", ")", "as", "session", ":", "async", "with", "session", ".", "get", "(", "url", ")", "as", "response", ":", "data", "=", "await", "response", ".", "read", "(", ")", "hints", "=", "(", "response", ".", "content_type", ",", "url", ")", "code", "=", "response", ".", "status", "if", "not", "data", ":", "return", "await", "ctx", ".", "send", "(", "f\"HTTP response was empty (status code {code}).\"", ")", "try", ":", "paginator", "=", "WrappedFilePaginator", "(", "io", ".", "BytesIO", "(", "data", ")", ",", "language_hints", "=", "hints", ",", "max_size", "=", "1985", ")", "except", "UnicodeDecodeError", ":", "return", "await", "ctx", ".", "send", "(", "f\"Couldn't determine the encoding of the response. (status code {code})\"", ")", "except", "ValueError", "as", "exc", ":", "return", "await", "ctx", ".", "send", "(", "f\"Couldn't read response (status code {code}), {exc}\"", ")", "interface", "=", "PaginatorInterface", "(", "ctx", ".", "bot", ",", "paginator", ",", "owner", "=", "ctx", ".", "author", ")", "await", "interface", ".", "send_to", "(", "ctx", ")" ]
Prints an agent error and exits
def agent_error ( e : requests . HTTPError , fatal = True ) : try : data = e . response . json ( ) details = data [ 'detail' ] # type: str except JSONDecodeError : details = e . response . text or str ( e . response ) lines = ( '[AGENT] {}' . format ( line ) for line in details . splitlines ( ) ) msg = '\n' + '\n' . join ( lines ) if fatal : fatal_error ( msg ) else : error ( msg )
12,415
https://github.com/zalando-stups/lizzy-client/blob/0af9733ca5a25ebd0a9dc1453f2a7592efcee56a/lizzy_client/cli.py#L99-L115
[ "def", "array", "(", "self", ")", ":", "if", "self", ".", "_ind", "<", "self", ".", "shape", ":", "return", "self", ".", "_values", "[", ":", "self", ".", "_ind", "]", "if", "not", "self", ".", "_cached", ":", "ind", "=", "int", "(", "self", ".", "_ind", "%", "self", ".", "shape", ")", "self", ".", "_cache", "[", ":", "self", ".", "shape", "-", "ind", "]", "=", "self", ".", "_values", "[", "ind", ":", "]", "self", ".", "_cache", "[", "self", ".", "shape", "-", "ind", ":", "]", "=", "self", ".", "_values", "[", ":", "ind", "]", "self", ".", "_cached", "=", "True", "return", "self", ".", "_cache" ]
Check if items included in stack_references are Senza definition file paths or stack name reference . If Senza definition file path substitute the definition file path by the stack name in the same position on the list .
def parse_stack_refs ( stack_references : List [ str ] ) -> List [ str ] : stack_names = [ ] references = list ( stack_references ) references . reverse ( ) while references : current = references . pop ( ) # current that might be a file file_path = os . path . abspath ( current ) if os . path . exists ( file_path ) and os . path . isfile ( file_path ) : try : with open ( file_path ) as fd : data = yaml . safe_load ( fd ) current = data [ 'SenzaInfo' ] [ 'StackName' ] except ( KeyError , TypeError , YAMLError ) : raise click . UsageError ( 'Invalid senza definition {}' . format ( current ) ) stack_names . append ( current ) return stack_names
12,416
https://github.com/zalando-stups/lizzy-client/blob/0af9733ca5a25ebd0a9dc1453f2a7592efcee56a/lizzy_client/cli.py#L147-L171
[ "def", "_clear_interrupt", "(", "self", ",", "intbit", ")", ":", "int_status", "=", "self", ".", "_device", ".", "readU8", "(", "VCNL4010_INTSTAT", ")", "int_status", "&=", "~", "intbit", "self", ".", "_device", ".", "write8", "(", "VCNL4010_INTSTAT", ",", "int_status", ")" ]
List Lizzy stacks
def list_stacks ( stack_ref : List [ str ] , all : bool , remote : str , region : str , watch : int , output : str ) : lizzy = setup_lizzy_client ( remote ) stack_references = parse_stack_refs ( stack_ref ) while True : rows = [ ] for stack in lizzy . get_stacks ( stack_references , region = region ) : creation_time = dateutil . parser . parse ( stack [ 'creation_time' ] ) rows . append ( { 'stack_name' : stack [ 'stack_name' ] , 'version' : stack [ 'version' ] , 'status' : stack [ 'status' ] , 'creation_time' : creation_time . timestamp ( ) , 'description' : stack [ 'description' ] } ) rows . sort ( key = lambda x : ( x [ 'stack_name' ] , x [ 'version' ] ) ) with OutputFormat ( output ) : print_table ( 'stack_name version status creation_time description' . split ( ) , rows , styles = STYLES , titles = TITLES ) if watch : # pragma: no cover time . sleep ( watch ) click . clear ( ) else : break
12,417
https://github.com/zalando-stups/lizzy-client/blob/0af9733ca5a25ebd0a9dc1453f2a7592efcee56a/lizzy_client/cli.py#L348-L374
[ "def", "block", "(", "seed", ")", ":", "num", "=", "SAMPLE_RATE", "*", "BLOCK_SIZE", "rng", "=", "RandomState", "(", "seed", "%", "2", "**", "32", ")", "variance", "=", "SAMPLE_RATE", "/", "2", "return", "rng", ".", "normal", "(", "size", "=", "num", ",", "scale", "=", "variance", "**", "0.5", ")" ]
Manage stack traffic
def traffic ( stack_name : str , stack_version : Optional [ str ] , percentage : Optional [ int ] , region : Optional [ str ] , remote : Optional [ str ] , output : Optional [ str ] ) : lizzy = setup_lizzy_client ( remote ) if percentage is None : stack_reference = [ stack_name ] with Action ( 'Requesting traffic info..' ) : stack_weights = [ ] for stack in lizzy . get_stacks ( stack_reference , region = region ) : if stack [ 'status' ] in [ 'CREATE_COMPLETE' , 'UPDATE_COMPLETE' ] : stack_id = '{stack_name}-{version}' . format_map ( stack ) traffic = lizzy . get_traffic ( stack_id , region = region ) stack_weights . append ( { 'stack_name' : stack_name , 'version' : stack [ 'version' ] , 'identifier' : stack_id , 'weight%' : traffic [ 'weight' ] } ) cols = 'stack_name version identifier weight%' . split ( ) with OutputFormat ( output ) : print_table ( cols , sorted ( stack_weights , key = lambda x : x [ 'identifier' ] ) ) else : with Action ( 'Requesting traffic change..' ) : stack_id = '{stack_name}-{stack_version}' . format_map ( locals ( ) ) lizzy . traffic ( stack_id , percentage , region = region )
12,418
https://github.com/zalando-stups/lizzy-client/blob/0af9733ca5a25ebd0a9dc1453f2a7592efcee56a/lizzy_client/cli.py#L387-L418
[ "def", "run", "(", "self", ")", ":", "# Create the thread pool.", "executor", "=", "concurrent", ".", "futures", ".", "ThreadPoolExecutor", "(", "max_workers", "=", "self", ".", "_config", "[", "'num_workers'", "]", ")", "# Wait to ensure multiple senders can be synchronised.", "now", "=", "int", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "timestamp", "(", ")", ")", "start_time", "=", "(", "(", "now", "+", "29", ")", "//", "30", ")", "*", "30", "self", ".", "_log", ".", "info", "(", "'Waiting until {}'", ".", "format", "(", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "start_time", ")", ")", ")", "while", "int", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "timestamp", "(", ")", ")", "<", "start_time", ":", "time", ".", "sleep", "(", "0.1", ")", "# Run the event loop.", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "try", ":", "loop", ".", "run_until_complete", "(", "self", ".", "_run_loop", "(", "executor", ")", ")", "except", "KeyboardInterrupt", ":", "pass", "finally", ":", "# Send the end of stream message to each stream.", "self", ".", "_log", ".", "info", "(", "'Shutting down, closing streams...'", ")", "tasks", "=", "[", "]", "for", "stream", ",", "item_group", "in", "self", ".", "_streams", ":", "tasks", ".", "append", "(", "stream", ".", "async_send_heap", "(", "item_group", ".", "get_end", "(", ")", ")", ")", "loop", ".", "run_until_complete", "(", "asyncio", ".", "gather", "(", "*", "tasks", ")", ")", "self", ".", "_log", ".", "info", "(", "'... finished.'", ")", "executor", ".", "shutdown", "(", ")" ]
Rescale a stack
def scale ( stack_name : str , stack_version : Optional [ str ] , new_scale : int , region : Optional [ str ] , remote : Optional [ str ] ) : lizzy = setup_lizzy_client ( remote ) with Action ( 'Requesting rescale..' ) : stack_id = '{stack_name}-{stack_version}' . format_map ( locals ( ) ) lizzy . scale ( stack_id , new_scale , region = region )
12,419
https://github.com/zalando-stups/lizzy-client/blob/0af9733ca5a25ebd0a9dc1453f2a7592efcee56a/lizzy_client/cli.py#L428-L438
[ "def", "getDefaultItems", "(", "self", ")", ":", "return", "[", "RtiRegItem", "(", "'HDF-5 file'", ",", "'argos.repo.rtiplugins.hdf5.H5pyFileRti'", ",", "extensions", "=", "[", "'hdf5'", ",", "'h5'", ",", "'h5e'", ",", "'he5'", ",", "'nc'", "]", ")", ",", "# hdf extension is for HDF-4", "RtiRegItem", "(", "'MATLAB file'", ",", "'argos.repo.rtiplugins.scipyio.MatlabFileRti'", ",", "extensions", "=", "[", "'mat'", "]", ")", ",", "RtiRegItem", "(", "'NetCDF file'", ",", "'argos.repo.rtiplugins.ncdf.NcdfFileRti'", ",", "#extensions=['nc', 'nc3', 'nc4']),", "extensions", "=", "[", "'nc'", ",", "'nc4'", "]", ")", ",", "#extensions=[]),", "RtiRegItem", "(", "'NumPy binary file'", ",", "'argos.repo.rtiplugins.numpyio.NumpyBinaryFileRti'", ",", "extensions", "=", "[", "'npy'", "]", ")", ",", "RtiRegItem", "(", "'NumPy compressed file'", ",", "'argos.repo.rtiplugins.numpyio.NumpyCompressedFileRti'", ",", "extensions", "=", "[", "'npz'", "]", ")", ",", "RtiRegItem", "(", "'NumPy text file'", ",", "'argos.repo.rtiplugins.numpyio.NumpyTextFileRti'", ",", "#extensions=['txt', 'text']),", "extensions", "=", "[", "'dat'", "]", ")", ",", "RtiRegItem", "(", "'IDL save file'", ",", "'argos.repo.rtiplugins.scipyio.IdlSaveFileRti'", ",", "extensions", "=", "[", "'sav'", "]", ")", ",", "RtiRegItem", "(", "'Pandas CSV file'", ",", "'argos.repo.rtiplugins.pandasio.PandasCsvFileRti'", ",", "extensions", "=", "[", "'csv'", "]", ")", ",", "RtiRegItem", "(", "'Pillow image'", ",", "'argos.repo.rtiplugins.pillowio.PillowFileRti'", ",", "extensions", "=", "[", "'bmp'", ",", "'eps'", ",", "'im'", ",", "'gif'", ",", "'jpg'", ",", "'jpeg'", ",", "'msp'", ",", "'pcx'", ",", "'png'", ",", "'ppm'", ",", "'spi'", ",", "'tif'", ",", "'tiff'", ",", "'xbm'", ",", "'xv'", "]", ")", ",", "RtiRegItem", "(", "'Wav file'", ",", "'argos.repo.rtiplugins.scipyio.WavFileRti'", ",", "extensions", "=", "[", "'wav'", "]", ")", "]" ]
Delete Cloud Formation stacks
def delete ( stack_ref : List [ str ] , region : str , dry_run : bool , force : bool , remote : str ) : lizzy = setup_lizzy_client ( remote ) stack_refs = get_stack_refs ( stack_ref ) all_with_version = all ( stack . version is not None for stack in stack_refs ) # this is misleading but it's the current behaviour of senza # TODO Lizzy list (stack_refs) to see if it actually matches more than one stack # to match senza behaviour if ( not all_with_version and not dry_run and not force ) : fatal_error ( 'Error: {} matching stacks found. ' . format ( len ( stack_refs ) ) + 'Please use the "--force" flag if you really want to delete multiple stacks.' ) # TODO pass force option to agent output = '' for stack in stack_refs : if stack . version is not None : stack_id = '{stack.name}-{stack.version}' . format ( stack = stack ) else : stack_id = stack . name with Action ( "Requesting stack '{stack_id}' deletion.." , stack_id = stack_id ) : output = lizzy . delete ( stack_id , region = region , dry_run = dry_run ) print ( output )
12,420
https://github.com/zalando-stups/lizzy-client/blob/0af9733ca5a25ebd0a9dc1453f2a7592efcee56a/lizzy_client/cli.py#L449-L478
[ "def", "command", "(", "state", ",", "args", ")", ":", "args", "=", "parser", ".", "parse_args", "(", "args", "[", "1", ":", "]", ")", "where_queries", "=", "[", "]", "params", "=", "{", "}", "if", "args", ".", "watching", "or", "args", ".", "available", ":", "where_queries", ".", "append", "(", "'regexp IS NOT NULL'", ")", "if", "args", ".", "query", ":", "where_queries", ".", "append", "(", "'title LIKE :title'", ")", "params", "[", "'title'", "]", "=", "_compile_sql_query", "(", "args", ".", "query", ")", "if", "not", "where_queries", ":", "print", "(", "'Must include at least one filter.'", ")", "return", "where_query", "=", "' AND '", ".", "join", "(", "where_queries", ")", "logger", ".", "debug", "(", "'Search where %s with params %s'", ",", "where_query", ",", "params", ")", "results", "=", "list", "(", ")", "all_files", "=", "[", "filename", "for", "filename", "in", "_find_files", "(", "state", ".", "config", "[", "'anime'", "]", ".", "getpath", "(", "'watchdir'", ")", ")", "if", "_is_video", "(", "filename", ")", "]", "for", "anime", "in", "query", ".", "select", ".", "select", "(", "state", ".", "db", ",", "where_query", ",", "params", ")", ":", "logger", ".", "debug", "(", "'For anime %s with regexp %s'", ",", "anime", ".", "aid", ",", "anime", ".", "regexp", ")", "if", "anime", ".", "regexp", "is", "not", "None", ":", "anime_files", "=", "AnimeFiles", "(", "anime", ".", "regexp", ",", "all_files", ")", "logger", ".", "debug", "(", "'Found files %s'", ",", "anime_files", ".", "filenames", ")", "query", ".", "files", ".", "cache_files", "(", "state", ".", "db", ",", "anime", ".", "aid", ",", "anime_files", ")", "available", "=", "anime_files", ".", "available_string", "(", "anime", ".", "watched_episodes", ")", "else", ":", "available", "=", "''", "if", "not", "args", ".", "available", "or", "available", ":", "results", ".", "append", "(", "(", "anime", ".", "aid", ",", "anime", ".", "title", ",", "anime", ".", "type", ",", "'{}/{}'", ".", "format", 
"(", "anime", ".", "watched_episodes", ",", "anime", ".", "episodecount", ")", ",", "'yes'", "if", "anime", ".", "complete", "else", "''", ",", "available", ",", ")", ")", "state", ".", "results", "[", "'db'", "]", ".", "set", "(", "results", ")", "state", ".", "results", "[", "'db'", "]", ".", "print", "(", ")" ]
Create an XML file .
def pydict2xml(filename, metadata_dict, **kwargs):
    """Create an XML file at *filename* from a metadata dictionary.

    Serialization is delegated to pydict2xmlstring(); the UTF-8 encoded
    result is written to disk.

    Raises:
        MetadataGeneratorException: if serialization or writing fails.
    """
    try:
        # Binary mode because we write the already-encoded UTF-8 bytes;
        # text mode would raise TypeError under Python 3. The context
        # manager guarantees the handle is closed even on failure.
        with open(filename, 'wb') as f:
            f.write(pydict2xmlstring(metadata_dict, **kwargs).encode('utf-8'))
    except Exception:
        # Narrowed from a bare except; still surfaces as the
        # project-specific exception type callers expect.
        raise MetadataGeneratorException(
            'Failed to create an XML file. Filename: %s' % (filename)
        )
12,421
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/metadata_generator.py#L96-L109
[ "def", "find_best_frametype", "(", "channel", ",", "start", ",", "end", ",", "frametype_match", "=", "None", ",", "allow_tape", "=", "True", ",", "connection", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ")", ":", "try", ":", "return", "find_frametype", "(", "channel", ",", "gpstime", "=", "(", "start", ",", "end", ")", ",", "frametype_match", "=", "frametype_match", ",", "allow_tape", "=", "allow_tape", ",", "on_gaps", "=", "'error'", ",", "connection", "=", "connection", ",", "host", "=", "host", ",", "port", "=", "port", ")", "except", "RuntimeError", ":", "# gaps (or something else went wrong)", "ftout", "=", "find_frametype", "(", "channel", ",", "gpstime", "=", "(", "start", ",", "end", ")", ",", "frametype_match", "=", "frametype_match", ",", "return_all", "=", "True", ",", "allow_tape", "=", "allow_tape", ",", "on_gaps", "=", "'ignore'", ",", "connection", "=", "connection", ",", "host", "=", "host", ",", "port", "=", "port", ")", "try", ":", "if", "isinstance", "(", "ftout", ",", "dict", ")", ":", "return", "{", "key", ":", "ftout", "[", "key", "]", "[", "0", "]", "for", "key", "in", "ftout", "}", "return", "ftout", "[", "0", "]", "except", "IndexError", ":", "raise", "ValueError", "(", "\"Cannot find any valid frametypes for channel(s)\"", ")" ]
Create an XML string from a metadata dictionary .
def pydict2xmlstring(metadata_dict, **kwargs):
    """Create an XML string from a metadata dictionary.

    Keyword Args:
        ordering: iterable of element names controlling output order
            (defaults to UNTL_XML_ORDER).
        root_label: tag name for the root element.
        root_namespace: namespace prefix for the root element.
        elements_namespace: namespace prefix for child elements.
        namespace_map: nsmap passed to the lxml Element factory.
        root_attributes: dict of attributes set on the root element.

    Returns:
        The pretty-printed XML document with an XML declaration.
    """
    ordering = kwargs.get('ordering', UNTL_XML_ORDER)
    root_label = kwargs.get('root_label', 'metadata')
    root_namespace = kwargs.get('root_namespace', None)
    elements_namespace = kwargs.get('elements_namespace', None)
    namespace_map = kwargs.get('namespace_map', None)
    root_attributes = kwargs.get('root_attributes', None)
    # Set any root namespace and namespace map.
    if root_namespace and namespace_map:
        root = Element(root_namespace + root_label, nsmap=namespace_map)
    elif namespace_map:
        root = Element(root_label, nsmap=namespace_map)
    else:
        root = Element(root_label)
    # Set any root element attributes.
    if root_attributes:
        for key, value in root_attributes.items():
            root.attrib[key] = value
    # Create an XML structure from the ordered field list.
    for metadata_key in ordering:
        if metadata_key not in metadata_dict:
            continue
        for element in metadata_dict[metadata_key]:
            if 'content' not in element:
                continue
            # One attribute at most is attached per element; the scan
            # order preserves the original qualifier > role > scheme
            # precedence of the old if/elif chain.
            attribs = None
            for attrib_name in ('qualifier', 'role', 'scheme'):
                if attrib_name in element:
                    attribs = {attrib_name: element[attrib_name]}
                    break
            create_dict_subelement(
                root,
                metadata_key,
                element['content'],
                attribs=attribs,
                namespace=elements_namespace,
            )
    # Create the XML tree.
    return ('<?xml version="1.0" encoding="UTF-8"?>\n'
            + tostring(root, pretty_print=True))
12,422
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/metadata_generator.py#L112-L170
[ "def", "find_best_frametype", "(", "channel", ",", "start", ",", "end", ",", "frametype_match", "=", "None", ",", "allow_tape", "=", "True", ",", "connection", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ")", ":", "try", ":", "return", "find_frametype", "(", "channel", ",", "gpstime", "=", "(", "start", ",", "end", ")", ",", "frametype_match", "=", "frametype_match", ",", "allow_tape", "=", "allow_tape", ",", "on_gaps", "=", "'error'", ",", "connection", "=", "connection", ",", "host", "=", "host", ",", "port", "=", "port", ")", "except", "RuntimeError", ":", "# gaps (or something else went wrong)", "ftout", "=", "find_frametype", "(", "channel", ",", "gpstime", "=", "(", "start", ",", "end", ")", ",", "frametype_match", "=", "frametype_match", ",", "return_all", "=", "True", ",", "allow_tape", "=", "allow_tape", ",", "on_gaps", "=", "'ignore'", ",", "connection", "=", "connection", ",", "host", "=", "host", ",", "port", "=", "port", ")", "try", ":", "if", "isinstance", "(", "ftout", ",", "dict", ")", ":", "return", "{", "key", ":", "ftout", "[", "key", "]", "[", "0", "]", "for", "key", "in", "ftout", "}", "return", "ftout", "[", "0", "]", "except", "IndexError", ":", "raise", "ValueError", "(", "\"Cannot find any valid frametypes for channel(s)\"", ")" ]
Create a XML subelement from a Python dictionary .
def create_dict_subelement(root, subelement, content, **kwargs):
    """Create an XML subelement from a Python dictionary.

    *content* may be a plain string (used as the element text) or a
    dict of descriptor -> value pairs rendered as child elements.
    """
    attribs = kwargs.get('attribs', None)
    namespace = kwargs.get('namespace', None)
    key = subelement
    # Prefix the tag with the namespace when one is supplied.
    tag = namespace + subelement if namespace else subelement
    if attribs:
        node = SubElement(root, tag, attribs)
    else:
        node = SubElement(root, tag)
    if not isinstance(content, dict):
        node.text = content
    elif key == 'degree':
        # Special case: etd_ms degree children must appear in
        # DEGREE_ORDER rather than dict iteration order.
        for wanted in DEGREE_ORDER:
            for descriptor, value in content.items():
                if descriptor == wanted:
                    SubElement(node, descriptor).text = value
    else:
        for descriptor, value in content.items():
            SubElement(node, descriptor).text = value
12,423
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/metadata_generator.py#L173-L201
[ "def", "find_best_frametype", "(", "channel", ",", "start", ",", "end", ",", "frametype_match", "=", "None", ",", "allow_tape", "=", "True", ",", "connection", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ")", ":", "try", ":", "return", "find_frametype", "(", "channel", ",", "gpstime", "=", "(", "start", ",", "end", ")", ",", "frametype_match", "=", "frametype_match", ",", "allow_tape", "=", "allow_tape", ",", "on_gaps", "=", "'error'", ",", "connection", "=", "connection", ",", "host", "=", "host", ",", "port", "=", "port", ")", "except", "RuntimeError", ":", "# gaps (or something else went wrong)", "ftout", "=", "find_frametype", "(", "channel", ",", "gpstime", "=", "(", "start", ",", "end", ")", ",", "frametype_match", "=", "frametype_match", ",", "return_all", "=", "True", ",", "allow_tape", "=", "allow_tape", ",", "on_gaps", "=", "'ignore'", ",", "connection", "=", "connection", ",", "host", "=", "host", ",", "port", "=", "port", ")", "try", ":", "if", "isinstance", "(", "ftout", ",", "dict", ")", ":", "return", "{", "key", ":", "ftout", "[", "key", "]", "[", "0", "]", "for", "key", "in", "ftout", "}", "return", "ftout", "[", "0", "]", "except", "IndexError", ":", "raise", "ValueError", "(", "\"Cannot find any valid frametypes for channel(s)\"", ")" ]
Create an XML string from the highwire data dictionary .
def highwiredict2xmlstring(highwire_elements, ordering=HIGHWIRE_ORDER):
    """Create an XML string from the highwire element list.

    Elements are sorted in place according to *ordering*, then emitted
    as <meta name="..." content="..."/> children of a <metadata> root.
    """
    # Sort the elements by their position in the ordering list.
    highwire_elements.sort(key=lambda obj: ordering.index(obj.name))
    root = Element('metadata')
    for element in highwire_elements:
        SubElement(root, 'meta',
                   {'name': element.name, 'content': element.content})
    # Create the XML tree.
    return ('<?xml version="1.0" encoding="UTF-8"?>\n'
            + tostring(root, pretty_print=True))
12,424
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/metadata_generator.py#L204-L216
[ "def", "_wait_for_save", "(", "nb_name", ",", "timeout", "=", "5", ")", ":", "modification_time", "=", "os", ".", "path", ".", "getmtime", "(", "nb_name", ")", "start_time", "=", "time", ".", "time", "(", ")", "while", "time", ".", "time", "(", ")", "<", "start_time", "+", "timeout", ":", "if", "(", "os", ".", "path", ".", "getmtime", "(", "nb_name", ")", ">", "modification_time", "and", "os", ".", "path", ".", "getsize", "(", "nb_name", ")", ">", "0", ")", ":", "return", "True", "time", ".", "sleep", "(", "0.2", ")", "return", "False" ]
Return a valid path to the given binary. Raise an error if no existing binary can be found.
def get(binary_name):
    """Return a valid path to the given binary.

    Parameters
    ----------
    binary_name : str
        Key into the module-level ``binaries`` registry.

    Returns
    -------
    str
        Absolute path of the first matching executable found on PATH.

    Raises
    ------
    Exception
        If *binary_name* is unknown, or no candidate executable exists
        on this system.
    """
    if binary_name not in binaries:
        raise Exception('binary_name: {0} not found'.format(binary_name))
    system = platform.system()
    binary_list = binaries[binary_name][system]
    # Check the candidate list for the first resolvable executable.
    for filename in binary_list:
        valid_file = shutil.which(filename)
        if valid_file:
            return os.path.abspath(valid_file)
    # Previously this fell through and returned None, contradicting the
    # documented contract; fail loudly instead.
    raise Exception(
        'no executable found for binary: {0} on system: {1}'.format(
            binary_name, system))
12,425
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/binaries.py#L79-L103
[ "def", "_domain_event_rtc_change_cb", "(", "conn", ",", "domain", ",", "utcoffset", ",", "opaque", ")", ":", "_salt_send_domain_event", "(", "opaque", ",", "conn", ",", "domain", ",", "opaque", "[", "'event'", "]", ",", "{", "'utcoffset'", ":", "utcoffset", "}", ")" ]
Wait until you can get the lock then yield it and eventually release it .
def get_upgrade_lock(dbname, connect_str, timeout=LOCK_TIMEOUT):
    """Wait until the named MySQL upgrade lock can be acquired, yield
    it, and release it (and the engine) when the caller is done.
    """
    #
    # Open connection and try to get the lock
    #
    engine = sqlalchemy.create_engine(connect_str)

    def _try_lock():
        # GET_LOCK returns 1 on success, 0 when the timeout elapses.
        cursor = engine.execute(
            "SELECT GET_LOCK('upgrade_{}', {})".format(dbname, timeout))
        acquired = cursor.scalar()
        cursor.close()
        return acquired

    lock = _try_lock()
    #
    # Keep trying until you get it.
    #
    while not lock:
        logger.info(
            'Cannot acquire {} upgrade lock. Sleeping {} seconds.'.format(
                dbname, timeout))
        time.sleep(timeout)
        lock = _try_lock()
    logger.info('Acquired {} upgrade lock'.format(dbname))
    yield lock
    #
    # Release the lock and close the connection.
    #
    cursor = engine.execute(
        "SELECT RELEASE_LOCK('upgrade_{}')".format(dbname))
    cursor.close()
    engine.dispose()
    logger.info('Released {} upgrade lock'.format(dbname))
12,426
https://github.com/JawboneHealth/jhhalchemy/blob/ca0011d644e404561a142c9d7f0a8a569f1f4f27/jhhalchemy/migrate.py#L25-L61
[ "def", "_strip_ctype", "(", "name", ",", "ctype", ",", "protocol", "=", "2", ")", ":", "# parse channel type from name (e.g. 'L1:GDS-CALIB_STRAIN,reduced')", "try", ":", "name", ",", "ctypestr", "=", "name", ".", "rsplit", "(", "','", ",", "1", ")", "except", "ValueError", ":", "pass", "else", ":", "ctype", "=", "Nds2ChannelType", ".", "find", "(", "ctypestr", ")", ".", "value", "# NDS1 stores channels with trend suffix, so we put it back:", "if", "protocol", "==", "1", "and", "ctype", "in", "(", "Nds2ChannelType", ".", "STREND", ".", "value", ",", "Nds2ChannelType", ".", "MTREND", ".", "value", ")", ":", "name", "+=", "',{0}'", ".", "format", "(", "ctypestr", ")", "return", "name", ",", "ctype" ]
Get the database s upgrade lock and run alembic .
def upgrade(dbname, connect_str, alembic_conf):
    """Get the database's upgrade lock and run alembic.

    Creates the database if it does not yet exist, then upgrades its
    schema to the alembic 'head' revision while holding the
    cross-process upgrade lock.
    """
    #
    # The db has to exist before we can get the lock. On the off-chance that another process creates the db between
    # checking if it exists and running the create, ignore the exception.
    #
    if not sqlalchemy_utils.database_exists(connect_str):
        logger.info('Creating {}'.format(dbname))
        try:
            sqlalchemy_utils.create_database(connect_str)
        except sqlalchemy.exc.ProgrammingError as exc:
            # Only re-raise when the database truly failed to appear;
            # a concurrent creator makes this error benign.
            if not sqlalchemy_utils.database_exists(connect_str):
                logger.error('Could not create {}'.format(dbname))
                raise exc
    with get_upgrade_lock(dbname, connect_str):
        # configure_logger=False keeps alembic from replacing the
        # application's logging configuration.
        alembic_config = alembic.config.Config(
            alembic_conf, attributes={'configure_logger': False})
        logger.info('Upgrading {} to head'.format(dbname))
        alembic.command.upgrade(alembic_config, 'head')
12,427
https://github.com/JawboneHealth/jhhalchemy/blob/ca0011d644e404561a142c9d7f0a8a569f1f4f27/jhhalchemy/migrate.py#L64-L90
[ "def", "_from_dict", "(", "cls", ",", "_dict", ")", ":", "args", "=", "{", "}", "if", "'matching_results'", "in", "_dict", ":", "args", "[", "'matching_results'", "]", "=", "_dict", ".", "get", "(", "'matching_results'", ")", "if", "'results'", "in", "_dict", ":", "args", "[", "'results'", "]", "=", "[", "LogQueryResponseResult", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'results'", ")", ")", "]", "return", "cls", "(", "*", "*", "args", ")" ]
Write the configuration to a file . Use the correct order of values .
def write_to_file(self, filename):
    """Write the configuration to a file, using the correct order of
    values.

    Keys equal to -1 in ``self.key_order`` mark empty separator lines;
    every other key is looked up via ``self[key]``.
    """
    # Context manager replaces the manual open/close pair so the file
    # is closed even if a key lookup raises.
    with open(filename, 'w') as fid:
        for key in self.key_order:
            if key == -1:
                fid.write('\n')
            else:
                fid.write('{0}\n'.format(self[key]))
12,428
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/cfg.py#L216-L227
[ "def", "removeAllEntitlements", "(", "self", ",", "appId", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"appId\"", ":", "appId", "}", "url", "=", "self", ".", "_url", "+", "\"/licenses/removeAllEntitlements\"", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
Parse import path . Determine if the path is native or starts with known prefix .
def parse(self, importpath):
    """Parse an import path.

    Determine whether the path is a native Go package or starts with a
    known import-path prefix; raise ValueError otherwise.
    """
    # Reset to default values before parsing.
    self.native = False
    self._prefix = ""
    self._package = ""
    # Strip any URL scheme from the import path.
    url = importpath
    for scheme in (r'http://', r'https://'):
        url = re.sub(scheme, '', url)
    # Native (standard-library) packages need no prefix detection.
    if url.split('/')[0] in self.native_packages["packages"]:
        self.native = True
        return self
    # Otherwise try each known import-path prefix in turn.
    for pattern in self.known_ipprefixes:
        found = re.search(pattern, url)
        if found is None:
            continue
        self._prefix = found.group(1)
        if found.group(3):
            self._package = found.group(3)
        return self
    raise ValueError("Import path prefix for '%s' not recognized" % importpath)
12,429
https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/go/importpath/parser.py#L22-L50
[ "def", "_requires_submission", "(", "self", ")", ":", "if", "self", ".", "dbcon_part", "is", "None", ":", "return", "False", "tables", "=", "get_table_list", "(", "self", ".", "dbcon_part", ")", "nrows", "=", "0", "for", "table", "in", "tables", ":", "if", "table", "==", "'__submissions__'", ":", "continue", "nrows", "+=", "get_number_of_rows", "(", "self", ".", "dbcon_part", ",", "table", ")", "if", "nrows", ":", "logger", ".", "debug", "(", "'%d new statistics were added since the last submission.'", "%", "nrows", ")", "else", ":", "logger", ".", "debug", "(", "'No new statistics were added since the last submission.'", ")", "t0", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "s", "=", "self", "[", "'__submissions__'", "]", "last_submission", "=", "s", ".", "get_last", "(", "1", ")", "if", "last_submission", ":", "logger", ".", "debug", "(", "'Last submission was %s'", "%", "last_submission", "[", "0", "]", "[", "'Time'", "]", ")", "t_ref", "=", "datetime", ".", "datetime", ".", "strptime", "(", "last_submission", "[", "0", "]", "[", "'Time'", "]", ",", "Table", ".", "time_fmt", ")", "else", ":", "t_ref", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "os", ".", "path", ".", "getmtime", "(", "self", ".", "filepath", ")", ")", "submission_interval_passed", "=", "(", "t0", "-", "t_ref", ")", ".", "total_seconds", "(", ")", ">", "self", ".", "submit_interval_s", "submission_required", "=", "bool", "(", "submission_interval_passed", "and", "nrows", ")", "if", "submission_required", ":", "logger", ".", "debug", "(", "'A submission is overdue.'", ")", "else", ":", "logger", ".", "debug", "(", "'No submission required.'", ")", "return", "submission_required" ]
Remove all tabs and convert them into spaces.
def sub_retab(match):
    r"""Expand a run of tabs into spaces, respecting tab stops.

    *match* carries the text preceding the tabs in group 1 and the run
    of tab characters in group 2.
    """
    prefix = match.group(1)
    tab_count = len(match.group(2))
    # Each tab advances the column to the next TAB_SIZE boundary.
    pad = TAB_SIZE * tab_count - len(prefix) % TAB_SIZE
    return prefix + ' ' * pad
12,430
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L81-L101
[ "def", "addIVMInputs", "(", "imageObjectList", ",", "ivmlist", ")", ":", "if", "ivmlist", "is", "None", ":", "return", "for", "img", ",", "ivmname", "in", "zip", "(", "imageObjectList", ",", "ivmlist", ")", ":", "img", ".", "updateIVMName", "(", "ivmname", ")" ]
Handles whitespace cleanup.
def handle_whitespace(text):
    r"""Handle whitespace cleanup.

    Expands tabs via sub_retab, removes whitespace-only noise matched
    by re_whitespace, and strips the result.
    """
    detabbed = re_retab.sub(sub_retab, text)
    return re_whitespace.sub('', detabbed).strip()
12,431
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L103-L111
[ "def", "__get_value", "(", "self", ",", "bundleId", ",", "languageId", ",", "resourceKey", ",", "fallback", "=", "False", ")", ":", "resourceEntryData", "=", "self", ".", "__get_resource_entry_data", "(", "bundleId", "=", "bundleId", ",", "languageId", "=", "languageId", ",", "resourceKey", "=", "resourceKey", ",", "fallback", "=", "fallback", ")", "if", "not", "resourceEntryData", ":", "return", "None", "value", "=", "resourceEntryData", ".", "get", "(", "self", ".", "__RESPONSE_TRANSLATION_KEY", ")", "return", "value" ]
Extracts variables that can be used in templating engines .
def get_variables(text):
    """Extract variables usable by templating engines.

    Returns the text with variable declarations removed, plus a dict
    mapping variable names to their values.
    """
    # re_vars.findall yields (name, value) pairs, which dict() accepts
    # directly.
    variables = dict(re_vars.findall(text))
    return re_vars.sub('', text), variables
12,432
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L120-L137
[ "def", "split", "(", "self", ",", "verbose", "=", "None", ",", "end_in_new_line", "=", "None", ")", ":", "elapsed_time", "=", "self", ".", "get_elapsed_time", "(", ")", "self", ".", "split_elapsed_time", ".", "append", "(", "elapsed_time", ")", "self", ".", "_cumulative_elapsed_time", "+=", "elapsed_time", "self", ".", "_elapsed_time", "=", "datetime", ".", "timedelta", "(", ")", "if", "verbose", "is", "None", ":", "verbose", "=", "self", ".", "verbose_end", "if", "verbose", ":", "if", "end_in_new_line", "is", "None", ":", "end_in_new_line", "=", "self", ".", "end_in_new_line", "if", "end_in_new_line", ":", "self", ".", "log", "(", "\"{} done in {}\"", ".", "format", "(", "self", ".", "description", ",", "elapsed_time", ")", ")", "else", ":", "self", ".", "log", "(", "\" done in {}\"", ".", "format", "(", "elapsed_time", ")", ")", "self", ".", "_start_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")" ]
Retrieves all link references within the text .
def get_references(text):
    """Retrieve all link references within the text.

    Returns the text with reference definitions removed, plus a dict
    mapping normalized reference ids to (link, title) pairs.
    """
    references = {}
    for ref_id, link, _, title in re_references.findall(text):
        # Normalize the id: drop surrounding angle brackets, lowercase,
        # and trim whitespace.
        normalized = re.sub(r'<(.*?)>', r'\1', ref_id).lower().strip()
        references[normalized] = (link, title)
    return re_references.sub('', text), references
12,433
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L155-L180
[ "def", "union", "(", "self", ",", "rdds", ")", ":", "first_jrdd_deserializer", "=", "rdds", "[", "0", "]", ".", "_jrdd_deserializer", "if", "any", "(", "x", ".", "_jrdd_deserializer", "!=", "first_jrdd_deserializer", "for", "x", "in", "rdds", ")", ":", "rdds", "=", "[", "x", ".", "_reserialize", "(", ")", "for", "x", "in", "rdds", "]", "cls", "=", "SparkContext", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "api", ".", "java", ".", "JavaRDD", "jrdds", "=", "SparkContext", ".", "_gateway", ".", "new_array", "(", "cls", ",", "len", "(", "rdds", ")", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "rdds", ")", ")", ":", "jrdds", "[", "i", "]", "=", "rdds", "[", "i", "]", ".", "_jrdd", "return", "RDD", "(", "self", ".", "_jsc", ".", "union", "(", "jrdds", ")", ",", "self", ",", "rdds", "[", "0", "]", ".", "_jrdd_deserializer", ")" ]
Retrieves all footnote backreferences within the text .
def get_footnote_backreferences(text, markdown_obj):
    """Retrieve all footnote definitions, preserving encounter order.

    Returns the text with footnote definitions removed, plus an
    OrderedDict mapping normalized footnote ids to their bodies.
    """
    footnotes = OrderedDict()
    for raw_id, body in re_footnote_backreferences.findall(text):
        # Normalize the id the same way as link references.
        note_id = re.sub(r'<(.*?)>', r'\1', raw_id).lower().strip()
        # Remove up to four leading spaces of indentation per line.
        footnotes[note_id] = re.sub(r'^[ ]{0,4}', '', body, flags=re.M)
    return re_footnote_backreferences.sub('', text), footnotes
12,434
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L194-L221
[ "def", "make_client", "(", "zhmc", ",", "userid", "=", "None", ",", "password", "=", "None", ")", ":", "global", "USERID", ",", "PASSWORD", "# pylint: disable=global-statement", "USERID", "=", "userid", "or", "USERID", "or", "six", ".", "input", "(", "'Enter userid for HMC {}: '", ".", "format", "(", "zhmc", ")", ")", "PASSWORD", "=", "password", "or", "PASSWORD", "or", "getpass", ".", "getpass", "(", "'Enter password for {}: '", ".", "format", "(", "USERID", ")", ")", "session", "=", "zhmcclient", ".", "Session", "(", "zhmc", ",", "USERID", ",", "PASSWORD", ")", "session", ".", "logon", "(", ")", "client", "=", "zhmcclient", ".", "Client", "(", "session", ")", "print", "(", "'Established logged-on session with HMC {} using userid {}'", ".", "format", "(", "zhmc", ",", "USERID", ")", ")", "return", "client" ]
Hashes HTML block tags .
def hash_blocks(text, hashes):
    """Replace HTML block tags with hash placeholders.

    The original markup is stored in *hashes* under the placeholder
    key for later restoration by unhash().
    """
    def _stash(match):
        content = match.group(1)
        placeholder = hash_text(content, 'block')
        hashes[placeholder] = content
        # Blank lines around the placeholder keep paragraph parsing
        # from touching it.
        return '\n\n' + placeholder + '\n\n'
    return re_block.sub(_stash, text)
12,435
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L282-L299
[ "def", "_apply_mask", "(", "self", ")", ":", "w", "=", "self", ".", "_w", "w_shape", "=", "w", ".", "get_shape", "(", ")", "mask_shape", "=", "self", ".", "_mask", ".", "get_shape", "(", ")", "if", "mask_shape", ".", "ndims", ">", "w_shape", ".", "ndims", ":", "raise", "base", ".", "IncompatibleShapeError", "(", "\"Invalid mask shape: {}. Max shape: {}\"", ".", "format", "(", "mask_shape", ".", "ndims", ",", "len", "(", "self", ".", "_data_format", ")", ")", ")", "if", "mask_shape", "!=", "w_shape", "[", ":", "mask_shape", ".", "ndims", "]", ":", "raise", "base", ".", "IncompatibleShapeError", "(", "\"Invalid mask shape: {}. Weight shape: {}\"", ".", "format", "(", "mask_shape", ",", "w_shape", ")", ")", "# TF broadcasting is a bit fragile.", "# Expand the shape of self._mask by one dim at a time to the right", "# until the rank matches `weight_shape`.", "while", "self", ".", "_mask", ".", "get_shape", "(", ")", ".", "ndims", "<", "w_shape", ".", "ndims", ":", "self", ".", "_mask", "=", "tf", ".", "expand_dims", "(", "self", ".", "_mask", ",", "-", "1", ")", "# tf.Variable & tf.ResourceVariable don't support *=.", "w", "=", "w", "*", "self", ".", "_mask", "# pylint: disable=g-no-augmented-assignment", "return", "w" ]
Hashes ordered and unordered lists .
def hash_lists(text, hashes, markdown_obj):
    """Hash ordered and unordered lists.

    Each markdown list found in *text* is converted to an <ol>/<ul>
    HTML fragment, stored in *hashes* under a placeholder key, and the
    placeholder is spliced into the returned text.
    """
    # 'u'/'o' select the HTML tag; the marker regex matches a bullet
    # ('+', '*', '-') or a numbered prefix like '1.'.
    for style, marker in (('u', '[+*-]'), ('o', r'\d+\.')):
        list_re = re.compile(re_list % (marker, marker), re.S | re.X)
        for match in list_re.finditer(text):
            if not match:
                continue
            lst = match.group(1)
            # Split into items; the first split element precedes the
            # first marker, so drop it.
            items = re.split(r'(?:\n|\A) {0,3}%s ' % marker, lst)[1:]
            whole_list = ''
            for item in items:
                # Outdent the item body, then convert it recursively.
                item = re.sub(r'^ {1,4}', '', item, flags=re.M)
                item = markdown_obj.convert(item)
                # If the item became a single paragraph, unwrap the
                # <p> tags so the <li> stays compact.
                par_match = re.match('<p>(.*?)</p>', item, flags=re.S)
                if par_match and par_match.group(0) == item.strip():
                    item = par_match.group(1)
                whole_list += '<li>{}</li>\n'.format(item)
            whole_list = '<{0}l>\n{1}\n</{0}l>'.format(
                style, re.sub('^', ' ', whole_list.strip(), flags=re.M))
            hashed = hash_text(whole_list, 'list')
            hashes[hashed] = whole_list
            # Splice the placeholder into the (possibly already edited)
            # text at the position of the matched span.
            start = text.index(match.group(0))
            end = start + len(match.group(0))
            text = text[:start] + '\n\n' + hashed + '\n\n' + text[end:]
    return text
12,436
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L320-L370
[ "def", "stop_capture", "(", "self", ")", ":", "if", "self", ".", "_capture_node", ":", "yield", "from", "self", ".", "_capture_node", "[", "\"node\"", "]", ".", "post", "(", "\"/adapters/{adapter_number}/ports/{port_number}/stop_capture\"", ".", "format", "(", "adapter_number", "=", "self", ".", "_capture_node", "[", "\"adapter_number\"", "]", ",", "port_number", "=", "self", ".", "_capture_node", "[", "\"port_number\"", "]", ")", ")", "self", ".", "_capture_node", "=", "None", "yield", "from", "super", "(", ")", ".", "stop_capture", "(", ")" ]
Hashes block quotes .
def hash_blockquotes(text, hashes, markdown_obj):
    """Convert block quotes to HTML and replace them with placeholders."""
    def _stash(match):
        quoted = match.group(1).strip()
        # Strip the leading '> ' marker from every line of the quote.
        inner = re.sub(r'(?:(?<=\n)|(?<=\A))> ?', '', quoted)
        # Recursively convert the quote body as markdown.
        html = '<blockquote>{}</blockquote>'.format(
            markdown_obj.convert(inner))
        placeholder = hash_text(html, 'blockquote')
        hashes[placeholder] = html
        return '\n\n' + placeholder + '\n\n'
    return re_blockquote.sub(_stash, text)
12,437
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L418-L438
[ "def", "deregister_image", "(", "self", ",", "image_id", ",", "delete_snapshot", "=", "False", ")", ":", "snapshot_id", "=", "None", "if", "delete_snapshot", ":", "image", "=", "self", ".", "get_image", "(", "image_id", ")", "for", "key", "in", "image", ".", "block_device_mapping", ":", "if", "key", "==", "\"/dev/sda1\"", ":", "snapshot_id", "=", "image", ".", "block_device_mapping", "[", "key", "]", ".", "snapshot_id", "break", "result", "=", "self", ".", "get_status", "(", "'DeregisterImage'", ",", "{", "'ImageId'", ":", "image_id", "}", ",", "verb", "=", "'POST'", ")", "if", "result", "and", "snapshot_id", ":", "return", "result", "and", "self", ".", "delete_snapshot", "(", "snapshot_id", ")", "return", "result" ]
Hashes inline code tags .
def hash_codes(text, hashes):
    """Escape and hash inline code spans."""
    def _stash(match):
        # Escape HTML entities inside the span before wrapping it.
        rendered = '<code>{}</code>'.format(escape(match.group(2)))
        placeholder = hash_text(rendered, 'code')
        hashes[placeholder] = rendered
        return placeholder
    return re_code.sub(_stash, text)
12,438
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L491-L506
[ "def", "to_vobject", "(", "self", ",", "filename", "=", "None", ",", "uid", "=", "None", ")", ":", "self", ".", "_update", "(", ")", "cal", "=", "iCalendar", "(", ")", "if", "uid", ":", "self", ".", "_gen_vevent", "(", "self", ".", "_reminders", "[", "filename", "]", "[", "uid", "]", ",", "cal", ".", "add", "(", "'vevent'", ")", ")", "elif", "filename", ":", "for", "event", "in", "self", ".", "_reminders", "[", "filename", "]", ".", "values", "(", ")", ":", "self", ".", "_gen_vevent", "(", "event", ",", "cal", ".", "add", "(", "'vevent'", ")", ")", "else", ":", "for", "filename", "in", "self", ".", "_reminders", ":", "for", "event", "in", "self", ".", "_reminders", "[", "filename", "]", ".", "values", "(", ")", ":", "self", ".", "_gen_vevent", "(", "event", ",", "cal", ".", "add", "(", "'vevent'", ")", ")", "return", "cal" ]
Hashes any non - block tags .
def hash_tags(text, hashes):
    """Hash any non-block HTML tags so later passes leave them intact."""
    def _stash(match):
        tag = match.group(0)
        placeholder = hash_text(tag, 'tag')
        hashes[placeholder] = tag
        return placeholder
    return re_tag.sub(_stash, text)
12,439
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L634-L650
[ "def", "url_to_resource", "(", "url", ",", "request", "=", "None", ")", ":", "if", "request", "is", "None", ":", "request", "=", "get_current_request", "(", ")", "# cnv = request.registry.getAdapter(request, IResourceUrlConverter)", "reg", "=", "get_current_registry", "(", ")", "cnv", "=", "reg", ".", "getAdapter", "(", "request", ",", "IResourceUrlConverter", ")", "return", "cnv", ".", "url_to_resource", "(", "url", ")" ]
Unhashes all hashed entites in the hashes dictionary .
def unhash(text, hashes):
    """Substitute hash placeholders back with their stored content.

    Loops because restored content may itself contain placeholders.
    """
    def _restore(match):
        return hashes[match.group(0)]
    while re_hash.search(text):
        text = re_hash.sub(_restore, text)
    # Remove the indentation that was applied inside <pre> regions.
    def _dedent_pre(match):
        return re.sub('^' + match.group(1), '', match.group(0), flags=re.M)
    return re_pre_tag.sub(_dedent_pre, text)
12,440
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L673-L685
[ "def", "WriteFD", "(", "self", ",", "Channel", ",", "MessageBuffer", ")", ":", "try", ":", "res", "=", "self", ".", "__m_dllBasic", ".", "CAN_WriteFD", "(", "Channel", ",", "byref", "(", "MessageBuffer", ")", ")", "return", "TPCANStatus", "(", "res", ")", "except", ":", "logger", ".", "error", "(", "\"Exception on PCANBasic.WriteFD\"", ")", "raise" ]
Captures paragraphs .
def paragraph_sub(match):
    """Wrap a matched paragraph in <p> tags.

    A trailing space before a newline (markdown hard break) becomes a
    <br/> element.
    """
    body = match.group(0).strip()
    body = re.sub(r' \n', r'\n<br/>\n', body)
    return '<p>{}</p>'.format(body)
12,441
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L845-L848
[ "def", "_deleteTrackers", "(", "self", ",", "trackers", ")", ":", "for", "tracker", "in", "trackers", ":", "if", "tracker", ".", "store", "is", "None", ":", "# we're not updating the list of live signups client side, so", "# we might get a signup that has already been deleted", "continue", "sig", "=", "tracker", ".", "signupItem", "# XXX the only reason we're doing this here is that we're afraid to", "# add a whenDeleted=CASCADE to powerups because it's inefficient,", "# however, this is arguably the archetypical use of", "# whenDeleted=CASCADE. Soon we need to figure out a real solution", "# (but I have no idea what it is). -glyph", "for", "iface", "in", "sig", ".", "store", ".", "interfacesFor", "(", "sig", ")", ":", "sig", ".", "store", ".", "powerDown", "(", "sig", ",", "iface", ")", "tracker", ".", "deleteFromStore", "(", ")", "sig", ".", "deleteFromStore", "(", ")" ]
Create a set of all nodes containing the root_nodes and all nodes reachable from them.
def truncateGraph(graph, root_nodes):
    """Return the subgraph containing root_nodes and every node
    reachable from them.
    """
    result = Graph()
    # Union of the reachable subgraphs of each root node.
    for root in root_nodes:
        reachable = GraphUtils.getReacheableSubgraph(graph, root)
        result = GraphUtils.joinGraphs(result, reachable)
    return result
12,442
https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/graphs/graphutils.py#L136-L144
[ "def", "_encode_header", "(", "key", ":", "str", ",", "pdict", ":", "Dict", "[", "str", ",", "str", "]", ")", "->", "str", ":", "if", "not", "pdict", ":", "return", "key", "out", "=", "[", "key", "]", "# Sort the parameters just to make it easy to test.", "for", "k", ",", "v", "in", "sorted", "(", "pdict", ".", "items", "(", ")", ")", ":", "if", "v", "is", "None", ":", "out", ".", "append", "(", "k", ")", "else", ":", "# TODO: quote if necessary.", "out", ".", "append", "(", "\"%s=%s\"", "%", "(", "k", ",", "v", ")", ")", "return", "\"; \"", ".", "join", "(", "out", ")" ]
Remove all nodes for which node_fnc does not hold.
def filterGraph ( graph , node_fnc ) : nodes = filter ( lambda l : node_fnc ( l ) , graph . nodes ( ) ) edges = { } gedges = graph . edges ( ) for u in gedges : if u not in nodes : continue for v in gedges [ u ] : if v not in nodes : continue try : edges [ u ] . append ( v ) except KeyError : edges [ u ] = [ v ] return Graph ( nodes , edges )
12,443
https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/graphs/graphutils.py#L147-L165
[ "def", "_PathStripPrefix", "(", "self", ",", "path", ")", ":", "if", "path", ".", "startswith", "(", "'\\\\\\\\.\\\\'", ")", "or", "path", ".", "startswith", "(", "'\\\\\\\\?\\\\'", ")", ":", "if", "len", "(", "path", ")", "<", "7", "or", "path", "[", "5", "]", "!=", "':'", "or", "path", "[", "6", "]", "!=", "self", ".", "_PATH_SEPARATOR", ":", "# Cannot handle a non-volume path.", "return", "None", "path", "=", "path", "[", "7", ":", "]", "elif", "path", ".", "startswith", "(", "'\\\\\\\\'", ")", ":", "# Cannot handle an UNC path.", "return", "None", "elif", "len", "(", "path", ")", ">=", "3", "and", "path", "[", "1", "]", "==", "':'", ":", "# Check if the path is a Volume 'absolute' path.", "if", "path", "[", "2", "]", "!=", "self", ".", "_PATH_SEPARATOR", ":", "# Cannot handle a Volume 'relative' path.", "return", "None", "path", "=", "path", "[", "3", ":", "]", "elif", "path", ".", "startswith", "(", "'\\\\'", ")", ":", "path", "=", "path", "[", "1", ":", "]", "else", ":", "# Cannot handle a relative path.", "return", "None", "return", "path" ]
Return a list of all non dotfiles in a given directory .
def listdir ( self , path ) : for f in os . listdir ( path ) : if not f . startswith ( '.' ) : yield f
12,444
https://github.com/tslight/treepick/blob/7adf838900f11e8845e17d8c79bb2b23617aec2c/treepick/paths.py#L27-L33
[ "def", "adhoc_metric_to_sqla", "(", "self", ",", "metric", ",", "cols", ")", ":", "expression_type", "=", "metric", ".", "get", "(", "'expressionType'", ")", "label", "=", "utils", ".", "get_metric_name", "(", "metric", ")", "if", "expression_type", "==", "utils", ".", "ADHOC_METRIC_EXPRESSION_TYPES", "[", "'SIMPLE'", "]", ":", "column_name", "=", "metric", ".", "get", "(", "'column'", ")", ".", "get", "(", "'column_name'", ")", "table_column", "=", "cols", ".", "get", "(", "column_name", ")", "if", "table_column", ":", "sqla_column", "=", "table_column", ".", "get_sqla_col", "(", ")", "else", ":", "sqla_column", "=", "column", "(", "column_name", ")", "sqla_metric", "=", "self", ".", "sqla_aggregations", "[", "metric", ".", "get", "(", "'aggregate'", ")", "]", "(", "sqla_column", ")", "elif", "expression_type", "==", "utils", ".", "ADHOC_METRIC_EXPRESSION_TYPES", "[", "'SQL'", "]", ":", "sqla_metric", "=", "literal_column", "(", "metric", ".", "get", "(", "'sqlExpression'", ")", ")", "else", ":", "return", "None", "return", "self", ".", "make_sqla_column_compatible", "(", "sqla_metric", ",", "label", ")" ]
Create list of absolute paths to be used to instantiate path objects for traversal based on whether or not hidden attribute is set .
def getchildren ( self ) : try : if self . hidden : return [ os . path . join ( self . name , child ) for child in sorted ( self . listdir ( self . name ) ) ] else : return [ os . path . join ( self . name , child ) for child in sorted ( os . listdir ( self . name ) ) ] except OSError : return None
12,445
https://github.com/tslight/treepick/blob/7adf838900f11e8845e17d8c79bb2b23617aec2c/treepick/paths.py#L35-L48
[ "def", "manual_update_license", "(", "self", ",", "fd", ",", "filename", "=", "'cdrouter.lic'", ")", ":", "schema", "=", "UpgradeSchema", "(", ")", "resp", "=", "self", ".", "service", ".", "post", "(", "self", ".", "base", "+", "'license/'", ",", "files", "=", "{", "'file'", ":", "(", "filename", ",", "fd", ")", "}", ")", "return", "self", ".", "service", ".", "decode", "(", "schema", ",", "resp", ")" ]
If we have children use a list comprehension to instantiate new paths objects to traverse .
def getpaths ( self ) : self . children = self . getchildren ( ) if self . children is None : return if self . paths is None : self . paths = [ Paths ( self . screen , os . path . join ( self . name , child ) , self . hidden , self . picked , self . expanded , self . sized ) for child in self . children ] return self . paths
12,446
https://github.com/tslight/treepick/blob/7adf838900f11e8845e17d8c79bb2b23617aec2c/treepick/paths.py#L50-L66
[ "def", "make_random_models_table", "(", "n_sources", ",", "param_ranges", ",", "random_state", "=", "None", ")", ":", "prng", "=", "check_random_state", "(", "random_state", ")", "sources", "=", "Table", "(", ")", "for", "param_name", ",", "(", "lower", ",", "upper", ")", "in", "param_ranges", ".", "items", "(", ")", ":", "# Generate a column for every item in param_ranges, even if it", "# is not in the model (e.g. flux). However, such columns will", "# be ignored when rendering the image.", "sources", "[", "param_name", "]", "=", "prng", ".", "uniform", "(", "lower", ",", "upper", ",", "n_sources", ")", "return", "sources" ]
Recursive generator that lazily unfolds the filesystem .
def traverse ( self ) : yield self , 0 if self . name in self . expanded : for path in self . getpaths ( ) : for child , depth in path . traverse ( ) : yield child , depth + 1
12,447
https://github.com/tslight/treepick/blob/7adf838900f11e8845e17d8c79bb2b23617aec2c/treepick/paths.py#L68-L76
[ "def", "customer_discount_webhook_handler", "(", "event", ")", ":", "crud_type", "=", "CrudType", ".", "determine", "(", "event", "=", "event", ")", "discount_data", "=", "event", ".", "data", ".", "get", "(", "\"object\"", ",", "{", "}", ")", "coupon_data", "=", "discount_data", ".", "get", "(", "\"coupon\"", ",", "{", "}", ")", "customer", "=", "event", ".", "customer", "if", "crud_type", ".", "created", "or", "crud_type", ".", "updated", ":", "coupon", ",", "_", "=", "_handle_crud_like_event", "(", "target_cls", "=", "models", ".", "Coupon", ",", "event", "=", "event", ",", "data", "=", "coupon_data", ",", "id", "=", "coupon_data", ".", "get", "(", "\"id\"", ")", ")", "coupon_start", "=", "discount_data", ".", "get", "(", "\"start\"", ")", "coupon_end", "=", "discount_data", ".", "get", "(", "\"end\"", ")", "else", ":", "coupon", "=", "None", "coupon_start", "=", "None", "coupon_end", "=", "None", "customer", ".", "coupon", "=", "coupon", "customer", ".", "coupon_start", "=", "convert_tstamp", "(", "coupon_start", ")", "customer", ".", "coupon_end", "=", "convert_tstamp", "(", "coupon_end", ")", "customer", ".", "save", "(", ")" ]
Compute the intersection point of two lines
def line_line_intersect ( x , y ) : A = x [ 0 ] * y [ 1 ] - y [ 0 ] * x [ 1 ] B = x [ 2 ] * y [ 3 ] - y [ 2 ] * x [ 4 ] C = ( x [ 0 ] - x [ 1 ] ) * ( y [ 2 ] - y [ 3 ] ) - ( y [ 0 ] - y [ 1 ] ) * ( x [ 2 ] - x [ 3 ] ) Ix = ( A * ( x [ 2 ] - x [ 3 ] ) - ( x [ 0 ] - x [ 1 ] ) * B ) / C Iy = ( A * ( y [ 2 ] - y [ 3 ] ) - ( y [ 0 ] - y [ 1 ] ) * B ) / C return Ix , Iy
12,448
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/grid_extralines_gen_decouplings.py#L116-L137
[ "def", "memory_read64", "(", "self", ",", "addr", ",", "num_long_words", ")", ":", "buf_size", "=", "num_long_words", "buf", "=", "(", "ctypes", ".", "c_ulonglong", "*", "buf_size", ")", "(", ")", "units_read", "=", "self", ".", "_dll", ".", "JLINKARM_ReadMemU64", "(", "addr", ",", "buf_size", ",", "buf", ",", "0", ")", "if", "units_read", "<", "0", ":", "raise", "errors", ".", "JLinkException", "(", "units_read", ")", "return", "buf", "[", ":", "units_read", "]" ]
Returns the path of a file installed along the package
def pkg_data_filename ( resource_name , filename = None ) : resource_filename = pkg_resources . resource_filename ( tripleohelper . __name__ , resource_name ) if filename is not None : resource_filename = os . path . join ( resource_filename , filename ) return resource_filename
12,449
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/utils.py#L23-L32
[ "def", "win32_refresh_window", "(", "cls", ")", ":", "# Get console handle", "handle", "=", "windll", ".", "kernel32", ".", "GetConsoleWindow", "(", ")", "RDW_INVALIDATE", "=", "0x0001", "windll", ".", "user32", ".", "RedrawWindow", "(", "handle", ",", "None", ",", "None", ",", "c_uint", "(", "RDW_INVALIDATE", ")", ")" ]
Merge the current branch into master .
def merge ( config ) : repo = config . repo active_branch = repo . active_branch if active_branch . name == "master" : error_out ( "You're already on the master branch." ) if repo . is_dirty ( ) : error_out ( 'Repo is "dirty". ({})' . format ( ", " . join ( [ repr ( x . b_path ) for x in repo . index . diff ( None ) ] ) ) ) branch_name = active_branch . name state = read ( config . configfile ) origin_name = state . get ( "ORIGIN_NAME" , "origin" ) upstream_remote = None for remote in repo . remotes : if remote . name == origin_name : upstream_remote = remote break if not upstream_remote : error_out ( "No remote called {!r} found" . format ( origin_name ) ) repo . heads . master . checkout ( ) upstream_remote . pull ( repo . heads . master ) repo . git . merge ( branch_name ) repo . git . branch ( "-d" , branch_name ) success_out ( "Branch {!r} deleted." . format ( branch_name ) ) info_out ( "NOW, you might want to run:\n" ) info_out ( "git push origin master\n\n" ) push_for_you = input ( "Run that push? [Y/n] " ) . lower ( ) . strip ( ) != "n" if push_for_you : upstream_remote . push ( "master" ) success_out ( "Current master pushed to {}" . format ( upstream_remote . name ) )
12,450
https://github.com/peterbe/gg/blob/2aace5bdb4a9b1cb65bea717784edf54c63b7bad/gg/builtins/merge/gg_merge.py#L8-L48
[ "def", "_remove_monitor", "(", "monitors", ",", "handle", ",", "devices", ",", "events", ")", ":", "empty_devices", "=", "[", "]", "for", "conn_string", "in", "devices", ":", "data", "=", "monitors", ".", "get", "(", "conn_string", ")", "if", "data", "is", "None", ":", "continue", "for", "event", "in", "events", ":", "event_dict", "=", "data", ".", "get", "(", "event", ")", "if", "event_dict", "is", "None", ":", "continue", "if", "handle", "in", "event_dict", ":", "del", "event_dict", "[", "handle", "]", "if", "len", "(", "event_dict", ")", "==", "0", ":", "del", "data", "[", "event", "]", "if", "len", "(", "data", ")", "==", "0", ":", "empty_devices", ".", "append", "(", "conn_string", ")", "return", "empty_devices" ]
u Override of the default task decorator to specify use of this backend .
def chord_task ( * args , * * kwargs ) : given_backend = kwargs . get ( u'backend' , None ) if not isinstance ( given_backend , ChordableDjangoBackend ) : kwargs [ u'backend' ] = ChordableDjangoBackend ( kwargs . get ( 'app' , current_app ) ) return task ( * args , * * kwargs )
12,451
https://github.com/edx/edx-celeryutils/blob/d8745f5f0929ad154fad779a19fbefe7f51e9498/celery_utils/chordable_django_backend.py#L89-L96
[ "def", "private_messenger", "(", ")", ":", "while", "__websocket_server_running__", ":", "pipein", "=", "open", "(", "PRIVATE_PIPE", ",", "'r'", ")", "line", "=", "pipein", ".", "readline", "(", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", ".", "replace", "(", "'\\r'", ",", "''", ")", "if", "line", "!=", "''", ":", "message", "=", "json", ".", "loads", "(", "line", ")", "WebSocketHandler", ".", "send_private_message", "(", "user_id", "=", "message", "[", "'user_id'", "]", ",", "message", "=", "message", ")", "print", "line", "remaining_lines", "=", "pipein", ".", "read", "(", ")", "pipein", ".", "close", "(", ")", "pipeout", "=", "open", "(", "PRIVATE_PIPE", ",", "'w'", ")", "pipeout", ".", "write", "(", "remaining_lines", ")", "pipeout", ".", "close", "(", ")", "else", ":", "pipein", ".", "close", "(", ")", "time", ".", "sleep", "(", "0.05", ")" ]
u Clean up expired records .
def _cleanup ( self , status , expires_multiplier = 1 ) : # self.expires is inherited, and defaults to 1 day (or setting CELERY_TASK_RESULT_EXPIRES) expires = self . expires if isinstance ( self . expires , timedelta ) else timedelta ( seconds = self . expires ) expires = expires * expires_multiplier chords_to_delete = ChordData . objects . filter ( callback_result__date_done__lte = datetime . now ( ) - expires , callback_result__status = status ) . iterator ( ) for _chord in chords_to_delete : subtask_ids = [ subtask . task_id for subtask in _chord . completed_results . all ( ) ] _chord . completed_results . clear ( ) TaskMeta . objects . filter ( task_id__in = subtask_ids ) . delete ( ) _chord . callback_result . delete ( ) _chord . delete ( )
12,452
https://github.com/edx/edx-celeryutils/blob/d8745f5f0929ad154fad779a19fbefe7f51e9498/celery_utils/chordable_django_backend.py#L113-L133
[ "def", "build_mock_open_side_effect", "(", "string_d", ",", "stream_d", ")", ":", "assert", "(", "len", "(", "set", "(", "string_d", ".", "keys", "(", ")", ")", ".", "intersection", "(", "set", "(", "stream_d", ".", "keys", "(", ")", ")", ")", ")", "==", "0", ")", "def", "mock_open_side_effect", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "args", "[", "0", "]", "in", "string_d", ":", "return", "StringIO", ".", "StringIO", "(", "string_d", "[", "args", "[", "0", "]", "]", ")", "elif", "args", "[", "0", "]", "in", "stream_d", ":", "return", "stream_d", "[", "args", "[", "0", "]", "]", "else", ":", "raise", "IOError", "(", "\"No such file: \"", "+", "args", "[", "0", "]", ")", "return", "mock_open_side_effect" ]
u Update the linking ChordData object and execute callback if needed .
def on_chord_part_return ( self , task , state , result , propagate = False ) : # pylint: disable=redefined-outer-name with transaction . atomic ( ) : chord_data = ChordData . objects . select_for_update ( ) . get ( # select_for_update will prevent race conditions callback_result__task_id = task . request . chord [ u'options' ] [ u'task_id' ] ) _ = TaskMeta . objects . update_or_create ( task_id = task . request . id , defaults = { u'status' : state , u'result' : result } ) if chord_data . is_ready ( ) : # we don't use celery beat, so this is as good a place as any to fire off periodic cleanup tasks self . get_suitable_app ( current_app ) . tasks [ u'celery.backend_cleanup' ] . apply_async ( ) chord_data . execute_callback ( )
12,453
https://github.com/edx/edx-celeryutils/blob/d8745f5f0929ad154fad779a19fbefe7f51e9498/celery_utils/chordable_django_backend.py#L151-L179
[ "def", "export", "(", "self", ",", "name", ",", "columns", ",", "points", ")", ":", "WHITELIST", "=", "'_-'", "+", "string", ".", "ascii_letters", "+", "string", ".", "digits", "SUBSTITUTE", "=", "'_'", "def", "whitelisted", "(", "s", ",", "whitelist", "=", "WHITELIST", ",", "substitute", "=", "SUBSTITUTE", ")", ":", "return", "''", ".", "join", "(", "c", "if", "c", "in", "whitelist", "else", "substitute", "for", "c", "in", "s", ")", "for", "sensor", ",", "value", "in", "zip", "(", "columns", ",", "points", ")", ":", "try", ":", "sensor", "=", "[", "whitelisted", "(", "name", ")", "for", "name", "in", "sensor", ".", "split", "(", "'.'", ")", "]", "tobeexport", "=", "[", "self", ".", "topic", ",", "self", ".", "hostname", ",", "name", "]", "tobeexport", ".", "extend", "(", "sensor", ")", "topic", "=", "'/'", ".", "join", "(", "tobeexport", ")", "self", ".", "client", ".", "publish", "(", "topic", ",", "value", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "\"Can not export stats to MQTT server (%s)\"", "%", "e", ")" ]
u Instantiate a linking ChordData object before executing subtasks .
def apply_chord ( self , header , partial_args , group_id , body , * * options ) : callback_entry = TaskMeta . objects . create ( task_id = body . id ) chord_data = ChordData . objects . create ( callback_result = callback_entry ) for subtask in header : subtask_entry = TaskMeta . objects . create ( task_id = subtask . id ) chord_data . completed_results . add ( subtask_entry ) if body . options . get ( u'use_iterator' , None ) is None : body . options [ u'use_iterator' ] = True chord_data . serialized_callback = json . dumps ( body ) chord_data . save ( ) return header ( * partial_args , task_id = group_id )
12,454
https://github.com/edx/edx-celeryutils/blob/d8745f5f0929ad154fad779a19fbefe7f51e9498/celery_utils/chordable_django_backend.py#L181-L208
[ "def", "private_messenger", "(", ")", ":", "while", "__websocket_server_running__", ":", "pipein", "=", "open", "(", "PRIVATE_PIPE", ",", "'r'", ")", "line", "=", "pipein", ".", "readline", "(", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", ".", "replace", "(", "'\\r'", ",", "''", ")", "if", "line", "!=", "''", ":", "message", "=", "json", ".", "loads", "(", "line", ")", "WebSocketHandler", ".", "send_private_message", "(", "user_id", "=", "message", "[", "'user_id'", "]", ",", "message", "=", "message", ")", "print", "line", "remaining_lines", "=", "pipein", ".", "read", "(", ")", "pipein", ".", "close", "(", ")", "pipeout", "=", "open", "(", "PRIVATE_PIPE", ",", "'w'", ")", "pipeout", ".", "write", "(", "remaining_lines", ")", "pipeout", ".", "close", "(", ")", "else", ":", "pipein", ".", "close", "(", ")", "time", ".", "sleep", "(", "0.05", ")" ]
u Return a clone of given_app with ChordableDjangoBackend if needed .
def get_suitable_app ( cls , given_app ) : if not isinstance ( getattr ( given_app , 'backend' , None ) , ChordableDjangoBackend ) : return_app = deepcopy ( given_app ) return_app . backend = ChordableDjangoBackend ( return_app ) return return_app else : return given_app
12,455
https://github.com/edx/edx-celeryutils/blob/d8745f5f0929ad154fad779a19fbefe7f51e9498/celery_utils/chordable_django_backend.py#L221-L230
[ "def", "_loop", "(", "self", ")", ":", "self", ".", "stop_flag", ".", "value", "=", "False", "self", ".", "time_started", ".", "value", "=", "time", "(", ")", "self", ".", "time_elapsed", ".", "value", "=", "0", "while", "True", ":", "if", "self", ".", "stop_flag", ".", "value", ":", "break", "self", ".", "update_text", "(", ")", "with", "self", ".", "time_started", ".", "get_lock", "(", ")", ":", "start", "=", "self", ".", "time_started", ".", "value", "with", "self", ".", "time_elapsed", ".", "get_lock", "(", ")", ":", "self", ".", "time_elapsed", ".", "value", "=", "time", "(", ")", "-", "start", "if", "(", "self", ".", "timeout", ".", "value", "and", "(", "self", ".", "time_elapsed", ".", "value", ">", "self", ".", "timeout", ".", "value", ")", ")", ":", "self", ".", "stop", "(", ")", "raise", "ProgressTimedOut", "(", "self", ".", "name", ",", "self", ".", "time_elapsed", ".", "value", ",", ")" ]
Create a PriorModel wrapping the specified class with attributes from this instance . Priors can be overridden using keyword arguments . Any constructor arguments of the new class for which there is no attribute associated with this class and no keyword argument are created from config .
def linked_model_for_class ( self , cls , make_constants_variable = False , * * kwargs ) : constructor_args = inspect . getfullargspec ( cls ) . args attribute_tuples = self . attribute_tuples new_model = PriorModel ( cls ) for attribute_tuple in attribute_tuples : name = attribute_tuple . name if name in constructor_args or ( is_tuple_like_attribute_name ( name ) and tuple_name ( name ) in constructor_args ) : attribute = kwargs [ name ] if name in kwargs else attribute_tuple . value if make_constants_variable and isinstance ( attribute , Constant ) : new_attribute = getattr ( new_model , name ) if isinstance ( new_attribute , Prior ) : new_attribute . mean = attribute . value continue setattr ( new_model , name , attribute ) return new_model
12,456
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/mapper/prior_model.py#L293-L331
[ "def", "wait_for_responses", "(", "self", ")", ":", "self", ".", "thread", ".", "join", "(", "self", ".", "COMMAND_RESPONSE_TIMEOUT_S", ")", "self", ".", "running", "=", "False", "return", "self", ".", "responses" ]
Create an instance of the associated class for a set of arguments
def instance_for_arguments ( self , arguments : { Prior : float } ) : for prior , value in arguments . items ( ) : prior . assert_within_limits ( value ) model_arguments = { t . name : arguments [ t . prior ] for t in self . direct_prior_tuples } constant_arguments = { t . name : t . constant . value for t in self . direct_constant_tuples } for tuple_prior in self . tuple_prior_tuples : model_arguments [ tuple_prior . name ] = tuple_prior . prior . value_for_arguments ( arguments ) for prior_model_tuple in self . direct_prior_model_tuples : model_arguments [ prior_model_tuple . name ] = prior_model_tuple . prior_model . instance_for_arguments ( arguments ) return self . cls ( * * { * * model_arguments , * * constant_arguments } )
12,457
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/mapper/prior_model.py#L422-L444
[ "def", "dump", "(", "self", ")", ":", "try", ":", "topo", "=", "project_to_topology", "(", "self", ")", "path", "=", "self", ".", "_topology_file", "(", ")", "log", ".", "debug", "(", "\"Write %s\"", ",", "path", ")", "with", "open", "(", "path", "+", "\".tmp\"", ",", "\"w+\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "f", ":", "json", ".", "dump", "(", "topo", ",", "f", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ")", "shutil", ".", "move", "(", "path", "+", "\".tmp\"", ",", "path", ")", "except", "OSError", "as", "e", ":", "raise", "aiohttp", ".", "web", ".", "HTTPInternalServerError", "(", "text", "=", "\"Could not write topology: {}\"", ".", "format", "(", "e", ")", ")" ]
Create a new instance of model mapper with a set of Gaussian priors based on tuples provided by a previous \ nonlinear search .
def gaussian_prior_model_for_arguments ( self , arguments ) : new_model = copy . deepcopy ( self ) model_arguments = { t . name : arguments [ t . prior ] for t in self . direct_prior_tuples } for tuple_prior_tuple in self . tuple_prior_tuples : setattr ( new_model , tuple_prior_tuple . name , tuple_prior_tuple . prior . gaussian_tuple_prior_for_arguments ( arguments ) ) for prior_tuple in self . direct_prior_tuples : setattr ( new_model , prior_tuple . name , model_arguments [ prior_tuple . name ] ) for constant_tuple in self . constant_tuples : setattr ( new_model , constant_tuple . name , constant_tuple . constant ) for name , prior_model in self . direct_prior_model_tuples : setattr ( new_model , name , prior_model . gaussian_prior_model_for_arguments ( arguments ) ) return new_model
12,458
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/mapper/prior_model.py#L446-L476
[ "def", "compare", "(", "left", ":", "Optional", "[", "L", "]", ",", "right", ":", "Optional", "[", "R", "]", ")", "->", "'Comparison[L, R]'", ":", "if", "isinstance", "(", "left", ",", "File", ")", "and", "isinstance", "(", "right", ",", "Directory", ")", ":", "return", "FileDirectoryComparison", "(", "left", ",", "right", ")", "if", "isinstance", "(", "left", ",", "Directory", ")", "and", "isinstance", "(", "right", ",", "File", ")", ":", "return", "DirectoryFileComparison", "(", "left", ",", "right", ")", "if", "isinstance", "(", "left", ",", "File", ")", "or", "isinstance", "(", "right", ",", "File", ")", ":", "return", "FileComparison", "(", "left", ",", "right", ")", "if", "isinstance", "(", "left", ",", "Directory", ")", "or", "isinstance", "(", "right", ",", "Directory", ")", ":", "return", "DirectoryComparison", "(", "left", ",", "right", ")", "raise", "TypeError", "(", "f'Cannot compare entities: {left}, {right}'", ")" ]
Refresh local content for a single post from the the WordPress REST API . This can be called from a webhook on the WordPress side when a post is updated .
def load_post ( self , wp_post_id ) : path = "sites/{}/posts/{}" . format ( self . site_id , wp_post_id ) response = self . get ( path ) if response . ok and response . text : api_post = response . json ( ) self . get_ref_data_map ( bulk_mode = False ) self . load_wp_post ( api_post , bulk_mode = False ) # the post should exist in the db now, so return it so that callers can work with it try : post = Post . objects . get ( site_id = self . site_id , wp_id = wp_post_id ) except Exception as ex : logger . exception ( "Unable to load post with wp_post_id={}:\n{}" . format ( wp_post_id , ex . message ) ) else : return post else : logger . warning ( "Unable to load post with wp_post_id={}:\n{}" . format ( wp_post_id , response . text ) )
12,459
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L70-L96
[ "def", "repositories", "(", "self", ")", ":", "return", "RepositoriesDataFrame", "(", "self", ".", "__engine", ".", "getRepositories", "(", ")", ",", "self", ".", "session", ",", "self", ".", "__implicits", ")" ]
Load all WordPress categories from the given site .
def load_categories ( self , max_pages = 30 ) : logger . info ( "loading categories" ) # clear them all out so we don't get dupes if requested if self . purge_first : Category . objects . filter ( site_id = self . site_id ) . delete ( ) path = "sites/{}/categories" . format ( self . site_id ) params = { "number" : 100 } page = 1 response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) while response . ok and response . text and page < max_pages : logger . info ( " - page: %d" , page ) api_categories = response . json ( ) . get ( "categories" ) if not api_categories : # we're done here break categories = [ ] for api_category in api_categories : # if it exists locally, update local version if anything has changed existing_category = Category . objects . filter ( site_id = self . site_id , wp_id = api_category [ "ID" ] ) . first ( ) if existing_category : self . update_existing_category ( existing_category , api_category ) else : categories . append ( self . get_new_category ( api_category ) ) if categories : Category . objects . bulk_create ( categories ) elif not self . full : # we're done here break # get next page page += 1 params [ "page" ] = page response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) return
12,460
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L154-L207
[ "def", "path", "(", "path_name", "=", "None", ",", "override", "=", "None", ",", "*", ",", "root", "=", "None", ",", "name", "=", "None", ",", "ext", "=", "None", ",", "inject", "=", "None", ",", "relpath", "=", "None", ",", "reduce", "=", "False", ")", ":", "path_name", ",", "identity", ",", "root", "=", "_initialize", "(", "path_name", ",", "override", ",", "root", ",", "inject", ")", "new_name", "=", "_process_name", "(", "path_name", ",", "identity", ",", "name", ",", "ext", ")", "new_directory", "=", "_process_directory", "(", "path_name", ",", "identity", ",", "root", ",", "inject", ")", "full_path", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "new_directory", ",", "new_name", ")", ")", "if", "APPEND_SEP_TO_DIRS", "and", "not", "new_name", "and", "full_path", "[", "-", "1", "]", "!=", "os", ".", "sep", ":", "full_path", "+=", "os", ".", "sep", "final_path", "=", "_format_path", "(", "full_path", ",", "root", ",", "relpath", ",", "reduce", ")", "return", "final_path" ]
Instantiate a new Category from api data .
def get_new_category ( self , api_category ) : return Category ( site_id = self . site_id , wp_id = api_category [ "ID" ] , * * self . api_object_data ( "category" , api_category ) )
12,461
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L209-L218
[ "def", "cli", "(", "sock", ",", "configs", ",", "modules", ",", "files", ",", "log", ",", "debug", ")", ":", "setup_logging", "(", "log", ",", "debug", ")", "config", "=", "join_configs", "(", "configs", ")", "# load python modules", "load_modules", "(", "modules", ")", "# load python files", "load_files", "(", "files", ")", "# summarize active events and callbacks", "summarize_events", "(", ")", "gloop", "=", "gevent", ".", "Greenlet", ".", "spawn", "(", "loop", ",", "sock", "=", "sock", ",", "config", "=", "config", ")", "gloop", ".", "start", "(", ")", "gloop", ".", "join", "(", ")" ]
Load all WordPress tags from the given site .
def load_tags ( self , max_pages = 30 ) : logger . info ( "loading tags" ) # clear them all out so we don't get dupes if requested if self . purge_first : Tag . objects . filter ( site_id = self . site_id ) . delete ( ) path = "sites/{}/tags" . format ( self . site_id ) params = { "number" : 1000 } page = 1 response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) while response . ok and response . text and page < max_pages : logger . info ( " - page: %d" , page ) api_tags = response . json ( ) . get ( "tags" ) if not api_tags : # we're done here break tags = [ ] for api_tag in api_tags : # if it exists locally, update local version if anything has changed existing_tag = Tag . objects . filter ( site_id = self . site_id , wp_id = api_tag [ "ID" ] ) . first ( ) if existing_tag : self . update_existing_tag ( existing_tag , api_tag ) else : tags . append ( self . get_new_tag ( api_tag ) ) if tags : Tag . objects . bulk_create ( tags ) elif not self . full : # we're done here break # get next page page += 1 params [ "page" ] = page response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) return
12,462
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L220-L273
[ "def", "getOverlayTransformTrackedDeviceComponent", "(", "self", ",", "ulOverlayHandle", ",", "pchComponentName", ",", "unComponentNameSize", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getOverlayTransformTrackedDeviceComponent", "punDeviceIndex", "=", "TrackedDeviceIndex_t", "(", ")", "result", "=", "fn", "(", "ulOverlayHandle", ",", "byref", "(", "punDeviceIndex", ")", ",", "pchComponentName", ",", "unComponentNameSize", ")", "return", "result", ",", "punDeviceIndex" ]
Instantiate a new Tag from api data .
def get_new_tag ( self , api_tag ) : return Tag ( site_id = self . site_id , wp_id = api_tag [ "ID" ] , * * self . api_object_data ( "tag" , api_tag ) )
12,463
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L275-L284
[ "def", "cli", "(", "sock", ",", "configs", ",", "modules", ",", "files", ",", "log", ",", "debug", ")", ":", "setup_logging", "(", "log", ",", "debug", ")", "config", "=", "join_configs", "(", "configs", ")", "# load python modules", "load_modules", "(", "modules", ")", "# load python files", "load_files", "(", "files", ")", "# summarize active events and callbacks", "summarize_events", "(", ")", "gloop", "=", "gevent", ".", "Greenlet", ".", "spawn", "(", "loop", ",", "sock", "=", "sock", ",", "config", "=", "config", ")", "gloop", ".", "start", "(", ")", "gloop", ".", "join", "(", ")" ]
Load all WordPress authors from the given site .
def load_authors(self, max_pages=10):
    """
    Sync WordPress users for this site into the local Author table.

    Pages through the /sites/<site_id>/users endpoint (100 users per page),
    updating authors that already exist locally and bulk-creating new ones.

    :param max_pages: upper bound on the number of pages to request
    :return: None
    """
    logger.info("loading authors")

    # a requested purge wipes every local author for the site so no dupes survive
    if self.purge_first:
        Author.objects.filter(site_id=self.site_id).delete()

    path = "sites/{}/users".format(self.site_id)
    params = {"number": 100}
    page = 1

    resp = self.get(path, params)
    if not resp.ok:
        logger.warning("Response NOT OK! status_code=%s\n%s", resp.status_code, resp.text)

    while resp.ok and resp.text and page < max_pages:
        logger.info(" - page: %d", page)

        api_users = resp.json().get("users")
        if not api_users:
            # empty page: nothing left to sync
            break

        to_create = []
        for api_author in api_users:
            # update the local copy when it already exists; otherwise queue a new row
            existing = Author.objects.filter(site_id=self.site_id, wp_id=api_author["ID"]).first()
            if existing:
                self.update_existing_author(existing, api_author)
            else:
                to_create.append(self.get_new_author(api_author))

        if to_create:
            Author.objects.bulk_create(to_create)
        elif not self.full:
            # incremental run and nothing new on this page: stop paging
            break

        # this endpoint has no page param, so advance with an offset instead
        params["offset"] = page * 100
        page += 1
        resp = self.get(path, params)
        if not resp.ok:
            logger.warning("Response NOT OK! status_code=%s\n%s", resp.status_code, resp.text)

    return
12,464
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L286-L340
[ "def", "remove_expired_multipartobjects", "(", ")", ":", "delta", "=", "current_app", ".", "config", "[", "'FILES_REST_MULTIPART_EXPIRES'", "]", "expired_dt", "=", "datetime", ".", "utcnow", "(", ")", "-", "delta", "file_ids", "=", "[", "]", "for", "mp", "in", "MultipartObject", ".", "query_expired", "(", "expired_dt", ")", ":", "file_ids", ".", "append", "(", "str", "(", "mp", ".", "file_id", ")", ")", "mp", ".", "delete", "(", ")", "for", "fid", "in", "file_ids", ":", "remove_file_data", ".", "delay", "(", "fid", ")" ]
Instantiate a new Author from api data .
def get_new_author(self, api_author):
    """
    Build (but do not save) an Author instance from API data.

    :param api_author: the API data for the author
    :return: an unsaved Author object
    """
    fields = self.api_object_data("author", api_author)
    return Author(site_id=self.site_id, wp_id=api_author["ID"], **fields)
12,465
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L342-L351
[ "def", "wait_until_page_does_not_contain_these_elements", "(", "self", ",", "timeout", ",", "*", "locators", ")", ":", "self", ".", "_wait_until_no_error", "(", "timeout", ",", "self", ".", "_wait_for_elements_to_go_away", ",", "locators", ")" ]
Load all WordPress media from the given site .
def load_media(self, max_pages=150):
    """
    Sync WordPress media for this site into the local Media table.

    Pages through the /sites/<site_id>/media endpoint (100 items per page).
    Media not attached to a post (post_ID == 0) is skipped. Existing local
    records are updated in place; new ones are bulk-created.

    :param max_pages: upper bound on the number of pages to request
    :return: None
    """
    logger.info("loading media")

    # a requested purge wipes every local media row for the site
    if self.purge_first:
        logger.warning("purging ALL media from site %s", self.site_id)
        Media.objects.filter(site_id=self.site_id).delete()

    path = "sites/{}/media".format(self.site_id)
    params = {"number": 100}
    # NOTE(review): presumably constrains results to media after some date — confirm
    self.set_media_params_after(params)
    page = 1

    resp = self.get(path, params)
    if not resp.ok:
        logger.warning("Response NOT OK! status_code=%s\n%s", resp.status_code, resp.text)

    while resp.ok and resp.text and page < max_pages:
        logger.info(" - page: %d", page)

        api_medias = resp.json().get("media")
        if not api_medias:
            # empty page: nothing left to sync
            break

        to_create = []
        for api_media in api_medias:
            # skip media that isn't attached to a post (for now)
            if api_media["post_ID"] != 0:
                existing = Media.objects.filter(site_id=self.site_id, wp_id=api_media["ID"]).first()
                if existing:
                    self.update_existing_media(existing, api_media)
                else:
                    to_create.append(self.get_new_media(api_media))

        if to_create:
            Media.objects.bulk_create(to_create)

        page += 1
        params["page"] = page
        resp = self.get(path, params)
        if not resp.ok:
            logger.warning("Response NOT OK! status_code=%s\n%s", resp.status_code, resp.text)

    return
12,466
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L353-L408
[ "def", "mean", "(", "name", ",", "add", ",", "match", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", ",", "'result'", ":", "True", "}", "if", "name", "not", "in", "__reg__", ":", "__reg__", "[", "name", "]", "=", "{", "}", "__reg__", "[", "name", "]", "[", "'val'", "]", "=", "0", "__reg__", "[", "name", "]", "[", "'total'", "]", "=", "0", "__reg__", "[", "name", "]", "[", "'count'", "]", "=", "0", "for", "event", "in", "__events__", ":", "try", ":", "event_data", "=", "event", "[", "'data'", "]", "[", "'data'", "]", "except", "KeyError", ":", "event_data", "=", "event", "[", "'data'", "]", "if", "salt", ".", "utils", ".", "stringutils", ".", "expr_match", "(", "event", "[", "'tag'", "]", ",", "match", ")", ":", "if", "add", "in", "event_data", ":", "try", ":", "comp", "=", "int", "(", "event_data", ")", "except", "ValueError", ":", "continue", "__reg__", "[", "name", "]", "[", "'total'", "]", "+=", "comp", "__reg__", "[", "name", "]", "[", "'count'", "]", "+=", "1", "__reg__", "[", "name", "]", "[", "'val'", "]", "=", "__reg__", "[", "name", "]", "[", "'total'", "]", "/", "__reg__", "[", "name", "]", "[", "'count'", "]", "return", "ret" ]
Instantiate a new Media from api data .
def get_new_media(self, api_media):
    """
    Build (but do not save) a Media instance from API data.

    :param api_media: the API data for the media item
    :return: an unsaved Media object
    """
    fields = self.api_object_data("media", api_media)
    return Media(site_id=self.site_id, wp_id=api_media["ID"], **fields)
12,467
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L427-L436
[ "def", "_value_wrapper", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "int", ",", "float", ",", ")", ")", ":", "return", "'=%s'", "%", "value", "elif", "isinstance", "(", "value", ",", "(", "str", ",", "unicode", ")", ")", ":", "value", "=", "value", ".", "strip", "(", ")", "# LIKE", "if", "RE_LIKE", ".", "match", "(", "value", ")", ":", "return", "' LIKE %s'", "%", "repr", "(", "RE_LIKE", ".", "match", "(", "value", ")", ".", "group", "(", "'RE_LIKE'", ")", ")", "# REGEXP", "elif", "RE_REGEXP", ".", "match", "(", "value", ")", ":", "return", "' REGEXP %s'", "%", "repr", "(", "RE_REGEXP", ".", "search", "(", "value", ")", ".", "group", "(", "'RE_REGEXP'", ")", ")", "else", ":", "return", "'=%s'", "%", "repr", "(", "value", ")", "elif", "value", "is", "None", ":", "return", "' ISNULL'" ]
Get referential data from the local db into the self.ref_data_map dictionary. This allows for fast FK lookups when looping through posts.
def get_ref_data_map(self, bulk_mode=True):
    """
    Populate self.ref_data_map with local reference data keyed by WP ID.

    In bulk mode, preloads all authors, categories, tags, and media for the
    site into dictionaries so FK lookups while looping through posts are
    in-memory. In single-post mode the maps start empty and are filled
    lazily as each post is processed.

    :param bulk_mode: whether to preload all reference data from the db
    :return: None
    """
    if not bulk_mode:
        # single post mode: WP ref data is handled dynamically for the post
        self.ref_data_map = {
            "authors": {},
            "categories": {},
            "tags": {},
            "media": {},
        }
        return

    site = self.site_id
    self.ref_data_map = {
        "authors": {a.wp_id: a for a in Author.objects.filter(site_id=site)},
        "categories": {c.wp_id: c for c in Category.objects.filter(site_id=site)},
        "tags": {t.wp_id: t for t in Tag.objects.filter(site_id=site)},
        "media": {m.wp_id: m for m in Media.objects.filter(site_id=site)},
    }
12,468
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L438-L461
[ "def", "GetAdGroups", "(", "self", ",", "client_customer_id", ",", "campaign_id", ")", ":", "self", ".", "client", ".", "SetClientCustomerId", "(", "client_customer_id", ")", "selector", "=", "{", "'fields'", ":", "[", "'Id'", ",", "'Name'", ",", "'Status'", "]", ",", "'predicates'", ":", "[", "{", "'field'", ":", "'CampaignId'", ",", "'operator'", ":", "'EQUALS'", ",", "'values'", ":", "[", "campaign_id", "]", "}", ",", "{", "'field'", ":", "'Status'", ",", "'operator'", ":", "'NOT_EQUALS'", ",", "'values'", ":", "[", "'REMOVED'", "]", "}", "]", "}", "adgroups", "=", "self", ".", "client", ".", "GetService", "(", "'AdGroupService'", ")", ".", "get", "(", "selector", ")", "if", "int", "(", "adgroups", "[", "'totalNumEntries'", "]", ")", ">", "0", ":", "return", "adgroups", "[", "'entries'", "]", "else", ":", "return", "None" ]
Load all WordPress posts of a given post_type from a site .
def load_posts(self, post_type=None, max_pages=200, status=None):
    """
    Load WordPress posts of a given post_type from the site.

    The "type" param lets the same endpoint return pages, attachments,
    guest-authors, etc. — posts that aren't posts. Defaults to published
    posts of type "post". Fetching and per-page processing is delegated to
    process_posts_response.

    :param post_type: the WP type of post to fetch (defaults to "post")
    :param max_pages: upper bound on the number of pages to request
    :param status: the WP post status to fetch (defaults to "publish")
    :return: None
    """
    logger.info("loading posts with post_type=%s", post_type)

    # purge happens before defaulting post_type, so a None post_type purge
    # deletes rows whose post_type is None — matching how they were stored
    if self.purge_first:
        Post.objects.filter(site_id=self.site_id, post_type=post_type).delete()

    path = "sites/{}/posts".format(self.site_id)

    post_type = post_type or "post"
    status = status or "publish"

    params = {"number": self.batch_size, "type": post_type, "status": status}
    self.set_posts_param_modified_after(params, post_type, status)

    # first page; process_posts_response handles pagination from here
    response = self.get(path, params)
    if not response.ok:
        logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)

    self.process_posts_response(response, path, params, max_pages)
12,469
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L463-L498
[ "def", "_update_offset_file", "(", "self", ")", ":", "if", "self", ".", "on_update", ":", "self", ".", "on_update", "(", ")", "offset", "=", "self", ".", "_filehandle", "(", ")", ".", "tell", "(", ")", "inode", "=", "stat", "(", "self", ".", "filename", ")", ".", "st_ino", "fh", "=", "open", "(", "self", ".", "_offset_file", ",", "\"w\"", ")", "fh", ".", "write", "(", "\"%s\\n%s\\n\"", "%", "(", "inode", ",", "offset", ")", ")", "fh", ".", "close", "(", ")", "self", ".", "_since_update", "=", "0" ]
Set modified_after date to continue where we left off if appropriate
def set_posts_param_modified_after(self, params, post_type, status):
    """
    Set the modified_after request param so syncing resumes where it left off.

    Only applies on incremental runs (no purge, not a full sync, and no
    explicit modified_after already set): the most recently modified local
    post of the given type/status supplies the cutoff.

    :param params: the request params dict, mutated in place
    :param post_type: the WP type of post being fetched
    :param status: the WP post status being fetched ("any" means all statuses)
    :return: None
    """
    incremental = not (self.purge_first or self.full or self.modified_after)
    if incremental:
        qs = Post.objects.filter(post_type=post_type)
        if status != "any":
            qs = qs.filter(status=status)
        latest = qs.order_by("-modified").first()
        if latest:
            self.modified_after = latest.modified

    if self.modified_after:
        params["modified_after"] = self.modified_after.isoformat()
        logger.info("getting posts after: %s", params["modified_after"])
12,470
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L500-L519
[ "def", "crc", "(", "self", ")", ":", "# will make sure everything has been transferred", "# to datastore that needs to be before returning crc", "result", "=", "self", ".", "_data", ".", "fast_hash", "(", ")", "if", "hasattr", "(", "self", ".", "mesh", ",", "'crc'", ")", ":", "# bitwise xor combines hashes better than a sum", "result", "^=", "self", ".", "mesh", ".", "crc", "(", ")", "return", "result" ]
Load a single post from API data .
def load_wp_post(self, api_post, bulk_mode=True, post_categories=None, post_tags=None, post_media_attachments=None, posts=None):
    """
    Load a single post from API data.

    Resolves the post's author, categories, tags, and media attachments,
    then updates the local post if it exists or creates it otherwise. For
    real posts (type "post"), also removes local child attachments that
    were deleted on the WP side.

    :param api_post: the API data for the post
    :param bulk_mode: whether ref data comes from the preloaded map or the db
    :param post_categories: map of post ID -> list of Category, filled here
    :param post_tags: map of post ID -> list of Tag, filled here
    :param post_media_attachments: map of post ID -> list of Media, filled here
    :param posts: accumulator list for new posts (used in bulk creation)
    :return: None
    """
    # initialize reference containers when the caller didn't supply them
    post_categories = {} if post_categories is None else post_categories
    post_tags = {} if post_tags is None else post_tags
    post_media_attachments = {} if post_media_attachments is None else post_media_attachments
    posts = [] if posts is None else posts

    # resolve the author first; some posts carry no author ID
    author = None
    if api_post["author"].get("ID"):
        author = self.process_post_author(bulk_mode, api_post["author"])

    # many-to-many relations
    self.process_post_categories(bulk_mode, api_post, post_categories)
    self.process_post_tags(bulk_mode, api_post, post_tags)
    self.process_post_media_attachments(bulk_mode, api_post, post_media_attachments)

    # update in place when the post exists locally, else create it
    existing_post = Post.objects.filter(site_id=self.site_id, wp_id=api_post["ID"]).first()
    if existing_post:
        self.process_existing_post(existing_post, api_post, author, post_categories, post_tags, post_media_attachments)
    else:
        self.process_new_post(bulk_mode, api_post, posts, author, post_categories, post_tags, post_media_attachments)

    # for real posts (not attachments, pages, etc.), drop local child
    # attachments that were deleted on the WP side — "uploaded to the post"
    # media can be removed there, which would otherwise orphan rows here
    if api_post["type"] == "post":
        self.sync_deleted_attachments(api_post)
12,471
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L587-L633
[ "def", "fromtif", "(", "path", ",", "ext", "=", "'tif'", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ",", "nplanes", "=", "None", ",", "npartitions", "=", "None", ",", "labels", "=", "None", ",", "engine", "=", "None", ",", "credentials", "=", "None", ",", "discard_extra", "=", "False", ")", ":", "from", "tifffile", "import", "TiffFile", "if", "nplanes", "is", "not", "None", "and", "nplanes", "<=", "0", ":", "raise", "ValueError", "(", "'nplanes must be positive if passed, got %d'", "%", "nplanes", ")", "def", "getarray", "(", "idx_buffer_filename", ")", ":", "idx", ",", "buf", ",", "fname", "=", "idx_buffer_filename", "fbuf", "=", "BytesIO", "(", "buf", ")", "tfh", "=", "TiffFile", "(", "fbuf", ")", "ary", "=", "tfh", ".", "asarray", "(", ")", "pageCount", "=", "ary", ".", "shape", "[", "0", "]", "if", "nplanes", "is", "not", "None", ":", "extra", "=", "pageCount", "%", "nplanes", "if", "extra", ":", "if", "discard_extra", ":", "pageCount", "=", "pageCount", "-", "extra", "logging", ".", "getLogger", "(", "'thunder'", ")", ".", "warn", "(", "'Ignored %d pages in file %s'", "%", "(", "extra", ",", "fname", ")", ")", "else", ":", "raise", "ValueError", "(", "\"nplanes '%d' does not evenly divide '%d in file %s'\"", "%", "(", "nplanes", ",", "pageCount", ",", "fname", ")", ")", "values", "=", "[", "ary", "[", "i", ":", "(", "i", "+", "nplanes", ")", "]", "for", "i", "in", "range", "(", "0", ",", "pageCount", ",", "nplanes", ")", "]", "else", ":", "values", "=", "[", "ary", "]", "tfh", ".", "close", "(", ")", "if", "ary", ".", "ndim", "==", "3", ":", "values", "=", "[", "val", ".", "squeeze", "(", ")", "for", "val", "in", "values", "]", "nvals", "=", "len", "(", "values", ")", "keys", "=", "[", "(", "idx", "*", "nvals", "+", "timepoint", ",", ")", "for", "timepoint", "in", "range", "(", "nvals", ")", "]", "return", "zip", "(", "keys", ",", "values", ")", "recount", "=", "False", "if", "nplanes", "is", "None", "else", "True", 
"data", "=", "frompath", "(", "path", ",", "accessor", "=", "getarray", ",", "ext", "=", "ext", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "recursive", "=", "recursive", ",", "npartitions", "=", "npartitions", ",", "recount", "=", "recount", ",", "labels", "=", "labels", ",", "engine", "=", "engine", ",", "credentials", "=", "credentials", ")", "if", "engine", "is", "not", "None", "and", "npartitions", "is", "not", "None", "and", "data", ".", "npartitions", "(", ")", "<", "npartitions", ":", "data", "=", "data", ".", "repartition", "(", "npartitions", ")", "return", "data" ]
Create or update an Author related to a post .
def process_post_author(self, bulk_mode, api_author):
    """
    Create or update the Author related to a post.

    In bulk mode the preloaded ref data map is consulted first; an unknown
    author there (likely a Byline or guest author rather than a user) is
    created immediately. Outside bulk mode the db is queried directly.
    Either way the result is cached in the ref data map.

    :param bulk_mode: whether to consult the preloaded ref data map
    :param api_author: the API data for the author
    :return: the Author object, or None
    """
    if bulk_mode:
        author = self.ref_data_map["authors"].get(api_author["ID"])
        if author:
            self.update_existing_author(author, api_author)
        else:
            # not in the map (e.g. a guest author, not a user): create it now
            author = Author.objects.create(
                site_id=self.site_id,
                wp_id=api_author["ID"],
                **self.api_object_data("author", api_author)
            )
    else:
        # direct db lookup when not in bulk mode
        author, created = self.get_or_create_author(api_author)
        if author and not created:
            self.update_existing_author(author, api_author)

    # cache so we don't try to create this author again
    if author:
        self.ref_data_map["authors"][api_author["ID"]] = author

    return author
12,472
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L635-L664
[ "def", "Log", "(", "self", ",", "frame", ")", ":", "# Return error if log methods were not configured globally.", "if", "not", "self", ".", "_log_message", ":", "return", "{", "'isError'", ":", "True", ",", "'description'", ":", "{", "'format'", ":", "LOG_ACTION_NOT_SUPPORTED", "}", "}", "if", "self", ".", "_quota_recovery_start_time", ":", "ms_elapsed", "=", "(", "time", ".", "time", "(", ")", "-", "self", ".", "_quota_recovery_start_time", ")", "*", "1000", "if", "ms_elapsed", ">", "self", ".", "quota_recovery_ms", ":", "# We are out of the recovery period, clear the time and continue", "self", ".", "_quota_recovery_start_time", "=", "None", "else", ":", "# We are in the recovery period, exit", "return", "# Evaluate watched expressions.", "message", "=", "'LOGPOINT: '", "+", "_FormatMessage", "(", "self", ".", "_definition", ".", "get", "(", "'logMessageFormat'", ",", "''", ")", ",", "self", ".", "_EvaluateExpressions", "(", "frame", ")", ")", "line", "=", "self", ".", "_definition", "[", "'location'", "]", "[", "'line'", "]", "cdbg_logging_location", "=", "(", "NormalizePath", "(", "frame", ".", "f_code", ".", "co_filename", ")", ",", "line", ",", "_GetFrameCodeObjectName", "(", "frame", ")", ")", "if", "native", ".", "ApplyDynamicLogsQuota", "(", "len", "(", "message", ")", ")", ":", "self", ".", "_log_message", "(", "message", ")", "else", ":", "self", ".", "_quota_recovery_start_time", "=", "time", ".", "time", "(", ")", "self", ".", "_log_message", "(", "DYNAMIC_LOG_OUT_OF_QUOTA", ")", "del", "cdbg_logging_location", "return", "None" ]
Find or create an Author object given API data .
def get_or_create_author(self, api_author):
    """
    Find or create an Author row for the given API data.

    :param api_author: the API data for the author
    :return: (Author, created) tuple from get_or_create
    """
    defaults = self.api_object_data("author", api_author)
    return Author.objects.get_or_create(site_id=self.site_id, wp_id=api_author["ID"], defaults=defaults)
12,473
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L666-L675
[ "def", "remove", "(", "self", ")", ":", "for", "cgroup", "in", "self", ".", "paths", ":", "remove_cgroup", "(", "cgroup", ")", "del", "self", ".", "paths", "del", "self", ".", "per_subsystem" ]
Create or update Categories related to a post .
def process_post_categories(self, bulk_mode, api_post, post_categories):
    """
    Create or update the Categories related to a post.

    Fills post_categories[post ID] with the resolved Category objects,
    skipping any that could not be resolved.

    :param bulk_mode: whether to consult the preloaded ref data map
    :param api_post: the API data for the post
    :param post_categories: map of post ID -> list of Category, mutated here
    :return: None
    """
    resolved = (self.process_post_category(bulk_mode, c) for c in six.itervalues(api_post["categories"]))
    post_categories[api_post["ID"]] = [category for category in resolved if category]
12,474
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L677-L690
[ "def", "authenticate", "(", "self", ",", "request", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "if", "isinstance", "(", "username", ",", "basestring", ")", ":", "username", "=", "username", ".", "encode", "(", "'utf-8'", ")", "if", "isinstance", "(", "password", ",", "basestring", ")", ":", "password", "=", "password", ".", "encode", "(", "'utf-8'", ")", "server", "=", "self", ".", "_get_server_from_settings", "(", ")", "result", "=", "self", ".", "_radius_auth", "(", "server", ",", "username", ",", "password", ")", "if", "result", ":", "return", "self", ".", "get_django_user", "(", "username", ",", "password", ")", "return", "None" ]
Create or update a Category related to a post .
def process_post_category(self, bulk_mode, api_category):
    """
    Create or update a single Category related to a post.

    Checks the preloaded ref data map in bulk mode, then falls back to a
    db get_or_create (a previous run may already have synced it). The
    result is cached in the ref data map for later lookups.

    :param bulk_mode: whether to consult the preloaded ref data map
    :param api_category: the API data for the category
    :return: the Category object, or None
    """
    category = self.ref_data_map["categories"].get(api_category["ID"]) if bulk_mode else None

    if not category:
        # double check the db before giving up — may have synced previously
        category, created = Category.objects.get_or_create(
            site_id=self.site_id,
            wp_id=api_category["ID"],
            defaults=self.api_object_data("category", api_category),
        )
        if category and not created:
            self.update_existing_category(category, api_category)

    # cache so later lookups hit the map
    if category:
        self.ref_data_map["categories"][api_category["ID"]] = category

    return category
12,475
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L692-L719
[ "def", "authenticate", "(", "self", ",", "driver", ")", ":", "# 0 1 2 3", "events", "=", "[", "driver", ".", "username_re", ",", "driver", ".", "password_re", ",", "self", ".", "device", ".", "prompt_re", ",", "driver", ".", "rommon_re", ",", "# 4 5 6 7 8", "driver", ".", "unable_to_connect_re", ",", "driver", ".", "authentication_error_re", ",", "pexpect", ".", "TIMEOUT", ",", "pexpect", ".", "EOF", "]", "transitions", "=", "[", "(", "driver", ".", "username_re", ",", "[", "0", "]", ",", "1", ",", "partial", "(", "a_send_username", ",", "self", ".", "username", ")", ",", "10", ")", ",", "(", "driver", ".", "username_re", ",", "[", "1", "]", ",", "1", ",", "None", ",", "10", ")", ",", "(", "driver", ".", "password_re", ",", "[", "0", ",", "1", "]", ",", "2", ",", "partial", "(", "a_send_password", ",", "self", ".", "_acquire_password", "(", ")", ")", ",", "_C", "[", "'first_prompt_timeout'", "]", ")", ",", "(", "driver", ".", "username_re", ",", "[", "2", "]", ",", "-", "1", ",", "a_authentication_error", ",", "0", ")", ",", "(", "driver", ".", "password_re", ",", "[", "2", "]", ",", "-", "1", ",", "a_authentication_error", ",", "0", ")", ",", "(", "driver", ".", "authentication_error_re", ",", "[", "1", ",", "2", "]", ",", "-", "1", ",", "a_authentication_error", ",", "0", ")", ",", "(", "self", ".", "device", ".", "prompt_re", ",", "[", "0", ",", "1", ",", "2", "]", ",", "-", "1", ",", "None", ",", "0", ")", ",", "(", "driver", ".", "rommon_re", ",", "[", "0", "]", ",", "-", "1", ",", "partial", "(", "a_send", ",", "\"\\r\\n\"", ")", ",", "0", ")", ",", "(", "pexpect", ".", "TIMEOUT", ",", "[", "0", "]", ",", "1", ",", "partial", "(", "a_send", ",", "\"\\r\\n\"", ")", ",", "10", ")", ",", "(", "pexpect", ".", "TIMEOUT", ",", "[", "2", "]", ",", "-", "1", ",", "None", ",", "0", ")", ",", "(", "pexpect", ".", "TIMEOUT", ",", "[", "3", ",", "7", "]", ",", "-", "1", ",", "ConnectionTimeoutError", "(", "\"Connection Timeout\"", ",", "self", ".", "hostname", ")", 
",", "0", ")", ",", "(", "driver", ".", "unable_to_connect_re", ",", "[", "0", ",", "1", ",", "2", "]", ",", "-", "1", ",", "a_unable_to_connect", ",", "0", ")", ",", "]", "self", ".", "log", "(", "\"EXPECTED_PROMPT={}\"", ".", "format", "(", "pattern_to_str", "(", "self", ".", "device", ".", "prompt_re", ")", ")", ")", "fsm", "=", "FSM", "(", "\"CONSOLE-SERVER-AUTH\"", ",", "self", ".", "device", ",", "events", ",", "transitions", ",", "timeout", "=", "_C", "[", "'connect_timeout'", "]", ",", "init_pattern", "=", "self", ".", "last_pattern", ")", "return", "fsm", ".", "run", "(", ")" ]
Create or update Tags related to a post .
def process_post_tags(self, bulk_mode, api_post, post_tags):
    """
    Create or update the Tags related to a post.

    Fills post_tags[post ID] with the resolved Tag objects, skipping any
    that could not be resolved.

    :param bulk_mode: whether to consult the preloaded ref data map
    :param api_post: the API data for the post
    :param post_tags: map of post ID -> list of Tag, mutated here
    :return: None
    """
    resolved = (self.process_post_tag(bulk_mode, t) for t in six.itervalues(api_post["tags"]))
    post_tags[api_post["ID"]] = [tag for tag in resolved if tag]
12,476
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L721-L734
[ "def", "reading", "(", "self", ")", ":", "try", ":", "# testing proxy", "proxies", "=", "{", "}", "try", ":", "proxies", "[", "\"http_proxy\"", "]", "=", "os", ".", "environ", "[", "'http_proxy'", "]", "except", "KeyError", ":", "pass", "try", ":", "proxies", "[", "\"https_proxy\"", "]", "=", "os", ".", "environ", "[", "'https_proxy'", "]", "except", "KeyError", ":", "pass", "if", "len", "(", "proxies", ")", "!=", "0", ":", "proxy", "=", "urllib2", ".", "ProxyHandler", "(", "proxies", ")", "opener", "=", "urllib2", ".", "build_opener", "(", "proxy", ")", "urllib2", ".", "install_opener", "(", "opener", ")", "# end testing", "f", "=", "urllib2", ".", "urlopen", "(", "self", ".", "link", ")", "return", "f", ".", "read", "(", ")", "except", "(", "urllib2", ".", "URLError", ",", "ValueError", ")", ":", "print", "(", "\"\\n{0}Can't read the file '{1}'{2}\"", ".", "format", "(", "self", ".", "meta", ".", "color", "[", "\"RED\"", "]", ",", "self", ".", "link", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", ",", "self", ".", "meta", ".", "color", "[", "\"ENDC\"", "]", ")", ")", "return", "\" \"" ]
Create or update a Tag related to a post .
def process_post_tag(self, bulk_mode, api_tag):
    """
    Create or update a single Tag related to a post.

    Checks the preloaded ref data map in bulk mode, then falls back to a
    db get_or_create (a previous run may already have synced it). The
    result is cached in the ref data map for later lookups.

    :param bulk_mode: whether to consult the preloaded ref data map
    :param api_tag: the API data for the tag
    :return: the Tag object, or None
    """
    tag = self.ref_data_map["tags"].get(api_tag["ID"]) if bulk_mode else None

    if not tag:
        # double check the db before giving up — may have synced previously
        tag, created = Tag.objects.get_or_create(
            site_id=self.site_id,
            wp_id=api_tag["ID"],
            defaults=self.api_object_data("tag", api_tag),
        )
        if tag and not created:
            self.update_existing_tag(tag, api_tag)

    # cache so later lookups hit the map
    if tag:
        self.ref_data_map["tags"][api_tag["ID"]] = tag

    return tag
12,477
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L736-L762
[ "def", "_evaluate", "(", "self", ",", "message", ")", ":", "return", "eval", "(", "self", ".", "code", ",", "globals", "(", ")", ",", "{", "\"J\"", ":", "message", ",", "\"timedelta\"", ":", "timedelta", ",", "\"datetime\"", ":", "datetime", ",", "\"SKIP\"", ":", "self", ".", "_SKIP", "}", ")" ]
Create or update Media objects related to a post .
def process_post_media_attachments(self, bulk_mode, api_post, post_media_attachments):
    """
    Create or update the Media objects attached to a post.

    Fills post_media_attachments[post ID] with the resolved Media objects,
    skipping any that could not be resolved.

    :param bulk_mode: whether to consult the preloaded ref data map
    :param api_post: the API data for the post
    :param post_media_attachments: map of post ID -> list of Media, mutated here
    :return: None
    """
    resolved = (self.process_post_media_attachment(bulk_mode, a) for a in six.itervalues(api_post["attachments"]))
    post_media_attachments[api_post["ID"]] = [media for media in resolved if media]
12,478
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L764-L778
[ "def", "handle_simulation_end", "(", "self", ",", "data_portal", ")", ":", "log", ".", "info", "(", "'Simulated {} trading days\\n'", "'first open: {}\\n'", "'last close: {}'", ",", "self", ".", "_session_count", ",", "self", ".", "_trading_calendar", ".", "session_open", "(", "self", ".", "_first_session", ")", ",", "self", ".", "_trading_calendar", ".", "session_close", "(", "self", ".", "_last_session", ")", ",", ")", "packet", "=", "{", "}", "self", ".", "end_of_simulation", "(", "packet", ",", "self", ".", "_ledger", ",", "self", ".", "_trading_calendar", ",", "self", ".", "_sessions", ",", "data_portal", ",", "self", ".", "_benchmark_source", ",", ")", "return", "packet" ]
Create or update a Media attached to a post .
def process_post_media_attachment(self, bulk_mode, api_media_attachment):
    """
    Create or update a single Media item attached to a post.

    Checks the preloaded ref data map in bulk mode, then falls back to a
    db get_or_create (a previous run may already have synced it). The
    result is cached in the ref data map for later lookups.

    :param bulk_mode: whether to consult the preloaded ref data map
    :param api_media_attachment: the API data for the attachment
    :return: the Media object, or None
    """
    attachment = self.ref_data_map["media"].get(api_media_attachment["ID"]) if bulk_mode else None

    if not attachment:
        # double check the db before giving up — may have synced previously
        attachment, created = self.get_or_create_media(api_media_attachment)
        if attachment and not created:
            self.update_existing_media(attachment, api_media_attachment)

    # cache so later lookups hit the map
    if attachment:
        self.ref_data_map["media"][api_media_attachment["ID"]] = attachment

    return attachment
12,479
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L780-L805
[ "def", "handle_simulation_end", "(", "self", ",", "data_portal", ")", ":", "log", ".", "info", "(", "'Simulated {} trading days\\n'", "'first open: {}\\n'", "'last close: {}'", ",", "self", ".", "_session_count", ",", "self", ".", "_trading_calendar", ".", "session_open", "(", "self", ".", "_first_session", ")", ",", "self", ".", "_trading_calendar", ".", "session_close", "(", "self", ".", "_last_session", ")", ",", ")", "packet", "=", "{", "}", "self", ".", "end_of_simulation", "(", "packet", ",", "self", ".", "_ledger", ",", "self", ".", "_trading_calendar", ",", "self", ".", "_sessions", ",", "data_portal", ",", "self", ".", "_benchmark_source", ",", ")", "return", "packet" ]
Find or create a Media object given API data .
def get_or_create_media(self, api_media):
    """
    Find or create a Media row for the given API data.

    :param api_media: the API data for the media item
    :return: (Media, created) tuple from get_or_create
    """
    defaults = self.api_object_data("media", api_media)
    return Media.objects.get_or_create(site_id=self.site_id, wp_id=api_media["ID"], defaults=defaults)
12,480
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L807-L816
[ "def", "dump", "(", "self", ")", ":", "assert", "self", ".", "database", "is", "not", "None", "cmd", "=", "\"SELECT count from {} WHERE rowid={}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_INFO_TABLE", ",", "self", ".", "STATE_INFO_ROW", ")", ")", "ret", "=", "self", ".", "_fetchall", "(", ")", "assert", "len", "(", "ret", ")", "==", "1", "assert", "len", "(", "ret", "[", "0", "]", ")", "==", "1", "count", "=", "self", ".", "_from_sqlite", "(", "ret", "[", "0", "]", "[", "0", "]", ")", "+", "self", ".", "inserts", "if", "count", ">", "self", ".", "row_limit", ":", "msg", "=", "\"cleaning up state, this might take a while.\"", "logger", ".", "warning", "(", "msg", ")", "delete", "=", "count", "-", "self", ".", "row_limit", "delete", "+=", "int", "(", "self", ".", "row_limit", "*", "(", "self", ".", "row_cleanup_quota", "/", "100.0", ")", ")", "cmd", "=", "(", "\"DELETE FROM {} WHERE timestamp IN (\"", "\"SELECT timestamp FROM {} ORDER BY timestamp ASC LIMIT {});\"", ")", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_TABLE", ",", "self", ".", "STATE_TABLE", ",", "delete", ")", ")", "self", ".", "_vacuum", "(", ")", "cmd", "=", "\"SELECT COUNT(*) FROM {}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_TABLE", ")", ")", "ret", "=", "self", ".", "_fetchall", "(", ")", "assert", "len", "(", "ret", ")", "==", "1", "assert", "len", "(", "ret", "[", "0", "]", ")", "==", "1", "count", "=", "ret", "[", "0", "]", "[", "0", "]", "cmd", "=", "\"UPDATE {} SET count = {} WHERE rowid = {}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_INFO_TABLE", ",", "self", ".", "_to_sqlite", "(", "count", ")", ",", "self", ".", "STATE_INFO_ROW", ",", ")", ")", "self", ".", "_update_cache_directory_state", "(", ")", "self", ".", "database", ".", "commit", "(", ")", "self", ".", "cursor", ".", "close", "(", ")", "self", ".", "database", ".", "close", "(", ")", 
"self", ".", "database", "=", "None", "self", ".", "cursor", "=", "None", "self", ".", "inserts", "=", "0" ]
Sync attributes for a single post from WP API data .
def process_existing_post ( existing_post , api_post , author , post_categories , post_tags , post_media_attachments ) : # don't bother checking what's different, just update all fields existing_post . author = author existing_post . post_date = api_post [ "date" ] existing_post . modified = api_post [ "modified" ] existing_post . title = api_post [ "title" ] existing_post . url = api_post [ "URL" ] existing_post . short_url = api_post [ "short_URL" ] existing_post . content = api_post [ "content" ] existing_post . excerpt = api_post [ "excerpt" ] existing_post . slug = api_post [ "slug" ] existing_post . guid = api_post [ "guid" ] existing_post . status = api_post [ "status" ] existing_post . sticky = api_post [ "sticky" ] existing_post . password = api_post [ "password" ] existing_post . parent = api_post [ "parent" ] existing_post . post_type = api_post [ "type" ] existing_post . likes_enabled = api_post [ "likes_enabled" ] existing_post . sharing_enabled = api_post [ "sharing_enabled" ] existing_post . like_count = api_post [ "like_count" ] existing_post . global_ID = api_post [ "global_ID" ] existing_post . featured_image = api_post [ "featured_image" ] existing_post . format = api_post [ "format" ] existing_post . menu_order = api_post [ "menu_order" ] existing_post . metadata = api_post [ "metadata" ] existing_post . post_thumbnail = api_post [ "post_thumbnail" ] WPAPILoader . process_post_many_to_many_field ( existing_post , "categories" , post_categories ) WPAPILoader . process_post_many_to_many_field ( existing_post , "tags" , post_tags ) WPAPILoader . process_post_many_to_many_field ( existing_post , "attachments" , post_media_attachments ) existing_post . save ( )
12,481
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L819-L861
[ "def", "arcball_constrain_to_axis", "(", "point", ",", "axis", ")", ":", "v", "=", "np", ".", "array", "(", "point", ",", "dtype", "=", "np", ".", "float64", ",", "copy", "=", "True", ")", "a", "=", "np", ".", "array", "(", "axis", ",", "dtype", "=", "np", ".", "float64", ",", "copy", "=", "True", ")", "v", "-=", "a", "*", "np", ".", "dot", "(", "a", ",", "v", ")", "# on plane", "n", "=", "vector_norm", "(", "v", ")", "if", "n", ">", "_EPS", ":", "if", "v", "[", "2", "]", "<", "0.0", ":", "np", ".", "negative", "(", "v", ",", "v", ")", "v", "/=", "n", "return", "v", "if", "a", "[", "2", "]", "==", "1.0", ":", "return", "np", ".", "array", "(", "[", "1.0", ",", "0.0", ",", "0.0", "]", ")", "return", "unit_vector", "(", "[", "-", "a", "[", "1", "]", ",", "a", "[", "0", "]", ",", "0.0", "]", ")" ]
Sync data for a many - to - many field related to a post using set differences .
def process_post_many_to_many_field ( existing_post , field , related_objects ) : to_add = set ( related_objects . get ( existing_post . wp_id , set ( ) ) ) - set ( getattr ( existing_post , field ) . all ( ) ) to_remove = set ( getattr ( existing_post , field ) . all ( ) ) - set ( related_objects . get ( existing_post . wp_id , set ( ) ) ) if to_add : getattr ( existing_post , field ) . add ( * to_add ) if to_remove : getattr ( existing_post , field ) . remove ( * to_remove )
12,482
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L864-L879
[ "def", "_clean_args", "(", "sys_argv", ",", "args", ")", ":", "base", "=", "[", "x", "for", "x", "in", "sys_argv", "if", "x", ".", "startswith", "(", "\"-\"", ")", "or", "not", "args", ".", "datadir", "==", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "x", ")", ")", "]", "# Remove installer only options we don't pass on", "base", "=", "[", "x", "for", "x", "in", "base", "if", "x", "not", "in", "set", "(", "[", "\"--minimize-disk\"", "]", ")", "]", "if", "\"--nodata\"", "in", "base", ":", "base", ".", "remove", "(", "\"--nodata\"", ")", "else", ":", "base", ".", "append", "(", "\"--data\"", ")", "return", "base" ]
Actually do a db bulk creation of posts and link up the many - to - many fields
def bulk_create_posts ( self , posts , post_categories , post_tags , post_media_attachments ) : Post . objects . bulk_create ( posts ) # attach many-to-ones for post_wp_id , categories in six . iteritems ( post_categories ) : Post . objects . get ( site_id = self . site_id , wp_id = post_wp_id ) . categories . add ( * categories ) for post_id , tags in six . iteritems ( post_tags ) : Post . objects . get ( site_id = self . site_id , wp_id = post_id ) . tags . add ( * tags ) for post_id , attachments in six . iteritems ( post_media_attachments ) : Post . objects . get ( site_id = self . site_id , wp_id = post_id ) . attachments . add ( * attachments )
12,483
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L928-L948
[ "def", "configure_splitevaluator", "(", "self", ")", ":", "if", "self", ".", "classification", ":", "speval", "=", "javabridge", ".", "make_instance", "(", "\"weka/experiment/ClassifierSplitEvaluator\"", ",", "\"()V\"", ")", "else", ":", "speval", "=", "javabridge", ".", "make_instance", "(", "\"weka/experiment/RegressionSplitEvaluator\"", ",", "\"()V\"", ")", "classifier", "=", "javabridge", ".", "call", "(", "speval", ",", "\"getClassifier\"", ",", "\"()Lweka/classifiers/Classifier;\"", ")", "return", "speval", ",", "classifier" ]
Remove Posts with post_type = attachment that have been removed from the given Post on the WordPress side .
def sync_deleted_attachments ( self , api_post ) : existing_IDs = set ( Post . objects . filter ( site_id = self . site_id , post_type = "attachment" , parent__icontains = '"ID":{}' . format ( api_post [ "ID" ] ) ) . values_list ( "wp_id" , flat = True ) ) # can't delete what we don't have if existing_IDs : api_IDs = set ( ) # call the API again to the get the full list of attachment posts whose parent is this post's wp_id path = "sites/{}/posts/" . format ( self . site_id ) params = { "type" : "attachment" , "parent_id" : api_post [ "ID" ] , "fields" : "ID" , "number" : 100 } page = 1 response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) # loop around since there may be more than 100 attachments (example: really large slideshows) while response . ok and response . text and page < 10 : api_json = response . json ( ) api_attachments = api_json . get ( "posts" , [ ] ) # iteratively extend the set to include this page's IDs api_IDs |= set ( a [ "ID" ] for a in api_attachments ) # get next page page += 1 next_page_handle = api_json . get ( "meta" , { } ) . get ( "next_page" ) if next_page_handle : params [ "page_handle" ] = next_page_handle else : # no more pages left break response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) return # perform set difference to_remove = existing_IDs - api_IDs # purge the extras if to_remove : Post . objects . filter ( site_id = self . site_id , post_type = "attachment" , parent__icontains = '"ID":{}' . format ( api_post [ "ID" ] ) , wp_id__in = list ( to_remove ) ) . delete ( )
12,484
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L950-L1020
[ "async", "def", "configuration", "(", "self", ",", "*", ",", "dc", "=", "None", ",", "consistency", "=", "None", ")", ":", "response", "=", "await", "self", ".", "_api", ".", "get", "(", "\"/v1/operator/raft/configuration\"", ",", "params", "=", "{", "\"dc\"", ":", "dc", "}", ",", "consistency", "=", "consistency", ")", "return", "response", ".", "body" ]
Add lines to current line by traversing the grandparent object again and once we reach our current line counting every line that is prefixed with the parent directory .
def nextparent ( self , parent , depth ) : if depth > 1 : # can't jump to parent of root node! pdir = os . path . dirname ( self . name ) line = 0 for c , d in parent . traverse ( ) : if line > parent . curline and c . name . startswith ( pdir ) : parent . curline += 1 line += 1 else : # otherwise just skip to next directory line = - 1 # skip hidden parent node for c , d in parent . traverse ( ) : if line > parent . curline : parent . curline += 1 if os . path . isdir ( c . name ) and c . name in parent . children [ 0 : ] : break line += 1
12,485
https://github.com/tslight/treepick/blob/7adf838900f11e8845e17d8c79bb2b23617aec2c/treepick/actions.py#L70-L90
[ "def", "compile_column", "(", "name", ":", "str", ",", "data_type", ":", "str", ",", "nullable", ":", "bool", ")", "->", "str", ":", "null_str", "=", "'NULL'", "if", "nullable", "else", "'NOT NULL'", "return", "'{name} {data_type} {null},'", ".", "format", "(", "name", "=", "name", ",", "data_type", "=", "data_type", ",", "null", "=", "null_str", ")" ]
Subtract lines from our curline if the name of a node is prefixed with the parent directory when traversing the grandparent object .
def prevparent ( self , parent , depth ) : pdir = os . path . dirname ( self . name ) if depth > 1 : # can't jump to parent of root node! for c , d in parent . traverse ( ) : if c . name == self . name : break if c . name . startswith ( pdir ) : parent . curline -= 1 else : # otherwise jus skip to previous directory pdir = self . name # - 1 otherwise hidden parent node throws count off & our # self.curline doesn't change! line = - 1 for c , d in parent . traverse ( ) : if c . name == self . name : break if os . path . isdir ( c . name ) and c . name in parent . children [ 0 : ] : parent . curline = line line += 1 return pdir
12,486
https://github.com/tslight/treepick/blob/7adf838900f11e8845e17d8c79bb2b23617aec2c/treepick/actions.py#L92-L115
[ "def", "check_production_parameters_exist", "(", "self", ")", ":", "for", "k", ",", "v", "in", "self", ".", "modelInstance", ".", "parameter_sets", ".", "items", "(", ")", ":", "for", "p_id", "in", "self", ".", "modelInstance", ".", "production_params", ".", "keys", "(", ")", ":", "if", "v", ".", "get", "(", "p_id", ")", ":", "#print('{} already exists'.format(p_id))", "pass", "else", ":", "#print('No production parameter called {} - setting it to 1'.format(p_id))", "v", "[", "p_id", "]", "=", "1.0", "for", "p_id", "in", "self", ".", "modelInstance", ".", "allocation_params", ".", "keys", "(", ")", ":", "if", "v", ".", "get", "(", "p_id", ")", ":", "#print('{} already exists'.format(p_id))", "pass", "else", ":", "#print('No production parameter called {} - setting it to 1'.format(p_id))", "v", "[", "p_id", "]", "=", "1.0" ]
Store and fetch a GitHub access token
def token ( config , token ) : if not token : info_out ( "To generate a personal API token, go to:\n\n\t" "https://github.com/settings/tokens\n\n" "To read more about it, go to:\n\n\t" "https://help.github.com/articles/creating-an-access" "-token-for-command-line-use/\n\n" 'Remember to enable "repo" in the scopes.' ) token = getpass . getpass ( "GitHub API Token: " ) . strip ( ) url = urllib . parse . urljoin ( config . github_url , "/user" ) assert url . startswith ( "https://" ) , url response = requests . get ( url , headers = { "Authorization" : "token {}" . format ( token ) } ) if response . status_code == 200 : update ( config . configfile , { "GITHUB" : { "github_url" : config . github_url , "token" : token , "login" : response . json ( ) [ "login" ] , } } , ) name = response . json ( ) [ "name" ] or response . json ( ) [ "login" ] success_out ( "Hi! {}" . format ( name ) ) else : error_out ( "Failed - {} ({})" . format ( response . status_code , response . content ) )
12,487
https://github.com/peterbe/gg/blob/2aace5bdb4a9b1cb65bea717784edf54c63b7bad/gg/builtins/github.py#L32-L61
[ "def", "find_mismatch", "(", "self", ",", "other", ",", "indent", "=", "''", ")", ":", "if", "self", "!=", "other", ":", "mismatch", "=", "\"\\n{}{}\"", ".", "format", "(", "indent", ",", "type", "(", "self", ")", ".", "__name__", ")", "else", ":", "mismatch", "=", "''", "sub_indent", "=", "indent", "+", "' '", "if", "len", "(", "list", "(", "self", ".", "filesets", ")", ")", "!=", "len", "(", "list", "(", "other", ".", "filesets", ")", ")", ":", "mismatch", "+=", "(", "'\\n{indent}mismatching summary fileset lengths '", "'(self={} vs other={}): '", "'\\n{indent} self={}\\n{indent} other={}'", ".", "format", "(", "len", "(", "list", "(", "self", ".", "filesets", ")", ")", ",", "len", "(", "list", "(", "other", ".", "filesets", ")", ")", ",", "list", "(", "self", ".", "filesets", ")", ",", "list", "(", "other", ".", "filesets", ")", ",", "indent", "=", "sub_indent", ")", ")", "else", ":", "for", "s", ",", "o", "in", "zip", "(", "self", ".", "filesets", ",", "other", ".", "filesets", ")", ":", "mismatch", "+=", "s", ".", "find_mismatch", "(", "o", ",", "indent", "=", "sub_indent", ")", "if", "len", "(", "list", "(", "self", ".", "fields", ")", ")", "!=", "len", "(", "list", "(", "other", ".", "fields", ")", ")", ":", "mismatch", "+=", "(", "'\\n{indent}mismatching summary field lengths '", "'(self={} vs other={}): '", "'\\n{indent} self={}\\n{indent} other={}'", ".", "format", "(", "len", "(", "list", "(", "self", ".", "fields", ")", ")", ",", "len", "(", "list", "(", "other", ".", "fields", ")", ")", ",", "list", "(", "self", ".", "fields", ")", ",", "list", "(", "other", ".", "fields", ")", ",", "indent", "=", "sub_indent", ")", ")", "else", ":", "for", "s", ",", "o", "in", "zip", "(", "self", ".", "fields", ",", "other", ".", "fields", ")", ":", "mismatch", "+=", "s", ".", "find_mismatch", "(", "o", ",", "indent", "=", "sub_indent", ")", "return", "mismatch" ]
Acturally logging response is not a server s responsibility you should use http tools like Chrome Developer Tools to analyse the response .
def log_response ( handler ) : content_type = handler . _headers . get ( 'Content-Type' , None ) headers_str = handler . _generate_headers ( ) block = 'Response Infomations:\n' + headers_str . strip ( ) if content_type and ( 'text' in content_type or 'json' in content_type ) : limit = 0 if 'LOG_RESPONSE_LINE_LIMIT' in settings : limit = settings [ 'LOG_RESPONSE_LINE_LIMIT' ] def cut ( s ) : if limit and len ( s ) > limit : return [ s [ : limit ] ] + cut ( s [ limit : ] ) else : return [ s ] body = '' . join ( handler . _write_buffer ) lines = [ ] for i in body . split ( '\n' ) : lines += [ '| ' + j for j in cut ( i ) ] block += '\nBody:\n' + '\n' . join ( lines ) app_log . info ( block )
12,488
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L25-L53
[ "def", "unfold_file", "(", "self", ",", "path", ")", ":", "yaml_config", "=", "self", ".", "file_index", ".", "unfold_yaml", "(", "path", ")", "self", ".", "unfold_config", "(", "path", ",", "yaml_config", ")" ]
Logging request is opposite to response sometime its necessary feel free to enable it .
def log_request ( handler ) : block = 'Request Infomations:\n' + _format_headers_log ( handler . request . headers ) if handler . request . arguments : block += '+----Arguments----+\n' for k , v in handler . request . arguments . items ( ) : block += '| {0:<15} | {1:<15} \n' . format ( repr ( k ) , repr ( v ) ) app_log . info ( block )
12,489
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L56-L68
[ "def", "_read_csv_with_offset_pandas_on_ray", "(", "fname", ",", "num_splits", ",", "start", ",", "end", ",", "kwargs", ",", "header", ")", ":", "# pragma: no cover", "index_col", "=", "kwargs", ".", "get", "(", "\"index_col\"", ",", "None", ")", "bio", "=", "file_open", "(", "fname", ",", "\"rb\"", ")", "bio", ".", "seek", "(", "start", ")", "to_read", "=", "header", "+", "bio", ".", "read", "(", "end", "-", "start", ")", "bio", ".", "close", "(", ")", "pandas_df", "=", "pandas", ".", "read_csv", "(", "BytesIO", "(", "to_read", ")", ",", "*", "*", "kwargs", ")", "pandas_df", ".", "columns", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "pandas_df", ".", "columns", ")", ")", "if", "index_col", "is", "not", "None", ":", "index", "=", "pandas_df", ".", "index", "# Partitions must have RangeIndex", "pandas_df", ".", "index", "=", "pandas", ".", "RangeIndex", "(", "0", ",", "len", "(", "pandas_df", ")", ")", "else", ":", "# We will use the lengths to build the index if we are not given an", "# `index_col`.", "index", "=", "len", "(", "pandas_df", ")", "return", "_split_result_for_readers", "(", "1", ",", "num_splits", ",", "pandas_df", ")", "+", "[", "index", "]" ]
This method is a copy of tornado . web . RequestHandler . _handle_request_exception
def _exception_default_handler ( self , e ) : if isinstance ( e , HTTPError ) : if e . log_message : format = "%d %s: " + e . log_message args = [ e . status_code , self . _request_summary ( ) ] + list ( e . args ) app_log . warning ( format , * args ) if e . status_code not in httplib . responses : app_log . error ( "Bad HTTP status code: %d" , e . status_code ) self . send_error ( 500 , exc_info = sys . exc_info ( ) ) else : self . send_error ( e . status_code , exc_info = sys . exc_info ( ) ) else : app_log . error ( "Uncaught exception %s\n%r" , self . _request_summary ( ) , self . request , exc_info = True ) self . send_error ( 500 , exc_info = sys . exc_info ( ) )
12,490
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L94-L110
[ "def", "set_acls", "(", "path", ",", "acls", ",", "version", "=", "-", "1", ",", "profile", "=", "None", ",", "hosts", "=", "None", ",", "scheme", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "default_acl", "=", "None", ")", ":", "conn", "=", "_get_zk_conn", "(", "profile", "=", "profile", ",", "hosts", "=", "hosts", ",", "scheme", "=", "scheme", ",", "username", "=", "username", ",", "password", "=", "password", ",", "default_acl", "=", "default_acl", ")", "if", "acls", "is", "None", ":", "acls", "=", "[", "]", "acls", "=", "[", "make_digest_acl", "(", "*", "*", "acl", ")", "for", "acl", "in", "acls", "]", "conn", "=", "_get_zk_conn", "(", "profile", "=", "profile", ",", "hosts", "=", "hosts", ",", "scheme", "=", "scheme", ",", "username", "=", "username", ",", "password", "=", "password", ",", "default_acl", "=", "default_acl", ")", "return", "conn", ".", "set_acls", "(", "path", ",", "acls", ",", "version", ")" ]
This method handle HTTPError exceptions the same as how tornado does leave other exceptions to be handled by user defined handler function maped in class attribute EXCEPTION_HANDLERS
def _handle_request_exception ( self , e ) : handle_func = self . _exception_default_handler if self . EXCEPTION_HANDLERS : for excs , func_name in self . EXCEPTION_HANDLERS . items ( ) : if isinstance ( e , excs ) : handle_func = getattr ( self , func_name ) break handle_func ( e ) if not self . _finished : self . finish ( )
12,491
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L112-L139
[ "def", "get_key_for_schema_and_document_string", "(", "self", ",", "schema", ",", "request_string", ")", ":", "# type: (GraphQLSchema, str) -> int", "if", "self", ".", "use_consistent_hash", ":", "schema_id", "=", "get_unique_schema_id", "(", "schema", ")", "document_id", "=", "get_unique_document_id", "(", "request_string", ")", "return", "hash", "(", "(", "schema_id", ",", "document_id", ")", ")", "return", "hash", "(", "(", "schema", ",", "request_string", ")", ")" ]
Before RequestHandler . flush was called we got the final _write_buffer .
def flush ( self , * args , * * kwgs ) : if settings [ 'LOG_RESPONSE' ] and not self . _status_code == 500 : log_response ( self ) super ( BaseHandler , self ) . flush ( * args , * * kwgs )
12,492
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L166-L175
[ "def", "normalize", "(", "template_dict", ")", ":", "resources", "=", "template_dict", ".", "get", "(", "RESOURCES_KEY", ",", "{", "}", ")", "for", "logical_id", ",", "resource", "in", "resources", ".", "items", "(", ")", ":", "resource_metadata", "=", "resource", ".", "get", "(", "METADATA_KEY", ",", "{", "}", ")", "asset_path", "=", "resource_metadata", ".", "get", "(", "ASSET_PATH_METADATA_KEY", ")", "asset_property", "=", "resource_metadata", ".", "get", "(", "ASSET_PROPERTY_METADATA_KEY", ")", "ResourceMetadataNormalizer", ".", "_replace_property", "(", "asset_property", ",", "asset_path", ",", "resource", ",", "logical_id", ")" ]
A convenient method that binds chunk code headers together
def write_json ( self , chunk , code = None , headers = None ) : assert chunk is not None , 'None cound not be written in write_json' self . set_header ( "Content-Type" , "application/json; charset=UTF-8" ) if isinstance ( chunk , dict ) or isinstance ( chunk , list ) : chunk = self . json_encode ( chunk ) # convert chunk to utf8 before `RequestHandler.write()` # so that if any error occurs, we can catch and log it try : chunk = utf8 ( chunk ) except Exception : app_log . error ( 'chunk encoding error, repr: %s' % repr ( chunk ) ) raise_exc_info ( sys . exc_info ( ) ) self . write ( chunk ) if code : self . set_status ( code ) if headers : for k , v in headers . items ( ) : self . set_header ( k , v )
12,493
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L177-L202
[ "def", "list_tables", "(", ")", ":", "tables", "=", "[", "]", "try", ":", "table_list", "=", "DYNAMODB_CONNECTION", ".", "list_tables", "(", ")", "while", "True", ":", "for", "table_name", "in", "table_list", "[", "u'TableNames'", "]", ":", "tables", ".", "append", "(", "get_table", "(", "table_name", ")", ")", "if", "u'LastEvaluatedTableName'", "in", "table_list", ":", "table_list", "=", "DYNAMODB_CONNECTION", ".", "list_tables", "(", "table_list", "[", "u'LastEvaluatedTableName'", "]", ")", "else", ":", "break", "except", "DynamoDBResponseError", "as", "error", ":", "dynamodb_error", "=", "error", ".", "body", "[", "'__type'", "]", ".", "rsplit", "(", "'#'", ",", "1", ")", "[", "1", "]", "if", "dynamodb_error", "==", "'ResourceNotFoundException'", ":", "logger", ".", "error", "(", "'No tables found'", ")", "elif", "dynamodb_error", "==", "'AccessDeniedException'", ":", "logger", ".", "debug", "(", "'Your AWS API keys lack access to listing tables. '", "'That is an issue if you are trying to use regular '", "'expressions in your table configuration.'", ")", "elif", "dynamodb_error", "==", "'UnrecognizedClientException'", ":", "logger", ".", "error", "(", "'Invalid security token. Are your AWS API keys correct?'", ")", "else", ":", "logger", ".", "error", "(", "(", "'Unhandled exception: {0}: {1}. '", "'Please file a bug report at '", "'https://github.com/sebdah/dynamic-dynamodb/issues'", ")", ".", "format", "(", "dynamodb_error", ",", "error", ".", "body", "[", "'message'", "]", ")", ")", "except", "JSONResponseError", "as", "error", ":", "logger", ".", "error", "(", "'Communication error: {0}'", ".", "format", "(", "error", ")", ")", "sys", ".", "exit", "(", "1", ")", "return", "tables" ]
Copy from tornado . web . StaticFileHandler
def write_file ( self , file_path , mime_type = None ) : if not os . path . exists ( file_path ) : raise HTTPError ( 404 ) if not os . path . isfile ( file_path ) : raise HTTPError ( 403 , "%s is not a file" , file_path ) stat_result = os . stat ( file_path ) modified = datetime . datetime . fromtimestamp ( stat_result [ stat . ST_MTIME ] ) self . set_header ( "Last-Modified" , modified ) if not mime_type : mime_type , _encoding = mimetypes . guess_type ( file_path ) if mime_type : self . set_header ( "Content-Type" , mime_type ) # Check the If-Modified-Since, and don't send the result if the # content has not been modified ims_value = self . request . headers . get ( "If-Modified-Since" ) if ims_value is not None : date_tuple = email . utils . parsedate ( ims_value ) if_since = datetime . datetime . fromtimestamp ( time . mktime ( date_tuple ) ) if if_since >= modified : self . set_status ( 304 ) return with open ( file_path , "rb" ) as file : data = file . read ( ) hasher = hashlib . sha1 ( ) hasher . update ( data ) self . set_header ( "Etag" , '"%s"' % hasher . hexdigest ( ) ) self . write ( data )
12,494
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L204-L237
[ "def", "compare", "(", "dicts", ")", ":", "common_members", "=", "{", "}", "common_keys", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "&", "y", ",", "map", "(", "dict", ".", "keys", ",", "dicts", ")", ")", "for", "k", "in", "common_keys", ":", "common_members", "[", "k", "]", "=", "list", "(", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "&", "y", ",", "[", "set", "(", "d", "[", "k", "]", ")", "for", "d", "in", "dicts", "]", ")", ")", "return", "common_members" ]
Behaves like a middleware between raw request and handling process
def prepare ( self ) : if settings [ 'LOG_REQUEST' ] : log_request ( self ) for i in self . PREPARES : getattr ( self , 'prepare_' + i ) ( ) if self . _finished : return
12,495
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L250-L265
[ "def", "find_covalent_bonds", "(", "ampal", ",", "max_range", "=", "2.2", ",", "threshold", "=", "1.1", ",", "tag", "=", "True", ")", ":", "sectors", "=", "gen_sectors", "(", "ampal", ".", "get_atoms", "(", ")", ",", "max_range", "*", "1.1", ")", "bonds", "=", "[", "]", "for", "sector", "in", "sectors", ".", "values", "(", ")", ":", "atoms", "=", "itertools", ".", "combinations", "(", "sector", ",", "2", ")", "bonds", ".", "extend", "(", "covalent_bonds", "(", "atoms", ",", "threshold", "=", "threshold", ")", ")", "bond_set", "=", "list", "(", "set", "(", "bonds", ")", ")", "if", "tag", ":", "for", "bond", "in", "bond_set", ":", "a", ",", "b", "=", "bond", ".", "a", ",", "bond", ".", "b", "if", "'covalent_bonds'", "not", "in", "a", ".", "tags", ":", "a", ".", "tags", "[", "'covalent_bonds'", "]", "=", "[", "b", "]", "else", ":", "a", ".", "tags", "[", "'covalent_bonds'", "]", ".", "append", "(", "b", ")", "if", "'covalent_bonds'", "not", "in", "b", ".", "tags", ":", "b", ".", "tags", "[", "'covalent_bonds'", "]", "=", "[", "a", "]", "else", ":", "b", ".", "tags", "[", "'covalent_bonds'", "]", ".", "append", "(", "a", ")", "return", "bond_set" ]
Reads a csv and returns a List of Dicts with keys given by header row .
def csv_to_dicts ( file , header = None ) : with open ( file ) as csvfile : return [ row for row in csv . DictReader ( csvfile , fieldnames = header ) ]
12,496
https://github.com/testedminds/sand/blob/234f0eedb0742920cdf26da9bc84bf3f863a2f02/sand/csv.py#L23-L26
[ "def", "_undedicate_device", "(", "self", ",", "userid", ",", "vaddr", ")", ":", "action", "=", "'undedicate'", "rd", "=", "(", "'changevm %(uid)s %(act)s %(va)s'", "%", "{", "'uid'", ":", "userid", ",", "'act'", ":", "action", ",", "'va'", ":", "vaddr", "}", ")", "action", "=", "\"undedicate device from userid '%s'\"", "%", "userid", "with", "zvmutils", ".", "log_and_reraise_smt_request_failed", "(", "action", ")", ":", "self", ".", "_request", "(", "rd", ")" ]
A glorious command line tool to make your life with git GitHub and Bugzilla much easier .
def cli ( config , configfile , verbose ) : config . verbose = verbose config . configfile = configfile if not os . path . isfile ( configfile ) : state . write ( configfile , { } )
12,497
https://github.com/peterbe/gg/blob/2aace5bdb4a9b1cb65bea717784edf54c63b7bad/gg/main.py#L36-L42
[ "def", "_getNearestMappingIndexList", "(", "fromValList", ",", "toValList", ")", ":", "indexList", "=", "[", "]", "for", "fromTimestamp", "in", "fromValList", ":", "smallestDiff", "=", "_getSmallestDifference", "(", "toValList", ",", "fromTimestamp", ")", "i", "=", "toValList", ".", "index", "(", "smallestDiff", ")", "indexList", ".", "append", "(", "i", ")", "return", "indexList" ]
parses and cleans up possible uri inputs return instance of rdflib . term . URIRef
def parse_uri ( self , uri = None ) : # no uri provided, assume root if not uri : return rdflib . term . URIRef ( self . root ) # string uri provided elif type ( uri ) == str : # assume "short" uri, expand with repo root if type ( uri ) == str and not uri . startswith ( 'http' ) : return rdflib . term . URIRef ( "%s%s" % ( self . root , uri ) ) # else, assume full uri else : return rdflib . term . URIRef ( uri ) # already rdflib.term.URIRef elif type ( uri ) == rdflib . term . URIRef : return uri # unknown input else : raise TypeError ( 'invalid URI input' )
12,498
https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L105-L138
[ "def", "benchmark_setup", "(", "self", ")", ":", "def", "f", "(", ")", ":", "self", ".", "_setup", "(", ")", "self", ".", "mod_ext", ".", "synchronize", "(", "*", "*", "self", ".", "ext_kwargs", ")", "f", "(", ")", "# Ignore first", "self", ".", "setup_stat", "=", "self", ".", "_calc_benchmark_stat", "(", "f", ")" ]
Convenience method for creating a new resource
def create_resource ( self , resource_type = None , uri = None ) : if resource_type in [ NonRDFSource , Binary , BasicContainer , DirectContainer , IndirectContainer ] : return resource_type ( self , uri ) else : raise TypeError ( "expecting Resource type, such as BasicContainer or NonRDFSource" )
12,499
https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L141-L159
[ "def", "_get_keycodes", "(", ")", ":", "try", ":", "return", "_key_cache", ".", "pop", "(", ")", "except", "IndexError", ":", "pass", "result", "=", "[", "]", "terminators", "=", "'ABCDFHPQRS~'", "with", "TerminalContext", "(", ")", ":", "code", "=", "get_ord", "(", ")", "result", ".", "append", "(", "code", ")", "if", "code", "==", "27", ":", "with", "TimerContext", "(", "0.1", ")", "as", "timer", ":", "code", "=", "get_ord", "(", ")", "if", "not", "timer", ".", "timed_out", ":", "result", ".", "append", "(", "code", ")", "result", ".", "append", "(", "get_ord", "(", ")", ")", "if", "64", "<", "result", "[", "-", "1", "]", "<", "69", ":", "pass", "elif", "result", "[", "1", "]", "==", "91", ":", "while", "True", ":", "code", "=", "get_ord", "(", ")", "result", ".", "append", "(", "code", ")", "if", "chr", "(", "code", ")", "in", "terminators", ":", "break", "return", "tuple", "(", "result", ")" ]