query
stringlengths 5
1.23k
| positive
stringlengths 53
15.2k
| id_
int64 0
252k
| task_name
stringlengths 87
242
| negative
listlengths 20
553
|
|---|---|---|---|---|
Return the run number and the file name .
|
def runstring ( self ) : cfile = self . template % self . last self . last += 1 return cfile
| 3,700
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/instrument/simulation/factory.py#L21-L25
|
[
"def",
"_intersection",
"(",
"self",
",",
"keys",
",",
"rows",
")",
":",
"# If there are no other keys with start and end date (i.e. nothing to merge) return immediately.",
"if",
"not",
"keys",
":",
"return",
"rows",
"ret",
"=",
"list",
"(",
")",
"for",
"row",
"in",
"rows",
":",
"start_date",
"=",
"row",
"[",
"self",
".",
"_key_start_date",
"]",
"end_date",
"=",
"row",
"[",
"self",
".",
"_key_end_date",
"]",
"for",
"key_start_date",
",",
"key_end_date",
"in",
"keys",
":",
"start_date",
",",
"end_date",
"=",
"Type2JoinHelper",
".",
"_intersect",
"(",
"start_date",
",",
"end_date",
",",
"row",
"[",
"key_start_date",
"]",
",",
"row",
"[",
"key_end_date",
"]",
")",
"if",
"not",
"start_date",
":",
"break",
"if",
"key_start_date",
"not",
"in",
"[",
"self",
".",
"_key_start_date",
",",
"self",
".",
"_key_end_date",
"]",
":",
"del",
"row",
"[",
"key_start_date",
"]",
"if",
"key_end_date",
"not",
"in",
"[",
"self",
".",
"_key_start_date",
",",
"self",
".",
"_key_end_date",
"]",
":",
"del",
"row",
"[",
"key_end_date",
"]",
"if",
"start_date",
":",
"row",
"[",
"self",
".",
"_key_start_date",
"]",
"=",
"start_date",
"row",
"[",
"self",
".",
"_key_end_date",
"]",
"=",
"end_date",
"ret",
".",
"append",
"(",
"row",
")",
"return",
"ret"
] |
Override instrument configuration if configuration is not None
|
def obsres_from_oblock_id ( self , obsid , configuration = None ) : este = self . ob_table [ obsid ] obsres = obsres_from_dict ( este ) _logger . debug ( "obsres_from_oblock_id id='%s', mode='%s' START" , obsid , obsres . mode ) try : this_drp = self . drps . query_by_name ( obsres . instrument ) except KeyError : raise ValueError ( 'no DRP for instrument {}' . format ( obsres . instrument ) ) # Reserved names if obsres . mode in self . _RESERVED_MODE_NAMES : selected_mode = None # null mode else : selected_mode = this_drp . modes [ obsres . mode ] if selected_mode : obsres = selected_mode . build_ob ( obsres , self ) obsres = selected_mode . tag_ob ( obsres ) if configuration : # override instrument configuration # obsres.configuration = self.search_instrument_configuration( # obsres.instrument, # configuration #) pass else : # Insert Instrument configuration pass # obsres.configuration = this_drp.configuration_selector(obsres) key , date_obs , keyname = this_drp . select_profile ( obsres ) obsres . configuration = self . assembly_instrument ( key , date_obs , keyname ) obsres . profile = obsres . configuration _logger . debug ( 'obsres_from_oblock_id %s END' , obsid ) return obsres
| 3,701
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/dal/dictdal.py#L141-L179
|
[
"def",
"seed",
"(",
"vault_client",
",",
"opt",
")",
":",
"if",
"opt",
".",
"thaw_from",
":",
"opt",
".",
"secrets",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"'aomi-thaw'",
")",
"auto_thaw",
"(",
"vault_client",
",",
"opt",
")",
"Context",
".",
"load",
"(",
"get_secretfile",
"(",
"opt",
")",
",",
"opt",
")",
".",
"fetch",
"(",
"vault_client",
")",
".",
"sync",
"(",
"vault_client",
",",
"opt",
")",
"if",
"opt",
".",
"thaw_from",
":",
"rmtree",
"(",
"opt",
".",
"secrets",
")"
] |
Apply function to nodes
|
def map_tree ( visitor , tree ) : newn = [ map_tree ( visitor , node ) for node in tree . nodes ] return visitor ( tree , newn )
| 3,702
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/tagexpr.py#L44-L47
|
[
"def",
"main",
"(",
")",
":",
"fmt",
"=",
"'svg'",
"title",
"=",
"\"\"",
"if",
"'-h'",
"in",
"sys",
".",
"argv",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"'-f'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-f'",
")",
"file",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"X",
"=",
"numpy",
".",
"loadtxt",
"(",
"file",
")",
"file",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"2",
"]",
"X2",
"=",
"numpy",
".",
"loadtxt",
"(",
"file",
")",
"# else:",
"# X=numpy.loadtxt(sys.stdin,dtype=numpy.float)",
"else",
":",
"print",
"(",
"'-f option required'",
")",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"'-fmt'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-fmt'",
")",
"fmt",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"if",
"'-t'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-t'",
")",
"title",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"CDF",
"=",
"{",
"'X'",
":",
"1",
"}",
"pmagplotlib",
".",
"plot_init",
"(",
"CDF",
"[",
"'X'",
"]",
",",
"5",
",",
"5",
")",
"pmagplotlib",
".",
"plot_cdf",
"(",
"CDF",
"[",
"'X'",
"]",
",",
"X",
",",
"''",
",",
"'r'",
",",
"''",
")",
"pmagplotlib",
".",
"plot_cdf",
"(",
"CDF",
"[",
"'X'",
"]",
",",
"X2",
",",
"title",
",",
"'b'",
",",
"''",
")",
"D",
",",
"p",
"=",
"scipy",
".",
"stats",
".",
"ks_2samp",
"(",
"X",
",",
"X2",
")",
"if",
"p",
">=",
".05",
":",
"print",
"(",
"D",
",",
"p",
",",
"' not rejected at 95%'",
")",
"else",
":",
"print",
"(",
"D",
",",
"p",
",",
"' rejected at 95%'",
")",
"pmagplotlib",
".",
"draw_figs",
"(",
"CDF",
")",
"ans",
"=",
"input",
"(",
"'S[a]ve plot, <Return> to quit '",
")",
"if",
"ans",
"==",
"'a'",
":",
"files",
"=",
"{",
"'X'",
":",
"'CDF_.'",
"+",
"fmt",
"}",
"pmagplotlib",
".",
"save_plots",
"(",
"CDF",
",",
"files",
")"
] |
Return parts of the tree that fulfill condition
|
def filter_tree ( condition , tree ) : if condition ( tree ) : for node in tree . nodes : # this works in python > 3.3 # yield from filter_tree(condition, node) for n in filter_tree ( condition , node ) : yield n yield tree
| 3,703
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/tagexpr.py#L50-L58
|
[
"def",
"load_ragged_time_series",
"(",
"filename",
",",
"dtype",
"=",
"float",
",",
"delimiter",
"=",
"r'\\s+'",
",",
"header",
"=",
"False",
")",
":",
"# Initialize empty lists",
"times",
"=",
"[",
"]",
"values",
"=",
"[",
"]",
"# Create re object for splitting lines",
"splitter",
"=",
"re",
".",
"compile",
"(",
"delimiter",
")",
"if",
"header",
":",
"start_row",
"=",
"1",
"else",
":",
"start_row",
"=",
"0",
"with",
"_open",
"(",
"filename",
",",
"mode",
"=",
"'r'",
")",
"as",
"input_file",
":",
"for",
"row",
",",
"line",
"in",
"enumerate",
"(",
"input_file",
",",
"start_row",
")",
":",
"# Split each line using the supplied delimiter",
"data",
"=",
"splitter",
".",
"split",
"(",
"line",
".",
"strip",
"(",
")",
")",
"try",
":",
"converted_time",
"=",
"float",
"(",
"data",
"[",
"0",
"]",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
"as",
"exe",
":",
"six",
".",
"raise_from",
"(",
"ValueError",
"(",
"\"Couldn't convert value {} using {} \"",
"\"found at {}:{:d}:\\n\\t{}\"",
".",
"format",
"(",
"data",
"[",
"0",
"]",
",",
"float",
".",
"__name__",
",",
"filename",
",",
"row",
",",
"line",
")",
")",
",",
"exe",
")",
"times",
".",
"append",
"(",
"converted_time",
")",
"# cast values to a numpy array. time stamps with no values are cast",
"# to an empty array.",
"try",
":",
"converted_value",
"=",
"np",
".",
"array",
"(",
"data",
"[",
"1",
":",
"]",
",",
"dtype",
"=",
"dtype",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
"as",
"exe",
":",
"six",
".",
"raise_from",
"(",
"ValueError",
"(",
"\"Couldn't convert value {} using {} \"",
"\"found at {}:{:d}:\\n\\t{}\"",
".",
"format",
"(",
"data",
"[",
"1",
":",
"]",
",",
"dtype",
".",
"__name__",
",",
"filename",
",",
"row",
",",
"line",
")",
")",
",",
"exe",
")",
"values",
".",
"append",
"(",
"converted_value",
")",
"return",
"np",
".",
"array",
"(",
"times",
")",
",",
"values"
] |
Substitute Placeholder nodes by its value in tags
|
def fill_placeholders ( self , tags ) : def change_p_node_tags ( node , children ) : if isinstance ( node , Placeholder ) : value = ConstExpr ( tags [ node . name ] ) return value else : return node . clone ( children ) return map_tree ( change_p_node_tags , self )
| 3,704
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/tagexpr.py#L137-L146
|
[
"def",
"signalize_extensions",
"(",
")",
":",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.rownumber used\"",
",",
"SalesforceWarning",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension connection.<exception> used\"",
",",
"SalesforceWarning",
")",
"# TODO",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.connection used\"",
",",
"SalesforceWarning",
")",
"# not implemented DB-API extension cursor.scroll(, SalesforceWarning)",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.messages used\"",
",",
"SalesforceWarning",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension connection.messages used\"",
",",
"SalesforceWarning",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.next(, SalesforceWarning) used\"",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.__iter__(, SalesforceWarning) used\"",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.lastrowid used\"",
",",
"SalesforceWarning",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension .errorhandler used\"",
",",
"SalesforceWarning",
")"
] |
Take a list of molecules and return just a list of atomic symbols possibly adding hydrogen
|
def molecules2symbols ( molecules , add_hydrogen = True ) : symbols = sorted ( list ( set ( ase . symbols . string2symbols ( '' . join ( map ( lambda _x : '' . join ( ase . symbols . string2symbols ( _x ) ) , molecules ) ) ) ) ) , key = lambda _y : ase . data . atomic_numbers [ _y ] ) if add_hydrogen and 'H' not in symbols : symbols . insert ( 0 , 'H' ) return symbols
| 3,705
|
https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/ase_tools/gas_phase_references.py#L8-L25
|
[
"def",
"_read_para_reg_failed",
"(",
"self",
",",
"code",
",",
"cbit",
",",
"clen",
",",
"*",
",",
"desc",
",",
"length",
",",
"version",
")",
":",
"_life",
"=",
"collections",
".",
"namedtuple",
"(",
"'Lifetime'",
",",
"(",
"'min'",
",",
"'max'",
")",
")",
"_mint",
"=",
"self",
".",
"_read_unpack",
"(",
"1",
")",
"_maxt",
"=",
"self",
".",
"_read_unpack",
"(",
"1",
")",
"_type",
"=",
"list",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"clen",
"-",
"2",
")",
":",
"_code",
"=",
"self",
".",
"_read_unpack",
"(",
"1",
")",
"_kind",
"=",
"_REG_FAILURE_TYPE",
".",
"get",
"(",
"_code",
")",
"if",
"_kind",
"is",
"None",
":",
"if",
"0",
"<=",
"_code",
"<=",
"200",
":",
"_kind",
"=",
"'Unassigned (IETF Review)'",
"elif",
"201",
"<=",
"_code",
"<=",
"255",
":",
"_kind",
"=",
"'Unassigned (Reserved for Private Use)'",
"else",
":",
"raise",
"ProtocolError",
"(",
"f'HIPv{version}: [Parano {code}] invalid format'",
")",
"_type",
".",
"append",
"(",
"_kind",
")",
"reg_failed",
"=",
"dict",
"(",
"type",
"=",
"desc",
",",
"critical",
"=",
"cbit",
",",
"length",
"=",
"clen",
",",
"lifetime",
"=",
"_life",
"(",
"_mint",
",",
"_maxt",
")",
",",
"reg_type",
"=",
"tuple",
"(",
"_type",
")",
",",
")",
"_plen",
"=",
"length",
"-",
"clen",
"if",
"_plen",
":",
"self",
".",
"_read_fileng",
"(",
"_plen",
")",
"return",
"reg_failed"
] |
Take a list of symbols and construct gas phase references system when possible avoiding O2 . Candidates can be rearranged where earlier candidates get higher preference than later candidates
|
def construct_reference_system ( symbols , candidates = None , options = None , ) : if hasattr ( options , 'no_hydrogen' ) and options . no_hydrogen : add_hydrogen = False else : add_hydrogen = True references = { } sorted_candidates = [ 'H2' , 'H2O' , 'NH3' , 'N2' , 'CH4' , 'CO' , 'H2S' , 'HCl' , 'O2' ] if candidates is None : candidates = sorted_candidates else : odd_candidates = [ c for c in candidates if c not in sorted_candidates ] candidates = [ c for c in sorted_candidates if c in candidates ] + odd_candidates added_symbols = [ ] # go symbols in adsorbate # to add reference species in procedural manner for symbol in symbols : added_symbols . append ( symbol ) for candidate in candidates : _symbols = ase . symbols . string2symbols ( candidate ) # Add partial adsorbate species # is subset of reference species # and reference species # is subset of full adsorbate species set if set ( added_symbols ) <= set ( list ( references . keys ( ) ) + _symbols ) and set ( list ( references . keys ( ) ) + _symbols ) <= set ( symbols ) and candidate not in references . values ( ) : references [ symbol ] = candidate break else : raise UserWarning ( ( "No candidate satisfied {symbol}. Add more candidates\n" " Symbols {symbols}\n" " _Symbols {_symbols}\n" " References {references}\n" " Candidates {candidates}\n" ) . format ( symbol = symbol , symbols = symbols , _symbols = _symbols , candidates = candidates , references = list ( references . keys ( ) ) , ) ) sorted_references = [ ] references = list ( references . items ( ) ) # put references in order so that each reference # only adds one one additional species in each step # while references: # for i, reference in enumerate(references): # if len(set(ase.symbols.string2symbols(reference[1])) - # set(x[0] for x in sorted_references)) == 1: # sorted_references.append(references.pop(i)) # break return references
| 3,706
|
https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/ase_tools/gas_phase_references.py#L28-L106
|
[
"def",
"wait_until_not",
"(",
"self",
",",
"condition",
")",
":",
"return",
"WebDriverWait",
"(",
"self",
".",
"driver",
",",
"self",
".",
"timeout",
")",
".",
"until_not",
"(",
"condition",
")"
] |
Take a list of adsorabtes and a corresponding reference system and return a list of dictionaries encoding the stoichiometry factors converting between adsorbates and reference molecules .
|
def get_stoichiometry_factors ( adsorbates , references ) : stoichiometry = get_atomic_stoichiometry ( references ) stoichiometry_factors = { } for adsorbate in adsorbates : for symbol in ase . symbols . string2symbols ( adsorbate ) : symbol_index = list ( map ( lambda _x : _x [ 0 ] , references ) ) . index ( symbol ) for ( factor , ( ref_symbol , ref_molecule ) ) in zip ( stoichiometry [ symbol_index ] , references ) : stoichiometry_factors . setdefault ( adsorbate , { } ) [ ref_molecule ] = stoichiometry_factors . setdefault ( adsorbate , { } ) . get ( ref_molecule , 0 ) + factor nonzero_factors = { } for key , value in stoichiometry_factors [ adsorbate ] . items ( ) : if not np . isclose ( value , 0. ) : nonzero_factors [ key ] = value stoichiometry_factors [ adsorbate ] = nonzero_factors return stoichiometry_factors
| 3,707
|
https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/ase_tools/gas_phase_references.py#L132-L161
|
[
"def",
"finish",
"(",
"self",
")",
":",
"self",
".",
"_my_map",
"[",
"'over'",
"]",
"=",
"True",
"# finished == over?",
"self",
".",
"_my_map",
"[",
"'completionTime'",
"]",
"=",
"DateTime",
".",
"utcnow",
"(",
")",
"self",
".",
"_save",
"(",
")"
] |
Returns a dict of field name and cleaned value pairs to initialize the model . Beware it aligns the lists of fields and row values with Nones to allow for adding fields not found in the CSV . Whitespace around the value of the cell is stripped .
|
def get_fields_dict ( self , row ) : return { k : getattr ( self , 'clean_{}' . format ( k ) , lambda x : x ) ( v . strip ( ) if isinstance ( v , str ) else None ) for k , v in zip_longest ( self . get_fields ( ) , row ) }
| 3,708
|
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/importers/__init__.py#L90-L98
|
[
"def",
"setOverlayTexelAspect",
"(",
"self",
",",
"ulOverlayHandle",
",",
"fTexelAspect",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"setOverlayTexelAspect",
"result",
"=",
"fn",
"(",
"ulOverlayHandle",
",",
"fTexelAspect",
")",
"return",
"result"
] |
Process a node in result . json structure
|
def process_node ( node ) : value = node [ 'value' ] mname = node [ 'name' ] typeid = node [ 'typeid' ] if typeid == 52 : # StructDataValue obj = { } for el in value [ 'elements' ] : key , val = process_node ( el ) obj [ key ] = val if value [ 'struct_type' ] != 'dict' : # Value is not a dict klass = objimp . import_object ( value [ 'struct_type' ] ) newobj = klass . __new__ ( klass ) if hasattr ( newobj , '__setstate__' ) : newobj . __setstate__ ( obj ) else : newobj . __dict__ = obj obj = newobj elif typeid == 9 : data = value [ 'data' ] dim = value [ 'dimension' ] shape = dim [ 'height' ] , dim [ 'width' ] obj = data elif typeid == 90 : # StructDataValueList obj = [ ] for el in value : sobj = { } for sel in el [ 'elements' ] : key , val = process_node ( sel ) sobj [ key ] = val obj . append ( sobj ) elif typeid == 45 : # Frame obj = dataframe . DataFrame ( frame = os . path . abspath ( value [ 'path' ] ) ) else : obj = value return mname , obj
| 3,709
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/store/gtc/load.py#L20-L59
|
[
"def",
"apply",
"(",
"self",
",",
"img",
")",
":",
"yup",
",",
"uup",
",",
"vup",
"=",
"self",
".",
"getUpLimit",
"(",
")",
"ydwn",
",",
"udwn",
",",
"vdwn",
"=",
"self",
".",
"getDownLimit",
"(",
")",
"yuv",
"=",
"cv2",
".",
"cvtColor",
"(",
"img",
",",
"cv2",
".",
"COLOR_BGR2YUV",
")",
"minValues",
"=",
"np",
".",
"array",
"(",
"[",
"ydwn",
",",
"udwn",
",",
"vdwn",
"]",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"maxValues",
"=",
"np",
".",
"array",
"(",
"[",
"yup",
",",
"uup",
",",
"vup",
"]",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"mask",
"=",
"cv2",
".",
"inRange",
"(",
"yuv",
",",
"minValues",
",",
"maxValues",
")",
"res",
"=",
"cv2",
".",
"bitwise_and",
"(",
"img",
",",
"img",
",",
"mask",
"=",
"mask",
")",
"return",
"res"
] |
Create a dictionary with the contents of result . json
|
def build_result ( data ) : more = { } for key , value in data . items ( ) : if key != 'elements' : newnode = value else : newnode = { } for el in value : nkey , nvalue = process_node ( el ) newnode [ nkey ] = nvalue more [ key ] = newnode return more
| 3,710
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/store/gtc/load.py#L62-L76
|
[
"def",
"_DeleteClientActionRequest",
"(",
"self",
",",
"to_delete",
",",
"cursor",
"=",
"None",
")",
":",
"query",
"=",
"\"DELETE FROM client_action_requests WHERE \"",
"conditions",
"=",
"[",
"]",
"args",
"=",
"[",
"]",
"for",
"client_id",
",",
"flow_id",
",",
"request_id",
"in",
"to_delete",
":",
"conditions",
".",
"append",
"(",
"\"(client_id=%s AND flow_id=%s AND request_id=%s)\"",
")",
"args",
".",
"append",
"(",
"db_utils",
".",
"ClientIDToInt",
"(",
"client_id",
")",
")",
"args",
".",
"append",
"(",
"db_utils",
".",
"FlowIDToInt",
"(",
"flow_id",
")",
")",
"args",
".",
"append",
"(",
"request_id",
")",
"query",
"+=",
"\" OR \"",
".",
"join",
"(",
"conditions",
")",
"cursor",
".",
"execute",
"(",
"query",
",",
"args",
")"
] |
Access all the instance descriptors
|
def _finalize ( self , all_msg_errors = None ) : if all_msg_errors is None : all_msg_errors = [ ] for key in self . stored ( ) : try : getattr ( self , key ) except ( ValueError , TypeError ) as err : all_msg_errors . append ( err . args [ 0 ] ) # Raises a list of all the missing entries if all_msg_errors : raise ValueError ( all_msg_errors )
| 3,711
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/recipeinout.py#L44-L61
|
[
"def",
"delete_link",
"(",
"link_id",
",",
"purge_data",
",",
"*",
"*",
"kwargs",
")",
":",
"user_id",
"=",
"kwargs",
".",
"get",
"(",
"'user_id'",
")",
"try",
":",
"link_i",
"=",
"db",
".",
"DBSession",
".",
"query",
"(",
"Link",
")",
".",
"filter",
"(",
"Link",
".",
"id",
"==",
"link_id",
")",
".",
"one",
"(",
")",
"except",
"NoResultFound",
":",
"raise",
"ResourceNotFoundError",
"(",
"\"Link %s not found\"",
"%",
"(",
"link_id",
")",
")",
"group_items",
"=",
"db",
".",
"DBSession",
".",
"query",
"(",
"ResourceGroupItem",
")",
".",
"filter",
"(",
"ResourceGroupItem",
".",
"link_id",
"==",
"link_id",
")",
".",
"all",
"(",
")",
"for",
"gi",
"in",
"group_items",
":",
"db",
".",
"DBSession",
".",
"delete",
"(",
"gi",
")",
"if",
"purge_data",
"==",
"'Y'",
":",
"_purge_datasets_unique_to_resource",
"(",
"'LINK'",
",",
"link_id",
")",
"log",
".",
"info",
"(",
"\"Deleting link %s, id=%s\"",
",",
"link_i",
".",
"name",
",",
"link_id",
")",
"link_i",
".",
"network",
".",
"check_write_permission",
"(",
"user_id",
")",
"db",
".",
"DBSession",
".",
"delete",
"(",
"link_i",
")",
"db",
".",
"DBSession",
".",
"flush",
"(",
")"
] |
Validate myself .
|
def validate ( self ) : for key , req in self . stored ( ) . items ( ) : val = getattr ( self , key ) req . validate ( val ) # Run checks defined in __checkers__ self . _run_checks ( )
| 3,712
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/recipeinout.py#L70-L78
|
[
"def",
"list_kadastrale_afdelingen",
"(",
"self",
")",
":",
"def",
"creator",
"(",
")",
":",
"gemeentes",
"=",
"self",
".",
"list_gemeenten",
"(",
")",
"res",
"=",
"[",
"]",
"for",
"g",
"in",
"gemeentes",
":",
"res",
"+=",
"self",
".",
"list_kadastrale_afdelingen_by_gemeente",
"(",
"g",
")",
"return",
"res",
"if",
"self",
".",
"caches",
"[",
"'permanent'",
"]",
".",
"is_configured",
":",
"key",
"=",
"'list_afdelingen_rest'",
"afdelingen",
"=",
"self",
".",
"caches",
"[",
"'permanent'",
"]",
".",
"get_or_create",
"(",
"key",
",",
"creator",
")",
"else",
":",
"afdelingen",
"=",
"creator",
"(",
")",
"return",
"afdelingen"
] |
Decorate run method inputs and outputs are validated
|
def validate ( method ) : @ wraps ( method ) def mod_run ( self , rinput ) : self . validate_input ( rinput ) # result = method ( self , rinput ) # self . validate_result ( result ) return result return mod_run
| 3,713
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/validator.py#L19-L31
|
[
"def",
"guess_peb_size",
"(",
"path",
")",
":",
"file_offset",
"=",
"0",
"offsets",
"=",
"[",
"]",
"f",
"=",
"open",
"(",
"path",
",",
"'rb'",
")",
"f",
".",
"seek",
"(",
"0",
",",
"2",
")",
"file_size",
"=",
"f",
".",
"tell",
"(",
")",
"+",
"1",
"f",
".",
"seek",
"(",
"0",
")",
"for",
"_",
"in",
"range",
"(",
"0",
",",
"file_size",
",",
"FILE_CHUNK_SZ",
")",
":",
"buf",
"=",
"f",
".",
"read",
"(",
"FILE_CHUNK_SZ",
")",
"for",
"m",
"in",
"re",
".",
"finditer",
"(",
"UBI_EC_HDR_MAGIC",
",",
"buf",
")",
":",
"start",
"=",
"m",
".",
"start",
"(",
")",
"if",
"not",
"file_offset",
":",
"file_offset",
"=",
"start",
"idx",
"=",
"start",
"else",
":",
"idx",
"=",
"start",
"+",
"file_offset",
"offsets",
".",
"append",
"(",
"idx",
")",
"file_offset",
"+=",
"FILE_CHUNK_SZ",
"f",
".",
"close",
"(",
")",
"occurances",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"offsets",
")",
")",
":",
"try",
":",
"diff",
"=",
"offsets",
"[",
"i",
"]",
"-",
"offsets",
"[",
"i",
"-",
"1",
"]",
"except",
":",
"diff",
"=",
"offsets",
"[",
"i",
"]",
"if",
"diff",
"not",
"in",
"occurances",
":",
"occurances",
"[",
"diff",
"]",
"=",
"0",
"occurances",
"[",
"diff",
"]",
"+=",
"1",
"most_frequent",
"=",
"0",
"block_size",
"=",
"None",
"for",
"offset",
"in",
"occurances",
":",
"if",
"occurances",
"[",
"offset",
"]",
">",
"most_frequent",
":",
"most_frequent",
"=",
"occurances",
"[",
"offset",
"]",
"block_size",
"=",
"offset",
"return",
"block_size"
] |
Convert a scalar validator in a list validator
|
def as_list ( callable ) : @ wraps ( callable ) def wrapper ( value_iter ) : return [ callable ( value ) for value in value_iter ] return wrapper
| 3,714
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/validator.py#L41-L47
|
[
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"_flush",
"(",
")",
"filesize",
"=",
"self",
".",
"file",
".",
"tell",
"(",
")",
"super",
"(",
"BLFWriter",
",",
"self",
")",
".",
"stop",
"(",
")",
"# Write header in the beginning of the file",
"header",
"=",
"[",
"b\"LOGG\"",
",",
"FILE_HEADER_SIZE",
",",
"APPLICATION_ID",
",",
"0",
",",
"0",
",",
"0",
",",
"2",
",",
"6",
",",
"8",
",",
"1",
"]",
"# The meaning of \"count of objects read\" is unknown",
"header",
".",
"extend",
"(",
"[",
"filesize",
",",
"self",
".",
"uncompressed_size",
",",
"self",
".",
"count_of_objects",
",",
"0",
"]",
")",
"header",
".",
"extend",
"(",
"timestamp_to_systemtime",
"(",
"self",
".",
"start_timestamp",
")",
")",
"header",
".",
"extend",
"(",
"timestamp_to_systemtime",
"(",
"self",
".",
"stop_timestamp",
")",
")",
"with",
"open",
"(",
"self",
".",
"file",
".",
"name",
",",
"\"r+b\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"FILE_HEADER_STRUCT",
".",
"pack",
"(",
"*",
"header",
")",
")"
] |
Generates a function that validates that a number is within range
|
def range_validator ( minval = None , maxval = None ) : def checker_func ( value ) : if minval is not None and value < minval : msg = "must be >= {}" . format ( minval ) raise ValidationError ( msg ) if maxval is not None and value > maxval : msg = "must be <= {}" . format ( maxval ) raise ValidationError ( msg ) return value return checker_func
| 3,715
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/validator.py#L50-L75
|
[
"def",
"merge_entities",
"(",
"self",
",",
"from_entity_ids",
",",
"to_entity_id",
",",
"force",
"=",
"False",
",",
"mount_point",
"=",
"DEFAULT_MOUNT_POINT",
")",
":",
"params",
"=",
"{",
"'from_entity_ids'",
":",
"from_entity_ids",
",",
"'to_entity_id'",
":",
"to_entity_id",
",",
"'force'",
":",
"force",
",",
"}",
"api_path",
"=",
"'/v1/{mount_point}/entity/merge'",
".",
"format",
"(",
"mount_point",
"=",
"mount_point",
")",
"return",
"self",
".",
"_adapter",
".",
"post",
"(",
"url",
"=",
"api_path",
",",
"json",
"=",
"params",
",",
")"
] |
Run a pylpfile .
|
def run ( path , tasks ) : # Test if the pylpfile exists readable_path = make_readable_path ( path ) if not os . path . isfile ( path ) : logger . log ( logger . red ( "Can't read pylpfile " ) , logger . magenta ( readable_path ) ) sys . exit ( - 1 ) else : logger . log ( "Using pylpfile " , logger . magenta ( readable_path ) ) # Run the pylpfile try : runpy . run_path ( path , None , "pylpfile" ) except Exception as e : traceback . print_exc ( file = sys . stdout ) logger . log ( logger . red ( "\nAn error has occurred during the execution of the pylpfile" ) ) sys . exit ( - 1 ) # Start the tasks for name in tasks : pylp . start ( name ) # Wait until all task are executed loop = asyncio . get_event_loop ( ) loop . run_until_complete ( wait_and_quit ( loop ) )
| 3,716
|
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/cli/run.py#L18-L45
|
[
"def",
"_generate_noise_temporal",
"(",
"stimfunction_tr",
",",
"tr_duration",
",",
"dimensions",
",",
"template",
",",
"mask",
",",
"noise_dict",
")",
":",
"# Set up common parameters",
"# How many TRs are there",
"trs",
"=",
"len",
"(",
"stimfunction_tr",
")",
"# What time points are sampled by a TR?",
"timepoints",
"=",
"list",
"(",
"np",
".",
"linspace",
"(",
"0",
",",
"(",
"trs",
"-",
"1",
")",
"*",
"tr_duration",
",",
"trs",
")",
")",
"# Preset the volume",
"noise_volume",
"=",
"np",
".",
"zeros",
"(",
"(",
"dimensions",
"[",
"0",
"]",
",",
"dimensions",
"[",
"1",
"]",
",",
"dimensions",
"[",
"2",
"]",
",",
"trs",
")",
")",
"# Generate the drift noise",
"if",
"noise_dict",
"[",
"'drift_sigma'",
"]",
"!=",
"0",
":",
"# Calculate the drift time course",
"noise",
"=",
"_generate_noise_temporal_drift",
"(",
"trs",
",",
"tr_duration",
",",
")",
"# Create a volume with the drift properties",
"volume",
"=",
"np",
".",
"ones",
"(",
"dimensions",
")",
"# Combine the volume and noise",
"noise_volume",
"+=",
"np",
".",
"multiply",
".",
"outer",
"(",
"volume",
",",
"noise",
")",
"*",
"noise_dict",
"[",
"'drift_sigma'",
"]",
"# Generate the physiological noise",
"if",
"noise_dict",
"[",
"'physiological_sigma'",
"]",
"!=",
"0",
":",
"# Calculate the physiological time course",
"noise",
"=",
"_generate_noise_temporal_phys",
"(",
"timepoints",
",",
")",
"# Create a brain shaped volume with similar smoothing properties",
"volume",
"=",
"_generate_noise_spatial",
"(",
"dimensions",
"=",
"dimensions",
",",
"mask",
"=",
"mask",
",",
"fwhm",
"=",
"noise_dict",
"[",
"'fwhm'",
"]",
",",
")",
"# Combine the volume and noise",
"noise_volume",
"+=",
"np",
".",
"multiply",
".",
"outer",
"(",
"volume",
",",
"noise",
")",
"*",
"noise_dict",
"[",
"'physiological_sigma'",
"]",
"# Generate the AR noise",
"if",
"noise_dict",
"[",
"'auto_reg_sigma'",
"]",
"!=",
"0",
":",
"# Calculate the AR time course volume",
"noise",
"=",
"_generate_noise_temporal_autoregression",
"(",
"timepoints",
",",
"noise_dict",
",",
"dimensions",
",",
"mask",
",",
")",
"# Combine the volume and noise",
"noise_volume",
"+=",
"noise",
"*",
"noise_dict",
"[",
"'auto_reg_sigma'",
"]",
"# Generate the task related noise",
"if",
"noise_dict",
"[",
"'task_sigma'",
"]",
"!=",
"0",
"and",
"np",
".",
"sum",
"(",
"stimfunction_tr",
")",
">",
"0",
":",
"# Calculate the task based noise time course",
"noise",
"=",
"_generate_noise_temporal_task",
"(",
"stimfunction_tr",
",",
")",
"# Create a brain shaped volume with similar smoothing properties",
"volume",
"=",
"_generate_noise_spatial",
"(",
"dimensions",
"=",
"dimensions",
",",
"mask",
"=",
"mask",
",",
"fwhm",
"=",
"noise_dict",
"[",
"'fwhm'",
"]",
",",
")",
"# Combine the volume and noise",
"noise_volume",
"+=",
"np",
".",
"multiply",
".",
"outer",
"(",
"volume",
",",
"noise",
")",
"*",
"noise_dict",
"[",
"'task_sigma'",
"]",
"# Finally, z score each voxel so things mix nicely",
"noise_volume",
"=",
"stats",
".",
"zscore",
"(",
"noise_volume",
",",
"3",
")",
"# If it is a nan it is because you just divided by zero (since some",
"# voxels are zeros in the template)",
"noise_volume",
"[",
"np",
".",
"isnan",
"(",
"noise_volume",
")",
"]",
"=",
"0",
"return",
"noise_volume"
] |
Wait until all task are executed .
|
async def wait_and_quit ( loop ) : from pylp . lib . tasks import running if running : await asyncio . wait ( map ( lambda runner : runner . future , running ) )
| 3,717
|
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/cli/run.py#L49-L53
|
[
"def",
"create_api_call",
"(",
"func",
",",
"settings",
")",
":",
"def",
"base_caller",
"(",
"api_call",
",",
"_",
",",
"*",
"args",
")",
":",
"\"\"\"Simply call api_call and ignore settings.\"\"\"",
"return",
"api_call",
"(",
"*",
"args",
")",
"def",
"inner",
"(",
"request",
",",
"options",
"=",
"None",
")",
":",
"\"\"\"Invoke with the actual settings.\"\"\"",
"this_options",
"=",
"_merge_options_metadata",
"(",
"options",
",",
"settings",
")",
"this_settings",
"=",
"settings",
".",
"merge",
"(",
"this_options",
")",
"if",
"this_settings",
".",
"retry",
"and",
"this_settings",
".",
"retry",
".",
"retry_codes",
":",
"api_call",
"=",
"gax",
".",
"retry",
".",
"retryable",
"(",
"func",
",",
"this_settings",
".",
"retry",
",",
"*",
"*",
"this_settings",
".",
"kwargs",
")",
"else",
":",
"api_call",
"=",
"gax",
".",
"retry",
".",
"add_timeout_arg",
"(",
"func",
",",
"this_settings",
".",
"timeout",
",",
"*",
"*",
"this_settings",
".",
"kwargs",
")",
"api_call",
"=",
"_catch_errors",
"(",
"api_call",
",",
"gax",
".",
"config",
".",
"API_ERRORS",
")",
"return",
"api_caller",
"(",
"api_call",
",",
"this_settings",
",",
"request",
")",
"if",
"settings",
".",
"page_descriptor",
":",
"if",
"settings",
".",
"bundler",
"and",
"settings",
".",
"bundle_descriptor",
":",
"raise",
"ValueError",
"(",
"'The API call has incompatible settings: '",
"'bundling and page streaming'",
")",
"api_caller",
"=",
"_page_streamable",
"(",
"settings",
".",
"page_descriptor",
")",
"elif",
"settings",
".",
"bundler",
"and",
"settings",
".",
"bundle_descriptor",
":",
"api_caller",
"=",
"_bundleable",
"(",
"settings",
".",
"bundle_descriptor",
")",
"else",
":",
"api_caller",
"=",
"base_caller",
"return",
"inner"
] |
Return True if a record is published .
|
def is_published ( self ) : citeable = 'publication_info' in self . record and is_citeable ( self . record [ 'publication_info' ] ) submitted = 'dois' in self . record and any ( 'journal_title' in el for el in force_list ( self . record . get ( 'publication_info' ) ) ) return citeable or submitted
| 3,718
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/readers/literature.py#L351-L382
|
[
"def",
"_check_rest_version",
"(",
"self",
",",
"version",
")",
":",
"version",
"=",
"str",
"(",
"version",
")",
"if",
"version",
"not",
"in",
"self",
".",
"supported_rest_versions",
":",
"msg",
"=",
"\"Library is incompatible with REST API version {0}\"",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
"version",
")",
")",
"array_rest_versions",
"=",
"self",
".",
"_list_available_rest_versions",
"(",
")",
"if",
"version",
"not",
"in",
"array_rest_versions",
":",
"msg",
"=",
"\"Array is incompatible with REST API version {0}\"",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
"version",
")",
")",
"return",
"LooseVersion",
"(",
"version",
")"
] |
Return the page range or the article id of a publication_info entry .
|
def get_page_artid_for_publication_info(publication_info, separator):
    """Return the article id, else the page range joined by *separator*, else ''."""
    if 'artid' in publication_info:
        return publication_info['artid']
    has_range = ('page_start' in publication_info
                 and 'page_end' in publication_info)
    if has_range:
        start = publication_info['page_start']
        end = publication_info['page_end']
        # text_type keeps the result a unicode string on Python 2.
        return text_type('{}{}{}').format(start, text_type(separator), end)
    return ''
| 3,719
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/readers/literature.py#L449-L475
|
[
"def",
"deleteAllProfiles",
"(",
"self",
")",
":",
"settings",
"=",
"QtCore",
".",
"QSettings",
"(",
")",
"for",
"profGroupName",
"in",
"QtCore",
".",
"QSettings",
"(",
")",
".",
"childGroups",
"(",
")",
":",
"settings",
".",
"remove",
"(",
"profGroupName",
")"
] |
Return the page range or the article id of a record .
|
def get_page_artid(self, separator='-'):
    """Return the page range or article id of the record's first
    publication_info entry (empty string when absent)."""
    first_entry = get_value(self.record, 'publication_info[0]', default={})
    return LiteratureReader.get_page_artid_for_publication_info(
        first_entry, separator)
| 3,720
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/readers/literature.py#L477-L504
|
[
"def",
"workers",
"(",
"profile",
"=",
"'default'",
")",
":",
"config",
"=",
"get_running",
"(",
"profile",
")",
"lbn",
"=",
"config",
"[",
"'worker.list'",
"]",
".",
"split",
"(",
"','",
")",
"worker_list",
"=",
"[",
"]",
"ret",
"=",
"{",
"}",
"for",
"lb",
"in",
"lbn",
":",
"try",
":",
"worker_list",
".",
"extend",
"(",
"config",
"[",
"'worker.{0}.balance_workers'",
".",
"format",
"(",
"lb",
")",
"]",
".",
"split",
"(",
"','",
")",
")",
"except",
"KeyError",
":",
"pass",
"worker_list",
"=",
"list",
"(",
"set",
"(",
"worker_list",
")",
")",
"for",
"worker",
"in",
"worker_list",
":",
"ret",
"[",
"worker",
"]",
"=",
"{",
"'activation'",
":",
"config",
"[",
"'worker.{0}.activation'",
".",
"format",
"(",
"worker",
")",
"]",
",",
"'state'",
":",
"config",
"[",
"'worker.{0}.state'",
".",
"format",
"(",
"worker",
")",
"]",
",",
"}",
"return",
"ret"
] |
Yield integers of dtype bit - length reversing their bit - order .
|
def chunkreverse(integers, dtype='L'):
    """Yield the given integers with their bit-order reversed.

    Each value is treated as a dtype-bit-length chunk.
    """
    if dtype in ('B', 8):
        # Single bytes use the precomputed reversal lookup table.
        return map(RBYTES.__getitem__, integers)
    width = NBITS[dtype]
    fmt = '{0:0%db}' % width
    # Format as a zero-padded binary string, reverse it, re-parse.
    return (int(fmt.format(chunk)[::-1], 2) for chunk in integers)
| 3,721
|
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/transform.py#L26-L40
|
[
"def",
"detach_session",
"(",
"self",
")",
":",
"if",
"self",
".",
"_session",
"is",
"not",
"None",
":",
"self",
".",
"_session",
".",
"unsubscribe",
"(",
"self",
")",
"self",
".",
"_session",
"=",
"None"
] |
Return integer concatenating integer chunks of r > 0 bit - length .
|
def pack(chunks, r=32):
    """Return the integer formed by concatenating r-bit chunks.

    The first chunk becomes the least-significant r bits.

    Raises:
        ValueError: if r is not positive.
    """
    if r < 1:
        raise ValueError('pack needs r > 0')
    result = 0
    for position, chunk in enumerate(chunks):
        result += chunk << (position * r)
    return result
| 3,722
|
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/transform.py#L43-L66
|
[
"def",
"_scheduleRestart",
"(",
"self",
",",
"when",
":",
"Union",
"[",
"datetime",
",",
"str",
"]",
",",
"failTimeout",
")",
"->",
"None",
":",
"logger",
".",
"info",
"(",
"\"{}'s restarter processing restart\"",
".",
"format",
"(",
"self",
")",
")",
"if",
"isinstance",
"(",
"when",
",",
"str",
")",
":",
"when",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"when",
")",
"now",
"=",
"datetime",
".",
"utcnow",
"(",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"dateutil",
".",
"tz",
".",
"tzutc",
"(",
")",
")",
"logger",
".",
"info",
"(",
"\"Restart of node '{}' has been scheduled on {}\"",
".",
"format",
"(",
"self",
".",
"nodeName",
",",
"when",
")",
")",
"ev_data",
"=",
"RestartLogData",
"(",
"when",
")",
"self",
".",
"_actionLog",
".",
"append_scheduled",
"(",
"ev_data",
")",
"callAgent",
"=",
"partial",
"(",
"self",
".",
"_callRestartAgent",
",",
"ev_data",
",",
"failTimeout",
")",
"delay",
"=",
"0",
"if",
"now",
"<",
"when",
":",
"delay",
"=",
"(",
"when",
"-",
"now",
")",
".",
"total_seconds",
"(",
")",
"self",
".",
"scheduledAction",
"=",
"ev_data",
"self",
".",
"_schedule",
"(",
"callAgent",
",",
"delay",
")"
] |
Yield r > 0 bit - length integers splitting n into chunks .
|
def unpack(n, r=32):
    """Yield successive r-bit chunks of n, least-significant first.

    Stops when the remaining value reaches zero, so trailing zero
    chunks are never produced.

    Raises:
        ValueError: if r is not positive (raised on first iteration).
    """
    if r < 1:
        raise ValueError('unpack needs r > 0')
    low_bits = (1 << r) - 1
    remaining = n
    while remaining:
        yield remaining & low_bits
        remaining >>= r
| 3,723
|
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/transform.py#L69-L90
|
[
"def",
"_scheduleRestart",
"(",
"self",
",",
"when",
":",
"Union",
"[",
"datetime",
",",
"str",
"]",
",",
"failTimeout",
")",
"->",
"None",
":",
"logger",
".",
"info",
"(",
"\"{}'s restarter processing restart\"",
".",
"format",
"(",
"self",
")",
")",
"if",
"isinstance",
"(",
"when",
",",
"str",
")",
":",
"when",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"when",
")",
"now",
"=",
"datetime",
".",
"utcnow",
"(",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"dateutil",
".",
"tz",
".",
"tzutc",
"(",
")",
")",
"logger",
".",
"info",
"(",
"\"Restart of node '{}' has been scheduled on {}\"",
".",
"format",
"(",
"self",
".",
"nodeName",
",",
"when",
")",
")",
"ev_data",
"=",
"RestartLogData",
"(",
"when",
")",
"self",
".",
"_actionLog",
".",
"append_scheduled",
"(",
"ev_data",
")",
"callAgent",
"=",
"partial",
"(",
"self",
".",
"_callRestartAgent",
",",
"ev_data",
",",
"failTimeout",
")",
"delay",
"=",
"0",
"if",
"now",
"<",
"when",
":",
"delay",
"=",
"(",
"when",
"-",
"now",
")",
".",
"total_seconds",
"(",
")",
"self",
".",
"scheduledAction",
"=",
"ev_data",
"self",
".",
"_schedule",
"(",
"callAgent",
",",
"delay",
")"
] |
Yield integers concatenating bools in chunks of dtype bit - length .
|
def packbools(bools, dtype='L'):
    """Yield integers whose bits encode the booleans, dtype-bit-length per chunk.

    The trailing chunk is padded with False.
    """
    width = NBITS[dtype]
    bit_values = ATOMS[dtype]
    stream = iter(bools)
    # Repeating the same iterator groups the stream into fixed-width chunks.
    for group in zip_longest(*[stream] * width, fillvalue=False):
        yield sum(compress(bit_values, group))
| 3,724
|
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/transform.py#L93-L103
|
[
"def",
"_scheduleRestart",
"(",
"self",
",",
"when",
":",
"Union",
"[",
"datetime",
",",
"str",
"]",
",",
"failTimeout",
")",
"->",
"None",
":",
"logger",
".",
"info",
"(",
"\"{}'s restarter processing restart\"",
".",
"format",
"(",
"self",
")",
")",
"if",
"isinstance",
"(",
"when",
",",
"str",
")",
":",
"when",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"when",
")",
"now",
"=",
"datetime",
".",
"utcnow",
"(",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"dateutil",
".",
"tz",
".",
"tzutc",
"(",
")",
")",
"logger",
".",
"info",
"(",
"\"Restart of node '{}' has been scheduled on {}\"",
".",
"format",
"(",
"self",
".",
"nodeName",
",",
"when",
")",
")",
"ev_data",
"=",
"RestartLogData",
"(",
"when",
")",
"self",
".",
"_actionLog",
".",
"append_scheduled",
"(",
"ev_data",
")",
"callAgent",
"=",
"partial",
"(",
"self",
".",
"_callRestartAgent",
",",
"ev_data",
",",
"failTimeout",
")",
"delay",
"=",
"0",
"if",
"now",
"<",
"when",
":",
"delay",
"=",
"(",
"when",
"-",
"now",
")",
".",
"total_seconds",
"(",
")",
"self",
".",
"scheduledAction",
"=",
"ev_data",
"self",
".",
"_schedule",
"(",
"callAgent",
",",
"delay",
")"
] |
Yield booleans unpacking integers of dtype bit - length .
|
def unpackbools(integers, dtype='L'):
    """Yield one boolean per bit of each integer (dtype bit-length)."""
    bit_values = ATOMS[dtype]
    for number in integers:
        for bit in bit_values:
            yield bool(number & bit)
| 3,725
|
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/transform.py#L106-L116
|
[
"def",
"OnPreferences",
"(",
"self",
",",
"event",
")",
":",
"preferences",
"=",
"self",
".",
"interfaces",
".",
"get_preferences_from_user",
"(",
")",
"if",
"preferences",
":",
"for",
"key",
"in",
"preferences",
":",
"if",
"type",
"(",
"config",
"[",
"key",
"]",
")",
"in",
"(",
"type",
"(",
"u\"\"",
")",
",",
"type",
"(",
"\"\"",
")",
")",
":",
"config",
"[",
"key",
"]",
"=",
"preferences",
"[",
"key",
"]",
"else",
":",
"config",
"[",
"key",
"]",
"=",
"ast",
".",
"literal_eval",
"(",
"preferences",
"[",
"key",
"]",
")",
"self",
".",
"main_window",
".",
"grid",
".",
"grid_renderer",
".",
"cell_cache",
".",
"clear",
"(",
")",
"self",
".",
"main_window",
".",
"grid",
".",
"ForceRefresh",
"(",
")"
] |
Select information from valid arc lines to facilitate posterior fits .
|
def select_data_for_fit(list_of_wvfeatures):
    """Select information from valid arc lines to facilitate posterior fits.

    Parameters
    ----------
    list_of_wvfeatures : list
        Objects exposing ``line_ok`` (bool), ``xpos``, ``reference``
        and ``funcost`` attributes.

    Returns
    -------
    nfit : int
        Number of valid (line_ok) lines.
    ifit : list of int
        Indices of the valid lines within the input list.
    xfit : numpy.ndarray
        X positions of the valid lines.
    yfit : numpy.ndarray
        Reference wavelengths of the valid lines.
    wfit : numpy.ndarray
        Cost-function weights of the valid lines.
    """
    # Accumulate in plain lists and convert once at the end: the original
    # repeated np.append() inside the loop, which reallocates the whole
    # array every iteration (quadratic cost in the number of lines).
    ifit = [i for i, feature in enumerate(list_of_wvfeatures)
            if feature.line_ok]
    xfit = np.array([list_of_wvfeatures[i].xpos for i in ifit],
                    dtype=float)
    yfit = np.array([list_of_wvfeatures[i].reference for i in ifit],
                    dtype=float)
    wfit = np.array([list_of_wvfeatures[i].funcost for i in ifit],
                    dtype=float)
    return len(ifit), ifit, xfit, yfit, wfit
| 3,726
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/arccalibration.py#L39-L82
|
[
"def",
"WriteBlobs",
"(",
"self",
",",
"blob_id_data_map",
",",
"cursor",
"=",
"None",
")",
":",
"chunks",
"=",
"[",
"]",
"for",
"blob_id",
",",
"blob",
"in",
"iteritems",
"(",
"blob_id_data_map",
")",
":",
"chunks",
".",
"extend",
"(",
"_BlobToChunks",
"(",
"blob_id",
".",
"AsBytes",
"(",
")",
",",
"blob",
")",
")",
"for",
"values",
"in",
"_PartitionChunks",
"(",
"chunks",
")",
":",
"_Insert",
"(",
"cursor",
",",
"\"blobs\"",
",",
"values",
")"
] |
Compute information associated to triplets in master table .
|
def gen_triplets_master(wv_master, geometry=None, debugplot=0):
    """Compute information associated to triplets in master table.

    Parameters
    ----------
    wv_master : 1d numpy array
        Master wavelengths; must be strictly increasing (checked).
    geometry : tuple or None
        (x, y, dx, dy) window geometry for the optional debug plot.
    debugplot : int
        Verbosity flag: |debugplot| >= 10 prints counts; 21 or 22 also
        displays a histogram of the triplet distance ratios.

    Returns
    -------
    ntriplets_master : int
        Number of triplets (combinations of 3 lines).
    ratios_master_sorted : 1d numpy array
        Sorted relative positions of each triplet's central line.
    triplets_master_sorted_list : list of tuples
        Line-index triplets, sorted in step with ratios_master_sorted.

    Raises
    ------
    ValueError
        If the master wavelengths are duplicated or unsorted, or the
        triplet count does not match the expected combinatorial value.
    """
    nlines_master = wv_master.size

    # Check that the wavelengths in the master table are sorted
    wv_previous = wv_master[0]
    for i in range(1, nlines_master):
        if wv_previous >= wv_master[i]:
            raise ValueError('Wavelengths:\n--> ' + str(wv_previous) +
                             '\n--> ' + str(wv_master[i]) +
                             '\nin master table are duplicated or not sorted')
        wv_previous = wv_master[i]

    # Generate all the possible triplets with the numbers of the lines
    # in the master table. Each triplet is defined as a tuple of three
    # numbers corresponding to the three line indices in the master
    # table. The collection of tuples is stored in an ordinary python
    # list.
    iter_comb_triplets = itertools.combinations(range(nlines_master), 3)
    triplets_master_list = [val for val in iter_comb_triplets]

    # Verify that the number of triplets coincides with the expected
    # value.
    ntriplets_master = len(triplets_master_list)
    if ntriplets_master == comb(nlines_master, 3, exact=True):
        if abs(debugplot) >= 10:
            print('>>> Total number of lines in master table:',
                  nlines_master)
            print('>>> Number of triplets in master table...:',
                  ntriplets_master)
    else:
        raise ValueError('Invalid number of combinations')

    # For each triplet, compute the relative position of the central
    # line.
    ratios_master = np.zeros(ntriplets_master)
    for index, value in enumerate(triplets_master_list):
        i1, i2, i3 = value
        delta1 = wv_master[i2] - wv_master[i1]
        delta2 = wv_master[i3] - wv_master[i1]
        ratios_master[index] = delta1 / delta2

    # Compute the array of indices that index the above ratios in
    # sorted order.
    isort_ratios_master = np.argsort(ratios_master)

    # Simultaneous sort of position ratios and triplets.
    ratios_master_sorted = ratios_master[isort_ratios_master]
    triplets_master_sorted_list = [triplets_master_list[i]
                                   for i in isort_ratios_master]

    if abs(debugplot) in [21, 22]:
        # compute and plot histogram with position ratios
        bins_in = np.linspace(0.0, 1.0, 41)
        hist, bins_out = np.histogram(ratios_master, bins=bins_in)
        # from numina.array.display.matplotlib_qt import plt
        fig = plt.figure()
        ax = fig.add_subplot(111)
        width_hist = 0.8 * (bins_out[1] - bins_out[0])
        center = (bins_out[:-1] + bins_out[1:]) / 2
        ax.bar(center, hist, align='center', width=width_hist)
        ax.set_xlabel('distance ratio in each triplet')
        ax.set_ylabel('Number of triplets')
        ax.set_title("Number of lines/triplets: " + str(nlines_master) +
                     "/" + str(ntriplets_master))
        # set window geometry
        set_window_geometry(geometry)
        # NOTE(review): reconstructed from a collapsed source line; the
        # pause appears to belong inside this debug branch -- confirm.
        pause_debugplot(debugplot, pltshow=True, tight_layout=True)

    return ntriplets_master, ratios_master_sorted, triplets_master_sorted_list
| 3,727
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/arccalibration.py#L324-L425
|
[
"def",
"cudnnSetPooling2dDescriptor",
"(",
"poolingDesc",
",",
"mode",
",",
"windowHeight",
",",
"windowWidth",
",",
"verticalPadding",
",",
"horizontalPadding",
",",
"verticalStride",
",",
"horizontalStride",
")",
":",
"status",
"=",
"_libcudnn",
".",
"cudnnSetPooling2dDescriptor",
"(",
"poolingDesc",
",",
"mode",
",",
"windowHeight",
",",
"windowWidth",
",",
"verticalPadding",
",",
"horizontalPadding",
",",
"verticalStride",
",",
"horizontalStride",
")",
"cudnnCheckStatus",
"(",
"status",
")"
] |
Performs arc line identification for arc calibration .
|
def arccalibration(wv_master, xpos_arc, naxis1_arc, crpix1,
                   wv_ini_search, wv_end_search,
                   wvmin_useful, wvmax_useful,
                   error_xpos_arc, times_sigma_r, frac_triplets_for_sum,
                   times_sigma_theil_sen, poly_degree_wfit,
                   times_sigma_polfilt, times_sigma_cook,
                   times_sigma_inclusion, geometry=None, debugplot=0):
    """Perform arc line identification for arc calibration.

    Thin wrapper: precomputes the master-table triplet information via
    gen_triplets_master() and delegates the actual identification to
    arccalibration_direct(), forwarding every tuning parameter.

    Returns
    -------
    list_of_wvfeatures : list
        Wavelength features as produced by arccalibration_direct().
    """
    ntriplets_master, ratios_master_sorted, triplets_master_sorted_list = \
        gen_triplets_master(wv_master=wv_master, geometry=geometry,
                            debugplot=debugplot)
    list_of_wvfeatures = arccalibration_direct(
        wv_master=wv_master,
        ntriplets_master=ntriplets_master,
        ratios_master_sorted=ratios_master_sorted,
        triplets_master_sorted_list=triplets_master_sorted_list,
        xpos_arc=xpos_arc,
        naxis1_arc=naxis1_arc,
        crpix1=crpix1,
        wv_ini_search=wv_ini_search,
        wv_end_search=wv_end_search,
        wvmin_useful=wvmin_useful,
        wvmax_useful=wvmax_useful,
        error_xpos_arc=error_xpos_arc,
        times_sigma_r=times_sigma_r,
        frac_triplets_for_sum=frac_triplets_for_sum,
        times_sigma_theil_sen=times_sigma_theil_sen,
        poly_degree_wfit=poly_degree_wfit,
        times_sigma_polfilt=times_sigma_polfilt,
        times_sigma_cook=times_sigma_cook,
        times_sigma_inclusion=times_sigma_inclusion,
        geometry=geometry,
        debugplot=debugplot)
    return list_of_wvfeatures
| 3,728
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/arccalibration.py#L428-L543
|
[
"def",
"export",
"(",
"cls",
",",
"folder",
",",
"particles",
",",
"datetimes",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"folder",
")",
":",
"os",
".",
"makedirs",
"(",
"folder",
")",
"particle_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"'particles.pickle'",
")",
"f",
"=",
"open",
"(",
"particle_path",
",",
"\"wb\"",
")",
"pickle",
".",
"dump",
"(",
"particles",
",",
"f",
")",
"f",
".",
"close",
"(",
")",
"datetimes_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"'datetimes.pickle'",
")",
"f",
"=",
"open",
"(",
"datetimes_path",
",",
"\"wb\"",
")",
"pickle",
".",
"dump",
"(",
"datetimes",
",",
"f",
")",
"f",
".",
"close",
"(",
")"
] |
Match two lists with wavelengths .
|
def match_wv_arrays(wv_master, wv_expected_all_peaks, delta_wv_max):
    """Match two lists of wavelengths.

    For each master wavelength, find the nearest expected peak (via
    binary search) and, if within delta_wv_max, record the master
    wavelength at that peak's position in the output array.

    Parameters
    ----------
    wv_master : 1d numpy array
        Master wavelengths (assumed sorted -- searchsorted requires it).
    wv_expected_all_peaks : 1d numpy array
        Expected wavelengths of all detected peaks (sorted).
    delta_wv_max : float
        Maximum allowed distance for a match.

    Returns
    -------
    wv_verified_all_peaks : 1d numpy array
        Same shape as wv_expected_all_peaks; matched master wavelength
        at each peak position, zero where no match was found.
    """
    # initialize the output array to zero
    wv_verified_all_peaks = np.zeros_like(wv_expected_all_peaks)
    # initialize to True array to indicate that no peak has already
    # been verified (this flag avoids duplication)
    wv_unused = np.ones_like(wv_expected_all_peaks, dtype=bool)
    # initialize to np.infty array to store minimum distance to already
    # identified line
    minimum_delta_wv = np.ones_like(wv_expected_all_peaks, dtype=float)
    minimum_delta_wv *= np.infty
    # since it is likely that len(wv_master) < len(wv_expected_all_peaks),
    # it is more convenient to execute the search in the following order
    for i in range(len(wv_master)):
        j = np.searchsorted(wv_expected_all_peaks, wv_master[i])
        if j == 0:
            # master line falls before every expected peak
            delta_wv = abs(wv_master[i] - wv_expected_all_peaks[j])
            if delta_wv < delta_wv_max:
                if wv_unused[j]:
                    wv_verified_all_peaks[j] = wv_master[i]
                    wv_unused[j] = False
                    minimum_delta_wv[j] = delta_wv
                else:
                    # peak already claimed: keep the closer match
                    if delta_wv < minimum_delta_wv[j]:
                        wv_verified_all_peaks[j] = wv_master[i]
                        minimum_delta_wv[j] = delta_wv
        elif j == len(wv_expected_all_peaks):
            # master line falls after every expected peak
            delta_wv = abs(wv_master[i] - wv_expected_all_peaks[j-1])
            if delta_wv < delta_wv_max:
                if wv_unused[j-1]:
                    wv_verified_all_peaks[j-1] = wv_master[i]
                    wv_unused[j-1] = False
                else:
                    # NOTE(review): unlike the j == 0 branch,
                    # minimum_delta_wv is not updated here -- confirm
                    # whether this asymmetry is intentional.
                    if delta_wv < minimum_delta_wv[j-1]:
                        wv_verified_all_peaks[j-1] = wv_master[i]
        else:
            # master line falls between two peaks: pick the nearer one
            delta_wv1 = abs(wv_master[i] - wv_expected_all_peaks[j-1])
            delta_wv2 = abs(wv_master[i] - wv_expected_all_peaks[j])
            if delta_wv1 < delta_wv2:
                if delta_wv1 < delta_wv_max:
                    if wv_unused[j-1]:
                        wv_verified_all_peaks[j-1] = wv_master[i]
                        wv_unused[j-1] = False
                    else:
                        if delta_wv1 < minimum_delta_wv[j-1]:
                            wv_verified_all_peaks[j-1] = wv_master[i]
            else:
                if delta_wv2 < delta_wv_max:
                    if wv_unused[j]:
                        wv_verified_all_peaks[j] = wv_master[i]
                        wv_unused[j] = False
                    else:
                        if delta_wv2 < minimum_delta_wv[j]:
                            wv_verified_all_peaks[j] = wv_master[i]
    return wv_verified_all_peaks
| 3,729
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/arccalibration.py#L1519-L1600
|
[
"def",
"get_var_rank",
"(",
"self",
",",
"name",
")",
":",
"name",
"=",
"create_string_buffer",
"(",
"name",
")",
"rank",
"=",
"c_int",
"(",
")",
"self",
".",
"library",
".",
"get_var_rank",
".",
"argtypes",
"=",
"[",
"c_char_p",
",",
"POINTER",
"(",
"c_int",
")",
"]",
"self",
".",
"library",
".",
"get_var_rank",
".",
"restype",
"=",
"None",
"self",
".",
"library",
".",
"get_var_rank",
"(",
"name",
",",
"byref",
"(",
"rank",
")",
")",
"return",
"rank",
".",
"value"
] |
Set window geometry .
|
def set_window_geometry(geometry):
    """Set the current figure window geometry.

    Parameters
    ----------
    geometry : tuple or None
        (x, y, dx, dy) placement; None means leave the window alone.
    """
    if geometry is None:
        return
    x_geom, y_geom, dx_geom, dy_geom = geometry
    manager = plt.get_current_fig_manager()
    # Not every matplotlib backend exposes a window object.
    if 'window' in dir(manager):
        try:
            manager.window.setGeometry(x_geom, y_geom, dx_geom, dy_geom)
        except AttributeError:
            # Window exists but lacks setGeometry (non-Qt backend).
            pass
| 3,730
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/matplotlib_qt.py#L8-L27
|
[
"def",
"_ar_matrix",
"(",
"self",
")",
":",
"Y",
"=",
"np",
".",
"array",
"(",
"self",
".",
"data",
"[",
"self",
".",
"max_lag",
":",
"self",
".",
"data",
".",
"shape",
"[",
"0",
"]",
"]",
")",
"X",
"=",
"self",
".",
"data",
"[",
"(",
"self",
".",
"max_lag",
"-",
"1",
")",
":",
"-",
"1",
"]",
"if",
"self",
".",
"ar",
"!=",
"0",
":",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"self",
".",
"ar",
")",
":",
"X",
"=",
"np",
".",
"vstack",
"(",
"(",
"X",
",",
"self",
".",
"data",
"[",
"(",
"self",
".",
"max_lag",
"-",
"i",
"-",
"1",
")",
":",
"-",
"i",
"-",
"1",
"]",
")",
")",
"return",
"X"
] |
Parse a fixed width line .
|
def parse_fixed_width(types, lines):
    """Parse fixed-width fields from *lines*.

    Parameters
    ----------
    types : list of (int, callable)
        (width, parser) pairs; each field is cut to *width* characters
        and passed through *parser*.
    lines : list of str
        Input lines; consumed from the front (mutated in place) as
        fields are read.

    Returns
    -------
    list
        Parsed values, one per entry in *types*.
    """
    values = []
    remainder = ''
    for width, parser in types:
        if not remainder:
            # Current line exhausted: take the next one (newline stripped).
            remainder = lines.pop(0).replace('\n', '')
        field, remainder = remainder[:width], remainder[width:]
        values.append(parser(field))
    return values
| 3,731
|
https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/tools.py#L48-L59
|
[
"def",
"_translate_struct",
"(",
"inner_dict",
")",
":",
"try",
":",
"optional",
"=",
"inner_dict",
"[",
"'optional'",
"]",
".",
"items",
"(",
")",
"required",
"=",
"inner_dict",
"[",
"'required'",
"]",
".",
"items",
"(",
")",
"except",
"KeyError",
"as",
"ex",
":",
"raise",
"DeserializationError",
"(",
"\"Missing key: {}\"",
".",
"format",
"(",
"ex",
")",
")",
"except",
"AttributeError",
"as",
"ex",
":",
"raise",
"DeserializationError",
"(",
"\"Invalid Structure: {}\"",
".",
"format",
"(",
"inner_dict",
")",
")",
"val_dict",
"=",
"{",
"k",
":",
"_translate",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"required",
"}",
"val_dict",
".",
"update",
"(",
"{",
"Optional",
"(",
"k",
")",
":",
"_translate",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"optional",
"}",
")",
"return",
"val_dict"
] |
Parse nonlinear curves block .
|
def _parse_curves(block, **kwargs):
    """Parse nonlinear curves block.

    Reads ``count`` soil types, each contributing one modulus-reduction
    and one damping curve, followed by the soil-type number list.

    Parameters
    ----------
    block : list of str
        Remaining lines of the input file; consumed in place.

    Returns
    -------
    dict
        Maps (soil type number, curve parameter) to the parsed
        site.NonlinearProperty.
    """
    count = int(block.pop(0))
    curves = []
    for i in range(count):
        for param in ['mod_reduc', 'damping']:
            # Header line: number of points, then the curve name.
            length, name = parse_fixed_width([(5, int), (65, to_str)], block)
            curves.append(
                site.NonlinearProperty(
                    name,
                    # Abscissae then ordinates: `length` 10-char floats each.
                    parse_fixed_width(length * [(10, float)], block),
                    parse_fixed_width(length * [(10, float)], block),
                    param))
    # First value on the line is the count itself; skip it.
    length = int(block[0][:5])
    soil_types = parse_fixed_width((length + 1) * [(5, int)], block)[1:]
    # Group soil type number and curves together
    return {(soil_types[i // 2], c.param): c
            for i, c in enumerate(curves)}
| 3,732
|
https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/tools.py#L62-L80
|
[
"def",
"createReaderUser",
"(",
"self",
",",
"accessibleBundles",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"__serviceAccount",
".",
"get_url",
"(",
")",
"+",
"'/'",
"+",
"self",
".",
"__serviceAccount",
".",
"get_instance_id",
"(",
")",
"+",
"'/v2/users/new'",
"headers",
"=",
"{",
"'content-type'",
":",
"'application/json'",
"}",
"data",
"=",
"{",
"}",
"data",
"[",
"'type'",
"]",
"=",
"'READER'",
"if",
"accessibleBundles",
"is",
"not",
"None",
":",
"data",
"[",
"'bundles'",
"]",
"=",
"accessibleBundles",
"json_data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"response",
"=",
"self",
".",
"__perform_rest_call",
"(",
"requestURL",
"=",
"url",
",",
"restType",
"=",
"'POST'",
",",
"body",
"=",
"json_data",
",",
"headers",
"=",
"headers",
")",
"return",
"response"
] |
Parse soil profile block .
|
def _parse_soil_profile(block, units, curves, **kwargs):
    """Parse soil profile block.

    Parameters
    ----------
    block : list of str
        Remaining input lines; consumed in place.
    units : str
        'english' triggers conversion of the parsed values to metric.
    curves : dict
        Output of _parse_curves(), keyed by (soil type, parameter).

    Returns
    -------
    site.Profile
        Profile with layers, updated depths, and water-table depth set.
    """
    # Header: water-table layer number, layer count, (unused), name.
    wt_layer, length, _, name = parse_fixed_width(
        3 * [(5, int)] + [(55, to_str)], block)
    layers = []
    soil_types = []
    for i in range(length):
        index, soil_idx, thickness, shear_mod, damping, unit_wt, shear_vel = \
            parse_fixed_width(
                [(5, int), (5, int), (15, to_float)] + 4 * [(10, to_float)],
                block)
        st = site.SoilType(
            soil_idx, unit_wt,
            curves[(soil_idx, 'mod_reduc')],
            curves[(soil_idx, 'damping')],
        )
        try:
            # Try to find previously added soil type
            st = soil_types[soil_types.index(st)]
        except ValueError:
            soil_types.append(st)
        layers.append(site.Layer(st, thickness, shear_vel))
    if units == 'english':
        # Convert from English to metric
        for st in soil_types:
            # presumably pcf -> metric weight units -- TODO confirm
            st.unit_wt *= 0.00015708746
        for l in layers:
            l.thickness *= 0.3048  # ft -> m
            l.shear_vel *= 0.3048  # ft/s -> m/s
    p = site.Profile(layers)
    p.update_layers()
    # Water-table depth taken from the 1-based layer number in the header.
    p.wt_depth = p[wt_layer - 1].depth
    return p
| 3,733
|
https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/tools.py#L83-L123
|
[
"def",
"deletecols",
"(",
"X",
",",
"cols",
")",
":",
"if",
"isinstance",
"(",
"cols",
",",
"str",
")",
":",
"cols",
"=",
"cols",
".",
"split",
"(",
"','",
")",
"retain",
"=",
"[",
"n",
"for",
"n",
"in",
"X",
".",
"dtype",
".",
"names",
"if",
"n",
"not",
"in",
"cols",
"]",
"if",
"len",
"(",
"retain",
")",
">",
"0",
":",
"return",
"X",
"[",
"retain",
"]",
"else",
":",
"return",
"None"
] |
Parse motion specification block .
|
def _parse_motion(block, **kwargs):
    """Parse motion specification block.

    Reads the motion header from *block*, then loads the acceleration
    time series from the referenced file (resolved relative to
    kwargs['fname']).

    Returns
    -------
    motion.TimeSeriesMotion
        The scaled time-series motion.
    """
    _, fa_length, time_step, name, fmt = parse_fixed_width(
        [(5, int), (5, int), (10, float), (30, to_str), (30, to_str)],
        block)
    scale, pga, _, header_lines, _ = parse_fixed_width(
        3 * [(10, to_float)] + 2 * [(5, int)], block)
    # Fortran-style format string, e.g. '8F9.2' -> 8 values of width 9.
    m = re.search(r'(\d+)\w(\d+)\.\d+', fmt)
    count_per_line = int(m.group(1))
    width = int(m.group(2))
    fname = os.path.join(os.path.dirname(kwargs['fname']), name)
    accels = np.genfromtxt(
        fname,
        delimiter=(count_per_line * [width]),
        skip_header=header_lines,
    )
    # Scaling priority: explicit scale factor, then target PGA, else 1.
    if np.isfinite(scale):
        pass
    elif np.isfinite(pga):
        scale = pga / np.abs(accels).max()
    else:
        scale = 1.
    accels *= scale
    m = motion.TimeSeriesMotion(fname, '', time_step, accels, fa_length)
    return m
| 3,734
|
https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/tools.py#L126-L154
|
[
"def",
"configure",
"(",
"self",
",",
"organization",
",",
"base_url",
"=",
"''",
",",
"ttl",
"=",
"''",
",",
"max_ttl",
"=",
"''",
",",
"mount_point",
"=",
"DEFAULT_MOUNT_POINT",
")",
":",
"params",
"=",
"{",
"'organization'",
":",
"organization",
",",
"'base_url'",
":",
"base_url",
",",
"'ttl'",
":",
"ttl",
",",
"'max_ttl'",
":",
"max_ttl",
",",
"}",
"api_path",
"=",
"'/v1/auth/{mount_point}/config'",
".",
"format",
"(",
"mount_point",
"=",
"mount_point",
")",
"return",
"self",
".",
"_adapter",
".",
"post",
"(",
"url",
"=",
"api_path",
",",
"json",
"=",
"params",
",",
")"
] |
Parse input location block .
|
def _parse_input_loc(block, profile, **kwargs):
    """Parse input location block.

    Reads the 1-based layer number and the wave-field code, and returns
    the corresponding location on *profile*.
    """
    layer_number, wave_field_code = parse_fixed_width(
        [(5, int), (5, int)], block)
    return profile.location(
        motion.WaveField[wave_field_code],
        index=layer_number - 1,
    )
| 3,735
|
https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/tools.py#L157-L163
|
[
"def",
"union",
"(",
"self",
",",
"*",
"dstreams",
")",
":",
"if",
"not",
"dstreams",
":",
"raise",
"ValueError",
"(",
"\"should have at least one DStream to union\"",
")",
"if",
"len",
"(",
"dstreams",
")",
"==",
"1",
":",
"return",
"dstreams",
"[",
"0",
"]",
"if",
"len",
"(",
"set",
"(",
"s",
".",
"_jrdd_deserializer",
"for",
"s",
"in",
"dstreams",
")",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"All DStreams should have same serializer\"",
")",
"if",
"len",
"(",
"set",
"(",
"s",
".",
"_slideDuration",
"for",
"s",
"in",
"dstreams",
")",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"All DStreams should have same slide duration\"",
")",
"cls",
"=",
"SparkContext",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"streaming",
".",
"api",
".",
"java",
".",
"JavaDStream",
"jdstreams",
"=",
"SparkContext",
".",
"_gateway",
".",
"new_array",
"(",
"cls",
",",
"len",
"(",
"dstreams",
")",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"dstreams",
")",
")",
":",
"jdstreams",
"[",
"i",
"]",
"=",
"dstreams",
"[",
"i",
"]",
".",
"_jdstream",
"return",
"DStream",
"(",
"self",
".",
"_jssc",
".",
"union",
"(",
"jdstreams",
")",
",",
"self",
",",
"dstreams",
"[",
"0",
"]",
".",
"_jrdd_deserializer",
")"
] |
Parse run control block .
|
def _parse_run_control(block):
    """Parse run control block into an equivalent-linear calculator."""
    fields = parse_fixed_width(
        [(5, int), (5, int), (10, float), (5, int), (5, int)], block)
    max_iterations = fields[1]
    strain_ratio = fields[2]
    # Remaining fields are unused by the calculation object.
    return propagation.EquivalentLinearCalculation(
        strain_ratio, max_iterations, tolerance=10.)
| 3,736
|
https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/tools.py#L166-L172
|
[
"def",
"merge_entities",
"(",
"self",
",",
"from_entity_ids",
",",
"to_entity_id",
",",
"force",
"=",
"False",
",",
"mount_point",
"=",
"DEFAULT_MOUNT_POINT",
")",
":",
"params",
"=",
"{",
"'from_entity_ids'",
":",
"from_entity_ids",
",",
"'to_entity_id'",
":",
"to_entity_id",
",",
"'force'",
":",
"force",
",",
"}",
"api_path",
"=",
"'/v1/{mount_point}/entity/merge'",
".",
"format",
"(",
"mount_point",
"=",
"mount_point",
")",
"return",
"self",
".",
"_adapter",
".",
"post",
"(",
"url",
"=",
"api_path",
",",
"json",
"=",
"params",
",",
")"
] |
Compute 1d block intervals to be used by combine .
|
def blockgen1d(block, size):
    """Compute 1d block intervals to be used by combine.

    Recursively halves the interval [0, size) until every piece is at
    most *block* long, and returns the pieces as slices.

    Parameters
    ----------
    block : int
        Maximum length of each interval.
    size : int
        Dimension of the array.

    Returns
    -------
    list of slice
        Slices covering [0, size) in order.
    """
    def numblock(blk, interval):
        """Compute recursively the numeric intervals."""
        a, b = interval
        if b - a <= blk:
            return [interval]
        # Split at the midpoint and recurse on both halves.
        # (The original routed this through the Python-2-only `imap`;
        # direct calls are equivalent and keep the code Python-3 clean.)
        mid = a + (b - a) // 2
        return numblock(blk, (a, mid)) + numblock(blk, (mid, b))

    return [slice(*limits) for limits in numblock(block, (0, size))]
| 3,737
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/blocks.py#L23-L53
|
[
"def",
"calc_temp",
"(",
"Data_ref",
",",
"Data",
")",
":",
"T",
"=",
"300",
"*",
"(",
"(",
"Data",
".",
"A",
"*",
"Data_ref",
".",
"Gamma",
")",
"/",
"(",
"Data_ref",
".",
"A",
"*",
"Data",
".",
"Gamma",
")",
")",
"Data",
".",
"T",
"=",
"T",
"return",
"T"
] |
Generate a list of slice tuples to be used by combine .
|
def blockgen(blocks, shape):
    """Generate slice tuples covering an array of *shape* in blocks.

    The Cartesian product of the per-axis 1d intervals yields one
    slice tuple per block.
    """
    per_axis = [blockgen1d(block_len, axis_len)
                for block_len, axis_len in zip(blocks, shape)]
    return product(*per_axis)
| 3,738
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/blocks.py#L56-L82
|
[
"def",
"estimate_conditional_information",
"(",
"x",
",",
"y",
",",
"z",
")",
":",
"xz",
"=",
"np",
".",
"concatenate",
"(",
"(",
"x",
",",
"z",
")",
",",
"axis",
"=",
"1",
")",
"yz",
"=",
"np",
".",
"concatenate",
"(",
"(",
"y",
",",
"z",
")",
",",
"axis",
"=",
"1",
")",
"xyz",
"=",
"np",
".",
"concatenate",
"(",
"(",
"xz",
",",
"y",
")",
",",
"axis",
"=",
"1",
")",
"epsilon",
"=",
"_calculate_epsilon",
"(",
"xyz",
")",
"h_xz",
"=",
"estimate_entropy",
"(",
"xz",
",",
"epsilon",
")",
"h_yz",
"=",
"estimate_entropy",
"(",
"yz",
",",
"epsilon",
")",
"h_xyz",
"=",
"estimate_entropy",
"(",
"xyz",
",",
"epsilon",
")",
"h_z",
"=",
"estimate_entropy",
"(",
"z",
",",
"epsilon",
")",
"return",
"max",
"(",
"0",
",",
"h_xz",
"+",
"h_yz",
"-",
"h_xyz",
"-",
"h_z",
")"
] |
Return the part of a 1d array covered by a block .
|
def blk_coverage_1d(blk, size):
    """Return the part of a 1d array covered by a block.

    Returns
    -------
    tuple of int
        (covered, remainder): the largest multiple of *blk* that fits
        in *size*, and what is left over.
    """
    covered = (size // blk) * blk
    return covered, size - covered
| 3,739
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/blocks.py#L85-L100
|
[
"def",
"delete_entity",
"(",
"self",
",",
"entity_id",
",",
"mount_point",
"=",
"DEFAULT_MOUNT_POINT",
")",
":",
"api_path",
"=",
"'/v1/{mount_point}/entity/id/{id}'",
".",
"format",
"(",
"mount_point",
"=",
"mount_point",
",",
"id",
"=",
"entity_id",
",",
")",
"return",
"self",
".",
"_adapter",
".",
"delete",
"(",
"url",
"=",
"api_path",
",",
")"
] |
Return the maximum shape of an array covered by a block .
|
def max_blk_coverage(blk, shape):
    """Return the maximum shape of an array fully covered by *blk* blocks."""
    covered = []
    for block_len, axis_len in zip(blk, shape):
        maxpix, _ = blk_coverage_1d(block_len, axis_len)
        covered.append(maxpix)
    return tuple(covered)
| 3,740
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/blocks.py#L103-L117
|
[
"def",
"get_market_gainers",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"warnings",
"warnings",
".",
"warn",
"(",
"WNG_MSG",
",",
"(",
"\"get_market_gainers\"",
",",
"\"stocks.get_market_gainers\"",
")",
")",
"return",
"stocks",
".",
"get_market_gainers",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Iterate through the blocks that strictly cover an array .
|
def blk_nd_short ( blk , shape ) : internals = ( blk_1d_short ( b , s ) for b , s in zip ( blk , shape ) ) return product ( * internals )
| 3,741
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/blocks.py#L137-L169
|
[
"def",
"post_chat_message",
"(",
"self",
",",
"merchant_id",
",",
"channel_id",
",",
"message",
")",
":",
"return",
"self",
".",
"do_req",
"(",
"'POST'",
",",
"self",
".",
"base_url",
"+",
"'/chat/v1/merchant/%s/channel/%s/message/'",
"%",
"(",
"merchant_id",
",",
"channel_id",
")",
",",
"message",
")"
] |
Iterate through the blocks that cover an array .
|
def blk_nd ( blk , shape ) : internals = ( blk_1d ( b , s ) for b , s in zip ( blk , shape ) ) return product ( * internals )
| 3,742
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/blocks.py#L192-L226
|
[
"def",
"update_version_descriptor",
"(",
"self",
",",
"task",
",",
"releasetype",
",",
"descriptor",
",",
"verbrowser",
",",
"commentbrowser",
")",
":",
"if",
"task",
"is",
"None",
":",
"null",
"=",
"treemodel",
".",
"TreeItem",
"(",
"None",
")",
"verbrowser",
".",
"set_model",
"(",
"treemodel",
".",
"TreeModel",
"(",
"null",
")",
")",
"return",
"m",
"=",
"self",
".",
"create_version_model",
"(",
"task",
",",
"releasetype",
",",
"descriptor",
")",
"verbrowser",
".",
"set_model",
"(",
"m",
")",
"commentbrowser",
".",
"set_model",
"(",
"m",
")"
] |
Provide a 2D block view to 2D array .
|
def block_view ( arr , block = ( 3 , 3 ) ) : # simple shape and strides computations may seem at first strange # unless one is able to recognize the 'tuple additions' involved ;-) shape = ( arr . shape [ 0 ] // block [ 0 ] , arr . shape [ 1 ] // block [ 1 ] ) + block strides = ( block [ 0 ] * arr . strides [ 0 ] , block [ 1 ] * arr . strides [ 1 ] ) + arr . strides return ast ( arr , shape = shape , strides = strides )
| 3,743
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/blocks.py#L229-L241
|
[
"def",
"authors_titles_validator",
"(",
"record",
",",
"result",
")",
":",
"record_authors",
"=",
"get_value",
"(",
"record",
",",
"'authors'",
",",
"[",
"]",
")",
"result_authors",
"=",
"get_value",
"(",
"result",
",",
"'_source.authors'",
",",
"[",
"]",
")",
"author_score",
"=",
"compute_author_match_score",
"(",
"record_authors",
",",
"result_authors",
")",
"title_max_score",
"=",
"0.0",
"record_titles",
"=",
"get_value",
"(",
"record",
",",
"'titles.title'",
",",
"[",
"]",
")",
"result_titles",
"=",
"get_value",
"(",
"result",
",",
"'_source.titles.title'",
",",
"[",
"]",
")",
"for",
"cartesian_pair",
"in",
"product",
"(",
"record_titles",
",",
"result_titles",
")",
":",
"record_title_tokens",
"=",
"get_tokenized_title",
"(",
"cartesian_pair",
"[",
"0",
"]",
")",
"result_title_tokens",
"=",
"get_tokenized_title",
"(",
"cartesian_pair",
"[",
"1",
"]",
")",
"current_title_jaccard",
"=",
"compute_jaccard_index",
"(",
"record_title_tokens",
",",
"result_title_tokens",
")",
"if",
"current_title_jaccard",
">",
"title_max_score",
"and",
"current_title_jaccard",
">=",
"0.5",
":",
"title_max_score",
"=",
"current_title_jaccard",
"return",
"(",
"author_score",
"+",
"title_max_score",
")",
"/",
"2",
">",
"0.5"
] |
Check some fields in order to define if the article is citeable .
|
def is_citeable ( publication_info ) : def _item_has_pub_info ( item ) : return all ( key in item for key in ( 'journal_title' , 'journal_volume' ) ) def _item_has_page_or_artid ( item ) : return any ( key in item for key in ( 'page_start' , 'artid' ) ) has_pub_info = any ( _item_has_pub_info ( item ) for item in publication_info ) has_page_or_artid = any ( _item_has_page_or_artid ( item ) for item in publication_info ) return has_pub_info and has_page_or_artid
| 3,744
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L46-L75
|
[
"def",
"Run",
"(",
"self",
")",
":",
"global",
"DB",
"# pylint: disable=global-statement",
"global",
"REL_DB",
"# pylint: disable=global-statement",
"global",
"BLOBS",
"# pylint: disable=global-statement",
"if",
"flags",
".",
"FLAGS",
".",
"list_storage",
":",
"self",
".",
"_ListStorageOptions",
"(",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"try",
":",
"cls",
"=",
"DataStore",
".",
"GetPlugin",
"(",
"config",
".",
"CONFIG",
"[",
"\"Datastore.implementation\"",
"]",
")",
"except",
"KeyError",
":",
"msg",
"=",
"(",
"\"No Storage System %s found.\"",
"%",
"config",
".",
"CONFIG",
"[",
"\"Datastore.implementation\"",
"]",
")",
"if",
"config",
".",
"CONFIG",
"[",
"\"Datastore.implementation\"",
"]",
"==",
"\"SqliteDataStore\"",
":",
"msg",
"=",
"\"The SQLite datastore is no longer supported.\"",
"print",
"(",
"msg",
")",
"print",
"(",
"\"Available options:\"",
")",
"self",
".",
"_ListStorageOptions",
"(",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"DB",
"=",
"cls",
"(",
")",
"# pylint: disable=g-bad-name",
"DB",
".",
"Initialize",
"(",
")",
"atexit",
".",
"register",
"(",
"DB",
".",
"Flush",
")",
"monitor_port",
"=",
"config",
".",
"CONFIG",
"[",
"\"Monitoring.http_port\"",
"]",
"if",
"monitor_port",
"!=",
"0",
":",
"DB",
".",
"InitializeMonitorThread",
"(",
")",
"# Initialize the blobstore.",
"blobstore_name",
"=",
"config",
".",
"CONFIG",
".",
"Get",
"(",
"\"Blobstore.implementation\"",
")",
"try",
":",
"cls",
"=",
"blob_store",
".",
"REGISTRY",
"[",
"blobstore_name",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"No blob store %s found.\"",
"%",
"blobstore_name",
")",
"BLOBS",
"=",
"blob_store",
".",
"BlobStoreValidationWrapper",
"(",
"cls",
"(",
")",
")",
"# Initialize a relational DB if configured.",
"rel_db_name",
"=",
"config",
".",
"CONFIG",
"[",
"\"Database.implementation\"",
"]",
"if",
"not",
"rel_db_name",
":",
"return",
"try",
":",
"cls",
"=",
"registry_init",
".",
"REGISTRY",
"[",
"rel_db_name",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"Database %s not found.\"",
"%",
"rel_db_name",
")",
"logging",
".",
"info",
"(",
"\"Using database implementation %s\"",
",",
"rel_db_name",
")",
"REL_DB",
"=",
"db",
".",
"DatabaseValidationWrapper",
"(",
"cls",
"(",
")",
")"
] |
Add abstract .
|
def add_abstract ( self , abstract , source = None ) : self . _append_to ( 'abstracts' , self . _sourced_dict ( source , value = abstract . strip ( ) , ) )
| 3,745
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L150-L162
|
[
"def",
"_add_dependency",
"(",
"self",
",",
"dependency",
",",
"var_name",
"=",
"None",
")",
":",
"if",
"var_name",
"is",
"None",
":",
"var_name",
"=",
"next",
"(",
"self",
".",
"temp_var_names",
")",
"# Don't add duplicate dependencies",
"if",
"(",
"dependency",
",",
"var_name",
")",
"not",
"in",
"self",
".",
"dependencies",
":",
"self",
".",
"dependencies",
".",
"append",
"(",
"(",
"dependency",
",",
"var_name",
")",
")",
"return",
"var_name"
] |
Add arxiv eprint .
|
def add_arxiv_eprint ( self , arxiv_id , arxiv_categories ) : self . _append_to ( 'arxiv_eprints' , { 'value' : arxiv_id , 'categories' : arxiv_categories , } ) self . set_citeable ( True )
| 3,746
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L165-L178
|
[
"def",
"set",
"(",
"self",
",",
"folder",
":",
"str",
",",
"subscribed",
":",
"bool",
")",
"->",
"None",
":",
"if",
"subscribed",
":",
"self",
".",
"add",
"(",
"folder",
")",
"else",
":",
"self",
".",
"remove",
"(",
"folder",
")"
] |
Add doi .
|
def add_doi ( self , doi , source = None , material = None ) : if doi is None : return try : doi = idutils . normalize_doi ( doi ) except AttributeError : return if not doi : return dois = self . _sourced_dict ( source , value = doi ) if material is not None : dois [ 'material' ] = material self . _append_to ( 'dois' , dois )
| 3,747
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L181-L211
|
[
"def",
"detach_storage",
"(",
"self",
",",
"server",
",",
"address",
")",
":",
"body",
"=",
"{",
"'storage_device'",
":",
"{",
"'address'",
":",
"address",
"}",
"}",
"url",
"=",
"'/server/{0}/storage/detach'",
".",
"format",
"(",
"server",
")",
"res",
"=",
"self",
".",
"post_request",
"(",
"url",
",",
"body",
")",
"return",
"Storage",
".",
"_create_storage_objs",
"(",
"res",
"[",
"'server'",
"]",
"[",
"'storage_devices'",
"]",
",",
"cloud_manager",
"=",
"self",
")"
] |
Make a subrecord representing an author .
|
def make_author ( self , full_name , affiliations = ( ) , roles = ( ) , raw_affiliations = ( ) , source = None , ids = ( ) , emails = ( ) , alternative_names = ( ) ) : builder = SignatureBuilder ( ) builder . set_full_name ( full_name ) for affiliation in affiliations : builder . add_affiliation ( affiliation ) for role in roles : builder . add_inspire_role ( role ) for raw_affiliation in raw_affiliations : builder . add_raw_affiliation ( raw_affiliation , source or self . source ) for id_schema , id_value in ids : if id_schema and id_value : builder . set_uid ( id_value , schema = id_schema ) for email in emails : builder . add_email ( email ) for alternative_name in alternative_names : builder . add_alternative_name ( alternative_name ) return builder . obj
| 3,748
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L224-L273
|
[
"def",
"_send_and_wait",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"frame_id",
"=",
"self",
".",
"next_frame_id",
"kwargs",
".",
"update",
"(",
"dict",
"(",
"frame_id",
"=",
"frame_id",
")",
")",
"self",
".",
"_send",
"(",
"*",
"*",
"kwargs",
")",
"timeout",
"=",
"datetime",
".",
"now",
"(",
")",
"+",
"const",
".",
"RX_TIMEOUT",
"while",
"datetime",
".",
"now",
"(",
")",
"<",
"timeout",
":",
"try",
":",
"frame",
"=",
"self",
".",
"_rx_frames",
".",
"pop",
"(",
"frame_id",
")",
"raise_if_error",
"(",
"frame",
")",
"return",
"frame",
"except",
"KeyError",
":",
"sleep",
"(",
"0.1",
")",
"continue",
"_LOGGER",
".",
"exception",
"(",
"\"Did not receive response within configured timeout period.\"",
")",
"raise",
"exceptions",
".",
"ZigBeeResponseTimeout",
"(",
")"
] |
Make a dictionary that is representing a book .
|
def add_book ( self , publisher = None , place = None , date = None ) : imprint = { } if date is not None : imprint [ 'date' ] = normalize_date ( date ) if place is not None : imprint [ 'place' ] = place if publisher is not None : imprint [ 'publisher' ] = publisher self . _append_to ( 'imprints' , imprint )
| 3,749
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L276-L302
|
[
"def",
"resample",
"(",
"grid",
",",
"wl",
",",
"flux",
")",
":",
"flux_rs",
"=",
"(",
"interpolate",
".",
"interp1d",
"(",
"wl",
",",
"flux",
")",
")",
"(",
"grid",
")",
"return",
"flux_rs"
] |
Add inspire categories .
|
def add_inspire_categories ( self , subject_terms , source = None ) : for category in subject_terms : category_dict = self . _sourced_dict ( source , term = category , ) self . _append_to ( 'inspire_categories' , category_dict )
| 3,750
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L344-L358
|
[
"def",
"load_toml_rest_api_config",
"(",
"filename",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"LOGGER",
".",
"info",
"(",
"\"Skipping rest api loading from non-existent config file: %s\"",
",",
"filename",
")",
"return",
"RestApiConfig",
"(",
")",
"LOGGER",
".",
"info",
"(",
"\"Loading rest api information from config: %s\"",
",",
"filename",
")",
"try",
":",
"with",
"open",
"(",
"filename",
")",
"as",
"fd",
":",
"raw_config",
"=",
"fd",
".",
"read",
"(",
")",
"except",
"IOError",
"as",
"e",
":",
"raise",
"RestApiConfigurationError",
"(",
"\"Unable to load rest api configuration file: {}\"",
".",
"format",
"(",
"str",
"(",
"e",
")",
")",
")",
"toml_config",
"=",
"toml",
".",
"loads",
"(",
"raw_config",
")",
"invalid_keys",
"=",
"set",
"(",
"toml_config",
".",
"keys",
"(",
")",
")",
".",
"difference",
"(",
"[",
"'bind'",
",",
"'connect'",
",",
"'timeout'",
",",
"'opentsdb_db'",
",",
"'opentsdb_url'",
",",
"'opentsdb_username'",
",",
"'opentsdb_password'",
",",
"'client_max_size'",
"]",
")",
"if",
"invalid_keys",
":",
"raise",
"RestApiConfigurationError",
"(",
"\"Invalid keys in rest api config: {}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"sorted",
"(",
"list",
"(",
"invalid_keys",
")",
")",
")",
")",
")",
"config",
"=",
"RestApiConfig",
"(",
"bind",
"=",
"toml_config",
".",
"get",
"(",
"\"bind\"",
",",
"None",
")",
",",
"connect",
"=",
"toml_config",
".",
"get",
"(",
"'connect'",
",",
"None",
")",
",",
"timeout",
"=",
"toml_config",
".",
"get",
"(",
"'timeout'",
",",
"None",
")",
",",
"opentsdb_url",
"=",
"toml_config",
".",
"get",
"(",
"'opentsdb_url'",
",",
"None",
")",
",",
"opentsdb_db",
"=",
"toml_config",
".",
"get",
"(",
"'opentsdb_db'",
",",
"None",
")",
",",
"opentsdb_username",
"=",
"toml_config",
".",
"get",
"(",
"'opentsdb_username'",
",",
"None",
")",
",",
"opentsdb_password",
"=",
"toml_config",
".",
"get",
"(",
"'opentsdb_password'",
",",
"None",
")",
",",
"client_max_size",
"=",
"toml_config",
".",
"get",
"(",
"'client_max_size'",
",",
"None",
")",
")",
"return",
"config"
] |
Add a keyword .
|
def add_keyword ( self , keyword , schema = None , source = None ) : keyword_dict = self . _sourced_dict ( source , value = keyword ) if schema is not None : keyword_dict [ 'schema' ] = schema self . _append_to ( 'keywords' , keyword_dict )
| 3,751
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L361-L374
|
[
"def",
"setOverlayTransformOverlayRelative",
"(",
"self",
",",
"ulOverlayHandle",
",",
"ulOverlayHandleParent",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"setOverlayTransformOverlayRelative",
"pmatParentOverlayToOverlayTransform",
"=",
"HmdMatrix34_t",
"(",
")",
"result",
"=",
"fn",
"(",
"ulOverlayHandle",
",",
"ulOverlayHandleParent",
",",
"byref",
"(",
"pmatParentOverlayToOverlayTransform",
")",
")",
"return",
"result",
",",
"pmatParentOverlayToOverlayTransform"
] |
Add private notes .
|
def add_private_note ( self , private_notes , source = None ) : self . _append_to ( '_private_notes' , self . _sourced_dict ( source , value = private_notes , ) )
| 3,752
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L377-L389
|
[
"def",
"_wrap_client_error",
"(",
"e",
")",
":",
"error_code",
"=",
"e",
".",
"response",
"[",
"'Error'",
"]",
"[",
"'Code'",
"]",
"message",
"=",
"e",
".",
"response",
"[",
"'Error'",
"]",
"[",
"'Message'",
"]",
"if",
"error_code",
"==",
"'BadRequestException'",
":",
"if",
"\"Failed to copy S3 object. Access denied:\"",
"in",
"message",
":",
"match",
"=",
"re",
".",
"search",
"(",
"'bucket=(.+?), key=(.+?)$'",
",",
"message",
")",
"if",
"match",
":",
"return",
"S3PermissionsRequired",
"(",
"bucket",
"=",
"match",
".",
"group",
"(",
"1",
")",
",",
"key",
"=",
"match",
".",
"group",
"(",
"2",
")",
")",
"if",
"\"Invalid S3 URI\"",
"in",
"message",
":",
"return",
"InvalidS3UriError",
"(",
"message",
"=",
"message",
")",
"return",
"ServerlessRepoClientError",
"(",
"message",
"=",
"message",
")"
] |
Add publication info .
|
def add_publication_info ( self , year = None , cnum = None , artid = None , page_end = None , page_start = None , journal_issue = None , journal_title = None , journal_volume = None , pubinfo_freetext = None , material = None , parent_record = None , parent_isbn = None , ) : # If only journal title is present, and no other fields, assume the # paper was submitted, but not yet published if journal_title and all ( not field for field in ( cnum , artid , journal_issue , journal_volume , page_start , page_end ) ) : self . add_public_note ( 'Submitted to {}' . format ( journal_title ) ) return publication_item = { } for key in ( 'cnum' , 'artid' , 'page_end' , 'page_start' , 'journal_issue' , 'journal_title' , 'journal_volume' , 'year' , 'pubinfo_freetext' , 'material' ) : if locals ( ) [ key ] is not None : publication_item [ key ] = locals ( ) [ key ] if parent_record is not None : parent_item = { '$ref' : parent_record } publication_item [ 'parent_record' ] = parent_item if parent_isbn is not None : publication_item [ 'parent_isbn' ] = normalize_isbn ( parent_isbn ) if page_start and page_end : try : self . add_number_of_pages ( int ( page_end ) - int ( page_start ) + 1 ) except ( TypeError , ValueError ) : pass self . _append_to ( 'publication_info' , publication_item ) if is_citeable ( self . record [ 'publication_info' ] ) : self . set_citeable ( True )
| 3,753
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L392-L480
|
[
"def",
"create_pgroup_snapshot",
"(",
"self",
",",
"source",
",",
"*",
"*",
"kwargs",
")",
":",
"# In REST 1.4, support was added for snapshotting multiple pgroups. As a",
"# result, the endpoint response changed from an object to an array of",
"# objects. To keep the response type consistent between REST versions,",
"# we unbox the response when creating a single snapshot.",
"result",
"=",
"self",
".",
"create_pgroup_snapshots",
"(",
"[",
"source",
"]",
",",
"*",
"*",
"kwargs",
")",
"if",
"self",
".",
"_rest_version",
">=",
"LooseVersion",
"(",
"\"1.4\"",
")",
":",
"headers",
"=",
"result",
".",
"headers",
"result",
"=",
"ResponseDict",
"(",
"result",
"[",
"0",
"]",
")",
"result",
".",
"headers",
"=",
"headers",
"return",
"result"
] |
Add thesis info .
|
def add_thesis ( self , defense_date = None , degree_type = None , institution = None , date = None ) : self . record . setdefault ( 'thesis_info' , { } ) thesis_item = { } for key in ( 'defense_date' , 'date' ) : if locals ( ) [ key ] is not None : thesis_item [ key ] = locals ( ) [ key ] if degree_type is not None : thesis_item [ 'degree_type' ] = degree_type . lower ( ) if institution is not None : thesis_item [ 'institutions' ] = [ { 'name' : institution } ] self . record [ 'thesis_info' ] = thesis_item
| 3,754
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L503-L537
|
[
"def",
"reset_namespace",
"(",
"self",
",",
"namespace",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"\"%s.%s\"",
"%",
"(",
"self",
".",
"log_name",
",",
"inspect",
".",
"stack",
"(",
")",
"[",
"0",
"]",
"[",
"3",
"]",
")",
")",
"log",
".",
"setLevel",
"(",
"self",
".",
"log_level",
")",
"namespace",
"=",
"pick",
"(",
"namespace",
",",
"self",
".",
"namespace",
")",
"params",
"=",
"pick",
"(",
"params",
",",
"self",
".",
"namespace_params",
")",
"log",
".",
"warning",
"(",
"\" Reseting namespace '%s' at host: %s\"",
",",
"namespace",
",",
"self",
".",
"url",
")",
"try",
":",
"self",
".",
"delete_namespace",
"(",
"namespace",
")",
"except",
"RuntimeError",
":",
"pass",
"self",
".",
"create_namespace",
"(",
"namespace",
",",
"params",
")"
] |
Add license .
|
def add_license ( self , url = None , license = None , material = None , imposing = None ) : hep_license = { } try : license_from_url = get_license_from_url ( url ) if license_from_url is not None : license = license_from_url except ValueError : pass for key in ( 'url' , 'license' , 'material' , 'imposing' ) : if locals ( ) [ key ] is not None : hep_license [ key ] = locals ( ) [ key ] self . _append_to ( 'license' , hep_license )
| 3,755
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L559-L593
|
[
"def",
"_add_dependency",
"(",
"self",
",",
"dependency",
",",
"var_name",
"=",
"None",
")",
":",
"if",
"var_name",
"is",
"None",
":",
"var_name",
"=",
"next",
"(",
"self",
".",
"temp_var_names",
")",
"# Don't add duplicate dependencies",
"if",
"(",
"dependency",
",",
"var_name",
")",
"not",
"in",
"self",
".",
"dependencies",
":",
"self",
".",
"dependencies",
".",
"append",
"(",
"(",
"dependency",
",",
"var_name",
")",
")",
"return",
"var_name"
] |
Add public note .
|
def add_public_note ( self , public_note , source = None ) : self . _append_to ( 'public_notes' , self . _sourced_dict ( source , value = public_note , ) )
| 3,756
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L596-L608
|
[
"def",
"compose",
"(",
"list_of_files",
",",
"destination_file",
",",
"files_metadata",
"=",
"None",
",",
"content_type",
"=",
"None",
",",
"retry_params",
"=",
"None",
",",
"_account_id",
"=",
"None",
")",
":",
"api",
"=",
"storage_api",
".",
"_get_storage_api",
"(",
"retry_params",
"=",
"retry_params",
",",
"account_id",
"=",
"_account_id",
")",
"if",
"os",
".",
"getenv",
"(",
"'SERVER_SOFTWARE'",
")",
".",
"startswith",
"(",
"'Dev'",
")",
":",
"def",
"_temp_func",
"(",
"file_list",
",",
"destination_file",
",",
"content_type",
")",
":",
"bucket",
"=",
"'/'",
"+",
"destination_file",
".",
"split",
"(",
"'/'",
")",
"[",
"1",
"]",
"+",
"'/'",
"with",
"open",
"(",
"destination_file",
",",
"'w'",
",",
"content_type",
"=",
"content_type",
")",
"as",
"gcs_merge",
":",
"for",
"source_file",
"in",
"file_list",
":",
"with",
"open",
"(",
"bucket",
"+",
"source_file",
"[",
"'Name'",
"]",
",",
"'r'",
")",
"as",
"gcs_source",
":",
"gcs_merge",
".",
"write",
"(",
"gcs_source",
".",
"read",
"(",
")",
")",
"compose_object",
"=",
"_temp_func",
"else",
":",
"compose_object",
"=",
"api",
".",
"compose_object",
"file_list",
",",
"_",
"=",
"_validate_compose_list",
"(",
"destination_file",
",",
"list_of_files",
",",
"files_metadata",
",",
"32",
")",
"compose_object",
"(",
"file_list",
",",
"destination_file",
",",
"content_type",
")"
] |
Add title .
|
def add_title ( self , title , subtitle = None , source = None ) : title_entry = self . _sourced_dict ( source , title = title , ) if subtitle is not None : title_entry [ 'subtitle' ] = subtitle self . _append_to ( 'titles' , title_entry )
| 3,757
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L611-L630
|
[
"def",
"_add_dependency",
"(",
"self",
",",
"dependency",
",",
"var_name",
"=",
"None",
")",
":",
"if",
"var_name",
"is",
"None",
":",
"var_name",
"=",
"next",
"(",
"self",
".",
"temp_var_names",
")",
"# Don't add duplicate dependencies",
"if",
"(",
"dependency",
",",
"var_name",
")",
"not",
"in",
"self",
".",
"dependencies",
":",
"self",
".",
"dependencies",
".",
"append",
"(",
"(",
"dependency",
",",
"var_name",
")",
")",
"return",
"var_name"
] |
Add title translation .
|
def add_title_translation ( self , title , language , source = None ) : title_translation = self . _sourced_dict ( source , title = title , language = language , ) self . _append_to ( 'title_translations' , title_translation )
| 3,758
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L633-L651
|
[
"def",
"remove_server",
"(",
"self",
",",
"server_id",
")",
":",
"# Validate server_id",
"server",
"=",
"self",
".",
"_get_server",
"(",
"server_id",
")",
"# Delete any instances we recorded to be cleaned up",
"if",
"server_id",
"in",
"self",
".",
"_owned_subscriptions",
":",
"inst_list",
"=",
"self",
".",
"_owned_subscriptions",
"[",
"server_id",
"]",
"# We iterate backwards because we change the list",
"for",
"i",
"in",
"six",
".",
"moves",
".",
"range",
"(",
"len",
"(",
"inst_list",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"inst",
"=",
"inst_list",
"[",
"i",
"]",
"server",
".",
"conn",
".",
"DeleteInstance",
"(",
"inst",
".",
"path",
")",
"del",
"inst_list",
"[",
"i",
"]",
"del",
"self",
".",
"_owned_subscriptions",
"[",
"server_id",
"]",
"if",
"server_id",
"in",
"self",
".",
"_owned_filters",
":",
"inst_list",
"=",
"self",
".",
"_owned_filters",
"[",
"server_id",
"]",
"# We iterate backwards because we change the list",
"for",
"i",
"in",
"six",
".",
"moves",
".",
"range",
"(",
"len",
"(",
"inst_list",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"inst",
"=",
"inst_list",
"[",
"i",
"]",
"server",
".",
"conn",
".",
"DeleteInstance",
"(",
"inst",
".",
"path",
")",
"del",
"inst_list",
"[",
"i",
"]",
"del",
"self",
".",
"_owned_filters",
"[",
"server_id",
"]",
"if",
"server_id",
"in",
"self",
".",
"_owned_destinations",
":",
"inst_list",
"=",
"self",
".",
"_owned_destinations",
"[",
"server_id",
"]",
"# We iterate backwards because we change the list",
"for",
"i",
"in",
"six",
".",
"moves",
".",
"range",
"(",
"len",
"(",
"inst_list",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"inst",
"=",
"inst_list",
"[",
"i",
"]",
"server",
".",
"conn",
".",
"DeleteInstance",
"(",
"inst",
".",
"path",
")",
"del",
"inst_list",
"[",
"i",
"]",
"del",
"self",
".",
"_owned_destinations",
"[",
"server_id",
"]",
"# Remove server from this listener",
"del",
"self",
".",
"_servers",
"[",
"server_id",
"]"
] |
Add report numbers .
|
def add_report_number ( self , report_number , source = None ) : self . _append_to ( 'report_numbers' , self . _sourced_dict ( source , value = report_number , ) )
| 3,759
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L665-L677
|
[
"def",
"_accountForNlinkEquals2",
"(",
"self",
",",
"localFilePath",
")",
":",
"fileStats",
"=",
"os",
".",
"stat",
"(",
"localFilePath",
")",
"assert",
"fileStats",
".",
"st_nlink",
">=",
"self",
".",
"nlinkThreshold",
"with",
"self",
".",
"_CacheState",
".",
"open",
"(",
"self",
")",
"as",
"cacheInfo",
":",
"cacheInfo",
".",
"sigmaJob",
"-=",
"fileStats",
".",
"st_size",
"jobState",
"=",
"self",
".",
"_JobState",
"(",
"cacheInfo",
".",
"jobState",
"[",
"self",
".",
"jobID",
"]",
")",
"jobState",
".",
"updateJobReqs",
"(",
"fileStats",
".",
"st_size",
",",
"'remove'",
")"
] |
Add collaboration .
|
def add_collaboration ( self , collaboration ) : collaborations = normalize_collaboration ( collaboration ) for collaboration in collaborations : self . _append_to ( 'collaborations' , { 'value' : collaboration } )
| 3,760
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L680-L690
|
[
"def",
"delete_advanced_configs",
"(",
"vm_name",
",",
"datacenter",
",",
"advanced_configs",
",",
"service_instance",
"=",
"None",
")",
":",
"datacenter_ref",
"=",
"salt",
".",
"utils",
".",
"vmware",
".",
"get_datacenter",
"(",
"service_instance",
",",
"datacenter",
")",
"vm_ref",
"=",
"salt",
".",
"utils",
".",
"vmware",
".",
"get_mor_by_property",
"(",
"service_instance",
",",
"vim",
".",
"VirtualMachine",
",",
"vm_name",
",",
"property_name",
"=",
"'name'",
",",
"container_ref",
"=",
"datacenter_ref",
")",
"config_spec",
"=",
"vim",
".",
"vm",
".",
"ConfigSpec",
"(",
")",
"removed_configs",
"=",
"_delete_advanced_config",
"(",
"config_spec",
",",
"advanced_configs",
",",
"vm_ref",
".",
"config",
".",
"extraConfig",
")",
"if",
"removed_configs",
":",
"salt",
".",
"utils",
".",
"vmware",
".",
"update_vm",
"(",
"vm_ref",
",",
"config_spec",
")",
"return",
"{",
"'removed_configs'",
":",
"removed_configs",
"}"
] |
Add Copyright .
|
def add_copyright ( self , material = None , holder = None , statement = None , url = None , year = None ) : copyright = { } for key in ( 'holder' , 'statement' , 'url' ) : if locals ( ) [ key ] is not None : copyright [ key ] = locals ( ) [ key ] if material is not None : copyright [ 'material' ] = material . lower ( ) if year is not None : copyright [ 'year' ] = int ( year ) self . _append_to ( 'copyright' , copyright )
| 3,761
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L754-L785
|
[
"def",
"on_response",
"(",
"self",
",",
"ch",
",",
"method_frame",
",",
"props",
",",
"body",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"rabbitmq.Requester.on_response\"",
")",
"if",
"self",
".",
"corr_id",
"==",
"props",
".",
"correlation_id",
":",
"self",
".",
"response",
"=",
"{",
"'props'",
":",
"props",
",",
"'body'",
":",
"body",
"}",
"else",
":",
"LOGGER",
".",
"warn",
"(",
"\"rabbitmq.Requester.on_response - discarded response : \"",
"+",
"str",
"(",
"props",
".",
"correlation_id",
")",
")",
"LOGGER",
".",
"debug",
"(",
"\"natsd.Requester.on_response - discarded response : \"",
"+",
"str",
"(",
"{",
"'properties'",
":",
"props",
",",
"'body'",
":",
"body",
"}",
")",
")"
] |
Add a figure .
|
def add_figure ( self , key , url , * * kwargs ) : figure = self . _check_metadata_for_file ( key = key , url = url , * * kwargs ) for dict_key in ( 'caption' , 'label' , 'material' , 'filename' , 'url' , 'original_url' , ) : if kwargs . get ( dict_key ) is not None : figure [ dict_key ] = kwargs [ dict_key ] if key_already_there ( figure , self . record . get ( 'figures' , ( ) ) ) : raise ValueError ( 'There\'s already a figure with the key %s.' % figure [ 'key' ] ) self . _append_to ( 'figures' , figure ) self . add_document
| 3,762
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L863-L899
|
[
"def",
"revoke_session",
"(",
"self",
",",
"sid",
"=",
"''",
",",
"token",
"=",
"''",
")",
":",
"if",
"not",
"sid",
":",
"if",
"token",
":",
"sid",
"=",
"self",
".",
"handler",
".",
"sid",
"(",
"token",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Need one of \"sid\" or \"token\"'",
")",
"for",
"typ",
"in",
"[",
"'access_token'",
",",
"'refresh_token'",
",",
"'code'",
"]",
":",
"try",
":",
"self",
".",
"revoke_token",
"(",
"self",
"[",
"sid",
"]",
"[",
"typ",
"]",
",",
"typ",
")",
"except",
"KeyError",
":",
"# If no such token has been issued",
"pass",
"self",
".",
"update",
"(",
"sid",
",",
"revoked",
"=",
"True",
")"
] |
Fit a rotation and a traslation between two sets points .
|
def fit_offset_and_rotation ( coords0 , coords1 ) : coords0 = numpy . asarray ( coords0 ) coords1 = numpy . asarray ( coords1 ) cp = coords0 . mean ( axis = 0 ) cq = coords1 . mean ( axis = 0 ) p0 = coords0 - cp q0 = coords1 - cq crossvar = numpy . dot ( numpy . transpose ( p0 ) , q0 ) u , _ , vt = linalg . svd ( crossvar ) d = linalg . det ( u ) * linalg . det ( vt ) if d < 0 : u [ : , - 1 ] = - u [ : , - 1 ] rot = numpy . transpose ( numpy . dot ( u , vt ) ) # Operation is # B - B0 = R(A - A0) # So off is B0 -R * A0 # The inverse operation is # A - A0 = R* (B- B0) # So inverse off* is A - R* B0 # where R* = transpose(R) # R * off* = -off off = - numpy . dot ( rot , cp ) + cq return off , rot
| 3,763
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/offrot.py#L17-L71
|
[
"def",
"get_indexed_string",
"(",
"self",
",",
"index",
")",
":",
"max_len",
"=",
"128",
"str_buf",
"=",
"ffi",
".",
"new",
"(",
"\"wchar_t[]\"",
",",
"str",
"(",
"bytearray",
"(",
"max_len",
")",
")",
")",
"ret",
"=",
"hidapi",
".",
"hid_get_indexed_string",
"(",
"self",
".",
"_device",
",",
"index",
",",
"str_buf",
",",
"max_len",
")",
"if",
"ret",
"<",
"0",
":",
"raise",
"HIDException",
"(",
"self",
".",
"_device",
".",
"get_error",
"(",
")",
")",
"elif",
"ret",
"==",
"0",
":",
"return",
"None",
"else",
":",
"return",
"ffi",
".",
"string",
"(",
"str_buf",
")",
".",
"encode",
"(",
"'utf-8'",
")"
] |
same as export_image3d but there is no output file PIL object is returned instead
|
def pil_image3d ( input , size = ( 800 , 600 ) , pcb_rotate = ( 0 , 0 , 0 ) , timeout = 20 , showgui = False ) : f = tempfile . NamedTemporaryFile ( suffix = '.png' , prefix = 'eagexp_' ) output = f . name export_image3d ( input , output = output , size = size , pcb_rotate = pcb_rotate , timeout = timeout , showgui = showgui ) im = Image . open ( output ) return im
| 3,764
|
https://github.com/ponty/eagexp/blob/1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2/eagexp/image3d.py#L80-L91
|
[
"def",
"_initStormCmds",
"(",
"self",
")",
":",
"self",
".",
"addStormCmd",
"(",
"s_storm",
".",
"MaxCmd",
")",
"self",
".",
"addStormCmd",
"(",
"s_storm",
".",
"MinCmd",
")",
"self",
".",
"addStormCmd",
"(",
"s_storm",
".",
"HelpCmd",
")",
"self",
".",
"addStormCmd",
"(",
"s_storm",
".",
"IdenCmd",
")",
"self",
".",
"addStormCmd",
"(",
"s_storm",
".",
"SpinCmd",
")",
"self",
".",
"addStormCmd",
"(",
"s_storm",
".",
"SudoCmd",
")",
"self",
".",
"addStormCmd",
"(",
"s_storm",
".",
"UniqCmd",
")",
"self",
".",
"addStormCmd",
"(",
"s_storm",
".",
"CountCmd",
")",
"self",
".",
"addStormCmd",
"(",
"s_storm",
".",
"GraphCmd",
")",
"self",
".",
"addStormCmd",
"(",
"s_storm",
".",
"LimitCmd",
")",
"self",
".",
"addStormCmd",
"(",
"s_storm",
".",
"SleepCmd",
")",
"self",
".",
"addStormCmd",
"(",
"s_storm",
".",
"DelNodeCmd",
")",
"self",
".",
"addStormCmd",
"(",
"s_storm",
".",
"MoveTagCmd",
")",
"self",
".",
"addStormCmd",
"(",
"s_storm",
".",
"ReIndexCmd",
")"
] |
Create a function that set the foreground color .
|
def _make_color_fn ( color ) : def _color ( text = "" ) : return ( _color_sep + color + _color_sep2 + text + _color_sep + "default" + _color_sep2 ) return _color
| 3,765
|
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/cli/logger.py#L21-L26
|
[
"def",
"returnJobReqs",
"(",
"self",
",",
"jobReqs",
")",
":",
"# Since we are only reading this job's specific values from the state file, we don't",
"# need a lock",
"jobState",
"=",
"self",
".",
"_JobState",
"(",
"self",
".",
"_CacheState",
".",
"_load",
"(",
"self",
".",
"cacheStateFile",
")",
".",
"jobState",
"[",
"self",
".",
"jobID",
"]",
")",
"for",
"x",
"in",
"list",
"(",
"jobState",
".",
"jobSpecificFiles",
".",
"keys",
"(",
")",
")",
":",
"self",
".",
"deleteLocalFile",
"(",
"x",
")",
"with",
"self",
".",
"_CacheState",
".",
"open",
"(",
"self",
")",
"as",
"cacheInfo",
":",
"cacheInfo",
".",
"sigmaJob",
"-=",
"jobReqs"
] |
Log a text without adding the current time .
|
def just_log ( * texts , sep = "" ) : if config . silent : return text = _color_sep + "default" + _color_sep2 + sep . join ( texts ) array = text . split ( _color_sep ) for part in array : parts = part . split ( _color_sep2 , 1 ) if len ( parts ) != 2 or not parts [ 1 ] : continue if not config . color : print ( parts [ 1 ] , end = '' ) else : colors . foreground ( parts [ 0 ] ) print ( parts [ 1 ] , end = '' , flush = colors . is_win32 ) if config . color : colors . foreground ( "default" ) print ( )
| 3,766
|
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/cli/logger.py#L34-L55
|
[
"def",
"destroy_sns_event",
"(",
"app_name",
",",
"env",
",",
"region",
")",
":",
"session",
"=",
"boto3",
".",
"Session",
"(",
"profile_name",
"=",
"env",
",",
"region_name",
"=",
"region",
")",
"sns_client",
"=",
"session",
".",
"client",
"(",
"'sns'",
")",
"lambda_subscriptions",
"=",
"get_sns_subscriptions",
"(",
"app_name",
"=",
"app_name",
",",
"env",
"=",
"env",
",",
"region",
"=",
"region",
")",
"for",
"subscription_arn",
"in",
"lambda_subscriptions",
":",
"sns_client",
".",
"unsubscribe",
"(",
"SubscriptionArn",
"=",
"subscription_arn",
")",
"LOG",
".",
"debug",
"(",
"\"Lambda SNS event deleted\"",
")",
"return",
"True"
] |
Log a text .
|
def log ( * texts , sep = "" ) : text = sep . join ( texts ) count = text . count ( "\n" ) just_log ( "\n" * count , * get_time ( ) , text . replace ( "\n" , "" ) , sep = sep )
| 3,767
|
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/cli/logger.py#L63-L67
|
[
"def",
"returnJobReqs",
"(",
"self",
",",
"jobReqs",
")",
":",
"# Since we are only reading this job's specific values from the state file, we don't",
"# need a lock",
"jobState",
"=",
"self",
".",
"_JobState",
"(",
"self",
".",
"_CacheState",
".",
"_load",
"(",
"self",
".",
"cacheStateFile",
")",
".",
"jobState",
"[",
"self",
".",
"jobID",
"]",
")",
"for",
"x",
"in",
"list",
"(",
"jobState",
".",
"jobSpecificFiles",
".",
"keys",
"(",
")",
")",
":",
"self",
".",
"deleteLocalFile",
"(",
"x",
")",
"with",
"self",
".",
"_CacheState",
".",
"open",
"(",
"self",
")",
"as",
"cacheInfo",
":",
"cacheInfo",
".",
"sigmaJob",
"-=",
"jobReqs"
] |
Find files to include .
|
def find_files ( globs ) : last_cwd = os . getcwd ( ) os . chdir ( config . cwd ) gex , gin = separate_globs ( globs ) # Find excluded files exclude = [ ] for glob in gex : parse_glob ( glob , exclude ) files = [ ] include = [ ] order = 0 # Find included files and removed excluded files for glob in gin : order += 1 array = parse_glob ( glob , include ) base = find_base ( glob ) for file in array : if file not in exclude : files . append ( ( order , base , file ) ) os . chdir ( last_cwd ) return files
| 3,768
|
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/src.py#L17-L45
|
[
"def",
"volumes_delete",
"(",
"storage_pool",
",",
"logger",
")",
":",
"try",
":",
"for",
"vol_name",
"in",
"storage_pool",
".",
"listVolumes",
"(",
")",
":",
"try",
":",
"vol",
"=",
"storage_pool",
".",
"storageVolLookupByName",
"(",
"vol_name",
")",
"vol",
".",
"delete",
"(",
"0",
")",
"except",
"libvirt",
".",
"libvirtError",
":",
"logger",
".",
"exception",
"(",
"\"Unable to delete storage volume %s.\"",
",",
"vol_name",
")",
"except",
"libvirt",
".",
"libvirtError",
":",
"logger",
".",
"exception",
"(",
"\"Unable to delete storage volumes.\"",
")"
] |
Read some files and return a stream .
|
def src ( globs , * * options ) : # Create an array of globs if only one string is given if isinstance ( globs , str ) : globs = [ globs ] # Find files files = find_files ( globs ) # Create a stream stream = Stream ( ) # Options options [ "cwd" ] = config . cwd if "base" in options : options [ "base" ] = os . path . abspath ( options [ "base" ] ) # Create a File object for each file to include for infile in files : file = File ( infile [ 2 ] , * * options ) file . relpath = file . path file . order = infile [ 0 ] file . base = options . get ( "base" , infile [ 1 ] ) stream . append_file ( file ) # No more files to add stream . end_of_stream ( ) # Pipe a file reader and return the stream if options . get ( "read" , True ) : return stream . pipe ( FileReader ( ) ) return stream
| 3,769
|
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/src.py#L49-L82
|
[
"def",
"check_image_size_incorrect",
"(",
"self",
")",
":",
"last_virtual_address",
"=",
"0",
"last_virtual_size",
"=",
"0",
"section_alignment",
"=",
"self",
".",
"pefile_handle",
".",
"OPTIONAL_HEADER",
".",
"SectionAlignment",
"total_image_size",
"=",
"self",
".",
"pefile_handle",
".",
"OPTIONAL_HEADER",
".",
"SizeOfImage",
"for",
"section",
"in",
"self",
".",
"pefile_handle",
".",
"sections",
":",
"if",
"section",
".",
"VirtualAddress",
">",
"last_virtual_address",
":",
"last_virtual_address",
"=",
"section",
".",
"VirtualAddress",
"last_virtual_size",
"=",
"section",
".",
"Misc_VirtualSize",
"# Just pad the size to be equal to the alignment and check for mismatch",
"last_virtual_size",
"+=",
"section_alignment",
"-",
"(",
"last_virtual_size",
"%",
"section_alignment",
")",
"if",
"(",
"last_virtual_address",
"+",
"last_virtual_size",
")",
"!=",
"total_image_size",
":",
"return",
"{",
"'description'",
":",
"'Image size does not match reported size'",
",",
"'severity'",
":",
"3",
",",
"'category'",
":",
"'MALFORMED'",
"}",
"return",
"None"
] |
Decorate function adding a logger handler stored in FITS .
|
def log_to_history ( logger , name ) : def log_to_history_decorator ( method ) : def l2h_method ( self , ri ) : history_header = fits . Header ( ) fh = FITSHistoryHandler ( history_header ) fh . setLevel ( logging . INFO ) logger . addHandler ( fh ) try : result = method ( self , ri ) field = getattr ( result , name , None ) if field : with field . open ( ) as hdulist : hdr = hdulist [ 0 ] . header hdr . extend ( history_header . cards ) return result finally : logger . removeHandler ( fh ) return l2h_method return log_to_history_decorator
| 3,770
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/logger.py#L30-L54
|
[
"def",
"run_transaction",
"(",
"transactor",
",",
"callback",
")",
":",
"if",
"isinstance",
"(",
"transactor",
",",
"sqlalchemy",
".",
"engine",
".",
"Connection",
")",
":",
"return",
"_txn_retry_loop",
"(",
"transactor",
",",
"callback",
")",
"elif",
"isinstance",
"(",
"transactor",
",",
"sqlalchemy",
".",
"engine",
".",
"Engine",
")",
":",
"with",
"transactor",
".",
"connect",
"(",
")",
"as",
"connection",
":",
"return",
"_txn_retry_loop",
"(",
"connection",
",",
"callback",
")",
"elif",
"isinstance",
"(",
"transactor",
",",
"sqlalchemy",
".",
"orm",
".",
"sessionmaker",
")",
":",
"session",
"=",
"transactor",
"(",
"autocommit",
"=",
"True",
")",
"return",
"_txn_retry_loop",
"(",
"session",
",",
"callback",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"don't know how to run a transaction on %s\"",
",",
"type",
"(",
"transactor",
")",
")"
] |
Create metadata structure
|
def create_db_info ( ) : result = { } result [ 'instrument' ] = '' result [ 'uuid' ] = '' result [ 'tags' ] = { } result [ 'type' ] = '' result [ 'mode' ] = '' result [ 'observation_date' ] = "" result [ 'origin' ] = { } return result
| 3,771
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/types/base.py#L151-L161
|
[
"def",
"_timestamp_regulator",
"(",
"self",
")",
":",
"unified_timestamps",
"=",
"_PrettyDefaultDict",
"(",
"list",
")",
"staged_files",
"=",
"self",
".",
"_list_audio_files",
"(",
"sub_dir",
"=",
"\"staging\"",
")",
"for",
"timestamp_basename",
"in",
"self",
".",
"__timestamps_unregulated",
":",
"if",
"len",
"(",
"self",
".",
"__timestamps_unregulated",
"[",
"timestamp_basename",
"]",
")",
">",
"1",
":",
"# File has been splitted",
"timestamp_name",
"=",
"''",
".",
"join",
"(",
"timestamp_basename",
".",
"split",
"(",
"'.'",
")",
"[",
":",
"-",
"1",
"]",
")",
"staged_splitted_files_of_timestamp",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"staged_file",
":",
"(",
"timestamp_name",
"==",
"staged_file",
"[",
":",
"-",
"3",
"]",
"and",
"all",
"(",
"[",
"(",
"x",
"in",
"set",
"(",
"map",
"(",
"str",
",",
"range",
"(",
"10",
")",
")",
")",
")",
"for",
"x",
"in",
"staged_file",
"[",
"-",
"3",
":",
"]",
"]",
")",
")",
",",
"staged_files",
")",
")",
"if",
"len",
"(",
"staged_splitted_files_of_timestamp",
")",
"==",
"0",
":",
"self",
".",
"__errors",
"[",
"(",
"time",
"(",
")",
",",
"timestamp_basename",
")",
"]",
"=",
"{",
"\"reason\"",
":",
"\"Missing staged file\"",
",",
"\"current_staged_files\"",
":",
"staged_files",
"}",
"continue",
"staged_splitted_files_of_timestamp",
".",
"sort",
"(",
")",
"unified_timestamp",
"=",
"list",
"(",
")",
"for",
"staging_digits",
",",
"splitted_file",
"in",
"enumerate",
"(",
"self",
".",
"__timestamps_unregulated",
"[",
"timestamp_basename",
"]",
")",
":",
"prev_splits_sec",
"=",
"0",
"if",
"int",
"(",
"staging_digits",
")",
"!=",
"0",
":",
"prev_splits_sec",
"=",
"self",
".",
"_get_audio_duration_seconds",
"(",
"\"{}/staging/{}{:03d}\"",
".",
"format",
"(",
"self",
".",
"src_dir",
",",
"timestamp_name",
",",
"staging_digits",
"-",
"1",
")",
")",
"for",
"word_block",
"in",
"splitted_file",
":",
"unified_timestamp",
".",
"append",
"(",
"_WordBlock",
"(",
"word",
"=",
"word_block",
".",
"word",
",",
"start",
"=",
"round",
"(",
"word_block",
".",
"start",
"+",
"prev_splits_sec",
",",
"2",
")",
",",
"end",
"=",
"round",
"(",
"word_block",
".",
"end",
"+",
"prev_splits_sec",
",",
"2",
")",
")",
")",
"unified_timestamps",
"[",
"str",
"(",
"timestamp_basename",
")",
"]",
"+=",
"unified_timestamp",
"else",
":",
"unified_timestamps",
"[",
"timestamp_basename",
"]",
"+=",
"self",
".",
"__timestamps_unregulated",
"[",
"timestamp_basename",
"]",
"[",
"0",
"]",
"self",
".",
"__timestamps",
".",
"update",
"(",
"unified_timestamps",
")",
"self",
".",
"__timestamps_unregulated",
"=",
"_PrettyDefaultDict",
"(",
"list",
")"
] |
Decorator for creating a task .
|
def task ( obj = None , deps = None ) : # The decorator is not used as a function if callable ( obj ) : __task ( obj . __name__ , obj ) return obj # The decorator is used as a function def __decorated ( func ) : __task ( obj if obj else obj . __name__ , deps , func ) return func return __decorated
| 3,772
|
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/utils/decorators.py#L18-L30
|
[
"def",
"serverinfo",
"(",
"url",
"=",
"'http://localhost:8080/manager'",
",",
"timeout",
"=",
"180",
")",
":",
"data",
"=",
"_wget",
"(",
"'serverinfo'",
",",
"{",
"}",
",",
"url",
",",
"timeout",
"=",
"timeout",
")",
"if",
"data",
"[",
"'res'",
"]",
"is",
"False",
":",
"return",
"{",
"'error'",
":",
"data",
"[",
"'msg'",
"]",
"}",
"ret",
"=",
"{",
"}",
"data",
"[",
"'msg'",
"]",
".",
"pop",
"(",
"0",
")",
"for",
"line",
"in",
"data",
"[",
"'msg'",
"]",
":",
"tmp",
"=",
"line",
".",
"split",
"(",
"':'",
")",
"ret",
"[",
"tmp",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"]",
"=",
"tmp",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"return",
"ret"
] |
Read a single byte or raise OSError on failure .
|
def _read_one_byte ( self , fd ) : c = os . read ( fd , 1 ) if not c : raise OSError return c
| 3,773
|
https://github.com/rfk/playitagainsam/blob/897cc8e8ca920a4afb8597b4a345361065a3f108/playitagainsam/recorder.py#L141-L146
|
[
"def",
"ttl",
"(",
"self",
",",
"value",
")",
":",
"# get timer",
"timer",
"=",
"getattr",
"(",
"self",
",",
"Annotation",
".",
"__TIMER",
",",
"None",
")",
"# if timer is running, stop the timer",
"if",
"timer",
"is",
"not",
"None",
":",
"timer",
".",
"cancel",
"(",
")",
"# initialize timestamp",
"timestamp",
"=",
"None",
"# if value is None",
"if",
"value",
"is",
"None",
":",
"# nonify timer",
"timer",
"=",
"None",
"else",
":",
"# else, renew a timer",
"# get timestamp",
"timestamp",
"=",
"time",
"(",
")",
"+",
"value",
"# start a new timer",
"timer",
"=",
"Timer",
"(",
"value",
",",
"self",
".",
"__del__",
")",
"timer",
".",
"start",
"(",
")",
"# set/update attributes",
"setattr",
"(",
"self",
",",
"Annotation",
".",
"__TIMER",
",",
"timer",
")",
"setattr",
"(",
"self",
",",
"Annotation",
".",
"__TS",
",",
"timestamp",
")"
] |
Auxiliary function to give an error if the file already exists .
|
def arg_file_is_new ( parser , arg , mode = 'w' ) : if os . path . exists ( arg ) : parser . error ( "\nThe file \"%s\"\nalready exists and " "cannot be overwritten!" % arg ) else : # return an open file handle handler = open ( arg , mode = mode ) return handler
| 3,774
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/tools/arg_file_is_new.py#L17-L42
|
[
"def",
"perfcounters",
"(",
"infile",
")",
":",
"measurements",
"=",
"[",
"]",
"with",
"open",
"(",
"infile",
",",
"'r'",
")",
"as",
"in_file",
":",
"read_struct",
"(",
"in_file",
")",
"for",
"region_struct",
"in",
"read_structs",
"(",
"in_file",
")",
":",
"region",
"=",
"region_struct",
"[",
"\"1\"",
"]",
"[",
"1",
"]",
"core_info",
"=",
"region_struct",
"[",
"\"Region Info\"",
"]",
"measurements",
"+=",
"get_measurements",
"(",
"region",
",",
"core_info",
",",
"region_struct",
")",
"for",
"table_struct",
"in",
"read_tables",
"(",
"in_file",
")",
":",
"core_info",
"=",
"None",
"if",
"\"Event\"",
"in",
"table_struct",
":",
"offset",
"=",
"1",
"core_info",
"=",
"table_struct",
"[",
"\"Event\"",
"]",
"[",
"offset",
":",
"]",
"measurements",
"+=",
"get_measurements",
"(",
"region",
",",
"core_info",
",",
"table_struct",
",",
"offset",
")",
"elif",
"\"Metric\"",
"in",
"table_struct",
":",
"core_info",
"=",
"table_struct",
"[",
"\"Metric\"",
"]",
"measurements",
"+=",
"get_measurements",
"(",
"region",
",",
"core_info",
",",
"table_struct",
")",
"return",
"measurements"
] |
Compute intersection of spectrum trail with arc line .
|
def intersection_spectrail_arcline ( spectrail , arcline ) : # approximate location of the solution expected_x = ( arcline . xlower_line + arcline . xupper_line ) / 2.0 # composition of polynomials to find intersection as # one of the roots of a new polynomial rootfunct = arcline . poly_funct ( spectrail . poly_funct ) rootfunct . coef [ 1 ] -= 1 # compute roots to find solution tmp_xroots = rootfunct . roots ( ) # take the nearest root to the expected location xroot = tmp_xroots [ np . abs ( tmp_xroots - expected_x ) . argmin ( ) ] if np . isreal ( xroot ) : xroot = xroot . real else : raise ValueError ( "xroot=" + str ( xroot ) + " is a complex number" ) yroot = spectrail . poly_funct ( xroot ) return xroot , yroot
| 3,775
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/ccd_line.py#L250-L286
|
[
"def",
"post",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=unused-argument",
"batch_object",
"=",
"request",
".",
"data",
"or",
"{",
"}",
"try",
":",
"user",
",",
"course_key",
",",
"blocks",
"=",
"self",
".",
"_validate_and_parse",
"(",
"batch_object",
")",
"BlockCompletion",
".",
"objects",
".",
"submit_batch_completion",
"(",
"user",
",",
"course_key",
",",
"blocks",
")",
"except",
"ValidationError",
"as",
"exc",
":",
"return",
"Response",
"(",
"{",
"\"detail\"",
":",
"_",
"(",
"' '",
")",
".",
"join",
"(",
"text_type",
"(",
"msg",
")",
"for",
"msg",
"in",
"exc",
".",
"messages",
")",
",",
"}",
",",
"status",
"=",
"status",
".",
"HTTP_400_BAD_REQUEST",
")",
"except",
"ValueError",
"as",
"exc",
":",
"return",
"Response",
"(",
"{",
"\"detail\"",
":",
"text_type",
"(",
"exc",
")",
",",
"}",
",",
"status",
"=",
"status",
".",
"HTTP_400_BAD_REQUEST",
")",
"except",
"ObjectDoesNotExist",
"as",
"exc",
":",
"return",
"Response",
"(",
"{",
"\"detail\"",
":",
"text_type",
"(",
"exc",
")",
",",
"}",
",",
"status",
"=",
"status",
".",
"HTTP_404_NOT_FOUND",
")",
"except",
"DatabaseError",
"as",
"exc",
":",
"return",
"Response",
"(",
"{",
"\"detail\"",
":",
"text_type",
"(",
"exc",
")",
",",
"}",
",",
"status",
"=",
"status",
".",
"HTTP_500_INTERNAL_SERVER_ERROR",
")",
"return",
"Response",
"(",
"{",
"\"detail\"",
":",
"_",
"(",
"\"ok\"",
")",
"}",
",",
"status",
"=",
"status",
".",
"HTTP_200_OK",
")"
] |
Return a copy of self shifted a constant offset .
|
def offset ( self , offset_value ) : new_instance = deepcopy ( self ) new_instance . poly_funct . coef [ 0 ] += offset_value return new_instance
| 3,776
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/ccd_line.py#L205-L217
|
[
"def",
"get_display_name_metadata",
"(",
"self",
")",
":",
"metadata",
"=",
"dict",
"(",
"self",
".",
"_mdata",
"[",
"'display_name'",
"]",
")",
"metadata",
".",
"update",
"(",
"{",
"'existing_string_values'",
":",
"self",
".",
"_my_map",
"[",
"'displayName'",
"]",
"[",
"'text'",
"]",
"}",
")",
"return",
"Metadata",
"(",
"*",
"*",
"metadata",
")"
] |
Compute output = file1 operation file2 .
|
def compute_operation ( file1 , file2 , operation , output , display , args_z1z2 , args_bbox , args_keystitle , args_geometry ) : # read first FITS file with fits . open ( file1 ) as hdulist : image_header1 = hdulist [ 0 ] . header image1 = hdulist [ 0 ] . data . astype ( np . float ) naxis1 = image_header1 [ 'naxis1' ] naxis2 = image_header1 [ 'naxis2' ] # if required, display file1 if display == 'all' : ximshow_file ( file1 . name , args_z1z2 = args_z1z2 , args_bbox = args_bbox , args_keystitle = args_keystitle , args_geometry = args_geometry , debugplot = 12 ) # read second FITS file with fits . open ( file2 ) as hdulist : image_header2 = hdulist [ 0 ] . header image2 = hdulist [ 0 ] . data . astype ( np . float ) naxis1_ = image_header2 [ 'naxis1' ] naxis2_ = image_header2 [ 'naxis2' ] # if required, display file2 if display == 'all' : ximshow_file ( file2 . name , args_z1z2 = args_z1z2 , args_bbox = args_bbox , args_keystitle = args_keystitle , args_geometry = args_geometry , debugplot = 12 ) # check dimensions if naxis1 != naxis1_ : raise ValueError ( "NAXIS1 values are different." ) if naxis2 != naxis2_ : raise ValueError ( "NAXIS2 values are different." ) # compute operation if operation == "+" : solution = image1 + image2 elif operation == "-" : solution = image1 - image2 elif operation == "*" : solution = image1 * image2 elif operation == "/" : solution = image1 / image2 else : raise ValueError ( "Unexpected operation=" + str ( operation ) ) # save output file hdu = fits . PrimaryHDU ( solution . astype ( np . float ) , image_header1 ) hdu . writeto ( output , overwrite = True ) # if required, display result if display in [ 'all' , 'result' ] : ximshow_file ( output . name , args_z1z2 = args_z1z2 , args_bbox = args_bbox , args_keystitle = args_keystitle , args_geometry = args_geometry , debugplot = 12 )
| 3,777
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/tools/imath.py#L25-L111
|
[
"def",
"external_metadata",
"(",
"self",
",",
"datasource_type",
"=",
"None",
",",
"datasource_id",
"=",
"None",
")",
":",
"if",
"datasource_type",
"==",
"'druid'",
":",
"datasource",
"=",
"ConnectorRegistry",
".",
"get_datasource",
"(",
"datasource_type",
",",
"datasource_id",
",",
"db",
".",
"session",
")",
"elif",
"datasource_type",
"==",
"'table'",
":",
"database",
"=",
"(",
"db",
".",
"session",
".",
"query",
"(",
"Database",
")",
".",
"filter_by",
"(",
"id",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'db_id'",
")",
")",
".",
"one",
"(",
")",
")",
"Table",
"=",
"ConnectorRegistry",
".",
"sources",
"[",
"'table'",
"]",
"datasource",
"=",
"Table",
"(",
"database",
"=",
"database",
",",
"table_name",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'table_name'",
")",
",",
"schema",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'schema'",
")",
"or",
"None",
",",
")",
"external_metadata",
"=",
"datasource",
".",
"external_metadata",
"(",
")",
"return",
"self",
".",
"json_response",
"(",
"external_metadata",
")"
] |
Compute a robust estimator of the standard deviation
|
def robust_std ( x , debug = False ) : x = numpy . asarray ( x ) # compute percentiles and robust estimator q25 = numpy . percentile ( x , 25 ) q75 = numpy . percentile ( x , 75 ) sigmag = 0.7413 * ( q75 - q25 ) if debug : print ( 'debug|sigmag -> q25......................:' , q25 ) print ( 'debug|sigmag -> q75......................:' , q75 ) print ( 'debug|sigmag -> Robust standard deviation:' , sigmag ) return sigmag
| 3,778
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/stats.py#L17-L48
|
[
"def",
"ParseApplicationResourceUsage",
"(",
"self",
",",
"parser_mediator",
",",
"cache",
"=",
"None",
",",
"database",
"=",
"None",
",",
"table",
"=",
"None",
",",
"*",
"*",
"unused_kwargs",
")",
":",
"self",
".",
"_ParseGUIDTable",
"(",
"parser_mediator",
",",
"cache",
",",
"database",
",",
"table",
",",
"self",
".",
"_APPLICATION_RESOURCE_USAGE_VALUES_MAP",
",",
"SRUMApplicationResourceUsageEventData",
")"
] |
Compute basic statistical parameters .
|
def summary ( x , rm_nan = False , debug = False ) : # protections if type ( x ) is np . ndarray : xx = np . copy ( x ) else : if type ( x ) is list : xx = np . array ( x ) else : raise ValueError ( 'x=' + str ( x ) + ' must be a numpy.ndarray' ) if xx . ndim is not 1 : raise ValueError ( 'xx.dim=' + str ( xx . ndim ) + ' must be 1' ) # filter out NaN's if rm_nan : xx = xx [ np . logical_not ( np . isnan ( xx ) ) ] # compute basic statistics npoints = len ( xx ) ok = npoints > 0 result = { 'npoints' : npoints , 'minimum' : np . min ( xx ) if ok else 0 , 'percentile25' : np . percentile ( xx , 25 ) if ok else 0 , 'median' : np . percentile ( xx , 50 ) if ok else 0 , 'mean' : np . mean ( xx ) if ok else 0 , 'percentile75' : np . percentile ( xx , 75 ) if ok else 0 , 'maximum' : np . max ( xx ) if ok else 0 , 'std' : np . std ( xx ) if ok else 0 , 'robust_std' : robust_std ( xx ) if ok else 0 , 'percentile15' : np . percentile ( xx , 15.86553 ) if ok else 0 , 'percentile84' : np . percentile ( xx , 84.13447 ) if ok else 0 } if debug : print ( '>>> ========================================' ) print ( '>>> STATISTICAL SUMMARY:' ) print ( '>>> ----------------------------------------' ) print ( '>>> Number of points.........:' , result [ 'npoints' ] ) print ( '>>> Minimum..................:' , result [ 'minimum' ] ) print ( '>>> 1st Quartile.............:' , result [ 'percentile25' ] ) print ( '>>> Median...................:' , result [ 'median' ] ) print ( '>>> Mean.....................:' , result [ 'mean' ] ) print ( '>>> 3rd Quartile.............:' , result [ 'percentile75' ] ) print ( '>>> Maximum..................:' , result [ 'maximum' ] ) print ( '>>> ----------------------------------------' ) print ( '>>> Standard deviation.......:' , result [ 'std' ] ) print ( '>>> Robust standard deviation:' , result [ 'robust_std' ] ) print ( '>>> 0.1586553 percentile.....:' , result [ 'percentile15' ] ) print ( '>>> 0.8413447 percentile.....:' , result [ 'percentile84' ] ) 
print ( '>>> ========================================' ) return result
| 3,779
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/stats.py#L51-L126
|
[
"def",
"create_bundle",
"(",
"self",
",",
"bundleId",
",",
"data",
"=",
"None",
")",
":",
"headers",
"=",
"{",
"'content-type'",
":",
"'application/json'",
"}",
"url",
"=",
"self",
".",
"__get_base_bundle_url",
"(",
")",
"+",
"\"/\"",
"+",
"bundleId",
"if",
"data",
"is",
"None",
":",
"data",
"=",
"{",
"}",
"data",
"[",
"'sourceLanguage'",
"]",
"=",
"'en'",
"data",
"[",
"'targetLanguages'",
"]",
"=",
"[",
"]",
"data",
"[",
"'notes'",
"]",
"=",
"[",
"]",
"data",
"[",
"'metadata'",
"]",
"=",
"{",
"}",
"data",
"[",
"'partner'",
"]",
"=",
"''",
"data",
"[",
"'segmentSeparatorPattern'",
"]",
"=",
"''",
"data",
"[",
"'noTranslationPattern'",
"]",
"=",
"''",
"json_data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"response",
"=",
"self",
".",
"__perform_rest_call",
"(",
"requestURL",
"=",
"url",
",",
"restType",
"=",
"'PUT'",
",",
"body",
"=",
"json_data",
",",
"headers",
"=",
"headers",
")",
"return",
"response"
] |
Fit a trace information table to a polynomial .
|
def fit_trace_polynomial ( trace , deg , axis = 0 ) : dispaxis = axis_to_dispaxis ( axis ) # FIT to a polynomial pfit = numpy . polyfit ( trace [ : , 0 ] , trace [ : , 1 ] , deg ) start = trace [ 0 , 0 ] stop = trace [ - 1 , 0 ] , return PolyTrace ( start , stop , axis , pfit )
| 3,780
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/trace/traces.py#L74-L94
|
[
"def",
"DeleteSubjects",
"(",
"self",
",",
"subjects",
",",
"sync",
"=",
"False",
")",
":",
"for",
"subject",
"in",
"subjects",
":",
"self",
".",
"DeleteSubject",
"(",
"subject",
",",
"sync",
"=",
"sync",
")"
] |
Return a humanized price
|
def price_humanized ( value , inst , currency = None ) : return ( natural_number_with_currency ( value , ugettext ( 'CZK' ) if currency is None else currency ) if value is not None else ugettext ( '(None)' ) )
| 3,781
|
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/models/humanized_helpers/__init__.py#L6-L11
|
[
"def",
"allconcat_ring",
"(",
"xs",
",",
"devices",
",",
"concat_axis",
")",
":",
"n",
"=",
"len",
"(",
"xs",
")",
"if",
"n",
"==",
"1",
":",
"return",
"xs",
"# [target, source]",
"parts",
"=",
"[",
"[",
"xs",
"[",
"target",
"]",
"if",
"target",
"==",
"source",
"else",
"None",
"for",
"source",
"in",
"xrange",
"(",
"n",
")",
"]",
"for",
"target",
"in",
"xrange",
"(",
"n",
")",
"]",
"for",
"distance",
"in",
"xrange",
"(",
"1",
",",
"n",
"//",
"2",
"+",
"1",
")",
":",
"for",
"target",
"in",
"xrange",
"(",
"n",
")",
":",
"source",
"=",
"(",
"target",
"+",
"distance",
")",
"%",
"n",
"if",
"parts",
"[",
"target",
"]",
"[",
"source",
"]",
"is",
"None",
":",
"with",
"tf",
".",
"device",
"(",
"devices",
"[",
"target",
"]",
")",
":",
"parts",
"[",
"target",
"]",
"[",
"source",
"]",
"=",
"tf",
".",
"identity",
"(",
"parts",
"[",
"(",
"target",
"+",
"1",
")",
"%",
"n",
"]",
"[",
"source",
"]",
")",
"source",
"=",
"(",
"target",
"-",
"distance",
")",
"%",
"n",
"if",
"parts",
"[",
"target",
"]",
"[",
"source",
"]",
"is",
"None",
":",
"with",
"tf",
".",
"device",
"(",
"devices",
"[",
"target",
"]",
")",
":",
"parts",
"[",
"target",
"]",
"[",
"source",
"]",
"=",
"tf",
".",
"identity",
"(",
"parts",
"[",
"(",
"target",
"-",
"1",
")",
"%",
"n",
"]",
"[",
"source",
"]",
")",
"return",
"mtf",
".",
"parallel",
"(",
"devices",
",",
"tf",
".",
"concat",
",",
"parts",
",",
"axis",
"=",
"[",
"concat_axis",
"]",
"*",
"n",
")"
] |
Obtain a unique identifier of the image .
|
def get_imgid ( self , img ) : imgid = img . filename ( ) # More heuristics here... # get FILENAME keyword, CHECKSUM, for example... hdr = self . get_header ( img ) if 'checksum' in hdr : return hdr [ 'checksum' ] if 'filename' in hdr : return hdr [ 'filename' ] if not imgid : imgid = repr ( img ) return imgid
| 3,782
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/datamodel.py#L164-L191
|
[
"def",
"set_calibration_duration",
"(",
"self",
",",
"dur",
")",
":",
"self",
".",
"bs_calibrator",
".",
"set_duration",
"(",
"dur",
")",
"self",
".",
"tone_calibrator",
".",
"set_duration",
"(",
"dur",
")"
] |
Log that the task has started .
|
def log_starting ( self ) : self . start_time = time . perf_counter ( ) logger . log ( "Starting '" , logger . cyan ( self . name ) , "'..." )
| 3,783
|
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/runner.py#L50-L53
|
[
"def",
"get_requests_session",
"(",
")",
":",
"session",
"=",
"requests",
".",
"sessions",
".",
"Session",
"(",
")",
"session",
".",
"mount",
"(",
"'http://'",
",",
"HTTPAdapter",
"(",
"pool_connections",
"=",
"25",
",",
"pool_maxsize",
"=",
"25",
",",
"pool_block",
"=",
"True",
")",
")",
"session",
".",
"mount",
"(",
"'https://'",
",",
"HTTPAdapter",
"(",
"pool_connections",
"=",
"25",
",",
"pool_maxsize",
"=",
"25",
",",
"pool_block",
"=",
"True",
")",
")",
"return",
"session"
] |
Log that this task is done .
|
def log_finished ( self ) : delta = time . perf_counter ( ) - self . start_time logger . log ( "Finished '" , logger . cyan ( self . name ) , "' after " , logger . magenta ( time_to_text ( delta ) ) )
| 3,784
|
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/runner.py#L56-L60
|
[
"def",
"items",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Preserve backwards compatibility",
"if",
"args",
":",
"return",
"item",
"(",
"*",
"args",
")",
"pillarenv",
"=",
"kwargs",
".",
"get",
"(",
"'pillarenv'",
")",
"if",
"pillarenv",
"is",
"None",
":",
"if",
"__opts__",
".",
"get",
"(",
"'pillarenv_from_saltenv'",
",",
"False",
")",
":",
"pillarenv",
"=",
"kwargs",
".",
"get",
"(",
"'saltenv'",
")",
"or",
"__opts__",
"[",
"'saltenv'",
"]",
"else",
":",
"pillarenv",
"=",
"__opts__",
"[",
"'pillarenv'",
"]",
"pillar_override",
"=",
"kwargs",
".",
"get",
"(",
"'pillar'",
")",
"pillar_enc",
"=",
"kwargs",
".",
"get",
"(",
"'pillar_enc'",
")",
"if",
"pillar_override",
"and",
"pillar_enc",
":",
"try",
":",
"pillar_override",
"=",
"salt",
".",
"utils",
".",
"crypt",
".",
"decrypt",
"(",
"pillar_override",
",",
"pillar_enc",
",",
"translate_newlines",
"=",
"True",
",",
"opts",
"=",
"__opts__",
",",
"valid_rend",
"=",
"__opts__",
"[",
"'decrypt_pillar_renderers'",
"]",
")",
"except",
"Exception",
"as",
"exc",
":",
"raise",
"CommandExecutionError",
"(",
"'Failed to decrypt pillar override: {0}'",
".",
"format",
"(",
"exc",
")",
")",
"pillar",
"=",
"salt",
".",
"pillar",
".",
"get_pillar",
"(",
"__opts__",
",",
"__grains__",
",",
"__opts__",
"[",
"'id'",
"]",
",",
"pillar_override",
"=",
"pillar_override",
",",
"pillarenv",
"=",
"pillarenv",
")",
"return",
"pillar",
".",
"compile_pillar",
"(",
")"
] |
Call the function attached to the task .
|
def call_task_fn ( self ) : if not self . fn : return self . log_finished ( ) future = asyncio . Future ( ) future . add_done_callback ( lambda x : self . log_finished ( ) ) if inspect . iscoroutinefunction ( self . fn ) : f = asyncio . ensure_future ( self . fn ( ) ) f . add_done_callback ( lambda x : self . bind_end ( x . result ( ) , future ) ) else : self . bind_end ( self . fn ( ) , future ) return future
| 3,785
|
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/runner.py#L63-L77
|
[
"def",
"delete_logs",
"(",
"room",
")",
":",
"from",
"indico_chat",
".",
"plugin",
"import",
"ChatPlugin",
"base_url",
"=",
"ChatPlugin",
".",
"settings",
".",
"get",
"(",
"'log_url'",
")",
"if",
"not",
"base_url",
"or",
"room",
".",
"custom_server",
":",
"return",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"posixpath",
".",
"join",
"(",
"base_url",
",",
"'delete'",
")",
",",
"params",
"=",
"{",
"'cr'",
":",
"room",
".",
"jid",
"}",
")",
".",
"json",
"(",
")",
"except",
"(",
"RequestException",
",",
"ValueError",
")",
":",
"current_plugin",
".",
"logger",
".",
"exception",
"(",
"'Could not delete logs for %s'",
",",
"room",
".",
"jid",
")",
"return",
"if",
"not",
"response",
".",
"get",
"(",
"'success'",
")",
":",
"current_plugin",
".",
"logger",
".",
"warning",
"(",
"'Could not delete logs for %s: %s'",
",",
"room",
".",
"jid",
",",
"response",
".",
"get",
"(",
"'error'",
")",
")"
] |
Bind a TaskEndTransformer to a stream .
|
def bind_end ( self , stream , future ) : if not isinstance ( stream , Stream ) : future . set_result ( None ) else : stream . pipe ( TaskEndTransformer ( future ) )
| 3,786
|
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/runner.py#L80-L85
|
[
"def",
"create",
"(",
"self",
",",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"super",
"(",
"ImageMemberManager",
",",
"self",
")",
".",
"create",
"(",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"e",
".",
"http_status",
"==",
"403",
":",
"raise",
"exc",
".",
"UnsharableImage",
"(",
"\"You cannot share a public image.\"",
")",
"else",
":",
"raise"
] |
Start running dependencies .
|
async def start_deps ( self , deps ) : # Get only new dependencies deps = list ( filter ( lambda dep : dep not in self . called , deps ) ) self . called += deps # Start only existing dependencies runners = list ( filter ( lambda x : x and x . future , map ( lambda dep : pylp . start ( dep ) , deps ) ) ) if len ( runners ) != 0 : await asyncio . wait ( map ( lambda runner : runner . future , runners ) ) # Call the attached function future = self . call_task_fn ( ) if future : await future
| 3,787
|
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/runner.py#L88-L103
|
[
"def",
"login",
"(",
"self",
")",
":",
"access_token",
"=",
"self",
".",
"_get_access_token",
"(",
")",
"try",
":",
"super",
"(",
"IAMSession",
",",
"self",
")",
".",
"request",
"(",
"'POST'",
",",
"self",
".",
"_session_url",
",",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json'",
"}",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"{",
"'access_token'",
":",
"access_token",
"}",
")",
")",
".",
"raise_for_status",
"(",
")",
"except",
"RequestException",
":",
"raise",
"CloudantException",
"(",
"'Failed to exchange IAM token with Cloudant'",
")"
] |
Create a set from an iterable of members .
|
def frommembers ( cls , members = ( ) ) : return cls . fromint ( sum ( map ( cls . _map . __getitem__ , set ( members ) ) ) )
| 3,788
|
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/bases.py#L31-L33
|
[
"def",
"disconnect_controller",
"(",
"self",
",",
"vid",
",",
"pid",
",",
"serial",
")",
":",
"self",
".",
"lib",
".",
"tdDisconnectTellStickController",
"(",
"vid",
",",
"pid",
",",
"serial",
")"
] |
Create a set from an iterable of boolean evaluable items .
|
def frombools ( cls , bools = ( ) ) : return cls . fromint ( sum ( compress ( cls . _atoms , bools ) ) )
| 3,789
|
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/bases.py#L36-L38
|
[
"def",
"dsphdr",
"(",
"x",
",",
"y",
",",
"z",
")",
":",
"x",
"=",
"ctypes",
".",
"c_double",
"(",
"x",
")",
"y",
"=",
"ctypes",
".",
"c_double",
"(",
"y",
")",
"z",
"=",
"ctypes",
".",
"c_double",
"(",
"z",
")",
"jacobi",
"=",
"stypes",
".",
"emptyDoubleMatrix",
"(",
")",
"libspice",
".",
"dsphdr_c",
"(",
"x",
",",
"y",
",",
"z",
",",
"jacobi",
")",
"return",
"stypes",
".",
"cMatrixToNumpy",
"(",
"jacobi",
")"
] |
Create a set from binary string .
|
def frombits ( cls , bits = '0' ) : if len ( bits ) > cls . _len : raise ValueError ( 'too many bits %r' % ( bits , ) ) return cls . fromint ( bits [ : : - 1 ] , 2 )
| 3,790
|
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/bases.py#L41-L45
|
[
"def",
"remove_stale_javascripts",
"(",
"portal",
")",
":",
"logger",
".",
"info",
"(",
"\"Removing stale javascripts ...\"",
")",
"for",
"js",
"in",
"JAVASCRIPTS_TO_REMOVE",
":",
"logger",
".",
"info",
"(",
"\"Unregistering JS %s\"",
"%",
"js",
")",
"portal",
".",
"portal_javascripts",
".",
"unregisterResource",
"(",
"js",
")"
] |
Yield the singleton for every set member .
|
def atoms ( self , reverse = False ) : if reverse : return filter ( self . __and__ , reversed ( self . _atoms ) ) return filter ( self . __and__ , self . _atoms )
| 3,791
|
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/bases.py#L77-L81
|
[
"def",
"load_draco",
"(",
"file_obj",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"'.drc'",
")",
"as",
"temp_drc",
":",
"temp_drc",
".",
"write",
"(",
"file_obj",
".",
"read",
"(",
")",
")",
"temp_drc",
".",
"flush",
"(",
")",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"'.ply'",
")",
"as",
"temp_ply",
":",
"subprocess",
".",
"check_output",
"(",
"[",
"draco_decoder",
",",
"'-i'",
",",
"temp_drc",
".",
"name",
",",
"'-o'",
",",
"temp_ply",
".",
"name",
"]",
")",
"temp_ply",
".",
"seek",
"(",
"0",
")",
"kwargs",
"=",
"load_ply",
"(",
"temp_ply",
")",
"return",
"kwargs"
] |
Yield the singleton for every non - member .
|
def inatoms ( self , reverse = False ) : if reverse : return filterfalse ( self . __and__ , reversed ( self . _atoms ) ) return filterfalse ( self . __and__ , self . _atoms )
| 3,792
|
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/bases.py#L83-L87
|
[
"def",
"unpack_rawr_zip_payload",
"(",
"table_sources",
",",
"payload",
")",
":",
"# the io we get from S3 is streaming, so we can't seek on it, but zipfile",
"# seems to require that. so we buffer it all in memory. RAWR tiles are",
"# generally up to around 100MB in size, which should be safe to store in",
"# RAM.",
"from",
"tilequeue",
".",
"query",
".",
"common",
"import",
"Table",
"from",
"io",
"import",
"BytesIO",
"zfh",
"=",
"zipfile",
".",
"ZipFile",
"(",
"BytesIO",
"(",
"payload",
")",
",",
"'r'",
")",
"def",
"get_table",
"(",
"table_name",
")",
":",
"# need to extract the whole compressed file from zip reader, as it",
"# doesn't support .tell() on the filelike, which gzip requires.",
"data",
"=",
"zfh",
".",
"open",
"(",
"table_name",
",",
"'r'",
")",
".",
"read",
"(",
")",
"unpacker",
"=",
"Unpacker",
"(",
"file_like",
"=",
"BytesIO",
"(",
"data",
")",
")",
"source",
"=",
"table_sources",
"[",
"table_name",
"]",
"return",
"Table",
"(",
"source",
",",
"unpacker",
")",
"return",
"get_table"
] |
Yield combinations from start to self in short lexicographic order .
|
def powerset ( self , start = None , excludestart = False ) : if start is None : start = self . infimum other = self . atoms ( ) else : if self | start != self : raise ValueError ( '%r is no subset of %r' % ( start , self ) ) other = self . fromint ( self & ~ start ) . atoms ( ) return map ( self . frombitset , combos . shortlex ( start , list ( other ) ) )
| 3,793
|
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/bases.py#L89-L98
|
[
"def",
"unstub",
"(",
"*",
"objs",
")",
":",
"if",
"objs",
":",
"for",
"obj",
"in",
"objs",
":",
"mock_registry",
".",
"unstub",
"(",
"obj",
")",
"else",
":",
"mock_registry",
".",
"unstub_all",
"(",
")"
] |
Changes a given changed_fields on object and returns changed object .
|
def change ( obj , * * changed_fields ) : obj_field_names = { field . name for field in obj . _meta . fields } | { field . attname for field in obj . _meta . fields } | { 'pk' } for field_name , value in changed_fields . items ( ) : if field_name not in obj_field_names : raise ValueError ( "'{}' is an invalid field name" . format ( field_name ) ) setattr ( obj , field_name , value ) return obj
| 3,794
|
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/shortcuts.py#L54-L68
|
[
"def",
"get_doc",
"(",
"logger",
"=",
"None",
",",
"plugin",
"=",
"None",
",",
"reporthook",
"=",
"None",
")",
":",
"from",
"ginga",
".",
"GingaPlugin",
"import",
"GlobalPlugin",
",",
"LocalPlugin",
"if",
"isinstance",
"(",
"plugin",
",",
"GlobalPlugin",
")",
":",
"plugin_page",
"=",
"'plugins_global'",
"plugin_name",
"=",
"str",
"(",
"plugin",
")",
"elif",
"isinstance",
"(",
"plugin",
",",
"LocalPlugin",
")",
":",
"plugin_page",
"=",
"'plugins_local'",
"plugin_name",
"=",
"str",
"(",
"plugin",
")",
"else",
":",
"plugin_page",
"=",
"None",
"plugin_name",
"=",
"None",
"try",
":",
"index_html",
"=",
"_download_rtd_zip",
"(",
"reporthook",
"=",
"reporthook",
")",
"# Download failed, use online resource",
"except",
"Exception",
"as",
"e",
":",
"url",
"=",
"'https://ginga.readthedocs.io/en/latest/'",
"if",
"plugin_name",
"is",
"not",
"None",
":",
"if",
"toolkit",
".",
"family",
".",
"startswith",
"(",
"'qt'",
")",
":",
"# This displays plugin docstring.",
"url",
"=",
"None",
"else",
":",
"# This redirects to online doc.",
"url",
"+=",
"'manual/{}/{}.html'",
".",
"format",
"(",
"plugin_page",
",",
"plugin_name",
")",
"if",
"logger",
"is",
"not",
"None",
":",
"logger",
".",
"error",
"(",
"str",
"(",
"e",
")",
")",
"# Use local resource",
"else",
":",
"pfx",
"=",
"'file:'",
"url",
"=",
"'{}{}'",
".",
"format",
"(",
"pfx",
",",
"index_html",
")",
"# https://github.com/rtfd/readthedocs.org/issues/2803",
"if",
"plugin_name",
"is",
"not",
"None",
":",
"url",
"+=",
"'#{}'",
".",
"format",
"(",
"plugin_name",
")",
"return",
"url"
] |
Changes a given changed_fields on object saves it and returns changed object .
|
def change_and_save ( obj , update_only_changed_fields = False , save_kwargs = None , * * changed_fields ) : from chamber . models import SmartModel change ( obj , * * changed_fields ) if update_only_changed_fields and not isinstance ( obj , SmartModel ) : raise TypeError ( 'update_only_changed_fields can be used only with SmartModel' ) save_kwargs = save_kwargs if save_kwargs is not None else { } if update_only_changed_fields : save_kwargs [ 'update_only_changed_fields' ] = True obj . save ( * * save_kwargs ) return obj
| 3,795
|
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/shortcuts.py#L71-L86
|
[
"def",
"get_doc",
"(",
"logger",
"=",
"None",
",",
"plugin",
"=",
"None",
",",
"reporthook",
"=",
"None",
")",
":",
"from",
"ginga",
".",
"GingaPlugin",
"import",
"GlobalPlugin",
",",
"LocalPlugin",
"if",
"isinstance",
"(",
"plugin",
",",
"GlobalPlugin",
")",
":",
"plugin_page",
"=",
"'plugins_global'",
"plugin_name",
"=",
"str",
"(",
"plugin",
")",
"elif",
"isinstance",
"(",
"plugin",
",",
"LocalPlugin",
")",
":",
"plugin_page",
"=",
"'plugins_local'",
"plugin_name",
"=",
"str",
"(",
"plugin",
")",
"else",
":",
"plugin_page",
"=",
"None",
"plugin_name",
"=",
"None",
"try",
":",
"index_html",
"=",
"_download_rtd_zip",
"(",
"reporthook",
"=",
"reporthook",
")",
"# Download failed, use online resource",
"except",
"Exception",
"as",
"e",
":",
"url",
"=",
"'https://ginga.readthedocs.io/en/latest/'",
"if",
"plugin_name",
"is",
"not",
"None",
":",
"if",
"toolkit",
".",
"family",
".",
"startswith",
"(",
"'qt'",
")",
":",
"# This displays plugin docstring.",
"url",
"=",
"None",
"else",
":",
"# This redirects to online doc.",
"url",
"+=",
"'manual/{}/{}.html'",
".",
"format",
"(",
"plugin_page",
",",
"plugin_name",
")",
"if",
"logger",
"is",
"not",
"None",
":",
"logger",
".",
"error",
"(",
"str",
"(",
"e",
")",
")",
"# Use local resource",
"else",
":",
"pfx",
"=",
"'file:'",
"url",
"=",
"'{}{}'",
".",
"format",
"(",
"pfx",
",",
"index_html",
")",
"# https://github.com/rtfd/readthedocs.org/issues/2803",
"if",
"plugin_name",
"is",
"not",
"None",
":",
"url",
"+=",
"'#{}'",
".",
"format",
"(",
"plugin_name",
")",
"return",
"url"
] |
Changes a given changed_fields on each object in a given iterable saves objects and returns the changed objects .
|
def bulk_change_and_save ( iterable , update_only_changed_fields = False , save_kwargs = None , * * changed_fields ) : return [ change_and_save ( obj , update_only_changed_fields = update_only_changed_fields , save_kwargs = save_kwargs , * * changed_fields ) for obj in iterable ]
| 3,796
|
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/shortcuts.py#L96-L105
|
[
"def",
"_check_registry_type",
"(",
"folder",
"=",
"None",
")",
":",
"folder",
"=",
"_registry_folder",
"(",
"folder",
")",
"default_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"'registry_type.txt'",
")",
"try",
":",
"with",
"open",
"(",
"default_file",
",",
"\"r\"",
")",
"as",
"infile",
":",
"data",
"=",
"infile",
".",
"read",
"(",
")",
"data",
"=",
"data",
".",
"strip",
"(",
")",
"ComponentRegistry",
".",
"SetBackingStore",
"(",
"data",
")",
"except",
"IOError",
":",
"pass"
] |
Integrate a Gaussian profile .
|
def gauss_box_model ( x , amplitude = 1.0 , mean = 0.0 , stddev = 1.0 , hpix = 0.5 ) : z = ( x - mean ) / stddev z2 = z + hpix / stddev z1 = z - hpix / stddev return amplitude * ( norm . cdf ( z2 ) - norm . cdf ( z1 ) )
| 3,797
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/modeling/gaussbox.py#L24-L29
|
[
"def",
"copy",
"(",
"self",
",",
"orig",
")",
":",
"# type: (PrimaryOrSupplementaryVD) -> None",
"if",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'This Volume Descriptor is already initialized'",
")",
"self",
".",
"version",
"=",
"orig",
".",
"version",
"self",
".",
"flags",
"=",
"orig",
".",
"flags",
"self",
".",
"system_identifier",
"=",
"orig",
".",
"system_identifier",
"self",
".",
"volume_identifier",
"=",
"orig",
".",
"volume_identifier",
"self",
".",
"escape_sequences",
"=",
"orig",
".",
"escape_sequences",
"self",
".",
"space_size",
"=",
"orig",
".",
"space_size",
"self",
".",
"set_size",
"=",
"orig",
".",
"set_size",
"self",
".",
"seqnum",
"=",
"orig",
".",
"seqnum",
"self",
".",
"log_block_size",
"=",
"orig",
".",
"log_block_size",
"self",
".",
"path_tbl_size",
"=",
"orig",
".",
"path_tbl_size",
"self",
".",
"path_table_location_le",
"=",
"orig",
".",
"path_table_location_le",
"self",
".",
"optional_path_table_location_le",
"=",
"orig",
".",
"optional_path_table_location_le",
"self",
".",
"path_table_location_be",
"=",
"orig",
".",
"path_table_location_be",
"self",
".",
"optional_path_table_location_be",
"=",
"orig",
".",
"optional_path_table_location_be",
"# Root dir record is a DirectoryRecord object, and we want this copy to hold",
"# onto exactly the same reference as the original",
"self",
".",
"root_dir_record",
"=",
"orig",
".",
"root_dir_record",
"self",
".",
"volume_set_identifier",
"=",
"orig",
".",
"volume_set_identifier",
"# publisher_identifier is a FileOrTextIdentifier object, and we want this copy to",
"# hold onto exactly the same reference as the original",
"self",
".",
"publisher_identifier",
"=",
"orig",
".",
"publisher_identifier",
"# preparer_identifier is a FileOrTextIdentifier object, and we want this copy to",
"# hold onto exactly the same reference as the original",
"self",
".",
"preparer_identifier",
"=",
"orig",
".",
"preparer_identifier",
"# application_identifier is a FileOrTextIdentifier object, and we want this copy to",
"# hold onto exactly the same reference as the original",
"self",
".",
"application_identifier",
"=",
"orig",
".",
"application_identifier",
"self",
".",
"copyright_file_identifier",
"=",
"orig",
".",
"copyright_file_identifier",
"self",
".",
"abstract_file_identifier",
"=",
"orig",
".",
"abstract_file_identifier",
"self",
".",
"bibliographic_file_identifier",
"=",
"orig",
".",
"bibliographic_file_identifier",
"# volume_creation_date is a VolumeDescriptorDate object, and we want this copy to",
"# hold onto exactly the same reference as the original",
"self",
".",
"volume_creation_date",
"=",
"orig",
".",
"volume_creation_date",
"# volume_expiration_date is a VolumeDescriptorDate object, and we want this copy to",
"# hold onto exactly the same reference as the original",
"self",
".",
"volume_expiration_date",
"=",
"orig",
".",
"volume_expiration_date",
"# volume_effective_date is a VolumeDescriptorDate object, and we want this copy to",
"# hold onto exactly the same reference as the original",
"self",
".",
"volume_effective_date",
"=",
"orig",
".",
"volume_effective_date",
"self",
".",
"file_structure_version",
"=",
"orig",
".",
"file_structure_version",
"self",
".",
"application_use",
"=",
"orig",
".",
"application_use",
"self",
".",
"_initialized",
"=",
"True"
] |
Derivative of the integral of a Gaussian profile .
|
def gauss_box_model_deriv ( x , amplitude = 1.0 , mean = 0.0 , stddev = 1.0 , hpix = 0.5 ) : z = ( x - mean ) / stddev z2 = z + hpix / stddev z1 = z - hpix / stddev da = norm . cdf ( z2 ) - norm . cdf ( z1 ) fp2 = norm_pdf_t ( z2 ) fp1 = norm_pdf_t ( z1 ) dl = - amplitude / stddev * ( fp2 - fp1 ) ds = - amplitude / stddev * ( fp2 * z2 - fp1 * z1 ) dd = amplitude / stddev * ( fp2 + fp1 ) return da , dl , ds , dd
| 3,798
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/modeling/gaussbox.py#L32-L47
|
[
"def",
"with_headers",
"(",
"self",
",",
"headers",
")",
":",
"return",
"self",
".",
"replace",
"(",
"headers",
"=",
"_merge_maps",
"(",
"self",
".",
"headers",
",",
"headers",
")",
")"
] |
Find peaks in array .
|
def find_peaks_spectrum ( sx , nwinwidth , threshold = 0 , debugplot = 0 ) : if type ( sx ) is not np . ndarray : raise ValueError ( "sx=" + str ( sx ) + " must be a numpy.ndarray" ) elif sx . ndim is not 1 : raise ValueError ( "sx.ndim=" + str ( sx . ndim ) + " must be 1" ) sx_shape = sx . shape nmed = nwinwidth // 2 if debugplot >= 10 : print ( 'find_peaks_spectrum> sx shape......:' , sx_shape ) print ( 'find_peaks_spectrum> nwinwidth.....:' , nwinwidth ) print ( 'find_peaks_spectrum> nmed..........:' , nmed ) print ( 'find_peaks_spectrum> data_threshold:' , threshold ) print ( 'find_peaks_spectrum> the first and last' , nmed , 'pixels will be ignored' ) xpeaks = [ ] # list to store the peaks if sx_shape [ 0 ] < nwinwidth : print ( 'find_peaks_spectrum> sx shape......:' , sx_shape ) print ( 'find_peaks_spectrum> nwinwidth.....:' , nwinwidth ) raise ValueError ( 'sx.shape < nwinwidth' ) i = nmed while i < sx_shape [ 0 ] - nmed : if sx [ i ] > threshold : peak_ok = True j = 0 loop = True while loop : if sx [ i - nmed + j ] > sx [ i - nmed + j + 1 ] : peak_ok = False j += 1 loop = ( j < nmed ) and peak_ok if peak_ok : j = nmed + 1 loop = True while loop : if sx [ i - nmed + j - 1 ] < sx [ i - nmed + j ] : peak_ok = False j += 1 loop = ( j < nwinwidth ) and peak_ok if peak_ok : xpeaks . append ( i ) i += nwinwidth - 1 else : i += 1 else : i += 1 ixpeaks = np . array ( xpeaks ) if debugplot >= 10 : print ( 'find_peaks_spectrum> number of peaks found:' , len ( ixpeaks ) ) print ( ixpeaks ) return ixpeaks
| 3,799
|
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/peaks_spectrum.py#L19-L106
|
[
"def",
"_create_download_failed_message",
"(",
"exception",
",",
"url",
")",
":",
"message",
"=",
"'Failed to download from:\\n{}\\nwith {}:\\n{}'",
".",
"format",
"(",
"url",
",",
"exception",
".",
"__class__",
".",
"__name__",
",",
"exception",
")",
"if",
"_is_temporal_problem",
"(",
"exception",
")",
":",
"if",
"isinstance",
"(",
"exception",
",",
"requests",
".",
"ConnectionError",
")",
":",
"message",
"+=",
"'\\nPlease check your internet connection and try again.'",
"else",
":",
"message",
"+=",
"'\\nThere might be a problem in connection or the server failed to process '",
"'your request. Please try again.'",
"elif",
"isinstance",
"(",
"exception",
",",
"requests",
".",
"HTTPError",
")",
":",
"try",
":",
"server_message",
"=",
"''",
"for",
"elem",
"in",
"decode_data",
"(",
"exception",
".",
"response",
".",
"content",
",",
"MimeType",
".",
"XML",
")",
":",
"if",
"'ServiceException'",
"in",
"elem",
".",
"tag",
"or",
"'Message'",
"in",
"elem",
".",
"tag",
":",
"server_message",
"+=",
"elem",
".",
"text",
".",
"strip",
"(",
"'\\n\\t '",
")",
"except",
"ElementTree",
".",
"ParseError",
":",
"server_message",
"=",
"exception",
".",
"response",
".",
"text",
"message",
"+=",
"'\\nServer response: \"{}\"'",
".",
"format",
"(",
"server_message",
")",
"return",
"message"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.