idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
43,800
def get_whitelist_page(self, page_number=None, page_size=None):
    """Get one page of the indicators the user's company has whitelisted.

    :param page_number: zero-based page index (server default when None)
    :param page_size: items per page (server default when None)
    :return: a |Page| of |Indicator| objects
    """
    query = {'pageNumber': page_number, 'pageSize': page_size}
    response = self._client.get("whitelist", params=query)
    return Page.from_dict(response.json(), content_type=Indicator)
Gets a paginated list of indicators that the user's company has whitelisted.
43,801
def get_related_indicators_page(self, indicators=None, enclave_ids=None,
                                page_size=None, page_number=None):
    """Find indicators correlated with the given ones.

    Looks up all reports containing any of `indicators` and returns one page
    of the indicators those reports contain.

    :param indicators: indicator values to correlate on
    :param enclave_ids: enclaves to restrict the search to
    :param page_size: items per page
    :param page_number: zero-based page index
    :return: a |Page| of |Indicator| objects
    """
    query = {'indicators': indicators,
             'enclaveIds': enclave_ids,
             'pageNumber': page_number,
             'pageSize': page_size}
    response = self._client.get("indicators/related", params=query)
    return Page.from_dict(response.json(), content_type=Indicator)
Finds all reports that contain any of the given indicators and returns correlated indicators from those reports .
43,802
def _get_indicators_for_report_page_generator(self, report_id, start_page=0, page_size=None):
    """Yield successive |Page| objects from |get_indicators_for_report_page|.

    :param report_id: report whose indicators are paged through
    :param start_page: first page index to fetch
    :param page_size: items per page
    """
    fetch = functools.partial(self.get_indicators_for_report_page, report_id=report_id)
    return Page.get_page_generator(fetch, start_page, page_size)
Creates a generator from the |get_indicators_for_report_page| method that returns each successive page .
43,803
def _get_related_indicators_page_generator(self, indicators=None, enclave_ids=None,
                                           start_page=0, page_size=None):
    """Yield successive |Page| objects from |get_related_indicators_page|."""
    fetch = functools.partial(self.get_related_indicators_page, indicators, enclave_ids)
    return Page.get_page_generator(fetch, start_page, page_size)
Creates a generator from the |get_related_indicators_page| method that returns each successive page .
43,804
def _get_whitelist_page_generator(self, start_page=0, page_size=None):
    """Yield successive |Page| objects from |get_whitelist_page|."""
    return Page.get_page_generator(self.get_whitelist_page, start_page, page_size)
Creates a generator from the |get_whitelist_page| method that returns each successive page .
43,805
def points(self, size=1.0, highlight=None, colorlist=None, opacity=1.0):
    """Display the system as points.

    :param size: point size applied to every atom
    :param highlight: atom index, or list/array of indices, to color red
    :param colorlist: per-atom colors; defaults to colors derived from atom types
    :param opacity: point opacity
    """
    if colorlist is None:
        colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
    if highlight is not None:
        # A single index or a collection of indices is painted red.
        if isinstance(highlight, int):
            colorlist[highlight] = 0xff0000
        if isinstance(highlight, (list, np.ndarray)):
            for i in highlight:
                colorlist[i] = 0xff0000
    sizes = [size] * len(self.topology['atom_types'])
    points = self.add_representation('points', {'coordinates': self.coordinates.astype('float32'),
                                                'colors': colorlist,
                                                'sizes': sizes,
                                                'opacity': opacity})
    # Default arguments bind the viewer and the representation id at
    # definition time so the callback keeps working later.
    def update(self=self, points=points):
        self.update_representation(points, {'coordinates': self.coordinates.astype('float32')})
    self.update_callbacks.append(update)
    self.autozoom(self.coordinates)
Display the system as points .
43,806
def labels(self, text=None, coordinates=None, colorlist=None, sizes=None, fonts=None, opacity=1.0):
    """Display atomic labels for the system.

    :param text: label strings; defaults to atom type + 1-based index when the
        number of coordinates matches the topology, else bare 1-based indices
    :param coordinates: label positions; defaults to the atom coordinates
    :param colorlist: per-label colors
    :param sizes: per-label sizes
    :param fonts: per-label fonts
    :param opacity: label opacity
    """
    if coordinates is None:
        coordinates = self.coordinates
    l = len(coordinates)
    if text is None:
        if len(self.topology.get('atom_types')) == l:
            text = [self.topology['atom_types'][i] + str(i + 1) for i in range(l)]
        else:
            text = [str(i + 1) for i in range(l)]
    text_representation = self.add_representation('text', {'coordinates': coordinates,
                                                           'text': text,
                                                           'colors': colorlist,
                                                           'sizes': sizes,
                                                           'fonts': fonts,
                                                           'opacity': opacity})
    # NOTE(review): the callback re-sends the `coordinates` object captured
    # here, so labels track atoms only if that object is self.coordinates and
    # is mutated in place — confirm against the viewer's update model.
    def update(self=self, text_representation=text_representation):
        self.update_representation(text_representation, {'coordinates': coordinates})
    self.update_callbacks.append(update)
Display atomic labels for the system
43,807
def remove_labels(self):
    """Remove all atomic labels from the system.

    Every 'text' representation is removed except the axes labels tracked in
    self._axes_reps.
    """
    # BUG FIX: iterate over a snapshot of the ids — remove_representation
    # mutates self.representations, and mutating a dict while iterating its
    # keys() view raises RuntimeError on Python 3.
    for rep_id in list(self.representations.keys()):
        if self.representations[rep_id]['rep_type'] == 'text' and rep_id not in self._axes_reps:
            self.remove_representation(rep_id)
Remove all atomic labels from the system
43,808
def lines(self):
    """Display the system bonds as lines.

    Does nothing when the topology has no 'bonds' entry.
    """
    if "bonds" not in self.topology:
        return
    bond_start, bond_end = zip(*self.topology['bonds'])
    bond_start = np.array(bond_start)
    bond_end = np.array(bond_end)
    # Each end of a line inherits the color of the atom it touches.
    color_array = np.array([get_atom_color(t) for t in self.topology['atom_types']])
    lines = self.add_representation('lines', {'startCoords': self.coordinates[bond_start],
                                              'endCoords': self.coordinates[bond_end],
                                              'startColors': color_array[bond_start].tolist(),
                                              'endColors': color_array[bond_end].tolist()})
    # Bound as default args so the callback survives later rebinding.
    def update(self=self, lines=lines):
        # Bonds are re-read on every frame in case the topology changed.
        bond_start, bond_end = zip(*self.topology['bonds'])
        bond_start = np.array(bond_start)
        bond_end = np.array(bond_end)
        self.update_representation(lines, {'startCoords': self.coordinates[bond_start],
                                           'endCoords': self.coordinates[bond_end]})
    self.update_callbacks.append(update)
    self.autozoom(self.coordinates)
Display the system bonds as lines .
43,809
def ball_and_sticks(self, ball_radius=0.05, stick_radius=0.02, colorlist=None, opacity=1.0):
    """Display the system using a ball and stick representation.

    :param ball_radius: sphere radius for atoms
    :param stick_radius: cylinder radius for bonds
    :param colorlist: per-atom colors; defaults to colors derived from atom types
    :param opacity: opacity for both spheres and cylinders
    """
    if colorlist is None:
        colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
    sizes = [ball_radius] * len(self.topology['atom_types'])
    spheres = self.add_representation('spheres', {'coordinates': self.coordinates.astype('float32'),
                                                  'colors': colorlist,
                                                  'radii': sizes,
                                                  'opacity': opacity})
    def update(self=self, spheres=spheres):
        self.update_representation(spheres, {'coordinates': self.coordinates.astype('float32')})
    self.update_callbacks.append(update)
    if 'bonds' in self.topology and self.topology['bonds'] is not None:
        start_idx, end_idx = zip(*self.topology['bonds'])
        new_start_coords = []
        new_end_coords = []
        for bond_ind, bond in enumerate(self.topology['bonds']):
            # Trim each stick so it meets the sphere surface instead of
            # poking through its center (right triangle on the two radii).
            trim_amt = (ball_radius ** 2 - stick_radius ** 2) ** 0.5 if ball_radius > stick_radius else 0
            start_coord = self.coordinates[bond[0]]
            end_coord = self.coordinates[bond[1]]
            vec = (end_coord - start_coord) / np.linalg.norm(end_coord - start_coord)
            new_start_coords.append(start_coord + vec * trim_amt)
            new_end_coords.append(end_coord - vec * trim_amt)
        cylinders = self.add_representation('cylinders', {'startCoords': np.array(new_start_coords, dtype='float32'),
                                                          'endCoords': np.array(new_end_coords, dtype='float32'),
                                                          'colors': [0xcccccc] * len(new_start_coords),
                                                          'radii': [stick_radius] * len(new_start_coords),
                                                          'opacity': opacity})
        # NOTE(review): this update sends the raw (untrimmed) bond endpoints,
        # unlike the trimmed initial coordinates above — confirm intended.
        def update(self=self, rep=cylinders, start_idx=start_idx, end_idx=end_idx):
            self.update_representation(rep, {'startCoords': self.coordinates[list(start_idx)],
                                             'endCoords': self.coordinates[list(end_idx)]})
        self.update_callbacks.append(update)
    self.autozoom(self.coordinates)
Display the system using a ball and stick representation .
43,810
def line_ribbon(self):
    """Render the protein backbone as a smooth white line through the CA atoms."""
    backbone = np.array(self.topology['atom_names']) == 'CA'
    rep_id = self.add_representation(
        'smoothline',
        {'coordinates': self.coordinates[backbone], 'color': 0xffffff})
    # `backbone` is captured from the enclosing scope; the representation id
    # and viewer are bound as defaults so the callback stays valid.
    def update(self=self, smoothline=rep_id):
        self.update_representation(smoothline,
                                   {'coordinates': self.coordinates[backbone]})
    self.update_callbacks.append(update)
    self.autozoom(self.coordinates)
Display the protein secondary structure as a white line that passes through the backbone chain.
43,811
def cylinder_and_strand(self):
    """Display the protein secondary structure as a white solid tube and the
    alpha-helices as yellow cylinders.
    """
    top = self.topology
    in_helix = False
    helices_starts = []
    helices_ends = []
    coils = []
    coil = []
    # Walk the residues, splitting the chain into helix spans (start/end atom
    # indices) and coil spans (lists of control-point atom indices).
    for i, typ in enumerate(top['secondary_structure']):
        if typ == 'H':
            if in_helix == False:
                # Coil -> helix transition: remember where the helix starts.
                helices_starts.append(top['residue_indices'][i][0])
                in_helix = True
            coil.append(top['residue_indices'][i][0])
        else:
            if in_helix == True:
                # Helix -> coil transition: close the helix, start a new coil.
                helices_ends.append(top['residue_indices'][i][0])
                coil = []
                coils.append(coil)
                in_helix = False
            coil.append(top['residue_indices'][i][0])
            # Side-effect comprehension: add this residue's CA atoms as
            # additional tube control points.
            [coil.append(j) for j in top['residue_indices'][i] if top['atom_names'][j] == 'CA']
    coil_representations = []
    for control_points in coils:
        rid = self.add_representation('smoothtube', {'coordinates': self.coordinates[control_points],
                                                     'radius': 0.05,
                                                     'resolution': 4,
                                                     'color': 0xffffff})
        coil_representations.append(rid)
    start_idx, end_idx = helices_starts, helices_ends
    # NOTE(review): the colors/radii lists are sized by len(self.coordinates)
    # rather than by the number of cylinders — confirm the renderer tolerates
    # the extra entries.
    cylinders = self.add_representation('cylinders', {'startCoords': self.coordinates[list(start_idx)],
                                                      'endCoords': self.coordinates[list(end_idx)],
                                                      'colors': [0xffff00] * len(self.coordinates),
                                                      'radii': [0.15] * len(self.coordinates)})
    # NOTE(review): `control_points` bound here is the last loop value; it is
    # immediately shadowed by the for-loop inside, so the default is unused.
    def update(self=self, cylinders=cylinders, coils=coils,
               coil_representations=coil_representations,
               start_idx=start_idx, end_idx=end_idx, control_points=control_points):
        for i, control_points in enumerate(coils):
            rid = self.update_representation(coil_representations[i],
                                             {'coordinates': self.coordinates[control_points]})
        self.update_representation(cylinders, {'startCoords': self.coordinates[list(start_idx)],
                                               'endCoords': self.coordinates[list(end_idx)]})
    self.update_callbacks.append(update)
    self.autozoom(self.coordinates)
Display the protein secondary structure as a white solid tube and the alpha - helices as yellow cylinders .
43,812
def cartoon(self, cmap=None):
    """Display a protein secondary structure as a pymol-like cartoon.

    :param cmap: optional colormap forwarded to the cartoon geometry
    """
    top = self.topology
    geom = gg.GeomProteinCartoon(
        gg.Aes(xyz=self.coordinates,
               types=top['atom_names'],
               secondary_type=top['secondary_structure']),
        cmap=cmap)
    primitives = geom.produce(gg.Aes())
    rep_ids = [self.add_representation(p['rep_type'], p['options']) for p in primitives]
    def update(self=self, geom=geom, ids=rep_ids):
        # Re-produce the primitives with the current coordinates and push
        # them to the existing representations in order.
        for rep_id, rep_options in zip(ids, geom.produce(gg.Aes(xyz=self.coordinates))):
            self.update_representation(rep_id, rep_options)
    self.update_callbacks.append(update)
    self.autozoom(self.coordinates)
Display a protein secondary structure as a pymol - like cartoon representation .
43,813
def add_isosurface(self, function, isolevel=0.3, resolution=32, style="wireframe", color=0xffffff):
    """Add an isosurface to the current scene.

    :param function: callable f(x, y, z) evaluated on a regular grid spanning
        the atom bounding box padded by 0.2 on each side
    :param isolevel: surface level; a negative level marches on the negated field
    :param resolution: number of grid samples per axis
    :param style: one of 'wireframe', 'solid', 'transparent'
    :param color: surface color as 0xRRGGBB
    :raises ValueError: if `style` is not an allowed style
    """
    avail_styles = ['wireframe', 'solid', 'transparent']
    if style not in avail_styles:
        raise ValueError('style must be in ' + str(avail_styles))
    # Sample the scalar field on a padded bounding box of the coordinates.
    area_min = self.coordinates.min(axis=0) - 0.2
    area_max = self.coordinates.max(axis=0) + 0.2
    x = np.linspace(area_min[0], area_max[0], resolution)
    y = np.linspace(area_min[1], area_max[1], resolution)
    z = np.linspace(area_min[2], area_max[2], resolution)
    xv, yv, zv = np.meshgrid(x, y, z)
    spacing = np.array((area_max - area_min) / resolution)
    if isolevel >= 0:
        triangles = marching_cubes(function(xv, yv, zv), isolevel)
    else:
        # Negative isolevels are extracted from the negated field.
        triangles = marching_cubes(-function(xv, yv, zv), -isolevel)
    if len(triangles) == 0:
        # Nothing to display at this level.
        return
    faces = []
    verts = []
    # marching_cubes yields independent triangles; index vertices sequentially.
    for i, t in enumerate(triangles):
        faces.append([i * 3, i * 3 + 1, i * 3 + 2])
        verts.extend(t)
    faces = np.array(faces)
    # Map grid-space vertices back into world space.
    verts = area_min + spacing / 2 + np.array(verts) * spacing
    rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
                                                 'faces': faces.astype('int32'),
                                                 'style': style,
                                                 'color': color})
    self.autozoom(verts)
Add an isosurface to the current scene .
43,814
def add_isosurface_grid_data(self, data, origin, extent, resolution, isolevel=0.3, scale=10, style="wireframe", color=0xffffff):
    """Add an isosurface to the current scene using pre-computed grid data.

    :param data: 3D scalar field sampled on a regular grid
    :param origin: world-space coordinates of the grid origin
    :param extent: physical size of the grid along each axis
    :param resolution: number of grid samples per axis
    :param isolevel: surface level; a negative level marches on the negated field
    :param scale: divisor applied to the grid spacing (unit conversion)
    :param style: rendering style string
    :param color: surface color as 0xRRGGBB
    """
    spacing = np.array(extent / resolution) / scale
    if isolevel >= 0:
        triangles = marching_cubes(data, isolevel)
    else:
        # Negative isolevels are extracted from the negated field.
        triangles = marching_cubes(-data, -isolevel)
    faces = []
    verts = []
    # marching_cubes yields independent triangles; index vertices sequentially.
    for i, t in enumerate(triangles):
        faces.append([i * 3, i * 3 + 1, i * 3 + 2])
        verts.extend(t)
    faces = np.array(faces)
    # Map grid-space vertices back into world space.
    verts = origin + spacing / 2 + np.array(verts) * spacing
    rep_id = self.add_representation('surface', {'verts': verts.astype('float32'),
                                                 'faces': faces.astype('int32'),
                                                 'style': style,
                                                 'color': color})
    self.autozoom(verts)
Add an isosurface to the current scene using pre-computed data on a grid.
43,815
def _refresh_token(self):
    """Fetch a fresh OAuth2 token with the API key/secret and store it.

    Sets self.token from the response. If the current token is still live,
    the server simply returns it again.

    :raises HTTPError: if the auth endpoint responds with a 4xx/5xx status
    """
    credentials = requests.auth.HTTPBasicAuth(self.api_key, self.api_secret)
    response = requests.post(self.auth,
                             auth=credentials,
                             data={"grant_type": "client_credentials"},
                             proxies=self.proxies)
    self.last_response = response
    if 400 <= response.status_code < 600:
        kind = "Client" if response.status_code < 500 else "Server"
        message = "{} {} Error (Trace-Id: {}): {}".format(
            response.status_code, kind, self._get_trace_id(response),
            "unable to get token")
        raise HTTPError(message, response=response)
    self.token = response.json()["access_token"]
Retrieves the OAuth2 token generated by the user's API key and API secret. Sets the instance property token to this new token. If the current token is still live, the server will simply return that.
43,816
def _get_headers ( self , is_json = False ) : headers = { "Authorization" : "Bearer " + self . _get_token ( ) } if self . client_type is not None : headers [ "Client-Type" ] = self . client_type if self . client_version is not None : headers [ "Client-Version" ] = self . client_version if self . client_metatag is not None : headers [ "Client-Metatag" ] = self . client_metatag if is_json : headers [ 'Content-Type' ] = 'application/json' return headers
Create headers dictionary for a request .
43,817
def _is_expired_token_response ( cls , response ) : EXPIRED_MESSAGE = "Expired oauth2 access token" INVALID_MESSAGE = "Invalid oauth2 access token" if response . status_code == 400 : try : body = response . json ( ) if str ( body . get ( 'error_description' ) ) in [ EXPIRED_MESSAGE , INVALID_MESSAGE ] : return True except : pass return False
Determine whether the given response indicates that the token is expired .
43,818
def request(self, method, path, headers=None, params=None, data=None, **kwargs):
    """Wrapper around requests.request handling TruSTAR API boilerplate.

    Adds auth/client headers, retries after refreshing an expired token,
    honors 429 rate-limit waits up to self.max_wait_time, and raises
    HTTPError for any 4xx/5xx response.

    :param method: HTTP method name
    :param path: path appended to self.base
    :param headers: extra headers merged over the defaults
    :param params: query parameters
    :param data: request body
    :return: the requests.Response on success
    :raises HTTPError: for 4xx/5xx responses
    """
    retry = self.retry
    attempted = False
    while not attempted or retry:
        # JSON content type only for body-carrying methods.
        base_headers = self._get_headers(is_json=method in ["POST", "PUT"])
        if headers is not None:
            base_headers.update(headers)
        url = "{}/{}".format(self.base, path)
        response = requests.request(method=method,
                                    url=url,
                                    headers=base_headers,
                                    verify=self.verify,
                                    params=params,
                                    data=data,
                                    proxies=self.proxies,
                                    **kwargs)
        self.last_response = response
        attempted = True
        self.logger.debug("%s %s. Trace-Id: %s. Params: %s",
                          method, url, response.headers.get('Trace-Id'), params)
        if self._is_expired_token_response(response):
            # Token expired mid-session: refresh and loop to retry.
            self._refresh_token()
        elif retry and response.status_code == 429:
            # Rate limited: server suggests a wait in milliseconds.
            wait_time = ceil(response.json().get('waitTime') / 1000)
            self.logger.debug("Waiting %d seconds until next request allowed." % wait_time)
            if wait_time <= self.max_wait_time:
                time.sleep(wait_time)
            else:
                # Suggested wait exceeds our budget; give up retrying.
                retry = False
        else:
            retry = False
    if 400 <= response.status_code < 600:
        # Prefer the server-provided message when the body is JSON.
        resp_json = None
        try:
            resp_json = response.json()
        except:
            pass
        if resp_json is not None and 'message' in resp_json:
            reason = resp_json['message']
        else:
            reason = "unknown cause"
        message = "{} {} Error (Trace-Id: {}): {}".format(
            response.status_code,
            "Client" if response.status_code < 500 else "Server",
            self._get_trace_id(response),
            reason)
        raise HTTPError(message, response=response)
    return response
A wrapper around requests . request that handles boilerplate code specific to TruStar s API .
43,819
def isosurface_from_data(data, isolevel, origin, spacing):
    """Compute vertices and faces of an isosurface from gridded scalar data.

    Small wrapper to get vertices and faces directly, to feed into other
    programs.

    :param data: 3D scalar field sampled on a regular grid
    :param isolevel: surface level; a negative level marches on the negated field
    :param origin: world-space coordinates of the grid origin
    :param spacing: grid spacing along each axis
    :return: (verts, faces) arrays
    """
    # BUG FIX: the original began with `spacing = np.array(extent / resolution)`,
    # which referenced undefined names (`extent`, `resolution`) — raising
    # NameError on every call — and clobbered the `spacing` parameter.
    if isolevel >= 0:
        triangles = marching_cubes(data, isolevel)
    else:
        triangles = marching_cubes(-data, -isolevel)
    faces = []
    verts = []
    # marching_cubes yields independent triangles; index vertices sequentially.
    for i, t in enumerate(triangles):
        faces.append([i * 3, i * 3 + 1, i * 3 + 2])
        verts.extend(t)
    faces = np.array(faces)
    # Map grid-space vertices back into world space.
    verts = origin + spacing / 2 + np.array(verts) * spacing
    return verts, faces
Small wrapper to get directly vertices and faces to feed into programs
43,820
def _get_gtf_column ( column_name , gtf_path , df ) : if column_name in df . columns : return list ( df [ column_name ] ) else : raise ValueError ( "Missing '%s' in columns of %s, available: %s" % ( column_name , gtf_path , list ( df . columns ) ) )
Helper function which returns a column of the dataframe as a list, or raises a ValueError about the absence of that column in a GTF file.
43,821
def load_transcript_fpkm_dict_from_gtf(gtf_path,
                                       transcript_id_column_name="reference_id",
                                       fpkm_column_name="FPKM",
                                       feature_column_name="feature"):
    """Load a StringTie GTF with transcript-level abundance quantification.

    :param gtf_path: path to the GTF file
    :param transcript_id_column_name: column holding Ensembl transcript IDs
    :param fpkm_column_name: column holding FPKM values
    :param feature_column_name: column holding the feature type
    :return: dict mapping transcript IDs to float FPKM values
    """
    df = gtfparse.read_gtf(gtf_path, column_converters={fpkm_column_name: float})
    transcript_ids = _get_gtf_column(transcript_id_column_name, gtf_path, df)
    fpkm_values = _get_gtf_column(fpkm_column_name, gtf_path, df)
    features = _get_gtf_column(feature_column_name, gtf_path, df)
    logging.info("Loaded %d rows from %s" % (len(transcript_ids), gtf_path))
    n_transcripts = sum(feature == "transcript" for feature in features)
    logging.info("Found %s transcript entries" % n_transcripts)
    # Keep only 'transcript' rows that actually carry a reference ID.
    result = {}
    for transcript_id, fpkm, feature in zip(transcript_ids, fpkm_values, features):
        if (transcript_id is not None) and (len(transcript_id) > 0) and (feature == "transcript"):
            result[transcript_id] = float(fpkm)
    logging.info("Keeping %d transcript rows with reference IDs" % (len(result),))
    return result
Load a GTF file generated by StringTie which contains transcript - level quantification of abundance . Returns a dictionary mapping Ensembl IDs of transcripts to FPKM values .
43,822
def predict_from_sequences(self, sequences):
    """Predict MHC ligands for sub-sequences of each input sequence.

    :param sequences: iterable of amino acid sequence strings
    :return: prediction dataframe with 'source_sequence' in place of
        'source_sequence_name'
    """
    # Use each sequence as its own name so predictions map back to it.
    named = {sequence: sequence for sequence in sequences}
    predictions = self.predict_from_named_sequences(named)
    return predictions.rename(columns={"source_sequence_name": "source_sequence"})
Predict MHC ligands for sub - sequences of each input sequence .
43,823
def predict_from_variants(self, variants, transcript_expression_dict=None, gene_expression_dict=None):
    """Predict epitopes from a variant collection.

    Variants are first filtered by the optional gene/transcript expression
    data against this predictor's thresholds, then turned into effects.

    :param variants: variant collection to predict from
    :param transcript_expression_dict: transcript ID -> expression value
    :param gene_expression_dict: gene ID -> expression value
    :return: epitope predictions for the surviving mutation effects
    """
    filtered = apply_variant_expression_filters(
        variants,
        transcript_expression_dict=transcript_expression_dict,
        transcript_expression_threshold=self.min_transcript_expression,
        gene_expression_dict=gene_expression_dict,
        gene_expression_threshold=self.min_gene_expression)
    effects = filtered.effects(raise_on_error=self.raise_on_error)
    return self.predict_from_mutation_effects(
        effects=effects,
        transcript_expression_dict=transcript_expression_dict,
        gene_expression_dict=gene_expression_dict)
Predict epitopes from a Variant collection filtering options and optional gene and transcript expression data .
43,824
def main(args_list=None):
    """Script entry point: predict neo-epitopes from genomic variants.

    :param args_list: optional argv-style list; None means sys.argv
    """
    args = parse_args(args_list)
    print("Topiary commandline arguments:")
    print(args)
    result_df = predict_epitopes_from_args(args)
    write_outputs(result_df, args)
    print("Total count: %d" % len(result_df))
Script entry - point to predict neo - epitopes from genomic variants using Topiary .
43,825
def render_povray(scene, filename='ipython', width=600, height=600, antialiasing=0.01, extra_opts={}):
    """Render the scene with povray for publication.

    :param scene: scene description dict (normalized before use)
    :param filename: output file name, or 'ipython' for inline display
    :param width: image width in pixels
    :param height: image height in pixels
    :param antialiasing: povray antialiasing threshold
    :param extra_opts: extra keys merged into the normalized scene
    :raises Exception: if the vapory package is not installed
    """
    if not vapory_available:
        raise Exception("To render with povray, you need to have the vapory"
                        " package installed.")
    scene = normalize_scene(scene)
    scene.update(extra_opts)
    aspect = scene['camera']['aspect']
    # Camera up vector: rotate world-up by the camera quaternion.
    up = np.dot(rmatrixquaternion(scene['camera']['quaternion']), [0, 1, 0])
    # Convert the vertical FOV to the horizontal FOV povray expects.
    v_fov = scene['camera']['vfov'] / 180.0 * np.pi
    h_fov = 2.0 * np.arctan(np.tan(v_fov / 2.0) * aspect) / np.pi * 180
    camera = vp.Camera('location', scene['camera']['location'],
                       'direction', [0, 0, -1],
                       'sky', up,
                       'look_at', scene['camera']['target'],
                       'angle', h_fov)
    global_settings = []
    if scene.get('radiosity', False):
        # Radiosity lighting: no explicit light sources needed.
        radiosity = vp.Radiosity('brightness', 2.0,
                                 'count', 100,
                                 'error_bound', 0.15,
                                 'gray_threshold', 0.0,
                                 'low_error_factor', 0.2,
                                 'minimum_reuse', 0.015,
                                 'nearest_count', 10,
                                 'recursion_limit', 1,
                                 'adc_bailout', 0.01,
                                 'max_sample', 0.5,
                                 'media off',
                                 'normal off',
                                 'always_sample', 1,
                                 'pretrace_start', 0.08,
                                 'pretrace_end', 0.01)
        light_sources = []
        global_settings.append(radiosity)
    else:
        # Four distant white lights roughly surrounding the scene.
        light_sources = [vp.LightSource(np.array([2, 4, -3]) * 1000, 'color', [1, 1, 1]),
                         vp.LightSource(np.array([-2, -4, 3]) * 1000, 'color', [1, 1, 1]),
                         vp.LightSource(np.array([-1, 2, 3]) * 1000, 'color', [1, 1, 1]),
                         vp.LightSource(np.array([1, -2, -3]) * 1000, 'color', [1, 1, 1])]
    background = vp.Background([1, 1, 1])
    stuff = _generate_objects(scene['representations'])
    scene = vp.Scene(camera,
                     objects=light_sources + stuff + [background],
                     global_settings=global_settings)
    return scene.render(filename, width=width, height=height, antialiasing=antialiasing)
Render the scene with povray for publication .
43,826
def rmatrixquaternion(q):
    """Create a rotation matrix from a unit quaternion.

    :param q: quaternion as a length-4 array-like (x, y, z, w), unit norm
    :return: 3x3 numpy rotation matrix
    """
    assert np.allclose(math.sqrt(np.dot(q, q)), 1.0)
    x, y, z, w = q
    xx, yy, zz = x * x, y * y, z * z
    xy, xz, xw = x * y, x * z, x * w
    yz, yw, zw = y * z, y * w, z * w
    R = np.array([
        [1.0 - 2.0 * (yy + zz), 2.0 * (xy - zw),       2.0 * (xz + yw)],
        [2.0 * (xy + zw),       1.0 - 2.0 * (xx + zz), 2.0 * (yz - xw)],
        [2.0 * (xz - yw),       2.0 * (yz + xw),       1.0 - 2.0 * (xx + yy)],
    ], float)
    # A proper rotation must have determinant +1.
    assert np.allclose(np.linalg.det(R), 1.0)
    return R
Create a rotation matrix from q quaternion rotation . Quaternions are typed as Numeric Python numpy . arrays of length 4 .
43,827
def rna_transcript_expression_dict_from_args(args):
    """Build a transcript-ID -> FPKM dict from command-line arguments.

    Uses a Cufflinks tracking file if given, else a StringTie GTF file;
    returns None when neither was specified.
    """
    if args.rna_transcript_fpkm_tracking_file:
        return load_cufflinks_fpkm_dict(args.rna_transcript_fpkm_tracking_file)
    if args.rna_transcript_fpkm_gtf_file:
        return load_transcript_fpkm_dict_from_gtf(args.rna_transcript_fpkm_gtf_file)
    return None
Returns a dictionary mapping Ensembl transcript IDs to FPKM expression values or None if neither Cufflinks tracking file nor StringTie GTF file were specified .
43,828
def from_dict(page, content_type=None):
    """Create a |Page| from a paginated-endpoint response body dict.

    :param page: dict with 'items', 'pageNumber', 'pageSize',
        'totalElements', 'hasNext'
    :param content_type: optional ModelBase subclass used to deserialize items
    :return: the constructed |Page|
    :raises ValueError: if content_type is not a ModelBase subclass
    """
    result = Page(items=page.get('items'),
                  page_number=page.get('pageNumber'),
                  page_size=page.get('pageSize'),
                  total_elements=page.get('totalElements'),
                  has_next=page.get('hasNext'))
    if content_type is None:
        return result
    if not issubclass(content_type, ModelBase):
        raise ValueError("'content_type' must be a subclass of ModelBase.")
    result.items = [content_type.from_dict(item) for item in result.items]
    return result
Create a |Page| object from a dictionary . This method is intended for internal use to construct a |Page| object from the body of a response json from a paginated endpoint .
43,829
def to_dict(self, remove_nones=False):
    """Create a dictionary representation of the page.

    Items exposing their own to_dict are serialized recursively; everything
    else is passed through unchanged.

    :param remove_nones: forwarded to each item's to_dict
    """
    serialized_items = [
        item.to_dict(remove_nones=remove_nones) if hasattr(item, 'to_dict') else item
        for item in self.items
    ]
    return {
        'items': serialized_items,
        'pageNumber': self.page_number,
        'pageSize': self.page_size,
        'totalElements': self.total_elements,
        'hasNext': self.has_next,
    }
Creates a dictionary representation of the page .
43,830
def get_page_generator(func, start_page=0, page_size=None):
    """Generator that walks a paginated endpoint page by page.

    Intended for internal use.

    :param func: callable accepting page_number and page_size keyword args
        and returning a page object with has_more_pages()
    :param start_page: first page index to fetch
    :param page_size: items per page
    """
    page_number = start_page
    while True:
        page = func(page_number=page_number, page_size=page_size)
        yield page
        if not page.has_more_pages():
            return
        page_number += 1
Constructs a generator for retrieving pages from a paginated endpoint . This method is intended for internal use .
43,831
def serialize_to_dict(dictionary):
    """Recursively convert a dict to a JSON-serializable one.

    Nested dicts are processed recursively; numpy arrays are encoded via
    encode_numpy (float64 arrays are downcast to float32 first); all other
    values pass through unchanged.
    """
    serialized = {}
    for key, value in dictionary.items():
        if isinstance(value, dict):
            serialized[key] = serialize_to_dict(value)
        elif isinstance(value, np.ndarray):
            # Halve the payload: float64 -> float32 before encoding.
            if value.dtype == 'float64':
                value = value.astype('float32')
            serialized[key] = encode_numpy(value)
        else:
            serialized[key] = value
    return serialized
Make a json - serializable dictionary from input dictionary by converting non - serializable data types such as numpy arrays .
43,832
def make_graph(pkg):
    """Return an OrderedDict of information about `pkg` and its recursive deps.

    :param pkg: requirement string parsable by pkg_resources
    :return: OrderedDict mapping dependency name -> info dict (sorted by name)
    """
    ignore = ['argparse', 'pip', 'setuptools', 'wsgiref']
    pkg_deps = recursive_dependencies(pkg_resources.Requirement.parse(pkg))
    dependencies = {key: {} for key in pkg_deps if key not in ignore}
    versions = {package.key: package.version
                for package in pkg_resources.working_set}
    for package in dependencies:
        if package in versions:
            dependencies[package]['version'] = versions[package]
        else:
            # Not installed locally: warn and record an unknown version.
            warnings.warn(
                "{} is not installed so we cannot compute "
                "resources for its dependencies.".format(package),
                PackageNotInstalledWarning)
            dependencies[package]['version'] = None
    for package in dependencies:
        package_data = research_package(package, dependencies[package]['version'])
        dependencies[package].update(package_data)
    return OrderedDict((name, dependencies[name]) for name in sorted(dependencies.keys()))
Returns a dictionary of information about pkg & its recursive deps .
43,833
def get_report_details(self, report_id, id_type=None):
    """Retrieve a report by its internal or external ID.

    :param report_id: the report's ID
    :param id_type: how to interpret report_id (internal vs. external)
    :return: the deserialized |Report|
    """
    response = self._client.get("reports/%s" % report_id,
                                params={'idType': id_type})
    return Report.from_dict(response.json())
Retrieves a report by its ID . Internal and external IDs are both allowed .
43,834
def get_reports_page(self, is_enclave=None, enclave_ids=None, tag=None,
                     excluded_tags=None, from_time=None, to_time=None):
    """Fetch one page of reports, filtered and sorted by updated time.

    Paging is driven by adjusting from_time/to_time rather than page numbers.

    :param is_enclave: truthy -> ENCLAVE distribution, falsy (incl. None)
        -> COMMUNITY (preserved from the original behavior)
    :param enclave_ids: enclaves to filter by; defaults to self.enclave_ids
    :param tag: tag to include
    :param excluded_tags: tags to exclude
    :param from_time: lower bound of the updated-time window
    :param to_time: upper bound of the updated-time window
    :return: a |Page| of |Report| objects
    """
    if is_enclave:
        distribution_type = DistributionType.ENCLAVE
    else:
        distribution_type = DistributionType.COMMUNITY
    if enclave_ids is None:
        enclave_ids = self.enclave_ids
    params = {'from': from_time,
              'to': to_time,
              'distributionType': distribution_type,
              'enclaveIds': enclave_ids,
              'tags': tag,
              'excludedTags': excluded_tags}
    response = self._client.get("reports", params=params)
    return Page.from_dict(response.json(), content_type=Report)
Retrieves a page of reports filtering by time window distribution type enclave association and tag . The results are sorted by updated time . This method does not take page_number and page_size parameters . Instead each successive page must be found by adjusting the from_time and to_time parameters .
43,835
def submit_report(self, report):
    """Submit a report, then store the server-assigned ID on it.

    Fills in defaults: enclave distribution, this client's enclave IDs, and
    a time_began of now.

    :param report: the |Report| to submit (mutated in place)
    :return: the same report, with its id set
    :raises Exception: for an enclave report with no enclave IDs
    """
    if report.is_enclave is None:
        report.is_enclave = True
    if report.enclave_ids is None:
        report.enclave_ids = self.enclave_ids if report.is_enclave else []
    if report.is_enclave and len(report.enclave_ids) == 0:
        raise Exception("Cannot submit a report of distribution type 'ENCLAVE' with an empty set of enclaves.")
    if report.time_began is None:
        report.set_time_began(datetime.now())
    response = self._client.post("reports", data=json.dumps(report.to_dict()), timeout=60)
    report_id = response.content
    if isinstance(report_id, bytes):
        report_id = report_id.decode('utf-8')
    report.id = report_id
    return report
Submits a report .
43,836
def update_report(self, report):
    """Update an existing report on the server.

    The report is identified by report.id (internal) or, failing that,
    report.external_id. Non-None fields overwrite the stored values; None
    fields are ignored by the server.

    :param report: the |Report| carrying the updated fields
    :return: the same report object
    :raises Exception: if the report has neither an internal nor external ID
    """
    if report.id is not None:
        id_type = IdType.INTERNAL
        report_id = report.id
    elif report.external_id is not None:
        id_type = IdType.EXTERNAL
        report_id = report.external_id
    else:
        raise Exception("Cannot update report without either an ID or an external ID.")
    # BUG FIX: the original built this reportId-stripped dict but then
    # serialized the unfiltered report.to_dict(), so 'reportId' was sent in
    # the request body anyway. Use the filtered dict.
    report_dict = {k: v for k, v in report.to_dict().items() if k != 'reportId'}
    params = {'idType': id_type}
    self._client.put("reports/%s" % report_id,
                     data=json.dumps(report_dict),
                     params=params)
    return report
Updates the report identified by the report.id field; if this field does not exist, then report.external_id will be used if it exists. Any other fields on report that are not None will overwrite values on the report in TruSTAR's system. Any fields that are None will simply be ignored; their values will be unchanged.
43,837
def delete_report(self, report_id, id_type=None):
    """Delete the report with the given internal or external ID.

    :param report_id: the report's ID
    :param id_type: how to interpret report_id (internal vs. external)
    """
    self._client.delete("reports/%s" % report_id, params={'idType': id_type})
Deletes the report with the given ID .
43,838
def get_correlated_report_ids(self, indicators):
    """DEPRECATED: return the IDs of all reports containing any of `indicators`.

    :param indicators: indicator values to correlate on
    :return: list of report IDs from the response body
    """
    response = self._client.get("reports/correlate", params={'indicators': indicators})
    return response.json()
DEPRECATED! Retrieves a list of the IDs of all TruSTAR reports that contain the searched indicators .
43,839
def get_correlated_reports_page(self, indicators, enclave_ids=None,
                                is_enclave=True, page_size=None, page_number=None):
    """Fetch one page of reports containing any of the given indicators.

    :param indicators: indicator values to correlate on
    :param enclave_ids: enclaves to restrict the search to
    :param is_enclave: True -> ENCLAVE distribution, False -> COMMUNITY
    :param page_size: items per page
    :param page_number: zero-based page index
    :return: a |Page| of |Report| objects
    """
    distribution_type = (DistributionType.ENCLAVE if is_enclave
                         else DistributionType.COMMUNITY)
    params = {'indicators': indicators,
              'enclaveIds': enclave_ids,
              'distributionType': distribution_type,
              'pageNumber': page_number,
              'pageSize': page_size}
    response = self._client.get("reports/correlated", params=params)
    return Page.from_dict(response.json(), content_type=Report)
Retrieves a page of all TruSTAR reports that contain the searched indicators .
43,840
def search_reports_page(self, search_term=None, enclave_ids=None,
                        from_time=None, to_time=None, tags=None,
                        excluded_tags=None, page_size=None, page_number=None):
    """Fetch one page of reports matching a search term and filters.

    :param search_term: free-text term, sent in the request body
    :param enclave_ids: enclaves to search in
    :param from_time: lower time bound
    :param to_time: upper time bound
    :param tags: tags to include
    :param excluded_tags: tags to exclude
    :param page_size: items per page
    :param page_number: zero-based page index
    :return: a |Page| of |Report| objects
    """
    body = json.dumps({'searchTerm': search_term})
    params = {'enclaveIds': enclave_ids,
              'from': from_time,
              'to': to_time,
              'tags': tags,
              'excludedTags': excluded_tags,
              'pageSize': page_size,
              'pageNumber': page_number}
    response = self._client.post("reports/search", params=params, data=body)
    return Page.from_dict(response.json(), content_type=Report)
Search for reports containing a search term .
43,841
def _get_reports_page_generator(self, is_enclave=None, enclave_ids=None, tag=None,
                                excluded_tags=None, from_time=None, to_time=None):
    """Yield successive |Page| objects from |get_reports_page|.

    Pages over a time window: the next window's upper bound is the updated
    time of the last item on the current page.
    """
    fetch = functools.partial(self.get_reports_page,
                              is_enclave, enclave_ids, tag, excluded_tags)
    def next_to_time(page):
        if len(page.items) > 0:
            return page.items[-1].updated
        return None
    return get_time_based_page_generator(get_page=fetch,
                                         get_next_to_time=next_to_time,
                                         from_time=from_time,
                                         to_time=to_time)
Creates a generator from the |get_reports_page| method that returns each successive page .
43,842
def get_reports(self, is_enclave=None, enclave_ids=None, tag=None,
                excluded_tags=None, from_time=None, to_time=None):
    """Yield every matching |Report|, transparently paging through results."""
    pages = self._get_reports_page_generator(is_enclave, enclave_ids, tag,
                                             excluded_tags, from_time, to_time)
    return Page.get_generator(page_generator=pages)
Uses the |get_reports_page| method to create a generator that returns each successive report as a trustar report object .
43,843
def _get_correlated_reports_page_generator(self, indicators, enclave_ids=None,
                                           is_enclave=True, start_page=0, page_size=None):
    """Yield successive |Page| objects from |get_correlated_reports_page|."""
    fetch = functools.partial(self.get_correlated_reports_page,
                              indicators, enclave_ids, is_enclave)
    return Page.get_page_generator(fetch, start_page, page_size)
Creates a generator from the |get_correlated_reports_page| method that returns each successive page .
43,844
def get_correlated_reports(self, indicators, enclave_ids=None, is_enclave=True):
    """Uses the |get_correlated_reports_page| method to create a generator
    that returns each successive report."""
    pages = self._get_correlated_reports_page_generator(indicators, enclave_ids,
                                                        is_enclave)
    return Page.get_generator(page_generator=pages)
Uses the |get_correlated_reports_page| method to create a generator that returns each successive report .
43,845
def _search_reports_page_generator(self, search_term=None, enclave_ids=None,
                                   from_time=None, to_time=None, tags=None,
                                   excluded_tags=None, start_page=0,
                                   page_size=None):
    """Creates a generator from the |search_reports_page| method that returns
    each successive page."""
    bound_get_page = functools.partial(self.search_reports_page, search_term,
                                       enclave_ids, from_time, to_time,
                                       tags, excluded_tags)
    return Page.get_page_generator(bound_get_page, start_page, page_size)
Creates a generator from the |search_reports_page| method that returns each successive page .
43,846
def search_reports(self, search_term=None, enclave_ids=None, from_time=None,
                   to_time=None, tags=None, excluded_tags=None):
    """Uses the |search_reports_page| method to create a generator that
    returns each successive report."""
    pages = self._search_reports_page_generator(search_term, enclave_ids,
                                                from_time, to_time, tags,
                                                excluded_tags)
    return Page.get_generator(page_generator=pages)
Uses the |search_reports_page| method to create a generator that returns each successive report .
43,847
def parse_boolean(value):
    """Coerce a value to boolean.

    Accepts None (returned unchanged), real booleans (returned unchanged),
    and the strings 'true'/'false' in any casing.

    :raises ValueError: for any other value
    """
    if value is None:
        return None
    if isinstance(value, bool):
        return value
    if isinstance(value, string_types):
        lowered = value.lower()
        if lowered == 'true':
            return True
        if lowered == 'false':
            return False
    raise ValueError("Could not convert value to boolean: {}".format(value))
Coerce a value to boolean .
43,848
def config_from_file(config_file_path, config_role):
    """Create a configuration dictionary from a config file section.

    This dictionary is what the TruStar class constructor ultimately requires.

    :param config_file_path: path to a .conf/.ini, .json, .yml or .yaml file
    :param config_role: the section (role) of the file to read
    :return: a dict of configuration values; 'enclave_ids' is always present
        and normalized to a list of stripped strings
    :raises IOError: if the file extension is not recognized
    :raises KeyError: if the requested role is not present in the file
    """
    # Dispatch on the file extension to pick a parser.
    ext = os.path.splitext(config_file_path)[-1]
    if ext in ['.conf', '.ini']:
        config_parser = configparser.RawConfigParser()
        config_parser.read(config_file_path)
        roles = dict(config_parser)
    elif ext in ['.json', '.yml', '.yaml']:
        # yaml.safe_load also parses JSON, so one loader covers all three.
        with open(config_file_path, 'r') as f:
            roles = yaml.safe_load(f)
    else:
        raise IOError("Unrecognized filetype for config file '%s'" % config_file_path)
    if config_role in roles:
        config = dict(roles[config_role])
    else:
        raise KeyError("Could not find role %s" % config_role)
    # Normalize 'enclave_ids' into a list of strings, whatever form it came in.
    if 'enclave_ids' in config:
        # A bare int is first coerced to a string, then split like any other.
        if isinstance(config['enclave_ids'], int):
            config['enclave_ids'] = str(config['enclave_ids'])
        if isinstance(config['enclave_ids'], string_types):
            config['enclave_ids'] = config['enclave_ids'].split(',')
        elif not isinstance(config['enclave_ids'], list):
            raise Exception("'enclave_ids' must be a list or a comma-separated list")
        # Strip whitespace and drop None entries.
        config['enclave_ids'] = [str(x).strip() for x in config['enclave_ids'] if x is not None]
    else:
        config['enclave_ids'] = []
    return config
Create a configuration dictionary from a config file section . This dictionary is what the TruStar class constructor ultimately requires .
43,849
def get_version(self):
    """Get the version number of the API.

    :return: the version string, with any trailing newline stripped
    """
    raw = self._client.get("version").content
    # The HTTP layer may hand back bytes; normalize to text first.
    text = raw.decode('utf-8') if isinstance(raw, bytes) else raw
    return text.strip('\n')
Get the version number of the API .
43,850
def get_user_enclaves(self):
    """Gets the list of enclaves that the user has access to.

    :return: a list of |EnclavePermissions| objects
    """
    response = self._client.get("enclaves")
    return [EnclavePermissions.from_dict(entry) for entry in response.json()]
Gets the list of enclaves that the user has access to .
43,851
def get_request_quotas(self):
    """Gets the request quotas for the user's company.

    :return: a list of |RequestQuota| objects
    """
    response = self._client.get("request-quotas")
    return [RequestQuota.from_dict(entry) for entry in response.json()]
Gets the request quotas for the user's company.
43,852
def configure_logging():
    """Initialize logging configuration to defaults.

    If the environment variable DISABLE_TRUSTAR_LOGGING is set to true,
    this is a no-op.  Also installs a sys.excepthook that routes uncaught
    exceptions through the "error" logger.
    """
    if parse_boolean(os.environ.get('DISABLE_TRUSTAR_LOGGING')):
        return
    dictConfig(DEFAULT_LOGGING_CONFIG)
    error_logger = logging.getLogger("error")

    def log_exception(exc_type, exc_value, exc_traceback):
        # Forward the full traceback to the error logger.
        error_logger.error("Uncaught exception",
                           exc_info=(exc_type, exc_value, exc_traceback))

    sys.excepthook = log_exception
Initialize logging configuration to defaults . If the environment variable DISABLE_TRUSTAR_LOGGING is set to true this will be ignored .
43,853
def from_enclave(cls, enclave):
    """Create an |EnclavePermissions| object from an |Enclave| object."""
    return EnclavePermissions(
        id=enclave.id,
        name=enclave.name,
        type=enclave.type,
    )
Create an |EnclavePermissions| object from an |Enclave| object .
43,854
def get_enclave_tags(self, report_id, id_type=None):
    """Retrieves all enclave tags present in a specific report.

    :return: a list of |Tag| objects
    """
    response = self._client.get("reports/%s/tags" % report_id,
                                params={'idType': id_type})
    return [Tag.from_dict(entry) for entry in response.json()]
Retrieves all enclave tags present in a specific report .
43,855
def add_enclave_tag(self, report_id, name, enclave_id, id_type=None):
    """Adds a tag to a specific report, for a specific enclave.

    :return: the response body as a string
    """
    query = {'idType': id_type, 'name': name, 'enclaveId': enclave_id}
    response = self._client.post("reports/%s/tags" % report_id, params=query)
    return str(response.content)
Adds a tag to a specific report for a specific enclave .
43,856
def delete_enclave_tag(self, report_id, tag_id, id_type=None):
    """Deletes a tag from a specific report, in a specific enclave."""
    endpoint = "reports/%s/tags/%s" % (report_id, tag_id)
    self._client.delete(endpoint, params={'idType': id_type})
Deletes a tag from a specific report in a specific enclave .
43,857
def get_all_enclave_tags(self, enclave_ids=None):
    """Retrieves all tags present in the given enclaves.

    If the enclave list is empty, the tags returned include all tags for
    all enclaves the user has access to.

    :return: a list of |Tag| objects
    """
    response = self._client.get("reports/tags",
                                params={'enclaveIds': enclave_ids})
    return [Tag.from_dict(entry) for entry in response.json()]
Retrieves all tags present in the given enclaves . If the enclave list is empty the tags returned include all tags for all enclaves the user has access to .
43,858
def add_indicator_tag(self, indicator_value, name, enclave_id):
    """Adds a tag to a specific indicator, for a specific enclave.

    :return: the created |Tag| object
    """
    payload = {
        'value': indicator_value,
        'tag': {'name': name, 'enclaveId': enclave_id},
    }
    response = self._client.post("indicators/tags", data=json.dumps(payload))
    return Tag.from_dict(response.json())
Adds a tag to a specific indicator for a specific enclave .
43,859
def delete_indicator_tag(self, indicator_value, tag_id):
    """Deletes a tag from a specific indicator, in a specific enclave."""
    self._client.delete("indicators/tags/%s" % tag_id,
                        params={'value': indicator_value})
Deletes a tag from a specific indicator in a specific enclave .
43,860
def to_dict(self, remove_nones=False):
    """Creates a dictionary representation of the tag.

    :param remove_nones: if True, delegate to the parent implementation,
        which omits fields whose value is None.
    """
    if remove_nones:
        return super().to_dict(remove_nones=True)
    return {
        'name': self.name,
        'id': self.id,
        'enclaveId': self.enclave_id,
    }
Creates a dictionary representation of the tag .
43,861
def check_required_columns(df, filename, required_columns):
    """Ensure that all required columns are present in the given dataframe;
    otherwise raise a ValueError naming the first missing column.

    :param df: dataframe whose columns are checked
    :param filename: name used in the error message
    :param required_columns: iterable of column names that must be present
    """
    present = set(df.columns)
    for required in required_columns:
        if required not in present:
            raise ValueError(
                "FPKM tracking file %s missing column '%s'" % (filename, required))
Ensure that all required columns are present in the given dataframe otherwise raise an exception .
43,862
def topology_mdtraj(traj):
    """Generate topology spec for the MolecularViewer from an mdtraj trajectory.

    :return: dict with atom types/names, bonds, DSSP secondary structure of
        the first frame, residue names and per-residue atom indices.
    """
    import mdtraj as md
    topology = traj.topology
    return {
        'atom_types': [atom.element.symbol for atom in topology.atoms],
        'atom_names': [atom.name for atom in topology.atoms],
        'bonds': [(a.index, b.index) for a, b in topology.bonds],
        'secondary_structure': md.compute_dssp(traj[0])[0],
        'residue_types': [res.name for res in topology.residues],
        'residue_indices': [[atom.index for atom in res.atoms]
                            for res in topology.residues],
    }
Generate topology spec for the MolecularViewer from mdtraj .
43,863
def encode_numpy(array):
    """Encode a numpy array as a base64-encoded string, to be JSON serialized.

    :return: dict with the base64 payload ('data'), dtype name ('type') and
        shape -- enough to reconstruct the array on the receiving end.
    """
    payload = base64.b64encode(array.data).decode('utf8')
    return {'data': payload, 'type': array.dtype.name, 'shape': array.shape}
Encode a numpy array as a base64 encoded string to be JSON serialized .
43,864
def enable_notebook(verbose=0):
    """Enable IPython notebook widgets to be displayed.

    Installs the javascript/css resources bundled with chemview as IPython
    notebook extensions.

    :param verbose: verbosity level passed through to install_nbextension
    """
    libs = ['objexporter.js', 'ArcballControls.js', 'filesaver.js',
            'base64-arraybuffer.js', 'context.js', 'chemview.js',
            'three.min.js', 'jquery-ui.min.js', 'context.standalone.css',
            'chemview_widget.js', 'trajectory_controls_widget.js',
            "layout_widget.js",
            "components/jquery-fullscreen/jquery.fullscreen.js",
            'scales.js']
    fns = [resource_filename('chemview', os.path.join('static', f)) for f in libs]
    # Idiom fix: use a plain for-loop instead of a throwaway list
    # comprehension for the install side effect.
    for fn in fns:
        install_nbextension(fn, verbose=verbose, overwrite=True, user=True)
Enable IPython notebook widgets to be displayed .
43,865
def from_string(cls, string):
    """Look up the enum value equal to ``string``.

    Simply logs a warning and returns the string unchanged if the desired
    enum value is not found.
    """
    for attr_name in dir(cls):
        candidate = getattr(cls, attr_name)
        if candidate == string:
            return candidate
    logger.warning("{} is not a valid enum value for {}.".format(string, cls.__name__))
    return string
Simply logs a warning if the desired enum value is not found .
43,866
def format_cli(self, value):
    """Build a single parameter argument.

    :return: None when the parameter should be omitted (no value, or an
        unset flag); otherwise a list of formatted CLI fragments.
    """
    if value is None or (self.type == 'flag' and not value):
        return None
    template = text_type(self.pass_as or self.default_pass_as)
    context = dict(name=self.name, value=value, v=value)
    return [fragment.format(**context) for fragment in template.split()]
Build a single parameter argument .
43,867
def validate(yaml, raise_exc=True):
    """Validate the given YAML document and return a list of errors.

    :raises ValidationErrors: if errors were found and raise_exc is True
    """
    document = read_yaml(yaml)
    errors = list(get_validator().iter_errors(document))
    if errors and raise_exc:
        raise ValidationErrors(errors)
    return errors
Validate the given YAML document and return a list of errors .
43,868
def build_parameters(self):
    """Build the CLI command line from the parameter values.

    :return: flat list of CLI fragments for all parameters
    """
    bits = []
    for parameter_name in self.parameters:
        built = self.build_parameter_by_name(parameter_name)
        bits.extend(built or [])
    return bits
Build the CLI command line from the parameter values .
43,869
def get_step_by(self, **kwargs):
    """Get the first step that matches all the passed named arguments.

    Returns None when called without arguments, or when nothing matches.
    The virtual key 'index' (the step's position) may also be matched.
    """
    if not kwargs:
        return None
    wanted = kwargs.items()
    for position, step in enumerate(self.steps.values()):
        candidate = dict(step.serialize(), index=position)
        if all(pair in candidate.items() for pair in wanted):
            return step
    return None
Get the first step that matches all the passed named arguments .
43,870
def parse(yaml, validate=True):
    """Parse the given YAML data into a Config object, optionally
    validating it first."""
    data = read_yaml(yaml)
    if validate:
        # Imported locally and aliased to avoid clashing with the
        # 'validate' parameter.
        from .validation import validate as validate_data
        validate_data(data, raise_exc=True)
    return Config.parse(data)
Parse the given YAML data into a Config object optionally validating it first .
43,871
def build_command(self, parameter_values, command=None):
    """Build the command for this step using the given parameter values.

    :param command: optional command override; falls back to self.command
    """
    resolved_command = command or self.command
    # Defaults first, then explicit values win.
    merged_values = dict(self.get_parameter_defaults(include_flags=False),
                         **parameter_values)
    parameter_map = ParameterMap(parameters=self.parameters, values=merged_values)
    return build_command(resolved_command, parameter_map)
Build the command for this step using the given parameter values .
43,872
def lint_file(file_path):
    """Validate & lint file_path and return a LintResult."""
    with open(file_path, 'r') as yaml:
        try:
            return lint(yaml)
        except Exception as e:
            result = LintResult()
            result.add_error('could not parse YAML: %s' % e, exception=e)
            return result
Validate & lint file_path and return a LintResult .
43,873
def update(self):
    """Actual update process goes here, using the auxiliary get_currencies
    and get_exchangerates methods.  This method creates or updates the
    corresponding Currency and ExchangeRate models.
    """
    currencies = self.get_currencies()
    # Ensure a Currency row exists for every (code, name) pair.
    currency_objects = {}
    for code, name in currencies:
        currency_objects[code], created = Currency.objects.get_or_create(
            code=code, defaults={'name': name})
        if created:
            logger.info('currency: %s created', code)
    # Map of (source_code, target_code) -> existing ExchangeRate pk, used to
    # decide between UPDATE and INSERT below.
    existing = ExchangeRate.objects.values('source__code', 'target__code', 'id')
    existing = {(d['source__code'], d['target__code']): d['id'] for d in existing}
    usd_exchange_rates = dict(self.get_exchangerates('USD'))
    updates = []
    inserts = []
    # NOTE(review): the loop below reads source.code / target.code while the
    # loop above unpacked the same items as (code, name) pairs -- this only
    # works if get_currencies() yields named tuples; confirm.
    for source in currencies:
        for target in currencies:
            # Cross rates are derived by going through USD.
            rate = self._get_rate_through_usd(source.code, target.code, usd_exchange_rates)
            exchange_rate = ExchangeRate(source=currency_objects[source.code],
                                         target=currency_objects[target.code],
                                         rate=rate)
            if (source.code, target.code) in existing:
                # Reuse the existing pk so update_many issues an UPDATE.
                exchange_rate.id = existing[(source.code, target.code)]
                updates.append(exchange_rate)
                logger.debug('exchange rate updated %s/%s=%s' % (source, target, rate))
            else:
                inserts.append(exchange_rate)
                logger.debug('exchange rate created %s/%s=%s' % (source, target, rate))
        logger.info('exchange rates updated for %s' % source.code)
    # Persist everything in two bulk operations.
    logger.info("Updating %s rows" % len(updates))
    update_many(updates)
    logger.info("Inserting %s rows" % len(inserts))
    insert_many(inserts)
    logger.info('saved rates to db')
Actual update process goes here, using the auxiliary get_currencies and get_exchangerates methods. This method creates or updates the corresponding Currency and ExchangeRate models.
43,874
def redirect(url, code=None):
    """Aborts execution and causes a 303 or 302 redirect, depending on
    the HTTP protocol version (303 for HTTP/1.1, 302 otherwise)."""
    if not code:
        is_http11 = request.get('SERVER_PROTOCOL') == "HTTP/1.1"
        code = 303 if is_http11 else 302
    redirect_response = response.copy(cls=HTTPResponse)
    redirect_response.status = code
    redirect_response.body = ""
    redirect_response.set_header('Location', urljoin(request.url, url))
    # Bottle responses are raised to abort the current handler.
    raise redirect_response
Aborts execution and causes a 303 or 302 redirect depending on the HTTP protocol version .
43,875
def _file_iter_range ( fp , offset , bytes , maxread = 1024 * 1024 ) : fp . seek ( offset ) while bytes > 0 : part = fp . read ( min ( bytes , maxread ) ) if not part : break bytes -= len ( part ) yield part
Yield chunks from a range in a file . No chunk is bigger than maxread .
43,876
def debug(mode=True):
    """Change the debug level.  There is only one debug level supported
    at the moment."""
    global DEBUG
    if mode:
        # Re-enable warnings that are silenced by default.
        warnings.simplefilter('default')
    DEBUG = bool(mode)
Change the debug level . There is only one debug level supported at the moment .
43,877
def build(self, _name, *anons, **query):
    """Build an URL by filling the wildcards in a rule.

    Leftover keyword arguments are appended as a query string.

    :raises RouteBuildError: if no route has the given name, or a required
        URL argument is missing.
    """
    builder = self.builder.get(_name)
    if not builder:
        raise RouteBuildError("No route with that name.", _name)
    try:
        # Anonymous wildcards are matched positionally via 'anonN' keys.
        for position, value in enumerate(anons):
            query['anon%d' % position] = value
        url = ''.join(f(query.pop(n)) if n else f for (n, f) in builder)
        if query:
            url += '?' + urlencode(query)
        return url
    except KeyError:
        raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
Build an URL by filling the wildcards in a rule .
43,878
def getunicode(self, name, default=None, encoding=None):
    """Return the value as a unicode string, or the default."""
    try:
        value = self._fix(self[name], encoding)
    except (UnicodeError, KeyError):
        # Missing key or undecodable value both fall back to the default.
        value = default
    return value
Return the value as a unicode string or the default .
43,879
def load_dict(self, source, namespace='', make_namespaces=False):
    """Import values from a dictionary structure.  Nesting can be used to
    represent namespaces.

    :param source: a (possibly nested) dict of values to import
    :param namespace: prefix under which all keys are stored
    :param make_namespaces: if True, also create a Namespace entry for each
        nested dict encountered
    :return: self, to allow chaining
    :raises TypeError: if a source is not a dict, or a key is not a string
    """
    stack = [(namespace, source)]
    while stack:
        prefix, source = stack.pop()
        if not isinstance(source, dict):
            # Bug fix: the original format string was missing the '%' in
            # '(%r)' and referenced the undefined name 'key' instead of
            # 'source', which made this raise the wrong error.
            raise TypeError('Source is not a dict (%r)' % type(source))
        for key, value in source.items():
            if not isinstance(key, str):
                raise TypeError('Key is not a string (%r)' % type(key))
            full_key = prefix + '.' + key if prefix else key
            if isinstance(value, dict):
                stack.append((full_key, value))
                if make_namespaces:
                    self[full_key] = self.Namespace(self, full_key)
            else:
                self[full_key] = value
    return self
Import values from a dictionary structure . Nesting can be used to represent namespaces .
43,880
def meta_get(self, key, metafield, default=None):
    """Return the value of a meta field for a key."""
    fields = self._meta.get(key, {})
    return fields.get(metafield, default)
Return the value of a meta field for a key .
43,881
def meta_set(self, key, metafield, value):
    """Set the meta field for a key to a new value.

    This triggers the on-change handler for existing keys."""
    fields = self._meta.setdefault(key, {})
    fields[metafield] = value
    if key in self:
        # Re-assigning the stored value runs whatever __setitem__ does.
        self[key] = self[key]
Set the meta field for a key to a new value . This triggers the on - change handler for existing keys .
43,882
def lookup(self, name):
    """Search for a resource and return an absolute file path, or None.

    Results are cached according to self.cachemode ('all' caches hits and
    misses, 'found' caches only hits).  With DEBUG enabled the cache is
    bypassed.
    """
    if name not in self.cache or DEBUG:
        for search_dir in self.path:
            candidate = os.path.join(search_dir, name)
            if os.path.isfile(candidate):
                if self.cachemode in ('all', 'found'):
                    self.cache[name] = candidate
                return candidate
        # Not found anywhere; optionally cache the miss.
        if self.cachemode == 'all':
            self.cache[name] = None
    return self.cache[name]
Search for a resource and return an absolute file path or None .
43,883
def open(self, name, mode='r', *args, **kwargs):
    """Find a resource and return a file object, or raise IOError."""
    resource_path = self.lookup(name)
    if not resource_path:
        raise IOError("Resource %r not found." % name)
    return self.opener(resource_path, mode=mode, *args, **kwargs)
Find a resource and return a file object or raise IOError .
43,884
def render(self, *args, **kwargs):
    """Render the template using keyword arguments as local variables.

    Positional dict arguments are merged first, keyword arguments last.
    """
    env = {}
    for mapping in args:
        env.update(mapping)
    env.update(kwargs)
    chunks = []
    self.execute(chunks, env)
    return ''.join(chunks)
Render the template using keyword arguments as local variables .
43,885
def convert_values(args_list):
    """convert_value in bulk.

    :param args_list: iterable of (value, source, target) triples
    :return: dict mapping each triple to the converted value
    """
    rate_map = get_rates(map(itemgetter(1, 2), args_list))
    converted = {}
    for value, source, target in args_list:
        key = (value, source, target)
        # Identity conversions skip the rate lookup entirely.
        converted[key] = value if source == target else value * rate_map[(source, target)]
    return converted
convert_value in bulk .
43,886
def convert_value(value, source_currency, target_currency):
    """Converts the price of a currency to another one, using exchange
    rates.  Identity conversions return the value unchanged."""
    if source_currency == target_currency:
        return value
    return value * get_rate(source_currency, target_currency)
Converts the price of a currency to another one using exchange rates
43,887
def convert(price, currency):
    """Shorthand function: converts a Price instance of a source currency
    to the target currency."""
    converted_value = convert_value(price.value, price.currency, currency)
    return Price(converted_value, currency)
Shorthand function converts a price object instance of a source currency to target currency
43,888
def import_class(class_path):
    """Imports and returns the class given by a dotted path string.

    :param class_path: e.g. 'package.module.ClassName'
    :raises ImportError: if the module or attribute cannot be imported
    """
    try:
        # Bug fix: 'except Exception, detail' is Python-2-only syntax and a
        # SyntaxError on Python 3; use the portable 'as' form.  Also use the
        # stdlib importlib directly -- django.utils.importlib was merely a
        # copy of it and has been removed from modern Django versions.
        from importlib import import_module
        module_name = '.'.join(class_path.split(".")[:-1])
        mod = import_module(module_name)
        return getattr(mod, class_path.split(".")[-1])
    except Exception as detail:
        raise ImportError(detail)
imports and returns given class string .
43,889
def insert_many(objects, using="default"):
    """Insert list of Django objects in one SQL query.  Objects must be of
    the same Django model.  Note that save() is not called and signals on
    the model are not raised.

    :param objects: list of model instances, all of the same model
    :param using: database alias to insert into
    """
    if not objects:
        return
    import django.db.models
    from django.db import connections
    from django.db import transaction
    con = connections[using]
    model = objects[0].__class__
    # Skip auto-generated primary keys; the database assigns them.
    fields = [f for f in model._meta.fields if not isinstance(f, django.db.models.AutoField)]
    # Run each field's pre_save/get_db_prep_save so values are in DB form.
    parameters = []
    for o in objects:
        params = tuple(f.get_db_prep_save(f.pre_save(o, True), connection=con) for f in fields)
        parameters.append(params)
    table = model._meta.db_table
    column_names = ",".join(con.ops.quote_name(f.column) for f in fields)
    placeholders = ",".join(("%s",) * len(fields))
    # Single executemany keeps this to one round of SQL preparation.
    con.cursor().executemany(
        "insert into %s (%s) values (%s)" % (table, column_names, placeholders),
        parameters)
    # NOTE(review): commit_unless_managed was removed in Django 1.8 -- this
    # code targets older Django versions; confirm before upgrading.
    transaction.commit_unless_managed(using=using)
Insert list of Django objects in one SQL query . Objects must be of the same Django model . Note that save is not called and signals on the model are not raised .
43,890
def _timesheets_callback ( self , callback ) : def call ( * args , ** kwargs ) : return_values = [ ] for timesheet in self : attr = getattr ( timesheet , callback ) if callable ( attr ) : result = attr ( * args , ** kwargs ) else : result = attr return_values . append ( result ) return return_values return call
Call a method on all the timesheets aggregate the return values in a list and return it .
43,891
def get_new_timesheets_contents(self):
    """Return the initial text to be inserted in new timesheets.

    The text lists the recently-used aliases as comments; an empty string
    is returned when there are none.
    """
    popular = self.get_popular_aliases()
    if not popular:
        return ''
    lines = ['# Recently used aliases:']
    lines.extend('# ' + entry for entry, usage in popular)
    return '\n'.join(lines)
Return the initial text to be inserted in new timesheets .
43,892
def status(ctx, date, f, pushed):
    """Shows the summary of what's going to be committed to the server."""
    try:
        timesheet_collection = get_timesheet_collection_for_context(ctx, f)
    except ParseError as e:
        ctx.obj['view'].err(e)
    else:
        regroup = ctx.obj['settings']['regroup_entries']
        # Unless --pushed was given, only unpushed entries are shown.
        entries = timesheet_collection.entries.filter(
            date, regroup=regroup, pushed=False if not pushed else None)
        ctx.obj['view'].show_status(entries)
Shows the summary of what's going to be committed to the server.
43,893
def edit(ctx, file_to_edit, previous_file):
    """Opens your timesheet file in your favourite editor.

    :param ctx: click context carrying 'settings' and 'view' objects
    :param file_to_edit: explicit path of the timesheet to edit; when empty,
        the path is derived from the settings
    :param previous_file: how many timesheet files to go back in time
    """
    timesheet_collection = None
    # Auto-fill only applies when editing the current (implicit) file.
    autofill = not bool(file_to_edit) and previous_file == 0
    if not file_to_edit:
        file_to_edit = ctx.obj['settings'].get_entries_file_path(False)
    if autofill:
        try:
            timesheet_collection = get_timesheet_collection_for_context(ctx, file_to_edit)
        except ParseError:
            # An unparseable timesheet is opened as-is, without auto-fill.
            pass
        else:
            t = timesheet_collection.latest()
            if ctx.obj['settings']['auto_add'] != Settings.AUTO_ADD_OPTIONS['NO']:
                auto_fill_days = ctx.obj['settings']['auto_fill_days']
                if auto_fill_days:
                    t.prefill(auto_fill_days, limit=None)
                t.save()
    # Resolve the file 'previous_file' steps back in the collection.
    timesheet_files = list(reversed(TimesheetCollection.get_files(file_to_edit, previous_file)))
    if previous_file >= len(timesheet_files):
        ctx.fail("Couldn't find the requested previous file for `%s`." % file_to_edit)
    expanded_file_to_edit = list(timesheet_files)[previous_file]
    editor = ctx.obj['settings']['editor']
    edit_kwargs = {'filename': expanded_file_to_edit, 'extension': '.tks'}
    if editor:
        edit_kwargs['editor'] = editor
    click.edit(**edit_kwargs)
    # After editing, re-parse and show unpushed entries (or the parse error).
    try:
        timesheet_collection = get_timesheet_collection_for_context(ctx, file_to_edit)
    except ParseError as e:
        ctx.obj['view'].err(e)
    else:
        ctx.obj['view'].show_status(
            timesheet_collection.entries.filter(regroup=True, pushed=False))
Opens your timesheet file in your favourite editor .
43,894
def filter_from_alias(self, alias, backend=None):
    """Return aliases whose key contains the given alias, optionally
    filtered by backend.  Results are ordered case-insensitively by key."""
    def matches(key_item):
        key, item = key_item
        return ((alias is None or alias in key)
                and (backend is None or item.backend == backend))

    filtered = six.moves.filter(matches, six.iteritems(self))
    return collections.OrderedDict(sorted(filtered, key=lambda pair: pair[0].lower()))
Return aliases that start with the given alias optionally filtered by backend .
43,895
def clean_aliases(ctx, force_yes):
    """Removes aliases from your config file that point to inactive projects."""
    inactive_aliases = []
    for alias, mapping in six.iteritems(aliases_database):
        if mapping.mapping is None:
            continue
        project = ctx.obj['projects_db'].get(mapping.mapping[0], mapping.backend)
        # Short-circuit order matters: project may be None, and get_activity
        # should only be consulted for active projects with an activity set.
        is_inactive = (project is None
                       or not project.is_active()
                       or (mapping.mapping[1] is not None
                           and project.get_activity(mapping.mapping[1]) is None))
        if is_inactive:
            inactive_aliases.append(((alias, mapping), project))
    if not inactive_aliases:
        ctx.obj['view'].msg("No inactive aliases found.")
        return
    confirm = True if force_yes else ctx.obj['view'].clean_inactive_aliases(inactive_aliases)
    if confirm:
        ctx.obj['settings'].remove_aliases([item[0] for item in inactive_aliases])
        ctx.obj['settings'].write_config()
        ctx.obj['view'].msg("%d inactive aliases have been successfully"
                            " cleaned." % len(inactive_aliases))
Removes aliases from your config file that point to inactive projects .
43,896
def list_(ctx):
    """Lists installed plugins."""
    plugins = plugins_registry.get_plugins()
    lines = ["%s (%s)" % pair for pair in plugins.items()]
    click.echo("\n".join(lines))
Lists installed plugins .
43,897
def install(ctx, plugin):
    """Install the given plugin.

    Resolves the plugin's package name and latest version, then installs a
    pinned requirement with pip inside the current virtualenv.  Exits with
    status 1 on any failure.
    """
    ensure_inside_venv(ctx)
    plugin_name = get_plugin_name(plugin)
    try:
        info = get_plugin_info(plugin_name)
    except NameError:
        echo_error("Plugin {} could not be found.".format(plugin))
        sys.exit(1)
    except ValueError as e:
        echo_error("Unable to retrieve plugin info. "
                   "Error was:\n\n {}".format(e))
        sys.exit(1)
    # Skip the install when the latest version is already present.
    try:
        installed_version = pkg_resources.get_distribution(plugin_name).version
    except pkg_resources.DistributionNotFound:
        installed_version = None
    if installed_version is not None and info['version'] == installed_version:
        click.echo("You already have the latest version of {} ({}).".format(plugin, info['version']))
        return
    # Pin the exact version so the install is reproducible.
    pinned_plugin = '{0}=={1}'.format(plugin_name, info['version'])
    try:
        run_command([sys.executable, '-m', 'pip', 'install', pinned_plugin])
    except subprocess.CalledProcessError as e:
        echo_error("Error when trying to install plugin {}. "
                   "Error was:\n\n {}".format(plugin, e))
        sys.exit(1)
    echo_success("Plugin {} {} installed successfully.".format(plugin, info['version']))
Install the given plugin .
43,898
def uninstall(ctx, plugin):
    """Uninstall the given plugin.

    Exits with status 1 if the plugin is not installed or pip fails.
    """
    ensure_inside_venv(ctx)
    if plugin not in get_installed_plugins():
        echo_error("Plugin {} does not seem to be installed.".format(plugin))
        sys.exit(1)
    package = get_plugin_name(plugin)
    try:
        run_command([sys.executable, '-m', 'pip', 'uninstall', '-y', package])
    except subprocess.CalledProcessError as e:
        echo_error("Error when trying to uninstall plugin {}. Error message "
                   "was:\n\n{}".format(plugin, e.output.decode()))
        sys.exit(1)
    else:
        echo_success("Plugin {} uninstalled successfully.".format(plugin))
Uninstall the given plugin .
43,899
def hours(self):
    """Return the number of hours this entry has lasted.

    If the duration is a tuple with a start and an end time, the difference
    between the two times is calculated.  If the duration is a number, it is
    returned as-is.  Returns 0 when either the end time or the start time is
    unknown.
    """
    if not isinstance(self.duration, tuple):
        return self.duration
    end = self.duration[1]
    if end is None:
        return 0
    start = self.get_start_time()
    if start is None:
        return 0
    # Anchor both clock times on today's date so they can be subtracted.
    now = datetime.datetime.now()
    start_dt = now.replace(hour=start.hour, minute=start.minute, second=0)
    end_dt = now.replace(hour=end.hour, minute=end.minute, second=0)
    return (end_dt - start_dt).seconds / 3600.0
Return the number of hours this entry has lasted . If the duration is a tuple with a start and an end time the difference between the two times will be calculated . If the duration is a number it will be returned as - is .