text
stringlengths
0
1.05M
meta
dict
__all__ = ['process_file', 'process_files', 'process_from_web',
           'process_sif_strs']

import requests
import logging
from .processor import SifProcessor
from .minerva_client import get_sif_filenames_to_ids, default_map_name

logger = logging.getLogger(__name__)

# Raw-file root of the COVID-19 Disease Map executable-module SIF exports.
base_url = ('https://git-r3lab.uni.lu/covid/models/-/raw/master/'
            'Executable%20Modules/SBML_qual_build/sif/')


def process_file(filename, model_id, map_name=default_map_name):
    """Get statements by processing a single local SIF file.

    Parameters
    ----------
    filename : str
        A name (or path) of a local SIF file to process.
    model_id : int
        ID of a model corresponding to file content. Model ID is needed
        to find relevant references.
    map_name : str
        A name of a disease map to process.

    Returns
    -------
    sp : indra.sources.minerva.SifProcessor
        An instance of a SifProcessor with extracted INDRA statements.
    """
    return process_files({model_id: filename}, map_name)


def process_files(ids_to_filenames, map_name=default_map_name):
    """Get statements by processing one or more local SIF files.

    Parameters
    ----------
    ids_to_filenames : dict
        A dictionary mapping model IDs to files containing model content as
        SIF. Model IDs are needed to find relevant references.
    map_name : str
        A name of a disease map to process.

    Returns
    -------
    sp : indra.sources.minerva.SifProcessor
        An instance of a SifProcessor with extracted INDRA statements.
    """
    model_id_to_sif_strs = {}
    for model_id, filename in ids_to_filenames.items():
        with open(filename, 'r') as f:
            sif_strs = f.readlines()
        model_id_to_sif_strs[model_id] = sif_strs
    return process_sif_strs(model_id_to_sif_strs, map_name)


def process_from_web(filenames='all', map_name=default_map_name):
    """Get statements by processing remote SIF files.

    Parameters
    ----------
    filenames : list or str('all')
        Filenames for models that need to be processed (for full list of
        available models see
        https://git-r3lab.uni.lu/covid/models/-/tree/master/
        Executable%20Modules/SBML_qual_build/sif). If set to 'all'
        (default), then all available models will be processed.
    map_name : str
        A name of a disease map to process.

    Returns
    -------
    sp : indra.sources.minerva.SifProcessor
        An instance of a SifProcessor with extracted INDRA statements.
    """
    filenames_to_ids = get_sif_filenames_to_ids(map_name=map_name)
    if filenames == 'all':
        filenames = list(filenames_to_ids.keys())
    model_id_to_sif_strs = {}
    for fname in filenames:
        model_id = filenames_to_ids[fname]
        url = base_url + fname
        res = requests.get(url)
        if res.status_code == 200:
            sif_strs = res.text.split('\n')
            model_id_to_sif_strs[model_id] = sif_strs
        else:
            # Best effort: a model whose file cannot be fetched is skipped
            # rather than aborting the whole batch.
            logger.warning('Could not get content from file %s, skipping '
                           'model %d' % (fname, model_id))
    return process_sif_strs(model_id_to_sif_strs, map_name)


def process_sif_strs(model_id_to_sif_strs, map_name=default_map_name):
    """Get statements by processing SIF content given as strings.

    Parameters
    ----------
    model_id_to_sif_strs : dict
        A dictionary mapping model IDs to lists of lines of SIF content.
    map_name : str
        A name of a disease map to process.

    Returns
    -------
    sp : indra.sources.minerva.SifProcessor
        An instance of a SifProcessor with extracted INDRA statements.
    """
    sp = SifProcessor(model_id_to_sif_strs, map_name=map_name)
    sp.extract_statements()
    return sp
{ "repo_name": "bgyori/indra", "path": "indra/sources/minerva/api.py", "copies": "3", "size": "3356", "license": "bsd-2-clause", "hash": 5369605590107938000, "line_mean": 33.2448979592, "line_max": 75, "alpha_frac": 0.637067938, "autogenerated": false, "ratio": 3.5475687103594082, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5684636648359408, "avg_score": null, "num_lines": null }
__all__ = ['process_from_web', 'process_tsv', 'process_df']

import pandas
import logging
from .processor import VirhostnetProcessor

logger = logging.getLogger(__name__)

# PSICQUIC query endpoint of the VirHostNet web service.
vhn_url = ('http://virhostnet.prabi.fr:9090/psicquic/webservices/current/'
           'search/query/')

# Column names for the (header-less) PSI-MITAB-style tabular output.
data_columns = [
    'host_grounding', 'vir_grounding', 'host_mnemonic', 'vir_mnemonic',
    'host_mnemonic2', 'vir_mnemonic2', 'exp_method', 'dash', 'publication',
    'host_tax', 'vir_tax', 'int_type', 'source', 'source_id', 'score'
]


def process_from_web(query=None, up_web_fallback=False):
    """Process host-virus interactions from the VirHostNet website.

    Parameters
    ----------
    query : Optional[str]
        A query that constrains the results to a given subset of the
        VirHostNet database. Example: "taxid:2697049" to search for
        interactions for SARS-CoV-2. If not provided, the "*" query is
        used, which returns the full database.
    up_web_fallback : Optional[bool]
        Passed through to the VirhostnetProcessor; presumably enables
        falling back to UniProt web lookups during grounding — confirm
        semantics in the processor. Default: False

    Returns
    -------
    VirhostnetProcessor
        A VirhostnetProcessor object which contains a list of extracted
        INDRA Statements in its statements attribute.
    """
    # Search for everything to get the full download by default
    url = vhn_url + ('*' if query is None else query)
    logger.info('Processing VirHostNet data from %s' % url)
    df = pandas.read_csv(url, delimiter='\t', names=data_columns,
                         header=None)
    return process_df(df, up_web_fallback=up_web_fallback)


def process_tsv(fname, up_web_fallback=False):
    """Process a TSV data file obtained from VirHostNet.

    Parameters
    ----------
    fname : str
        The path to the VirHostNet tabular data file (in the same format as
        the web service).
    up_web_fallback : Optional[bool]
        Passed through to the VirhostnetProcessor; presumably enables
        falling back to UniProt web lookups during grounding — confirm
        semantics in the processor. Default: False

    Returns
    -------
    VirhostnetProcessor
        A VirhostnetProcessor object which contains a list of extracted
        INDRA Statements in its statements attribute.
    """
    df = pandas.read_csv(fname, delimiter='\t', names=data_columns,
                         header=None)
    return process_df(df, up_web_fallback=up_web_fallback)


def process_df(df, up_web_fallback=False):
    """Process a VirHostNet pandas DataFrame.

    Parameters
    ----------
    df : pandas.DataFrame
        A DataFrame representing VirHostNet interactions (in the same
        format as the web service).
    up_web_fallback : Optional[bool]
        Passed through to the VirhostnetProcessor; presumably enables
        falling back to UniProt web lookups during grounding — confirm
        semantics in the processor. Default: False

    Returns
    -------
    VirhostnetProcessor
        A VirhostnetProcessor object which contains a list of extracted
        INDRA Statements in its statements attribute.
    """
    vp = VirhostnetProcessor(df, up_web_fallback=up_web_fallback)
    vp.extract_statements()
    return vp
{ "repo_name": "bgyori/indra", "path": "indra/sources/virhostnet/api.py", "copies": "4", "size": "2641", "license": "bsd-2-clause", "hash": 5441213230625490000, "line_mean": 30.4404761905, "line_max": 79, "alpha_frac": 0.6535403256, "autogenerated": false, "ratio": 3.7836676217765044, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6437207947376504, "avg_score": null, "num_lines": null }
__all__ = ['process_from_webservice']

import logging
import requests
from .processor import RlimspProcessor

logger = logging.getLogger(__name__)

RLIMSP_URL = 'https://research.bioinformatics.udel.edu/itextmine/api/data/rlims/'


class RLIMSP_Error(Exception):
    """Raised when the RLIMS-P web service returns a non-200 response."""
    pass


def process_from_webservice(id_val, id_type='pmcid', source='pmc',
                            with_grounding=True):
    """Return an output from RLIMS-p for the given PubMed ID or PMC ID.

    Parameters
    ----------
    id_val : str
        A PMCID, with the prefix PMC, or pmid, with no prefix, of the
        paper to be "read".
    id_type : str
        Either 'pmid' or 'pmcid'. The default is 'pmcid'.
    source : str
        Either 'pmc' or 'medline', whether you want pmc fulltext or
        medline abstracts.
    with_grounding : bool
        The RLIMS-P web service provides two endpoints, one pre-grounded,
        the other not so much. The grounded endpoint returns far less
        content, and may perform some grounding that can be handled by
        the grounding mapper.

    Returns
    -------
    :py:class:`indra.sources.rlimsp.processor.RlimspProcessor`
        An RlimspProcessor which contains a list of extracted INDRA
        Statements in its statements attribute.
    """
    # The ".normed" collection suffix selects the pre-grounded endpoint.
    if with_grounding:
        fmt = '%s.normed/%s/%s'
    else:
        fmt = '%s/%s/%s'

    resp = requests.get(RLIMSP_URL + fmt % (source, id_type, id_val))

    if resp.status_code != 200:
        raise RLIMSP_Error("Bad status code: %d - %s"
                           % (resp.status_code, resp.reason))

    rp = RlimspProcessor(resp.json())
    # Extract statements so the returned processor matches the documented
    # contract (statements populated); the other copy of this API in the
    # codebase does the same before returning.
    rp.extract_statements()
    return rp
{ "repo_name": "sorgerlab/belpy", "path": "indra/sources/rlimsp/api.py", "copies": "1", "size": "1639", "license": "mit", "hash": 7350573427751497000, "line_mean": 27.7543859649, "line_max": 81, "alpha_frac": 0.6266015863, "autogenerated": false, "ratio": 3.4872340425531916, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9612765881313612, "avg_score": 0.0002139495079161318, "num_lines": 57 }
__all__ = ['process_from_webservice', 'process_from_json_file',
           'process_from_jsonish_str']

import json
import logging
import requests
from .processor import RlimspProcessor

logger = logging.getLogger(__name__)

RLIMSP_URL = ('https://research.bioinformatics.udel.edu/itextmine/api/data/'
              'rlims/')


class RLIMSP_Error(Exception):
    """Raised when the RLIMS-P web service returns a non-200 response."""
    pass


def process_from_webservice(id_val, id_type='pmcid', source='pmc'):
    """Return an output from RLIMS-p for the given PubMed ID or PMC ID.

    The web service is documented at:
    https://research.bioinformatics.udel.edu/itextmine/api/. The
    /data/rlims URL endpoint is extended with three additional elements:
    /{collection}/{key}/{value} where collection is "medline" or "pmc",
    key is "pmid" or "pmcid", and value is a specific PMID or PMCID.

    Parameters
    ----------
    id_val : str
        A PMCID, with the prefix PMC, or PMID, with no prefix, of the
        paper to be "read". Corresponds to the "value" argument of the
        REST API.
    id_type : Optional[str]
        Either 'pmid' or 'pmcid'. The default is 'pmcid'. Corresponds to
        the "key" argument of the REST API.
    source : Optional[str]
        Either 'pmc' or 'medline', whether you want pmc fulltext or
        medline abstracts. Corresponds to the "collection" argument of
        the REST API.

    Returns
    -------
    :py:class:`indra.sources.rlimsp.processor.RlimspProcessor`
        An RlimspProcessor which contains a list of extracted INDRA
        Statements in its statements attribute.
    """
    resp = requests.get(RLIMSP_URL + '%s/%s/%s' % (source, id_type, id_val))

    if resp.status_code != 200:
        raise RLIMSP_Error("Bad status code: %d - %s"
                           % (resp.status_code, resp.reason))

    rp = RlimspProcessor(resp.json())
    rp.extract_statements()
    return rp


def _json_list_from_lines(lines):
    """Parse bulk-download content: one JSON object per line."""
    return [json.loads(line) for line in lines]


def process_from_json_file(filename, doc_id_type=None):
    """Process RLIMSP extractions from a bulk-download JSON file.

    Parameters
    ----------
    filename : str
        Path to the JSON file.
    doc_id_type : Optional[str]
        In some cases the RLIMS-P paragraph info doesn't contain 'pmid'
        or 'pmcid' explicitly, instead it contains a 'docId' key. This
        parameter allows defining what ID type 'docId' should be
        interpreted as. Its values should be 'pmid' or 'pmcid' or None
        if not used.

    Returns
    -------
    :py:class:`indra.sources.rlimsp.processor.RlimspProcessor`
        An RlimspProcessor which contains a list of extracted INDRA
        Statements in its statements attribute.
    """
    with open(filename, 'rt') as f:
        json_list = _json_list_from_lines(f.readlines())
    rp = RlimspProcessor(json_list, doc_id_type=doc_id_type)
    rp.extract_statements()
    return rp


def process_from_jsonish_str(jsonish_str, doc_id_type=None):
    """Process RLIMSP extractions from a bulk-download JSON-ish string.

    Parameters
    ----------
    jsonish_str : str
        The contents of one of the not-quite-json files you can find here:
        https://hershey.dbi.udel.edu/textmining/export
    doc_id_type : Optional[str]
        In some cases the RLIMS-P paragraph info doesn't contain 'pmid'
        or 'pmcid' explicitly, instead it contains a 'docId' key. This
        parameter allows defining what ID type 'docId' should be
        interpreted as. Its values should be 'pmid' or 'pmcid' or None
        if not used.

    Returns
    -------
    :py:class:`indra.sources.rlimsp.processor.RlimspProcessor`
        An RlimspProcessor which contains a list of extracted INDRA
        Statements in its statements attribute.
    """
    json_list = _json_list_from_lines(jsonish_str.splitlines())
    rp = RlimspProcessor(json_list, doc_id_type=doc_id_type)
    rp.extract_statements()
    return rp
{ "repo_name": "johnbachman/belpy", "path": "indra/sources/rlimsp/api.py", "copies": "4", "size": "3956", "license": "mit", "hash": -247285457095936320, "line_mean": 33.701754386, "line_max": 94, "alpha_frac": 0.6514155713, "autogenerated": false, "ratio": 3.5384615384615383, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6189877109761539, "avg_score": null, "num_lines": null }
__all__ = ['process_gene_gene', 'process_gene_gene_from_web',
           'process_gene_disease', 'process_gene_disease_from_web',
           'process_chemical_disease', 'process_chemical_disease_from_web',
           'process_chemical_gene', 'process_chemical_gene_from_web',
           'process_from_files', 'process_from_web']

import pandas as pd
import logging
from .processor import GnbrProcessor

# Zenodo deposit hosting the GNBR part-i/part-ii flat files.
base_url = 'https://zenodo.org/record/3459420/files'

logger = logging.getLogger(__name__)


def process_gene_gene(part1_path: str, part2_path: str,
                      indicator_only: bool = True) -> GnbrProcessor:
    """Process gene–gene interactions.

    Parameters
    ----------
    part1_path :
        Path to the first dataset which contains dependency paths and
        themes.
    part2_path :
        Path to the second dataset which contains dependency paths and
        entity pairs.
    indicator_only :
        A switch to filter the data which is part of the flagship path
        set for each theme.

    Returns
    -------
    :
        A GnbrProcessor object which contains a list of extracted INDRA
        Statements in its statements attribute.
    """
    return process_from_files(part1_path, part2_path, 'gene', 'gene',
                              indicator_only=indicator_only)


def process_chemical_gene(part1_path: str, part2_path: str,
                          indicator_only: bool = True) -> GnbrProcessor:
    """Process chemical–gene interactions.

    Parameters
    ----------
    part1_path :
        Path to the first dataset of dependency paths and themes.
    part2_path :
        Path to the second dataset of dependency paths and entity pairs.
    indicator_only :
        A switch to filter the data which is part of the flagship path
        set for each theme.

    Returns
    -------
    :
        A GnbrProcessor object which contains a list of extracted INDRA
        Statements in its statements attribute.
    """
    return process_from_files(part1_path, part2_path, 'chemical', 'gene',
                              indicator_only=indicator_only)


def process_gene_disease(part1_path: str, part2_path: str,
                         indicator_only: bool = True) -> GnbrProcessor:
    """Process gene–disease interactions.

    Parameters
    ----------
    part1_path :
        Path to the first dataset which contains dependency paths and
        themes.
    part2_path :
        Path to the second dataset which contains dependency paths and
        entity pairs.
    indicator_only :
        A switch to filter the data which is part of the flagship path
        set for each theme.

    Returns
    -------
    :
        A GnbrProcessor object which contains a list of extracted INDRA
        Statements in its statements attribute.
    """
    return process_from_files(part1_path, part2_path, 'gene', 'disease',
                              indicator_only=indicator_only)


def process_chemical_disease(part1_path: str, part2_path: str,
                             indicator_only: bool = True) \
        -> GnbrProcessor:
    """Process chemical–disease interactions.

    Parameters
    ----------
    part1_path :
        Path to the first dataset which contains dependency paths and
        themes.
    part2_path :
        Path to the second dataset which contains dependency paths and
        entity pairs.
    indicator_only :
        A switch to filter the data which is part of the flagship path
        set for each theme.

    Returns
    -------
    :
        A GnbrProcessor object which contains a list of extracted INDRA
        Statements in its statements attribute.
    """
    return process_from_files(part1_path, part2_path, 'chemical', 'disease',
                              indicator_only=indicator_only)


def process_from_files(part1_path: str, part2_path: str, first_type: str,
                       second_type: str, indicator_only: bool = True) \
        -> GnbrProcessor:
    """Loading the databases from the given files.

    Parameters
    ----------
    part1_path :
        Path to the first dataset which contains dependency paths and
        themes.
    part2_path :
        Path to the second dataset which contains dependency paths and
        themes.
    first_type :
        Type of the first agent.
    second_type :
        Type of the second agent.
    indicator_only :
        A switch to filter the data which is part of the flagship path
        set for each theme.

    Returns
    -------
    :
        A GnbrProcessor object which contains a list of extracted INDRA
        Statements in its statements attribute.
    """
    logger.info(f'Loading part 1 table from {part1_path}')
    # Part 1 has a header row; part 2 is header-less.
    df1: pd.DataFrame = pd.read_csv(part1_path, sep='\t')
    logger.info(f'Loading part 2 table from {part2_path}')
    df2: pd.DataFrame = pd.read_csv(part2_path, sep='\t', header=None)
    gp: GnbrProcessor = GnbrProcessor(df1, df2, first_type, second_type,
                                      indicator_only=indicator_only)
    gp.extract_stmts()
    return gp


def process_gene_gene_from_web(indicator_only: bool = True) -> GnbrProcessor:
    """Call process_gene_gene function on the GNBR datasets.

    Parameters
    ----------
    indicator_only :
        A switch to filter the data which is part of the flagship path
        set for each theme.

    Returns
    -------
    :
        A GnbrProcessor object which contains a list of extracted INDRA
        Statements in its statements attribute.
    """
    return process_from_web('gene', 'gene', indicator_only=indicator_only)


def process_chemical_gene_from_web(indicator_only: bool = True) \
        -> GnbrProcessor:
    """Call process_chemical_gene function on the GNBR datasets.

    Parameters
    ----------
    indicator_only :
        A switch to filter the data which is part of the flagship path
        set for each theme.

    Returns
    -------
    :
        A GnbrProcessor object which contains a list of extracted INDRA
        Statements in its statements attribute.
    """
    return process_from_web('chemical', 'gene', indicator_only=indicator_only)


def process_gene_disease_from_web(indicator_only: bool = True) \
        -> GnbrProcessor:
    """Call process_gene_disease function on the GNBR datasets.

    Parameters
    ----------
    indicator_only :
        A switch to filter the data which is part of the flagship path
        set for each theme.

    Returns
    -------
    :
        A GnbrProcessor object which contains a list of extracted INDRA
        Statements in its statements attribute.
    """
    return process_from_web('gene', 'disease', indicator_only=indicator_only)


def process_chemical_disease_from_web(indicator_only: bool = True) \
        -> GnbrProcessor:
    """Call process_chemical_disease function on the GNBR datasets.

    Parameters
    ----------
    indicator_only :
        A switch to filter the data which is part of the flagship path
        set for each theme.

    Returns
    -------
    :
        A GnbrProcessor object which contains a list of extracted INDRA
        Statements in its statements attribute.
    """
    return process_from_web('chemical', 'disease',
                            indicator_only=indicator_only)


def process_from_web(first_type: str, second_type: str,
                     indicator_only: bool = True) -> GnbrProcessor:
    """Loading the databases from the given urls.

    Parameters
    ----------
    first_type :
        Type of the first agent.
    second_type :
        Type of the second agent.
    indicator_only :
        A switch to filter the data which is part of the flagship path
        set for each theme.

    Returns
    -------
    :
        A GnbrProcessor object which contains a list of extracted INDRA
        Statements in its statements attribute.
    """
    fname1 = (f'{base_url}/part-i-{first_type}-{second_type}-path-theme-'
              f'distributions.txt.gz')
    fname2 = (f'{base_url}/part-ii-dependency-paths-{first_type}-{second_type}'
              f'-sorted-with-themes.txt.gz')
    return process_from_files(fname1, fname2, first_type, second_type,
                              indicator_only=indicator_only)
{ "repo_name": "sorgerlab/indra", "path": "indra/sources/gnbr/api.py", "copies": "1", "size": "8167", "license": "bsd-2-clause", "hash": 3976432687185107000, "line_mean": 31.64, "line_max": 80, "alpha_frac": 0.6254442946, "autogenerated": false, "ratio": 4.184102564102564, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5309546858702564, "avg_score": null, "num_lines": null }
__all__ = ['process_jsonld_file', 'process_jsonld']

import json
import logging

from indra.sources.hume import processor

logger = logging.getLogger(__name__)


def process_jsonld_file(fname):
    """Extract Statements from a new-format Hume JSON-LD file.

    Parameters
    ----------
    fname : str
        The path to the JSON-LD file to be processed.

    Returns
    -------
    indra.sources.hume.HumeProcessor
        A HumeProcessor instance, which contains a list of INDRA
        Statements as its statements attribute.
    """
    with open(fname, 'r', encoding='utf-8') as fh:
        contents = json.load(fh)
    return process_jsonld(contents)


def process_jsonld(jsonld):
    """Extract Statements from a new-format Hume JSON-LD object.

    Parameters
    ----------
    jsonld : dict
        The JSON-LD object to be processed.

    Returns
    -------
    indra.sources.hume.HumeProcessor
        A HumeProcessor instance, which contains a list of INDRA
        Statements as its statements attribute.
    """
    proc = processor.HumeJsonLdProcessor(jsonld)
    proc.extract_relations()
    proc.extract_events()
    return proc
{ "repo_name": "johnbachman/belpy", "path": "indra/sources/hume/api.py", "copies": "1", "size": "1161", "license": "mit", "hash": -4979411150592801000, "line_mean": 24.2391304348, "line_max": 75, "alpha_frac": 0.6503014643, "autogenerated": false, "ratio": 3.935593220338983, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5085894684638983, "avg_score": null, "num_lines": null }
__all__ = ['process_pc_neighborhood', 'process_pc_pathsbetween',
           'process_pc_pathsfromto', 'process_owl', 'process_owl_str',
           'process_model']

import itertools
from pybiopax import model_from_owl_file, model_from_owl_str, \
    model_from_pc_query
from .processor import BiopaxProcessor

default_databases = ['wp', 'smpdb', 'reconx', 'reactome', 'psp', 'pid',
                     'panther', 'netpath', 'msigdb', 'mirtarbase', 'kegg',
                     'intact', 'inoh', 'humancyc', 'hprd', 'drugbank',
                     'dip', 'corum']


def process_pc_neighborhood(gene_names, neighbor_limit=1,
                            database_filter=None):
    """Returns a BiopaxProcessor for a PathwayCommons neighborhood query.

    The neighborhood query finds the neighborhood around a set of source
    genes.

    http://www.pathwaycommons.org/pc2/#graph

    http://www.pathwaycommons.org/pc2/#graph_kind

    Parameters
    ----------
    gene_names : list
        A list of HGNC gene symbols to search the neighborhood of.
        Examples: ['BRAF'], ['BRAF', 'MAP2K1']
    neighbor_limit : Optional[int]
        The number of steps to limit the size of the neighborhood around
        the gene names being queried. Default: 1
    database_filter : Optional[list]
        A list of database identifiers to which the query is restricted.
        Examples: ['reactome'], ['biogrid', 'pid', 'psp']
        If not given, all databases are used in the query. For a full
        list of databases see
        http://www.pathwaycommons.org/pc2/datasources

    Returns
    -------
    BiopaxProcessor
        A BiopaxProcessor containing the obtained BioPAX model in its
        model attribute and a list of extracted INDRA Statements from the
        model in its statements attribute. Returns None if the query
        produced no model.
    """
    model = model_from_pc_query('neighborhood', source=gene_names,
                                limit=neighbor_limit,
                                datasource=database_filter)
    if model is not None:
        return process_model(model)


def process_pc_pathsbetween(gene_names, neighbor_limit=1,
                            database_filter=None, block_size=None):
    """Returns a BiopaxProcessor for a PathwayCommons paths-between query.

    The paths-between query finds the paths between a set of genes. Here
    source gene names are given in a single list and all directions of
    paths between these genes are considered.

    http://www.pathwaycommons.org/pc2/#graph

    http://www.pathwaycommons.org/pc2/#graph_kind

    Parameters
    ----------
    gene_names : list
        A list of HGNC gene symbols to search for paths between.
        Examples: ['BRAF', 'MAP2K1']
    neighbor_limit : Optional[int]
        The number of steps to limit the length of the paths between the
        gene names being queried. Default: 1
    database_filter : Optional[list]
        A list of database identifiers to which the query is restricted.
        Examples: ['reactome'], ['biogrid', 'pid', 'psp']
        If not given, all databases are used in the query. For a full
        list of databases see
        http://www.pathwaycommons.org/pc2/datasources
    block_size : Optional[int]
        Large paths-between queries (above ~60 genes) can error on the
        server side. In this case, the query can be replaced by a series
        of smaller paths-between and paths-from-to queries each of which
        contains block_size genes.

    Returns
    -------
    bp : BiopaxProcessor
        A BiopaxProcessor containing the obtained BioPAX model in
        bp.model.
    """
    if not block_size:
        model = model_from_pc_query('pathsbetween',
                                    source=gene_names,
                                    limit=neighbor_limit,
                                    datasource=database_filter)
        if model is not None:
            return process_model(model)
    else:
        gene_blocks = [gene_names[i:i + block_size] for i in
                       range(0, len(gene_names), block_size)]
        stmts = []
        # Run pathsfromto between pairs of blocks and pathsbetween
        # within each block. This breaks up a single call with N genes
        # into (N/block_size)*(N/block_size) calls with block_size genes.
        for genes1, genes2 in itertools.product(gene_blocks, repeat=2):
            if genes1 == genes2:
                bp = process_pc_pathsbetween(
                    genes1, database_filter=database_filter,
                    block_size=None)
            else:
                bp = process_pc_pathsfromto(
                    genes1, genes2, database_filter=database_filter)
            # Sub-queries return None when no model was obtained; skip
            # those instead of crashing on bp.statements.
            if bp is not None:
                stmts += bp.statements
        # Fix: this branch previously fell off the end and returned None.
        # Wrap the aggregated statements in a processor so both code
        # paths return the same type. NOTE(review): assumes
        # BiopaxProcessor.__init__ tolerates a None model — confirm in
        # the processor.
        bp = BiopaxProcessor(None)
        bp.statements = stmts
        return bp


def process_pc_pathsfromto(source_genes, target_genes, neighbor_limit=1,
                           database_filter=None):
    """Returns a BiopaxProcessor for a PathwayCommons paths-from-to query.

    The paths-from-to query finds the paths from a set of source genes to
    a set of target genes.

    http://www.pathwaycommons.org/pc2/#graph

    http://www.pathwaycommons.org/pc2/#graph_kind

    Parameters
    ----------
    source_genes : list
        A list of HGNC gene symbols that are the sources of paths being
        searched for.
        Examples: ['BRAF', 'RAF1', 'ARAF']
    target_genes : list
        A list of HGNC gene symbols that are the targets of paths being
        searched for.
        Examples: ['MAP2K1', 'MAP2K2']
    neighbor_limit : Optional[int]
        The number of steps to limit the length of the paths between the
        source genes and target genes being queried. Default: 1
    database_filter : Optional[list]
        A list of database identifiers to which the query is restricted.
        Examples: ['reactome'], ['biogrid', 'pid', 'psp']
        If not given, all databases are used in the query. For a full
        list of databases see
        http://www.pathwaycommons.org/pc2/datasources

    Returns
    -------
    bp : BiopaxProcessor
        A BiopaxProcessor containing the obtained BioPAX model in
        bp.model. Returns None if the query produced no model.
    """
    model = model_from_pc_query('pathsfromto',
                                source=source_genes,
                                target=target_genes,
                                limit=neighbor_limit,
                                datasource=database_filter)
    if model is not None:
        return process_model(model)


def process_owl(owl_filename):
    """Returns a BiopaxProcessor for a BioPAX OWL file.

    Parameters
    ----------
    owl_filename : str
        The name of the OWL file to process.

    Returns
    -------
    bp : BiopaxProcessor
        A BiopaxProcessor containing the obtained BioPAX model in
        bp.model.
    """
    model = model_from_owl_file(owl_filename)
    return process_model(model)


def process_owl_str(owl_str):
    """Returns a BiopaxProcessor for a BioPAX OWL string.

    Parameters
    ----------
    owl_str : str
        The string content of an OWL file to process.

    Returns
    -------
    bp : BiopaxProcessor
        A BiopaxProcessor containing the obtained BioPAX model in
        bp.model.
    """
    model = model_from_owl_str(owl_str)
    return process_model(model)


def process_model(model):
    """Returns a BiopaxProcessor for a BioPAX model object.

    Parameters
    ----------
    model : org.biopax.paxtools.model.Model
        A BioPAX model object.

    Returns
    -------
    bp : BiopaxProcessor
        A BiopaxProcessor containing the obtained BioPAX model in
        bp.model.
    """
    bp = BiopaxProcessor(model)
    bp.process_all()
    return bp
{ "repo_name": "johnbachman/belpy", "path": "indra/sources/biopax/api.py", "copies": "1", "size": "7641", "license": "mit", "hash": 8432073332287070000, "line_mean": 35.5598086124, "line_max": 79, "alpha_frac": 0.6109148017, "autogenerated": false, "ratio": 4.02793885081708, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.513885365251708, "avg_score": null, "num_lines": null }
__all__ = ['process_text', 'reground_texts']

import os
import tqdm
import math
import pickle
import logging
import requests
from indra.util import batch_iter

logger = logging.getLogger(__name__)


def process_text(text, webservice):
    """Process a given text with an Eidos webservice at the given address.

    Note that in most cases this function should not be used directly,
    rather, used indirectly by calling
    `indra.sources.eidos.process_text` with the webservice parameter.

    Parameters
    ----------
    text : str
        The text to be read using Eidos.
    webservice : str
        The address where the Eidos web service is running, e.g.,
        http://localhost:9000.

    Returns
    -------
    dict
        A JSON dict of the results from the Eidos webservice.
    """
    res = requests.post('%s/process_text' % webservice,
                        json={'text': text})
    res.raise_for_status()
    json_dict = res.json()
    return json_dict


def reground_texts(texts, ont_yml, webservice, topk=10,
                   is_canonicalized=False, filter=True, cache_path=None):
    """Ground concept texts given an ontology with an Eidos web service.

    Parameters
    ----------
    texts : list[str]
        A list of concept texts to ground.
    ont_yml : str
        A serialized YAML string representing the ontology.
    webservice : str
        The address where the Eidos web service is running, e.g.,
        http://localhost:9000.
    topk : Optional[int]
        The number of top scoring groundings to return. Default: 10
    is_canonicalized : Optional[bool]
        If True, the texts are assumed to be canonicalized. If False,
        Eidos will canonicalize the texts which yields much better
        groundings but is slower. Default: False
    filter : Optional[bool]
        If True, Eidos filters the ontology to remove determiners from
        examples and other similar operations. Should typically be set
        to True. Default: True
    cache_path : Optional[str]
        Path to a pickle file used as a grounding cache: previously
        grounded texts are loaded from it up front and the updated cache
        is written back after grounding. If None, no caching is done.
        Default: None

    Returns
    -------
    list
        A list, aligned with the input texts, where each entry is a list
        of (grounding, score) tuples sorted by decreasing score.
    """
    grounding_cache = {}
    if cache_path:
        if os.path.exists(cache_path):
            with open(cache_path, 'rb') as fh:
                grounding_cache = pickle.load(fh)
            logger.info('Loaded %d groundings from cache'
                        % len(grounding_cache))
    # Only send texts to the web service that aren't cached yet.
    texts_to_ground = list(set(texts) - set(grounding_cache.keys()))
    logger.info('Grounding a total of %d texts' % len(texts_to_ground))
    # Batch requests to keep each POST payload a manageable size.
    for text_batch in tqdm.tqdm(batch_iter(texts_to_ground, batch_size=500,
                                           return_func=list),
                                total=math.ceil(len(texts_to_ground)/500)):
        params = {
            'ontologyYaml': ont_yml,
            'texts': text_batch,
            'topk': topk,
            'isAlreadyCanonicalized': is_canonicalized,
            'filter': filter
        }
        res = requests.post('%s/reground' % webservice, json=params)
        res.raise_for_status()
        grounding_for_texts = grounding_dict_to_list(res.json())
        for txt, grounding in zip(text_batch, grounding_for_texts):
            grounding_cache[txt] = grounding
    # Assemble results in the order of the input texts (which may repeat).
    all_results = [grounding_cache[txt] for txt in texts]
    if cache_path:
        with open(cache_path, 'wb') as fh:
            pickle.dump(grounding_cache, fh)
    return all_results


def grounding_dict_to_list(groundings):
    """Transform the webservice response into a flat list.

    Parameters
    ----------
    groundings : list
        The JSON response of the /reground endpoint: per input text, a
        list of dicts with 'grounding' and 'score' keys.

    Returns
    -------
    list
        Per input text, a list of (grounding, score) tuples sorted by
        decreasing score, with trailing slashes stripped from the
        grounding entries.
    """
    all_grounding_lists = []
    for entry in groundings:
        grounding_list = []
        for grounding_dict in entry:
            gr = grounding_dict['grounding']
            # Strip off trailing slashes
            if gr.endswith('/'):
                gr = gr[:-1]
            grounding_list.append((gr, grounding_dict['score']))
        grounding_list = sorted(grounding_list, key=lambda x: x[1],
                                reverse=True)
        all_grounding_lists.append(grounding_list)
    return all_grounding_lists
{ "repo_name": "johnbachman/belpy", "path": "indra/sources/eidos/client.py", "copies": "1", "size": "4142", "license": "mit", "hash": -3191723436416305000, "line_mean": 33.2314049587, "line_max": 79, "alpha_frac": 0.597054563, "autogenerated": false, "ratio": 4.005802707930368, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 121 }
__all__ = ['prof4']
import pyfits as pf
import numpy as np
import matplotlib.pyplot as pl
import sys
import scipy.io


class prof4(object):
    """Interactive viewer for 4-Stokes spectro-polarimetric data.

    Reads a FITS data cube (file ``<fileIn>c``) and an IDL save file of
    integrated maps (file ``<fileIn>m``), then opens three linked
    matplotlib figures: integrated Stokes maps, per-slit-position Stokes
    maps, and Stokes profiles that follow the mouse over the first figure.
    """

    def __init__(self, fileIn):
        # Base path of the data; readData() appends 'c' (cube) and 'm'
        # (maps) to it when opening the two files.
        self.currentFile = fileIn
        self.readData()
        # [x, y] cursor position in map coordinates, updated on mouse move.
        self.currentPos = [0,0]

        # Plot with integrated maps
        self.figFixedMaps = pl.figure(num=1, figsize=(5,8))
        self.axFixedMaps = [None]*4
        self.drawnFixedMaps = [None]*4
        # Maps for slit position 0 are shown initially.
        self.slitMaps = self.getMaps(0)
        for i in range(4):
            self.axFixedMaps[i] = self.figFixedMaps.add_subplot(2,2,i+1)
            self.drawnFixedMaps[i] = self.axFixedMaps[i].imshow(self.maps[i], aspect='equal')
            self.axFixedMaps[i].set_axis_off()
        # Mouse motion over this figure drives the other two figures.
        cid = self.figFixedMaps.canvas.mpl_connect('motion_notify_event', self.onMouseMove)
        cid = self.figFixedMaps.canvas.mpl_connect('close_event', self.onKillWindows)

        # Plot with Stokes maps
        self.figMaps = pl.figure(num=2, figsize=(8,15))
        self.canvasMaps= self.figMaps.canvas
        self.axMaps = [None]*4
        self.drawnMaps = [None]*4
        self.posMaps = [None]*4
        # Saved per-axes backgrounds for blitting (fast partial redraws).
        self.backgroundMaps = [None]*4
        for i in range(4):
            self.axMaps[i] = self.figMaps.add_subplot(4,1,i+1)
            self.drawnMaps[i] = self.axMaps[i].imshow(self.slitMaps[i,:,:], aspect='equal', animated=True)
            if (i == 0):
                # Marker showing the current row; only drawn on the first panel.
                self.posMaps[i], = self.axMaps[i].plot(200, 200, '+', animated=True)
            self.axMaps[i].set_xlim([0,self.nWavelength])
            self.axMaps[i].set_ylim([0,self.nx])
        # Draw once, then capture clean backgrounds for later blitting.
        self.canvasMaps.draw()
        for i in range(4):
            self.backgroundMaps[i] = self.figMaps.canvas.copy_from_bbox(self.axMaps[i].bbox)
        cid = self.figMaps.canvas.mpl_connect('close_event', self.onKillWindows)

        # Plot with Stokes profiles
        self.figStokes = pl.figure(num=3)
        self.canvasStokes = self.figStokes.canvas
        self.axStokes = [None]*4
        self.drawnStokes = [None]*4
        self.backgroundStokes = [None]*4
        cid = self.figStokes.canvas.mpl_connect('close_event', self.onKillWindows)
        for i in range(4):
            self.axStokes[i] = self.figStokes.add_subplot(2,2,i+1)
            self.drawnStokes[i], = self.axStokes[i].plot(self.slitMaps[i,0,:], animated=True)
        # tight_layout can recreate/move axes, so re-fetch them before
        # capturing the blit backgrounds.
        self.figStokes.tight_layout()
        self.canvasStokes.draw()
        self.newAxes = self.figStokes.get_axes()
        for i in range(4):
            self.axStokes[i] = self.newAxes[i]
            self.backgroundStokes[i] = self.figStokes.canvas.copy_from_bbox(self.axStokes[i].bbox)

    def updateProfiles(self, redrawMaps=False):
        """Blit-update the profile figure (and optionally the maps figure)
        for the current cursor position."""
        for i in range(4):
            # Restore the clean background, update only the line, and blit.
            self.figStokes.canvas.restore_region(self.backgroundStokes[i])
            self.drawnStokes[i].set_ydata(self.slitMaps[i,self.currentPos[1],:])
            self.axStokes[i].draw_artist(self.drawnStokes[i])
            self.figStokes.canvas.blit(self.axStokes[i].bbox)
        if (redrawMaps):
            # Only needed when the slit position (x) changed.
            for i in range(4):
                self.figMaps.canvas.restore_region(self.backgroundMaps[i])
                self.drawnMaps[i].set_array(self.slitMaps[i,:,:])
                self.axMaps[i].draw_artist(self.drawnMaps[i])
                if (i == 0):
                    # Flip y because image rows run opposite to data rows
                    # — presumably matches the flipud in readData; confirm.
                    self.posMaps[i].set_ydata(self.nx-self.currentPos[1])
                    self.axMaps[i].draw_artist(self.posMaps[i])
                self.figMaps.canvas.blit(self.axMaps[i].bbox)

    def onMouseMove(self, event):
        """Mouse-motion callback on the integrated-maps figure."""
        # xdata/ydata are None when the cursor is outside the axes.
        if (event.xdata != None and event.ydata != None):
            newPos = np.asarray([event.xdata, event.ydata])
            newPos = newPos.astype(int)
            if (newPos[0] != self.currentPos[0]):
                # x changed: load the maps for the new slit position and
                # redraw both the maps and the profiles.
                self.slitMaps = self.getMaps(newPos[0])
                self.currentPos = newPos
                self.updateProfiles(redrawMaps=True)
            if (newPos[1] != self.currentPos[1]):
                # Only y changed: the profiles are enough.
                self.currentPos = newPos
                self.updateProfiles(redrawMaps=False)

    def onKillWindows(self, event):
        """Close all three figures when any one of them is closed."""
        pl.close('all')

    def readData(self):
        """Open the FITS cube and the IDL save file of integrated maps."""
        self.hdu = pf.open(self.currentFile+'c')
        self.nx = self.hdu[0].header['NAXIS2']
        self.nWavelength = self.hdu[0].header['NAXIS1']
        # Four Stokes frames per slit position; NOTE(review): under
        # Python 3 this is float division — confirm whether // is wanted.
        self.nFrames = self.hdu[0].header['NAXIS3'] / 4
        f = scipy.io.readsav(self.currentFile+'m')
        self.maps = [None] * 4
        # Integrated I, Q, U, V maps stored in the IDL save file.
        nameMap = ['toti','totq','totu','totv']
        for i in range(4):
            # Normalize each map to its maximum and flip vertically for
            # display orientation.
            self.maps[i] = np.flipud(f[nameMap[i]] / np.max(f[nameMap[i]]))

    def getProfiles(self, x, y):
        """Return the four Stokes profiles at slit position x, row y."""
        return self.hdu[0].data[4*x:4*x+4,y,:]

    def getMaps(self, x):
        """Return the four Stokes maps (full slit) at slit position x."""
        return self.hdu[0].data[4*x:4*x+4,:,:]

#p = prof4(sys.argv[1])
#def main():
    #prof = prof4(sys.argv[1])
#if __name__ == "__main__":
    #main()
{ "repo_name": "aasensio/pyAndres", "path": "prof.py", "copies": "2", "size": "4198", "license": "mit", "hash": -8450813579192669000, "line_mean": 30.328358209, "line_max": 100, "alpha_frac": 0.6669842782, "autogenerated": false, "ratio": 2.5473300970873787, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8496512273614678, "avg_score": 0.1435604203345403, "num_lines": 134 }
__all__ = ["ProfilePlotter"] from functools import reduce import os import pstats import matplotlib matplotlib.use('TkAgg') import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages import numpy as np class StatsCollection(object): def __init__(self, directory, logger=None): self.stats = {} # if the program is interrupted or killed during execution then the profiler # output can be messed up---just ignore if this happens, and print the list of # these later corrupted = [] def load_file(filepath): try: return pstats.Stats(filepath) except TypeError: # corrupted file: ignore & save corrupted.append(filepath) # walk all files in the profiling directory and save stats into the dictionary for path, _, files in os.walk(directory): files = [f for f in files if f.endswith(".prof")] split = path.split(os.sep) # walk down into the stats dictionary according to the split path list here = reduce(lambda acc, p: acc.get(p, {}), split[:-1], self.stats) # add entry to the stats dictionary here[split[-1]] = {f: load_file(os.path.join(path, f)) for f in files} here[split[-1]] = {f: stat for f, stat in here[split[-1]].items() if stat} if corrupted: msg = ( "couldn't load profile from the following files (probably corrupted): " + str(corrupted) ) if logger: logger.info(msg) else: print(msg) self.stats = self.stats[directory] class ProfilePlotter(object): """ Generate plots loaded from some profiling output previously recorded by a ``cdispyutils.profiling.Profiler``. """ def __init__(self, directory="profile"): self.stats = StatsCollection(directory) def make_all_plots(self, save_file="profile_graphs.pdf"): """ Using the stats loaded into the ``StatsCollection``, generate a series of plots for all the profiling files which are recorded, comparing the results from different runs for their shared profiling events. 
The stats in the ``StatsCollection`` have to be slightly reorganized for a couple of reasons: - We want to parametrize over separate runs, for every category - For WSGI's profiling output, we want to aggregate the results for each run into a single data point with an uncertainty value The ``results`` variable will look like this: { "init": { "app_register_blueprints.prof": { "run": ["2018-11-30T15:15:36.14", "2018-11-30T15:15:38.21"], "results": [[0.03], [0.04]], }, }, "wsgi": { "GET.root.prof": { "run": ["2018-11-30T15:15:36.14", "2018-11-30T15:15:38.21"], "results": [[0.0001, 0.0002, 0.0004], [0.0001]], }, "GET._status.prof": { "run": ["2018-11-30T15:15:36.14", "2018-11-30T15:15:38.21"], "results": [[0.002], [0.004]], } }, } Then we assemble plots for every profile in every category, where the data points are the run name as the x and the result time as the y. """ results = {} for run, categories in self.stats.stats.items(): for category, files in categories.items(): if category not in results: results[category] = {} aggregator = ( _aggregate_wsgi_filename if category == "wsgi" else _aggregate_profiler_filename ) for filename, times in _aggregate_results(files, aggregator).items(): if filename not in results[category]: results[category][filename] = {} if run not in results[category][filename]: results[category][filename][run] = [] results[category][filename][run].extend(times) with PdfPages(save_file) as pdf: for category, profiles in results.items(): for profile, data in profiles.items(): figure = plt.figure() figure.suptitle("{}: {}".format(category, profile), fontsize=16) axes = figure.subplots() axes.margins(0.05) axes.set_xlabel("Run ID") axes.set_ylabel("Time (s)") scatter_x = [] scatter_y = [] errorbar_x = [] errorbar_y = [] errorbar_dy = [] for run, times in data.items(): if len(times) > 1: axes.scatter( len(times) * [run], times, s=3, c="C1", zorder=10 ) errorbar_x.append(run) errorbar_y.append(np.mean(times)) errorbar_dy.append(np.std(times)) else: 
scatter_x.append(run) scatter_y.append(times[0]) axes.scatter(scatter_x, scatter_y, c="C0") axes.errorbar( errorbar_x, errorbar_y, yerr=errorbar_dy, fmt="oC0", capsize=4 ) plt.setp( axes.get_xticklabels(), rotation=45, horizontalalignment="right" ) pdf.savefig(figure, bbox_inches="tight") def _aggregate_wsgi_filename(filename): """ The WSGI profiler outputs files like this: GET.root.000003ms.1543612537.prof For comparison in our plotter we want them to look like this: GET.root """ return ".".join(filename.split(".")[:2]) def _aggregate_profiler_filename(filename): """ The Profiler class outputs files names like this: cdispyutils.profiling.vis._aggregate_profiler_filename-1.prof For the plotter we want to keep only this part for the names: cdispyutils.profiling.vis._aggregate_profiler_filename """ return filename.split("-")[0] def _aggregate_results(file_stats, f_aggregate): """ Using the Profiler to run some functions multiple times in a given category will produce some results like this: run/ a-1.prof b-1.prof a-2.prof a-3.prof b-2.prof Here we aggregate the timing results from repeated executions of the same function. For collecting profiling results from the WSGI output directory, we want to take something like this: wsgi/ GET.root.000003ms.1543612537.prof GET.root.000002ms.1543612540.prof GET.root.000003ms.1543612541.prof GET.root.000003ms.1543612543.prof GET._status.000019ms.1543612539.prof And aggregate them so the results are like this (but collecting lists of timings): wsgi/ GET.root.prof GET._status.prof Note that this methodology has a minor flaw: WSGI does not differentiate between multiple requests in the same second to the same endpoint which take the same length of time, so the variance in the results from this function may be inflated when this happens. 
Args: file_stats (Dict[str, pstats.Stats]): mapping from profile filename to stats Return: Dict[str, List[float]] Example: {"GET.root.prof": [0.003, 0.002, 0.003, 0.003], "GET._status.prof": [0.019]} """ results = {} for filename, stat in file_stats.items(): filename = f_aggregate(filename) if filename not in results: results[filename] = [] results[filename].append(stat.total_tt) return results
{ "repo_name": "uc-cdis/cdis-python-utils", "path": "cdispyutils/profiling/vis.py", "copies": "1", "size": "8290", "license": "apache-2.0", "hash": -849406242004646400, "line_mean": 35.0434782609, "line_max": 88, "alpha_frac": 0.5394451146, "autogenerated": false, "ratio": 4.414270500532481, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0019732216461067313, "num_lines": 230 }
__all__ = ('Profile', 'ProfileNotFoundError') import cbpos from .driver import Driver import os class Profile(object): def __init__(self, name, driver, host=None, port=None, username=None, password=None, database=None, query=None, drivername=None): self.__config_name = None self.__name = None self.name = name # Could be different than driver.name, e.g. "mysql" or "mysql+pymysql" self.drivername = drivername self.__driver = None self.driver = driver self.host = host self.port = port self.username = username self.password = password self.__database = None self.database = database self.query = query self.editable = True # Cache for future calls to Profile.get(name) self.__profiles[name] = self @property def name(self): return self.__name @name.setter def name(self, n): self.__config_name = n if self.__name is None else self.__name self.__name = n @property def driver(self): return self.__driver @driver.setter def driver(self, d): self.__driver = d if d is not None and \ (not self.drivername or \ self.drivername.split('+', 1)[0] != d.name): # No specific drivername is requested # Or the selected drivername does not match the driver # Then update the driver name to the default of the selected driver self.drivername = d.name @property def database(self): return self.__database @database.setter def database(self, d): if self.__driver.use_database_as_filename and \ self.host is None and \ d is not None and \ not os.path.isabs(d): self.__database = os.path.join(cbpos.config.env.data_dir, d) else: self.__database = d def save(self): if not self.editable: return cbpos.config['db.'+self.__config_name] = dict(self) if self.__name != self.__config_name: # If the name changed from when it was created # Update the configuration cbpos.config['db.'+self.__name] = cbpos.config['db.'+self.__config_name] cbpos.config['db.'+self.__config_name] = None # And update the cache self.__profiles[self.__name] = self try: self.__profiles.pop(self.__config_name) except KeyError: pass # Update the 
config name self.__config_name = self.__name # And save it back to the config anyway cbpos.config.save() def use(self): cbpos.config['db', 'used'] = self.__config_name cbpos.config.save() def delete(self): if not self.editable: return cbpos.config['db.'+self.__config_name] = None try: self.__profiles.pop(self.__config_name) except KeyError: pass cbpos.config.save() __options = ('host', 'port', 'username', 'password', 'database', 'query', 'drivername') def __iter__(self): for s in self.__options: v = getattr(self, s) if v is not None and s not in self.driver.empty_fields: yield (s, v) def __repr__(self): return '<Profile %s>' % (self.name,) __profiles = {} @classmethod def get(cls, profile_name): if profile_name not in cls.__profiles: config = 'db.'+profile_name drivername = cbpos.config[config, 'drivername'] if drivername is None: raise ProfileNotFoundError('Profile not found: {}'.format(repr(profile_name))) driver = Driver.get(drivername) kwargs = dict([(a, cbpos.config[config, a]) for a in cls.__options]) p = cls(profile_name, driver, **kwargs) # It saves itself in the __profiles dict return cls.__profiles[profile_name] @classmethod def get_used(cls): profile_name = cbpos.config['db', 'used'] return cls.get(profile_name) @classmethod def get_all_names(cls): return cls.__profiles.keys() @classmethod def get_all(cls): for section_name, s in cbpos.config: if section_name.startswith('db.'): Profile.get(section_name[3:]) return cls.__profiles.values() default = Profile(name='default', driver=Driver.get('sqlite'), database='default.sqlite') default.editable = False class ProfileNotFoundError(ValueError): pass
{ "repo_name": "coinbox/coinbox-core", "path": "cbpos/database/profile.py", "copies": "1", "size": "4859", "license": "mit", "hash": -8748433789783561000, "line_mean": 30.3483870968, "line_max": 94, "alpha_frac": 0.5392055979, "autogenerated": false, "ratio": 4.284832451499118, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.02853513910082705, "num_lines": 155 }
__all__ = ['projected_correlation'] from itertools import izip import math import numpy as np from scipy.integrate import quad from fast3tree import fast3tree def _yield_periodic_points(center, dcorner1, dcorner2, box_size): cc = np.array(center) flag = (cc+dcorner1 < 0).astype(int) - (cc+dcorner2 >= box_size).astype(int) cp = cc + flag*box_size a = range(len(cc)) for j in xrange(1 << len(cc)): for i in a: if j >> i & 1 == 0: cc[i] = center[i] elif flag[i]: cc[i] = cp[i] else: break else: yield cc def _jackknife_2d_random(rbins, box_size, jackknife_nside): def corner_area(x, y): a = math.sqrt(1.0-x*x)-y b = math.sqrt(1.0-y*y)-x theta = math.asin(math.sqrt(a*a+b*b)*0.5)*2.0 return (a*b + theta - math.sin(theta))*0.5 def segment_without_corner_area(x, r): half_chord = math.sqrt(1.0-x*x) return math.acos(x) - x*half_chord \ - quad(corner_area, 0, min(half_chord, 1.0/r), (x,))[0]*r def overlapping_circular_areas(r): if r*r >= 2: return 1.0 return (math.pi - quad(segment_without_corner_area, 0, min(1, 1.0/r), \ (r,))[0]*4.0*r)*r*r overlapping_circular_areas_vec = np.vectorize(overlapping_circular_areas, \ [float]) side_length = box_size/float(jackknife_nside) square_area = 1.0/float(jackknife_nside*jackknife_nside) rbins_norm = rbins/side_length annulus_areas = np.ediff1d(overlapping_circular_areas_vec(rbins_norm)) annulus_areas /= np.ediff1d(rbins_norm*rbins_norm)*math.pi return 1.0 - square_area * (2.0 - annulus_areas) def projected_correlation(points, rbins, zmax, box_size, jackknife_nside=0, \ bias_correction=True): """ Calculate the projected correlation function wp(rp) and its covariance matrix for a periodic box, with the plane-parallel approximation and the Jackknife method. Parameters ---------- points : array_like Must be a 2-d array whose last dimension is 3 (i.e. has 3 columns) The last column will be used as the redshift distance. rbins : array_like A 1-d array that has the edges of the rp bins. Must be sorted. 
zmax : float The integral of \pi goes from -zmax to zmax (redshift distance). box_size : float The side length of the periodic box. jackknife_nside : int, optional (Default: 0) If <= 1 , it will not do Jackknife. bias_correction : bool, optional (Default: True) If true and jackknife_nside > 1, use jackknife resampling to correct the bias. Returns ------- wp : ndarray A 1-d array that has wp. The length of this retured array would be len(rbins) - 1. wp_cov : ndarray (returned if jackknife_nside > 1) The len(wp) by len(wp) covariance matrix of wp. """ points = np.asarray(points) s = points.shape if len(s) != 2 or s[1] != 3: raise ValueError('`points` must be a 2-d array with last dim=3') N = s[0] rbins = np.asarray(rbins) rbins_sq = rbins*rbins dcorner2 = np.array([rbins[-1], rbins[-1], zmax]) dcorner1 = -dcorner2 if np.any(dcorner2*2 > box_size): print "[Warning] box too small!" pairs_rand = float(N*N) / box_size**3 \ * (rbins[1:]**2-rbins[:-1]**2)*np.pi*zmax*2.0 jackknife_nside = int(jackknife_nside) if jackknife_nside <= 1: #no jackknife dcorner1[2] = 0 #save some time pairs = np.zeros(len(rbins)-1, dtype=int) with fast3tree(points) as tree: for p in points: for pp in _yield_periodic_points(p,dcorner1,dcorner2,box_size): x,y=tree.query_box(pp+dcorner1,pp+dcorner2,output='p').T[:2] x -= pp[0]; x *= x y -= pp[1]; y *= y x += y; x.sort() pairs += np.ediff1d(np.searchsorted(x, rbins_sq)) return (pairs.astype(float)*2.0/pairs_rand - 1.0) * zmax*2.0 else: #do jackknife jack_ids = np.floor(np.remainder(points[:,0], box_size)\ / box_size*jackknife_nside).astype(int) jack_ids += np.floor(np.remainder(points[:,1], box_size)\ / box_size*jackknife_nside).astype(int) * jackknife_nside n_jack = jackknife_nside*jackknife_nside pairs = np.zeros((n_jack, len(rbins)-1), dtype=int) auto_pairs = np.zeros_like(pairs) with fast3tree(points) as tree: for p, jid in izip(points, jack_ids): for pp in _yield_periodic_points(p,dcorner1,dcorner2,box_size): idx,pos = 
tree.query_box(pp+dcorner1,pp+dcorner2,output='b') x, y = pos.T[:2] x -= pp[0]; x *= x y -= pp[1]; y *= y x += y y = x[jack_ids[idx]==jid] y.sort(); x.sort() pairs[jid] += np.ediff1d(np.searchsorted(x, rbins_sq)) auto_pairs[jid] += np.ediff1d(np.searchsorted(y, rbins_sq)) idx = pos = x = y = None pairs_sum = pairs.sum(axis=0) pairs = pairs_sum - pairs*2 + auto_pairs wp_jack = (pairs.astype(float) \ / pairs_rand \ / _jackknife_2d_random(rbins, box_size, jackknife_nside)\ - 1.0) * zmax*2.0 wp_full = (pairs_sum.astype(float)/pairs_rand - 1.0) * zmax*2.0 if bias_correction: wp = wp_full*n_jack - wp_jack.mean(axis=0)*(n_jack-1) else: wp = wp_full wp_cov = np.cov(wp_jack, rowvar=0, bias=1)*(n_jack-1) return wp, wp_cov def correlation3d(points, rbins, box_size): """ Calculate the 3D correlation function xi(r) for a periodic box. Parameters ---------- points : array_like Must be a 2-d array whose last dimension is 3 (i.e. has 3 columns). rbins : array_like A 1-d array that has the edges of the rp bins. Must be sorted. box_size : float The side length of the periodic box. Returns ------- xi : ndarray A 1-d array that has wp. The length of this retured array would be len(rbins) - 1. """ points = np.asarray(points) s = points.shape if len(s) != 2 or s[1] != 3: raise ValueError('`points` must be a 2-d array with last dim=3') N = s[0] rbins = np.asarray(rbins) pairs_rand = float(N*N) / box_size**3 \ * (rbins[1:]**3-rbins[:-1]**3)*(np.pi*4.0/3.0) pairs = np.zeros(len(rbins)-1, dtype=int) with fast3tree(points) as tree: tree.set_boundaries(0, box_size) for p in points: pairs += np.ediff1d([tree.query_radius(p, r, periodic=True, \ output='c') for r in rbins]) return pairs.astype(float)/pairs_rand - 1.0
{ "repo_name": "manodeep/yymao-helpers", "path": "helpers/CorrelationFunction.py", "copies": "1", "size": "6977", "license": "mit", "hash": 5775687129956282000, "line_mean": 36.3101604278, "line_max": 80, "alpha_frac": 0.5599828006, "autogenerated": false, "ratio": 3.1329142343960483, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.911645204035253, "avg_score": 0.015288998928703773, "num_lines": 187 }
__all__ = ['Project', 'Experiment', 'Result', 'Stat', 'Variation', 'Goal', 'Audience'] import json import urllib class ResourceGenerator(object): def __init__(self, client=None, resource=None): if client is None: raise ValueError('Must specify client.') if resource is None: raise ValueError('Must specify resource.') self.client = client self.resource = resource def get(self, optimizely_ids=None): if not optimizely_ids: return self.resource.list(client=self.client) elif type(optimizely_ids) == int or type(optimizely_ids) == long: instance = self.resource(self.client, optimizely_id=optimizely_ids) instance.refresh() return instance elif type(optimizely_ids) == list: response_list = [] for optimizely_id in optimizely_ids: response_list.append(self.get(optimizely_id)) return response_list def create(self, data): return self.resource.create(data, self.client) def update(self, rid, data): return self.resource.update(rid, data, self.client) class APIObject(object): def __init__(self, client, optimizely_id=None): self.client = client if optimizely_id: self.id = optimizely_id self.refresh() def refresh(self): if not hasattr(self, 'id'): raise AttributeError('%s object has no ID, so it cannot be refreshed' % self.class_name()) self._refresh_from(self.client.request('get', [self.class_url(), self.id])) def _refresh_from(self, params): for k, v in params.iteritems(): self.__setattr__(k, v) @classmethod def class_name(cls): if cls == APIObject: raise NotImplementedError( 'APIObject is an abstract class. You should perform ' 'actions on its subclasses (e.g. 
Project, Experiment)') return cls.__name__.lower() @classmethod def class_url(cls): return '%ss' % cls.class_name() def get_child_objects(self, resource): resp = [] for li in self.client.request('get', [self.class_url(), self.id, resource.class_url()]): e = resource(self.client) e._refresh_from(li) resp.append(e) return resp class ListableObject(APIObject): @classmethod def list(cls, client): resp = [] for li in client.request('get', [cls.class_url()]): e = cls(client) e._refresh_from(li) resp.append(e) return resp class CreatableObject(APIObject): @classmethod def create(cls, data, client): instance = cls(client) instance._refresh_from(client.request('post', [cls.class_url()], data=json.dumps(data), headers={'Content-Type': 'application/json'})) return instance class CreatableChildObject(APIObject): parent_resource = None @classmethod def create(cls, data, client): instance = cls(client) instance._refresh_from(client.request('post', [cls.parent_resource.class_url(), data['%s_id' % cls.parent_resource.class_name()], cls.class_url()], data=json.dumps(data), headers={'Content-Type': 'application/json'})) return instance class UpdatableObject(APIObject): editable_fields = [] def save(self): self._refresh_from(self.update(self.id, self.__dict__, self.client).__dict__) @classmethod def update(cls, rid, data, client): updates = {} for k, v in data.iteritems(): if k in cls.editable_fields: updates[k] = v resp = client.request('put', [cls.class_url(), rid], data=json.dumps(updates), headers={'Content-Type': 'application/json'}) instance = cls(client) instance._refresh_from(resp) return instance class DeletableObject(APIObject): def delete(self): self.client.request('delete', [self.class_url(), self.id]) class Project(ListableObject, CreatableObject, UpdatableObject): editable_fields = ['project_status', 'project_name', 'include_jquery', 'project_javascript', 'enable_force_variation', 'exclude_disabled_experiments', 'exclude_names', 'ip_anonymization', 'ip_filter'] def 
experiments(self): return self.get_child_objects(Experiment) def goals(self): return self.get_child_objects(Goal) def audiences(self): return self.get_child_objects(Audience) def dimensions(self): return self.get_child_objects(Dimension) class Experiment(CreatableChildObject, UpdatableObject, DeletableObject): parent_resource = Project editable_fields = ['audience_ids', 'activation_mode', 'description', 'edit_url', 'status', 'custom_css', 'custom_js', 'percentage_included', 'url_conditions'] def results(self): return self.get_child_objects(Result) def stats(self): return self.get_child_objects(Stat) def variations(self): return self.get_child_objects(Variation) def schedules(self): return self.get_child_objects(Schedule) def add_goal(self, gid): goal = Goal(self.client, gid) experiment_ids = set(goal.experiment_ids) experiment_ids.add(self.id) goal.experiment_ids = list(experiment_ids) return goal.save() def remove_goal(self, gid): goal = Goal(self.client, gid) goal.refresh() experiment_ids = set(goal.experiment_ids) experiment_ids.remove(self.id) goal.experiment_ids = list(experiment_ids) return goal.save() class Result(APIObject): pass class Stat(APIObject): pass class Variation(CreatableChildObject, UpdatableObject, DeletableObject): parent_resource = Experiment editable_fields = ['description', 'is_paused', 'js_component', 'weight'] class Goal(CreatableChildObject, UpdatableObject, DeletableObject): parent_resource = Project editable_fields = ['addable', 'archived', 'description', 'experiment_ids', 'goal_type', 'selector', 'target_to_experiments', 'target_urls', 'target_url_match_types', 'title', 'urls', 'url_match_types'] class Audience(CreatableChildObject, UpdatableObject): parent_resource = Project editable_fields = ['name', 'description', 'conditions', 'segmentation'] class Dimension(CreatableChildObject, UpdatableObject, DeletableObject): parent_resource = Project editable_fields = ['name', 'client_api_name', 'description'] class 
Schedule(CreatableChildObject, UpdatableObject, DeletableObject): parent_resource = Experiment editable_fields = ['start_time', 'stop_time']
{ "repo_name": "experimentengine/optimizely-client-python", "path": "optimizely_ee/resource.py", "copies": "1", "size": "7224", "license": "mit", "hash": 1953770068396645000, "line_mean": 28.606557377, "line_max": 100, "alpha_frac": 0.5915005537, "autogenerated": false, "ratio": 4.035754189944134, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5127254743644134, "avg_score": null, "num_lines": null }
__all__ = ['Project', 'Experiment', 'Result', 'Variation', 'Goal', 'Audience'] import json import urllib class ResourceGenerator(object): def __init__(self, client=None, resource=None): if client is None: raise ValueError('Must specify client.') if resource is None: raise ValueError('Must specify resource.') self.client = client self.resource = resource def get(self, optimizely_ids=None): if not optimizely_ids: return self.resource.list(client=self.client) elif type(optimizely_ids) == int: instance = self.resource(self.client, optimizely_id=optimizely_ids) instance.refresh() return instance elif type(optimizely_ids) == list: response_list = [] for optimizely_id in optimizely_ids: response_list.append(self.get(optimizely_id)) return response_list def create(self, data): return self.resource.create(data, self.client) def update(self, rid, data): return self.resource.update(rid, data, self.client) class APIObject(object): def __init__(self, client, optimizely_id=None): self.client = client if optimizely_id: self.id = optimizely_id self.refresh() def refresh(self): if not hasattr(self, 'id'): raise AttributeError('%s object has no ID, so it cannot be refreshed' % self.class_name()) self._refresh_from(self.client.request('get', [self.class_url(), self.id])) def _refresh_from(self, params): for k, v in params.iteritems(): self.__setattr__(k, v) @classmethod def class_name(cls): if cls == APIObject: raise NotImplementedError( 'APIObject is an abstract class. You should perform ' 'actions on its subclasses (e.g. 
Project, Experiment)') return cls.__name__.lower() @classmethod def class_url(cls): return '%ss' % cls.class_name() def get_child_objects(self, resource): resp = [] for li in self.client.request('get', [self.class_url(), self.id, resource.class_url()]): e = resource(self.client) e._refresh_from(li) resp.append(e) return resp class ListableObject(APIObject): @classmethod def list(cls, client): resp = [] for li in client.request('get', [cls.class_url()]): e = cls(client) e._refresh_from(li) resp.append(e) return resp class CreatableObject(APIObject): @classmethod def create(cls, data, client): instance = cls(client) instance._refresh_from(client.request('post', [cls.class_url()], data=json.dumps(data), headers={'Content-Type': 'application/json'})) return instance class CreatableChildObject(APIObject): parent_resource = None @classmethod def create(cls, data, client): instance = cls(client) instance._refresh_from(client.request('post', [cls.parent_resource.class_url(), data['%s_id' % cls.parent_resource.class_name()], cls.class_url()], data=json.dumps(data), headers={'Content-Type': 'application/json'})) return instance class UpdatableObject(APIObject): editable_fields = [] def save(self): self._refresh_from(self.update(self.id, self.__dict__, self.client).__dict__) @classmethod def update(cls, rid, data, client): updates = {} for k, v in data.iteritems(): if k in cls.editable_fields: updates[k] = v resp = client.request('put', [cls.class_url(), rid], data=json.dumps(updates), headers={'Content-Type': 'application/json'}) instance = cls(client) instance._refresh_from(resp) return instance class DeletableObject(APIObject): def delete(self): self.client.request('delete', [self.class_url(), self.id]) class Project(ListableObject, CreatableObject, UpdatableObject): editable_fields = ['ip_filter', 'include_jquery', 'project_name', 'project_status'] def experiments(self): return self.get_child_objects(Experiment) def goals(self): return self.get_child_objects(Goal) def 
audiences(self): return self.get_child_objects(Audience) def dimensions(self): return self.get_child_objects(Dimension) class Experiment(CreatableChildObject, UpdatableObject, DeletableObject): parent_resource = Project editable_fields = ['audience_ids', 'activation_mode', 'description', 'edit_url', 'status', 'custom_css', 'custom_js', 'percentage_included', 'url_conditions'] def results(self): return self.get_child_objects(Result) def variations(self): return self.get_child_objects(Variation) def schedules(self): return self.get_child_objects(Schedule) def add_goal(self, gid): goal = Goal(self.client, gid) experiment_ids = set(goal.experiment_ids) experiment_ids.add(self.id) goal.experiment_ids = list(experiment_ids) return goal.save() def remove_goal(self, gid): goal = Goal(self.client, gid) goal.refresh() experiment_ids = set(goal.experiment_ids) experiment_ids.remove(self.id) goal.experiment_ids = list(experiment_ids) return goal.save() class Result(APIObject): pass class Variation(CreatableChildObject, UpdatableObject, DeletableObject): parent_resource = Experiment editable_fields = ['description', 'is_paused', 'js_component', 'weight'] class Goal(CreatableChildObject, UpdatableObject, DeletableObject): parent_resource = Project editable_fields = ['addable', 'experiment_ids', 'goal_type', 'selector', 'target_to_experiments', 'target_urls', 'target_url_match_types', 'title', 'urls', 'url_match_types'] class Audience(CreatableChildObject, UpdatableObject): parent_resource = Project editable_fields = ['name', 'description', 'conditions', 'segmentation'] class Dimension(CreatableChildObject, UpdatableObject, DeletableObject): parent_resource = Project editable_fields = ['name', 'client_api_name', 'description'] class Schedule(CreatableChildObject, UpdatableObject, DeletableObject): parent_resource = Experiment editable_fields = ['start_time', 'stop_time']
{ "repo_name": "wlowry88/optimizely-client-python", "path": "optimizely/resource.py", "copies": "1", "size": "7370", "license": "mit", "hash": -210580996431257100, "line_mean": 29.0816326531, "line_max": 104, "alpha_frac": 0.5527815468, "autogenerated": false, "ratio": 4.332745443856555, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5385526990656555, "avg_score": null, "num_lines": null }
__all__ = [ 'Property' ]

import numpy as np
import logging
import warnings

from .utilz import ut2s, s2ut, Ry, Rz, Ry_inv, Rz_inv

class Property( dict ):
    """
**An object representing properties as numpy.ndarray types mapped to by python dictionaries.**

**Supports up to quadrupoles, upper triangular polarizability and upper triangular hyperpolarizability**

Stored components, by key:

* ``"charge"``      scalar (float)
* ``"dipole"``      length-3 vector
* ``"quadrupole"``  length-6 upper-triangular vector
* ``"alpha"``       length-6 upper-triangular polarizability
* ``"beta"``        length-10 upper-triangular hyperpolarizability

.. code:: python

    >>> p = Property()
    >>> print p["charge"]
    0.0
    >>> print p["dipole"]
    [ 0.  0.  0.]

.. note::
   This module targets Python 2: it uses ``print`` statements,
   ``dict.iteritems`` and ``__div__`` (not ``__truediv__``).
"""
    def __init__(self):
        # Every component starts as zeros of the documented shape.
        self["charge"] = 0.0
        self["dipole"] = np.zeros( 3 )
        self["quadrupole"] = np.zeros( 6 )
        self["alpha"] = np.zeros( 6 )
        self["beta"] = np.zeros( 10 )

    def copy_property(self):
        """Return an independent copy (the ndarray components are copied,
        so mutating the copy does not touch ``self``)."""
        p = Property()
        p["charge"] = self["charge"]
        p["dipole"] = self["dipole"].copy()
        p["quadrupole"] = self["quadrupole"].copy()
        p["alpha"] = self["alpha"].copy()
        p["beta"] = self["beta"].copy()
        return p

    def __getitem__(self, item):
        """Look up a component by the *first letter* of ``item``.

        Any key starting with 'c' maps to "charge", 'd' to "dipole",
        'q' to "quadrupole", 'a' to "alpha" and 'b' to "beta".

        NOTE(review): a key prefix 'q' means *quadrupole* here, while the
        single-letter attribute ``self.q`` below returns the *charge* --
        easy to confuse; confirm callers rely on the intended one.

        NOTE(review): an unknown key logs an error and raises SystemExit,
        terminating the interpreter -- unusually harsh for a library
        container; a KeyError would be more conventional.
        """
        new = { 'c' : 'charge', 'd' : 'dipole', 'q' :'quadrupole', 'a' : 'alpha', 'b' : 'beta' }
        try:
            key = new[ item[0].lower() ]
        except KeyError:
            logging.error("unknown command for getting item of Property")
            raise SystemExit
        return super(Property, self).__getitem__( key )

    def __add__(self, other):
        """Component-wise sum; returns a new Property."""
        tmp = Property()
        for i, prop in enumerate(self):
            tmp[prop] = np.array( self[prop] ) + np.array(other[prop] )
        return tmp

    def __sub__(self, other):
        """Component-wise difference; returns a new Property."""
        assert isinstance( other, Property)
        tmp = Property()
        for i, prop in enumerate(self):
            tmp[prop] = np.array( self[prop] ) - np.array(other[prop] )
        return tmp

    def __div__(self, other):
        """Component-wise division by the scalar ``other`` (Python 2 only;
        there is no ``__truediv__`` counterpart)."""
        tmp = Property()
        for i, prop in enumerate(self):
            tmp[prop] = np.array( self[prop] )/float(other)
        return tmp

    def is_null(self):
        """Return True if every stored component is (numerically) zero."""
        empty = True
        for key, val in self.iteritems():
            if not np.allclose( np.zeros( np.array((val,)).shape ), np.array((val,)) , atol = 1e-14):
                empty = False
        return empty

    # Short read/write aliases for the five components.  Note the
    # asymmetry with __getitem__: attribute ``q`` is the *charge*.
    @property
    def q(self):
        # charge (scalar)
        return self['charge']
    @property
    def d(self):
        # dipole (3,)
        return self['dipole']
    @property
    def Q(self):
        # quadrupole, upper-triangular (6,)
        return self['quadrupole']
    @property
    def a(self):
        # polarizability, upper-triangular (6,)
        return self['alpha']
    @property
    def b(self):
        # hyperpolarizability, upper-triangular (10,)
        return self['beta']

    @q.setter
    def q(self, val):
        self['charge'] = val
    @d.setter
    def d(self, val):
        assert val.shape == (3,)
        self['dipole'] = val
    @Q.setter
    def Q(self, val):
        assert val.shape == (6,)
        self['quadrupole'] = val
    @a.setter
    def a(self, val):
        assert val.shape == (6,)
        self['alpha'] = val
    @b.setter
    def b(self, val):
        assert val.shape == (10,)
        self['beta'] = val

    @property
    def b_proj(self):
        """
        Rotationally invariant property: beta contracted to a vector and
        projected on the dipole moment direction, for a whole
        molecule / segment.
        Should not be used if only for an atom.
        """
        return np.einsum('ijj,i', ut2s(self.b), self.d)/np.linalg.norm(self.d)

    #Method of Property
    def potline(self, max_l =2 , pol= 22, hyper=1, fmt = "%.7f "):
        """Serialize the components selected by ``max_l`` (multipole order),
        ``pol`` (polarizability mode: 1 = isotropic average, x2 = full
        upper triangle) and ``hyper`` into one formatted string, in the
        fixed order charge, dipole, quadrupole, alpha, beta."""
        string = ""
        if 0 <= max_l:
            string += fmt % self["charge"]
        if max_l >= 1:
            string += fmt*3 %( self["dipole"][0], self["dipole"][1], self["dipole"][2] )
        if max_l >= 2:
            string += fmt*6 %( self["quadrupole"][0], self["quadrupole"][1], self["quadrupole"][2],
                               self["quadrupole"][3], self["quadrupole"][4], self["quadrupole"][5] )
        if pol == 1:
            # isotropic polarizability = trace/3 of the diagonal entries
            string += fmt %( float(self["alpha"][0] + self["alpha"][3] + self["alpha"][5])/3, )
        elif pol %10 == 2:
            string += fmt * 6 %( self["alpha"][0], self["alpha"][1], self["alpha"][2],
                                 self["alpha"][3], self["alpha"][4], self["alpha"][5] )
        if hyper == 1:
            string += fmt*10 %( self["beta"][0], self["beta"][1], self["beta"][2],
                                self["beta"][3], self["beta"][4], self["beta"][5],
                                self["beta"][6], self["beta"][7], self["beta"][8],
                                self["beta"][9])
        return string

    @staticmethod
    def from_propline( st, maxl = 2, pol = 22, hyper = 2 ):
        """
        Given a dalton POT line, returns a Property that can be attached
        to an Atom.

        Convenience function for generating properties for the class
        Molecule directly by invoking dalton on a supercomputer.

        The first four whitespace-separated fields of ``st`` are skipped;
        the rest are consumed in the order charge, dipole, quadrupole,
        alpha, beta according to ``maxl``/``pol``/``hyper``.

        >>> p = Property.from_propline( "1 0.0 0.0 0.0 -0.25", maxl = 0 )
        >>> at.Property = p
        """
        # Python 2: map() returns a list, which .pop(0) below relies on.
        st = map( float, st.split()[4:] )
        p = Property()
        p['charge'] = st.pop(0)
        if maxl > 0:
            for i in range(3):
                p['dipole'][i] = st.pop(0)
        if maxl > 1:
            for i in range(6):
                p['quadrupole'][i] = st.pop(0)
        if pol == 1:
            # single isotropic value replicated on the alpha diagonal
            iso = st.pop(0)
            p['alpha'][0] = iso
            p['alpha'][3] = iso
            p['alpha'][5] = iso
        elif pol%10 == 2:
            for i in range(6):
                p['alpha'][i] = st.pop(0)
        if hyper == 2:
            for i in range(10):
                p['beta'][i] = st.pop(0)
        return p

    @staticmethod
    def from_template( at_string, template ):
        """Given string for atomic label, and dictionary template for the
        molecule, will return all properties found in template.py for
        this template.

        Returns an all-zero Property (with a warning) if any of the five
        (label, property) keys is missing from ``template``.
        """
        all_props = [ 'charge', 'dipole', 'quadrupole', 'alpha', 'beta' ]
        for p in all_props:
            if (at_string, p ) not in template:
                print "'( %s, %s )' not found in provided template" %(at_string, p)
                warnings.warn("Could not find in provided template" )
                return Property()
        p = Property()
        for key in template:
            if key[0] == at_string:
                for each in all_props:
                    p[ each ] = template[ (at_string, each ) ]
        return p

    def inv_rotate( self, t1, t2, t3, plane = 'xz' ):
        """Rotate all properties by t1, t2, t3

        t1 negative rotation around Z-axis
        t2 positive rotation around Y-axis
        t3 negative rotation around Z-axis

        Returns a new Property; ``self`` is left unchanged.  The three
        rotation matrices are applied in sequence (r1 then r2 then r3)
        as the appropriate tensor contraction for each rank.
        """
        rots = { 'xz' : (Rz_inv(t1), Ry(t2), Rz_inv(t3) ),
                 'xy' : (Ry_inv(t1), Rz(t2), Ry_inv(t3) ) }
        p = Property()
        r1 = rots[ plane ][0]
        r2 = rots[ plane ][1]
        r3 = rots[ plane ][2]
        p.q = self.q
        # rank-1: plain matrix products
        p.d = np.einsum('ab,bc,cd,d', r3, r2, r1, self.d )
        # rank-2 tensors are expanded to square form, rotated on both
        # indices, and packed back to upper-triangular form
        p.a = s2ut( np.einsum('ec,fd,ca,db,ai,bj,ij', r3, r3, r2, r2, r1, r1, ut2s(self.a) ) )
        p.Q = s2ut( np.einsum('ec,fd,ca,db,ai,bj,ij', r3, r3, r2, r2, r1, r1, ut2s(self.Q) ) )
        # rank-3: three copies of each rotation
        p.b = s2ut( np.einsum('Id,Je,Kf,da,eb,fc,ai,bj,ck,ijk', r3, r3, r3, r2, r2, r2, r1, r1, r1, ut2s(self.b) ) )
        return p

    def rotate( self, t1, t2, t3 ):
        """Rotate all properties by t1, t2, t3

        t1 positive rotation around Z-axis
        t2 negative rotation around Y-axis
        t3 positive rotation around Z-axis

        Inverse convention of :meth:`inv_rotate` (for the 'xz' plane);
        returns a new Property.
        """
        p = Property()
        r1 = Rz(t1)
        r2 = Ry_inv(t2)
        r3 = Rz(t3)
        p.q = self.q
        p.d = np.einsum('ab,bc,cd,d', r3, r2, r1, self.d )
        p.a = s2ut( np.einsum('ec,fd,ca,db,ai,bj,ij', r3, r3, r2, r2, r1, r1, ut2s(self.a) ) )
        p.Q = s2ut( np.einsum('ec,fd,ca,db,ai,bj,ij', r3, r3, r2, r2, r1, r1, ut2s(self.Q) ) )
        p.b = s2ut( np.einsum('Id,Je,Kf,da,eb,fc,ai,bj,ck,ijk', r3, r3, r3, r2, r2, r2, r1, r1, r1, ut2s(self.b) ) )
        return p

    def transform_by_matrix(self, matrix):
        """Apply an arbitrary 3x3 transformation ``matrix`` to every
        component (once per tensor rank) and return a new Property."""
        assert matrix.shape == (3,3,)
        p = Property()
        p.q = self.q
        p.d = np.einsum( 'ij,j', matrix, self.d )
        p.a = s2ut(np.einsum( 'ai,bj,ij', matrix, matrix, ut2s(self.a) ))
        p.Q = s2ut(np.einsum( 'ai,bj,ij', matrix, matrix, ut2s(self.Q) ))
        p.b = s2ut(np.einsum( 'ai,bj,ck,ijk', matrix, matrix, matrix, ut2s(self.b) ))
        return p

    # NOTE(review): ``Rotator`` is not imported anywhere in this module,
    # so the two methods below raise NameError if called -- presumably a
    # missing ``from .utilz import Rotator`` (TODO confirm against utilz).
    def transform_ut_2( self, prop, t1, t2 ,t3 ):
        # Rotate an upper-triangular rank-2 property via its square form.
        tmp = Rotator.ut_2_square( prop )
        tmp = Rotator.transform_2( tmp , t1 ,t2 ,t3 )
        tmp = Rotator.square_2_ut( tmp )
        return tmp

    def transform_ut_3( self, prop, t1, t2 ,t3 ):
        # Rotate an upper-triangular rank-3 property via its square form.
        tmp = Rotator.ut_3_square( prop )
        tmp = Rotator.transform_3( tmp, t1 ,t2 ,t3 )
        tmp = Rotator.square_3_ut( tmp )
        return tmp
{ "repo_name": "fishstamp82/moltools", "path": "moltools/property.py", "copies": "1", "size": "8912", "license": "mit", "hash": -1415738511435297300, "line_mean": 31.2898550725, "line_max": 116, "alpha_frac": 0.4971947935, "autogenerated": false, "ratio": 3.182857142857143, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9071115257683441, "avg_score": 0.021787335734740364, "num_lines": 276 }
__all__ = ['ProtocolCommand']


class ProtocolCommand(object):
    """Abstract base class for a single protocol command.

    Stores the owning node together with the protocol version and the
    command code.  Concrete subclasses are expected to implement the full
    life-cycle (``start``/``stop``) and the request/response hooks
    (``req``/``on_req``/``res``/``on_res``).
    """

    # protocol version 1.0
    DEFAULT_PROTOCOL_VERSION_MAJOR = 1
    DEFAULT_PROTOCOL_VERSION_MINOR = 0

    # protocol message types
    PROTOCOL_REQ = 0
    PROTOCOL_RES = 1

    def __init__(self, node, protocol_major_version, protocol_minor_version, protocol_command_code):
        """Remember the node and protocol identifiers.

        Passing ``None`` for either version component selects the 1.0
        default; an explicit ``0`` is kept as-is (only ``None`` triggers
        the fallback).
        """
        self.node = node
        self.protocol_major_version = (
            self.DEFAULT_PROTOCOL_VERSION_MAJOR
            if protocol_major_version is None
            else protocol_major_version
        )
        self.protocol_minor_version = (
            self.DEFAULT_PROTOCOL_VERSION_MINOR
            if protocol_minor_version is None
            else protocol_minor_version
        )
        self.protocol_command_code = protocol_command_code

    def start(self):
        """Begin serving this command; subclasses must override."""
        raise NotImplementedError

    def stop(self):
        """Stop serving this command; subclasses must override."""
        raise NotImplementedError

    def req(self):
        """Issue a request; subclasses must override."""
        raise NotImplementedError

    def on_req(self, remote_host, remote_port, *args, **kwargs):
        """Handle an incoming request; subclasses must override."""
        raise NotImplementedError

    def res(self, remote_host, remote_port, *args, **kwargs):
        """Send a response; subclasses must override."""
        raise NotImplementedError

    def on_res(self, remote_host, remote_port, res):
        """Handle an incoming response; subclasses must override."""
        raise NotImplementedError
{ "repo_name": "mtasic85/routingtable", "path": "protocol_command.py", "copies": "1", "size": "1248", "license": "mit", "hash": -2311948075937197600, "line_mean": 29.4390243902, "line_max": 100, "alpha_frac": 0.6642628205, "autogenerated": false, "ratio": 4.303448275862069, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.013494394286006097, "num_lines": 41 }
__all__ = ["prox_rank1_box"] from .prox_rank1_generic import prox_rank1_generic import numpy as np def prox_rank1_box(lwr, upr, *args, **kwargs): """ PROJ_RANK1_BOX returns the scaled proximity operator for box constraints x = proj_rank1_box( lwr, upr, x0, D, u ) where x = argmin_{x} h(x) + 1/2||x-x0||^2_{V} and V^{-1} = D + u*u' (or diag(D) + u*u' if D is a vector) "D" must be diagonal and positive. "u" can be any vector. Here, h(x) is the indicator function of the set { x : lwr <= x <= upr } (Set any component of lwr to -Inf and upr to +Inf to effectively ignore those particular constraints) There are also variants: x = proj_rank1_box( lwr, upr, x0, D, u, lambda, linTerm, sigma, inverse) returns x = argmin_{x} h(lambda.*x) + 1/2||x-x0||^2_{V} + linTerm'*x and either V^{-1} = D + sigma*u*u' if "inverse" is true (default) or V = D + sigma*u*u' if "inverse" is false and in both cases, "sigma" is either +1 (default) or -1. "lambda" should be non-zero Note that UNLIKE prox_rank1_l1.m and other functions, the calling sequence is slighty different, since you must pass in "lwr" and "upr" Stephen Becker, Feb 26 2014, stephen.beckr@gmail.com Reference: "A quasi-Newton proximal splitting method" by S. Becker and J. Fadili NIPS 2012, http://arxiv.org/abs/1206.1156 BSee also prox_rank1_generic.m Python version: A. Asensio Ramos (March 12, 2015) """ prox = lambda x, t : np.max([np.min([upr,x]), lwr]) prox_brk_pts = lambda s : np.hstack((lwr,upr)) return prox_rank1_generic(prox, prox_brk_pts, *args, **kwargs)
{ "repo_name": "aasensio/pyiacsun", "path": "pyiacsun/sparse/proxes_rank1/prox_rank1_box.py", "copies": "2", "size": "1608", "license": "mit", "hash": -888966984019115600, "line_mean": 33.2127659574, "line_max": 81, "alpha_frac": 0.6517412935, "autogenerated": false, "ratio": 2.5935483870967744, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.42452896805967744, "avg_score": null, "num_lines": null }
__all__ = ["prox_rank1_generic"] import numpy as np import sys def prox_rank1_generic(prox, prox_brk_pts, x0, d, u = None, Lambda = None, linTerm = None, plusminus=1, invert = True, verbose = False): """ PROX_RANK1_GENERIC returns the scaled proximity operator for a generic function h (provided the generic function is separable and has a piece-wise linear prox) This function is intended be used as follows: (1) Instantiate: scaledProx = lambda x0, d, u, varargin = None : prox_rank1_l1(x0, d, u, l)@(varargin) prox_rank1_generic( prox, prox_brk_pts,varargin{:}) where 'prox' and 'prox_brk_pts' implicitly define the function h i.e., prox(x0,t) = argmin_{x} t*h(x) + 1/2||x-x0||^2 and prox_brk_pts(t) returns a row-vector with the break points that specify where t*h(x) is piecewise linear (this is if h(x) = [ h_1(x_1); ... ; h_n(x_n) ]. If instead not all the h_i are identical, prox_brk_pts(t) should return a matrix). See the examples below because prox_brk_pts must allow a vector "t" so you must define this appropriately. (2) Call the "scaledProx" function, which has signature: x = scaledProx( x0, D, u ) where x = argmin_{x} h(x) + 1/2||x-x0||^2_{V} and V^{-1} = D + u*u' (or diag(D) + u*u' if D is a vector) "D" must be diagonal and positive. "u" can be any vector. There are also variants: x = scaledProx( x0, D, u, lambda, linTerm, sigma, inverse) returns x = argmin_{x} h(lambda.*x) + 1/2||x-x0||^2_{V} + linTerm'*x and either V^{-1} = D + sigma*u*u' if "inverse" is true (default) or V = D + sigma*u*u' if "inverse" is false and in both cases, "sigma" is either +1 (default) or -1. "lambda" should be non-zero Examples: 1. if h(x) = ||x||_1 then prox = @(x,t) sign(x).*max(0, abs(x) - t ); prox_brk_pts = @(t) [-t,t]; 2. if h(x) is the indicator function of the set { x : x >= 0}, then prox = @(x,t) max(0, x); prox_brk_pts = @(t) 0; 3. 
if h(x) is the indicator function of the set { x : lwr <= x <= upr } where lwr and upr are vectors, then prox = @(x,t) max( min(upr,x), lwr ); prox_brk_pts = @(t) [lwr,upr]; (Note: this is a matrix) 4. if h(x) is the hinge-loss h(x) = max( 1-x, 0 ), then prox = @(x,t) 1 + (x-1).*( x > 1 ) + (x + t - 1).*( x + t < 1 ); prox_brk_pts = @(t)[ones(size(t)), 1-t]; 5. if h(x) is the indicator function of the l_infinity ball, then prox = @(x,t) sign(x).*min( 1, abs(x) ); prox_brk_pts = @(t) [-ones(size(t)),ones(size(t))]; Stephen Becker, Feb 26 2014, stephen.beckr@gmail.com Reference: "A quasi-Newton proximal splitting method" by S. Becker and J. Fadili NIPS 2012, http://arxiv.org/abs/1206.1156 Python version directly translated from Matlab version (including comments): A. Asensio Ramos (March 12, 2015) """ if (len(u) == 0): u = np.asarray([[0]]) if (np.all(u == 0)): # Diagonal scaling NO_U = True else: NO_U = False if (NO_U): uinv = 0 else: uinv = u / d / np.sqrt(1.0 + u.T.dot(u/d)) # Check for positive definiteness of matrix V in a special case (warning: not in all cases) if ((plusminus < 0) & (np.all(d == d[0]))): minE = d[0] + plusminus * np.linalg.norm(u) if (minE <= 0): print "The scaling matrix is not positive definite" sys.exit(1) # In all cases, we find prox_h^V, but how we define V # in terms of d and u depends on "INVERT" if (invert): # So V^{-1} = diag(d) + sigma*u*u' # and V = diag(1./d) - sigma*uinv*uinv' Vinv = lambda y : d / y + plusminus * (u.T.dot(y)).dot(u) dd = 1.0 / d uu = uinv plusminus = -plusminus if (NO_U): ud = u / np.sqrt(1.0+u*(u/d)) else: ud = u / np.sqrt(1.0+u.T.dot(u/d)) dInv = 1.0 / dd else: # So V = diag(d) + sigma*u*u' # and V^(-1) = diag(1./d) - sigma*uinv*uinv' Vinv = lambda y : y / d - plusminus * (uinv.T.dot(y)).dot(uinv) dd = d.copy() uu = u.copy() ud = uu / dd dInv = 1.0 / dd if (NO_U): uu = 0 ud = 0 # We make a change of variables, e.g., x <-- lambda*.x # change x0 <-- lambda.*x0, linTerm <-- linTerm./lambda # and V <-- 
diag(1./lambda)*V*diag(1./lambda). Because V is defined # implicitly, and depends on INVERT, this is a bit of a headache. # We'll do some changes here, and some later in the code # e.g., combine linTerm and V scaling so we don't have to redefine Vinv if (Lambda != None): if (np.any(Lambda) == 0): print "scaling factor lambda must be non-zero" # note that lambda < 0 should be OK x0 *= Lambda # Scale V = diag(dd) + sigma*uu*uu' by V <-- diag(1./lambda)*V*diag(1./lambda) dd /= Lambda**2 uu /= Lambda ud *= Lambda dInv = 1.0 / dd t = prox_brk_pts(1.0 / dd) if ((linTerm != None)): if (np.linalg.norm(linTerm) >= 0): if (Lambda != None): x0 -= Vinv(linTerm) else: # V is scaled V <-- diag(1./lambda)*V*diag(1./lambda) # so Vinv is scaled the opposite. # linTerm is scaled linTerm <== linTerm./lambda % V is scaled V <-- diag(1./lambda)*V*diag(1./lambda) # so Vinv is scaled the opposite. # linTerm is scaled linTerm <== linTerm./lambda x0 -= Lambda * Vinv(linTerm) # The main heart X = lambda a : prox(x0 - plusminus * a * ud, dInv) # Only return if we have only diagonal scaling if (NO_U): # In this case, alpha is irrelevant x = prox(x0, dInv) if (Lambda != None): # Undo the lambda scaling x /= Lambda return x brk_pts = plusminus * (dd/uu) * (x0-t) brk_pts = np.unique(brk_pts) brk_pts = brk_pts[np.where(np.isfinite(brk_pts))] # Main loop # Lower bound are a for which p<=0 # Upper bound are a for which p>0 # If a is increasing, so is p(a) lwrBnd = 0 uprBnd = len(brk_pts) iMax = int(np.ceil(np.log2(len(brk_pts))) + 1) for i in range(iMax): if (uprBnd - lwrBnd <= 1): if (verbose): print "Bounds are too close" break j = int(round(np.mean([lwrBnd,uprBnd]))) if (verbose): print "j is {0} (bounds were [{1},{2}])".format(j,lwrBnd,uprBnd) if (j == lwrBnd): j += 1 elif (j == uprBnd): j -= 1 a = brk_pts[j] x = X(a) p = a + uu.T.dot(x0-x) if (p > 0): uprBnd = j elif (p < 0): lwrBnd = j cnt = i # Number of iterations # Now, determine linear part, which we infer from two points. 
# If lwr/upr bounds are infinite, we take special care # e.g., we make a new "a" slightly lower/bigger, and use this # to extract linear part. if (lwrBnd == 0): a2 = brk_pts[uprBnd] a1 = a2 - 10 # Arbitrary aBounds = np.asarray([-np.inf, a2]) elif (uprBnd == len(brk_pts)): a1 = brk_pts[lwrBnd] a2 = a1 + 10 # Arbitrary aBounds = np.asarray([a1,np.inf]) else: # In general case, we can infer linear part from the two break points a1 = brk_pts[lwrBnd] a2 = brk_pts[uprBnd] aBounds = np.asarray([a1, a2]) x1 = X(a1) x2 = X(a2) dx = (x2-x1) / (a2-a1) # Thus for a in (a1,a2), x(a) = x1 + (a-a1)*dx # Solve 0 = a + dot( uu, y - (x1 + (a-a1)*dx ) ) # = a + dot(uu,y - x1 + a1*dx ) - a*dot(uu,dx) # so: a = uu.T.dot(x0 - x1 + a1 * dx) / (-1.0 + uu.T.dot(dx)) if ((a < aBounds[0]) | (a > aBounds[1])): print "alpha is not the correct range" # The solution x = X(a) if (Lambda != None): x /= Lambda if (verbose): print "Took {0} of {1} iterations, lwrBnd is {2} {3}".format(i,iMax,lwrBnd, len(brk_pts)) return x
{ "repo_name": "aasensio/pyzeroSR1", "path": "pyzeroSR1/proxes/prox_rank1_generic.py", "copies": "1", "size": "7494", "license": "mit", "hash": -8934869864429730000, "line_mean": 29.7172131148, "line_max": 139, "alpha_frac": 0.5793968508, "autogenerated": false, "ratio": 2.4773553719008263, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.35567522227008264, "avg_score": null, "num_lines": null }
__all__ = ["prox_rank1_hinge"] from .prox_rank1_generic import prox_rank1_generic import numpy as np def prox_rank1_hinge(*args, **kwargs): """ PROX_RANK1_HINGE returns the scaled proximity operator for the hinge loss x = prox_rank1_hinge( x0, D, u ) where x = argmin_{x} h(x) + 1/2||x-x0||^2_{V} and V^{-1} = D + u*u' (or diag(D) + u*u' if D is a vector) "D" must be diagonal and positive. "u" can be any vector. Here, h(x) = sum(max(0,1-x)), a.k.a., the hinge-loss There are also variants: x = prox_rank1_hinge( x0, D, u, lambda, linTerm, sigma, inverse) returns x = argmin_{x} h(lambda.*x) + 1/2||x-x0||^2_{V} + linTerm'*x and either V^{-1} = D + sigma*u*u' if "inverse" is true (default) or V = D + sigma*u*u' if "inverse" is false and in both cases, "sigma" is either +1 (default) or -1. "lambda" should be non-zero Stephen Becker, Feb 26 2014, stephen.beckr@gmail.com Reference: "A quasi-Newton proximal splitting method" by S. Becker and J. Fadili NIPS 2012, http://arxiv.org/abs/1206.1156 See also prox_rank1_generic.m Python version: A. Asensio Ramos (March 12, 2015) """ prox = lambda x, t : (np.abs(x)-t).clip(0) prox_brk_pts = lambda s : np.hstack((np.ones(s.shape),1.0-s)) return prox_rank1_generic(prox, prox_brk_pts, *args, **kwargs)
{ "repo_name": "aasensio/pyiacsun", "path": "pyiacsun/sparse/proxes_rank1/prox_rank1_hinge.py", "copies": "2", "size": "1324", "license": "mit", "hash": 2633576023687026000, "line_mean": 31.3170731707, "line_max": 81, "alpha_frac": 0.6344410876, "autogenerated": false, "ratio": 2.456400742115028, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4090841829715028, "avg_score": null, "num_lines": null }
__all__ = ["prox_rank1_l0"] from .prox_rank1_generic import prox_rank1_generic import numpy as np def hardThreshold(x,t): xOut = x.copy() xOut[x <= t] = 0.0 return xOut def prox_rank1_l0(*args, **kwargs): """ PROX_RANK1_L0 returns the scaled proximity operator for the l0 norm x = prox_rank1_l0( x0, D, u ) where x = argmin_{x} h(x) + 1/2||x-x0||^2_{V} and V^{-1} = D + u*u' (or diag(D) + u*u' if D is a vector) "D" must be diagonal and positive. "u" can be any vector. Here, h(x) = ||x||_0 (the "l-0" norm) There are also variants: x = prox_rank1_l0( x0, D, u, lambda, linTerm, sigma, inverse) returns x = argmin_{x} h(lambda.*x) + 1/2||x-x0||^2_{V} + linTerm'*x and either V^{-1} = D + sigma*u*u' if "inverse" is true (default) or V = D + sigma*u*u' if "inverse" is false and in both cases, "sigma" is either +1 (default) or -1. "lambda" should be non-zero Python : A. Asensio Ramos (March 18, 2015) """ prox = lambda x,t: hardThreshold(x,t) prox_brk_pts = lambda s : np.hstack((-s,s)) return prox_rank1_generic(prox, prox_brk_pts, *args, **kwargs)
{ "repo_name": "aasensio/pyiacsun", "path": "pyiacsun/sparse/proxes_rank1/prox_rank1_l0.py", "copies": "2", "size": "1147", "license": "mit", "hash": -5462837545350016000, "line_mean": 27.7, "line_max": 68, "alpha_frac": 0.5902353967, "autogenerated": false, "ratio": 2.4249471458773786, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.40151825425773785, "avg_score": null, "num_lines": null }
__all__ = ["prox_rank1_l1"] from .prox_rank1_generic import prox_rank1_generic import numpy as np def prox_rank1_l1(*args, **kwargs): """ PROX_RANK1_L1 returns the scaled proximity operator for the l1 norm x = prox_rank1_l1( x0, D, u ) where x = argmin_{x} h(x) + 1/2||x-x0||^2_{V} and V^{-1} = D + u*u' (or diag(D) + u*u' if D is a vector) "D" must be diagonal and positive. "u" can be any vector. Here, h(x) = ||x||_1 (the "l-1" norm) There are also variants: x = prox_rank1_l1( x0, D, u, lambda, linTerm, sigma, inverse) returns x = argmin_{x} h(lambda.*x) + 1/2||x-x0||^2_{V} + linTerm'*x and either V^{-1} = D + sigma*u*u' if "inverse" is true (default) or V = D + sigma*u*u' if "inverse" is false and in both cases, "sigma" is either +1 (default) or -1. "lambda" should be non-zero Stephen Becker, Feb 26 2014, stephen.beckr@gmail.com Reference: "A quasi-Newton proximal splitting method" by S. Becker and J. Fadili NIPS 2012, http://arxiv.org/abs/1206.1156 Python version: A. Asensio Ramos (March 12, 2015) """ prox = lambda x, t : np.sign(x) * (np.abs(x)-t).clip(0) prox_brk_pts = lambda s : np.hstack((-s,s)) return prox_rank1_generic(prox, prox_brk_pts, *args, **kwargs)
{ "repo_name": "aasensio/pyiacsun", "path": "pyiacsun/sparse/proxes_rank1/prox_rank1_l1.py", "copies": "2", "size": "1275", "license": "mit", "hash": 3621214863879252500, "line_mean": 32.5789473684, "line_max": 81, "alpha_frac": 0.6109803922, "autogenerated": false, "ratio": 2.4661508704061896, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.40771312626061895, "avg_score": null, "num_lines": null }
__all__ = ["prox_rank1_l1pos"] from .prox_rank1_generic import prox_rank1_generic import numpy as np def prox_rank1_l1pos(*args, **kwargs): """ PROX_RANK1_L1 returns the scaled proximity operator for the l1 norm x = prox_rank1_l1pos( x0, D, u ) where x = argmin_{x} h(x) + 1/2||x-x0||^2_{V} and V^{-1} = D + u*u' (or diag(D) + u*u' if D is a vector) "D" must be diagonal and positive. "u" can be any vector. Here, h(x) = ||x||_1 (the "l-1" norm) There are also variants: x = prox_rank1_l1pos( x0, D, u, lambda, linTerm, sigma, inverse) returns x = argmin_{x} h(lambda.*x) + 1/2||x-x0||^2_{V} + linTerm'*x and either V^{-1} = D + sigma*u*u' if "inverse" is true (default) or V = D + sigma*u*u' if "inverse" is false and in both cases, "sigma" is either +1 (default) or -1. "lambda" should be non-zero Stephen Becker, Feb 26 2014, stephen.beckr@gmail.com Reference: "A quasi-Newton proximal splitting method" by S. Becker and J. Fadili NIPS 2012, http://arxiv.org/abs/1206.1156 Python version: A. Asensio Ramos (March 12, 2015) """ prox = lambda x, t : (np.abs(x)-t).clip(0) prox_brk_pts = lambda s : s return prox_rank1_generic(prox, prox_brk_pts, *args, **kwargs)
{ "repo_name": "aasensio/pyzeroSR1", "path": "pyzeroSR1/proxes/prox_rank1_l1pos.py", "copies": "2", "size": "1258", "license": "mit", "hash": 5389360363984818000, "line_mean": 32.1315789474, "line_max": 81, "alpha_frac": 0.6160572337, "autogenerated": false, "ratio": 2.4812623274161734, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4097319561116174, "avg_score": null, "num_lines": null }
__all__ = ["prox_rank1_linf"] from .prox_rank1_generic import prox_rank1_generic import numpy as np def prox_rank1_linf(*args, **kwargs): """ PROJ_RANK1_LINF returns the scaled proximity operator for l_infinity norm constraints x = proj_rank1_linf( x0, D, u ) where x = argmin_{x} h(x) + 1/2||x-x0||^2_{V} and V^{-1} = D + u*u' (or diag(D) + u*u' if D is a vector) "D" must be diagonal and positive. "u" can be any vector. Here, h(x) is the indicator function of the l_infinity ball, i.e., { x | norm(x,inf) <= 1 } To scale the ball, just use the scaling parameter "lambda" (see below) There are also variants: x = proj_rank1_linf( x0, D, u, lambda, linTerm, sigma, inverse) returns x = argmin_{x} h(lambda.*x) + 1/2||x-x0||^2_{V} + linTerm'*x and either V^{-1} = D + sigma*u*u' if "inverse" is true (default) or V = D + sigma*u*u' if "inverse" is false and in both cases, "sigma" is either +1 (default) or -1. "lambda" should be non-zero Stephen Becker, Feb 26 2014, stephen.beckr@gmail.com Reference: "A quasi-Newton proximal splitting method" by S. Becker and J. Fadili NIPS 2012, http://arxiv.org/abs/1206.1156 See also prox_rank1_generic.m Python version: A. Asensio Ramos (March 12, 2015) """ prox = lambda x, t : np.sign(x) * np.abs(x).clip(0,1) prox_brk_pts = lambda s : np.hstack((-np.ones(s.shape),np.ones(s.shape))) return prox_rank1_generic(prox, prox_brk_pts, *args, **kwargs)
{ "repo_name": "aasensio/pyzeroSR1", "path": "pyzeroSR1/proxes/prox_rank1_linf.py", "copies": "2", "size": "1470", "license": "mit", "hash": -548712168179252860, "line_mean": 33.2093023256, "line_max": 86, "alpha_frac": 0.6428571429, "autogenerated": false, "ratio": 2.5257731958762886, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.41686303387762885, "avg_score": null, "num_lines": null }
__all__ = ["prox_rank1_Rplus"] from .prox_rank1_generic import prox_rank1_generic import numpy as np def prox_rank1_Rplus(*args, **kwargs): """ PROJ_RANK1_RPLUS returns the scaled proximity operator for non-negativity constraints x = proj_rank1_Rplus( x0, D, u ) where x = argmin_{x} h(x) + 1/2||x-x0||^2_{V} and V^{-1} = D + u*u' (or diag(D) + u*u' if D is a vector) "D" must be diagonal and positive. "u" can be any vector. Here, h(x) is the indicator function of the set { x : x >= 0 } There are also variants: x = proj_rank1_Rplus( x0, D, u, lambda, linTerm, sigma, inverse) returns x = argmin_{x} h(lambda.*x) + 1/2||x-x0||^2_{V} + linTerm'*x and either V^{-1} = D + sigma*u*u' if "inverse" is true (default) or V = D + sigma*u*u' if "inverse" is false and in both cases, "sigma" is either +1 (default) or -1. "lambda" should be non-zero Stephen Becker, Feb 26 2014, stephen.beckr@gmail.com Reference: "A quasi-Newton proximal splitting method" by S. Becker and J. Fadili NIPS 2012, http://arxiv.org/abs/1206.1156 See also prox_rank1_generic.m Python version: A. Asensio Ramos (March 12, 2015) """ prox = lambda x, t : x.clip(0) prox_brk_pts = lambda s : 0 return prox_rank1_generic(prox, prox_brk_pts, *args, **kwargs)
{ "repo_name": "aasensio/pyiacsun", "path": "pyiacsun/sparse/proxes_rank1/prox_rank1_Rplus.py", "copies": "2", "size": "1300", "license": "mit", "hash": -8607535249465865000, "line_mean": 30.7317073171, "line_max": 86, "alpha_frac": 0.6407692308, "autogenerated": false, "ratio": 2.5390625, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.865642513529068, "avg_score": 0.10468131910186407, "num_lines": 41 }
__all__ = ['PublishSubscribeQueue']

import events
from util import priority

class PSPut(events.Operation):
    """Coroutine operation: publish one message to a PublishSubscribeQueue.

    Appends the message, hands it to every fetcher currently blocked in
    :class:`PSGet`, and reschedules those fetchers (and the publisher)
    according to their priority flags.
    """
    __slots__ = ('queue', 'message', 'key')

    def __init__(self, queue, message, key, **kws):
        super(PSPut, self).__init__(**kws)
        self.queue = queue
        self.message = message
        self.key = key

    def process(self, sched, coro):
        """Scheduler hook: deliver the message and wake waiting fetchers."""
        super(PSPut, self).process(sched, coro)
        self.queue.messages.append(self.message)
        result = [self.message]
        for getkey in self.queue.active_subscribers:
            # Waiting fetchers are handed exactly this one new message,
            # and their seen-index advances past it.
            self.queue.subscribers[getkey] += 1
            getop, getcoro = self.queue.active_subscribers[getkey]
            getop.result = result
            if getop.prio:
                sched.active.appendleft((getop, getcoro))
            else:
                sched.active.append((getop, getcoro))
        self.queue.active_subscribers.clear()
        # Resume the publishing coroutine now (priority.CORO) or queue it.
        if self.prio & priority.CORO:
            return self, coro
        else:
            if self.prio & priority.OP:
                sched.active.appendleft((self, coro))
            else:
                sched.active.append((self, coro))
            return None, None

class PSGet(events.TimedOperation):
    """Coroutine operation: fetch all unseen messages for a subscriber.

    Completes immediately if the subscriber is behind the queue head;
    otherwise blocks in ``queue.active_subscribers`` until a PSPut wakes
    it (or the TimedOperation timeout fires and ``cleanup`` runs).
    """
    __slots__ = ('queue', 'result', 'key')

    def __init__(self, queue, key, **kws):
        super(PSGet, self).__init__(**kws)
        self.queue = queue
        self.key = key

    def process(self, sched, coro):
        """Scheduler hook: return backlog now, or park until published to."""
        super(PSGet, self).process(sched, coro)
        # The subscriber identity defaults to the coroutine itself.
        key = self.key or coro
        assert key in self.queue.subscribers
        level = self.queue.subscribers[key]
        queue_level = len(self.queue.messages)
        if level < queue_level:
            # Backlog available: hand over everything not yet seen.
            # (level == 0 shares the whole messages list, not a slice.)
            self.result = self.queue.messages[level:] if level \
                          else self.queue.messages
            self.queue.subscribers[key] = queue_level
            return self, coro
        else:
            # Nothing new: park until the next PSPut delivers.
            self.queue.active_subscribers[key] = self, coro

    def finalize(self, sched):
        """Return the fetched message list as the operation's value."""
        super(PSGet, self).finalize(sched)
        return self.result

    def cleanup(self, sched, coro):
        """Timeout/abort hook: drop this coroutine's pending wait entry."""
        for key in self.queue.active_subscribers:
            getop, getcoro = self.queue.active_subscribers[key]
            if coro is getcoro:
                assert getop is self
                del self.queue.active_subscribers[key]
                return True

class PSSubscribe(events.Operation):
    """Coroutine operation: register a subscriber with the queue."""
    __slots__ = ('queue', 'key')

    def __init__(self, queue, key, **kws):
        super(PSSubscribe, self).__init__(**kws)
        self.queue = queue
        self.key = key

    def process(self, sched, coro):
        """Scheduler hook: start the subscriber at message index 0, so the
        first fetch returns the queue's full retained history.
        (Re-subscribing an existing key resets its index to 0.)"""
        super(PSSubscribe, self).process(sched, coro)
        self.queue.subscribers[self.key or coro] = 0
        return self, coro

class PSUnsubscribe(events.Operation):
    """Coroutine operation: unregister a subscriber from the queue."""
    __slots__ = ('queue', 'key')

    def __init__(self, queue, key, **kws):
        super(PSUnsubscribe, self).__init__(**kws)
        self.queue = queue
        self.key = key

    def process(self, sched, coro):
        """Scheduler hook: forget the subscriber's seen-index.
        Raises KeyError if the key was never subscribed."""
        super(PSUnsubscribe, self).process(sched, coro)
        del self.queue.subscribers[self.key or coro]
        return self, coro

class PublishSubscribeQueue:
    """A more robust replacement for the signal operations.

    A coroutine subscribes itself to a PublishSubscribeQueue and gets new
    published messages with the _fetch_ method.
    """
    def __init__(self):
        self.messages = []            # all retained messages, oldest first
        self.subscribers = {}         # subscriber key -> index of first unseen message
        self.active_subscribers = {}  # holds waiting fetch ops: key -> (op, coro)

    def publish(self, message, key=None, **kws):
        """Put a message in the queue and updates any coroutine wating
        with fetch.

        *works as a coroutine operation*"""
        return PSPut(self, message, key, **kws)

    def subscribe(self, key=None, **kws):
        """Registers the calling coroutine to the queue. Sets the update
        index to 0 - on fetch, that coroutine will get all the messages
        from the queue.

        *works as a coroutine operation*"""
        return PSSubscribe(self, key, **kws)

    def unsubscribe(self, key=None, **kws):
        """Unregisters the calling coroutine from the queue.

        *works as a coroutine operation*"""
        # TODO: unittest
        return PSUnsubscribe(self, key, **kws)

    def fetch(self, key=None, **kws):
        """Get all the new messages since the last fetch. Returns a list
        of messages.

        *works as a coroutine operation*"""
        return PSGet(self, key, **kws)

    def compact(self):
        """Compacts the queue: removes all the messages from the queue
        that have been fetched by all the subscribed coroutines.
        Returns the number of messages that have been removed.

        NOTE(review): the per-subscriber indices in ``self.subscribers``
        are *not* rebased after ``del self.messages[:level]``, so they
        keep pointing past the deleted prefix -- confirm whether callers
        are expected to rebase, or whether this is a latent bug.
        """
        if self.subscribers:
            # Python 2: dict.itervalues()
            level = min(self.subscribers.itervalues())
            if level:
                del self.messages[:level]
            return level
        else:
            # No subscribers at all: every retained message is dead weight.
            level = len(self.messages)
            del self.messages[:]
            return level
{ "repo_name": "pombredanne/cogen", "path": "cogen/core/pubsub.py", "copies": "4", "size": "5046", "license": "mit", "hash": -5088642589033769000, "line_mean": 33.7872340426, "line_max": 78, "alpha_frac": 0.5743162901, "autogenerated": false, "ratio": 4.09910641754671, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0008880696205724201, "num_lines": 141 }
__all__ = ['PureTransformer', 'identity']

import logging

import numpy as np

from sklearn.base import BaseEstimator, TransformerMixin

from ..utils import Timer

logger = logging.getLogger(__name__)


class PureTransformer(BaseEstimator, TransformerMixin):
    """Stateless scikit-learn transformer: ``fit`` is a no-op and
    ``transform`` simply maps :meth:`transform_one` over the rows of X.

    Parameters
    ----------
    nparray : bool
        When True (default), pack the transformed rows into a numpy
        array; a 1-D result is reshaped into a single-feature column.
    nparray_dtype : optional
        dtype for the packed array; ``None`` lets numpy infer it.
    generator : bool
        When True, ``_transform`` yields rows lazily instead of building
        a list.  Mutually exclusive with ``nparray``.
    """

    def __init__(self, nparray=True, nparray_dtype=None, generator=False, **kwargs):
        super(PureTransformer, self).__init__()
        self.nparray = nparray
        self.nparray_dtype = nparray_dtype
        self.generator = generator
        if self.nparray and self.generator:
            raise ValueError('nparray and generator option cannot both be True.')

    def fit(self, *args, **fit_params):
        """No-op: a pure transformer has no parameters to learn."""
        return self

    def transform(self, X, y=None, **kwargs):
        """Transform every row of X, optionally packing into an ndarray."""
        timer = Timer()
        result = self._transform(X, y=y, **kwargs)

        if self.nparray:
            # getattr guards against instances unpickled from before the
            # attribute existed.
            dtype = getattr(self, 'nparray_dtype', None)
            result = np.array(result, dtype=dtype) if dtype else np.array(result)
            if result.ndim == 1:
                # Promote a flat vector to a single-feature column.
                result = result.reshape(result.shape[0], 1)

        logger.debug('Done <{}> transformation {}.'.format(type(self).__name__, timer))

        return result

    def _transform(self, X, y=None, **kwargs):
        """Map :meth:`transform_one` over X, eagerly or lazily."""
        transform_one = self.transform_one
        if getattr(self, 'generator', False):
            return (transform_one(row, **kwargs) for row in X)
        return [transform_one(row, **kwargs) for row in X]

    def transform_one(self, x, **kwargs):
        """Transform a single row; subclasses must override."""
        raise NotImplementedError('transform_one method needs to be implemented.')


def identity(x):
    """Return *x* unchanged."""
    return x
{ "repo_name": "skylander86/ycml", "path": "ycml/transformers/base.py", "copies": "1", "size": "1896", "license": "apache-2.0", "hash": -7926440608265588000, "line_mean": 29.5806451613, "line_max": 109, "alpha_frac": 0.6265822785, "autogenerated": false, "ratio": 4.034042553191489, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5160624831691489, "avg_score": null, "num_lines": null }
"""All pykechain configuration constants will be listed here.""" # # Configuration of async download of activity pdf exports # ASYNC_REFRESH_INTERVAL = 2 # seconds ASYNC_TIMEOUT_LIMIT = 100 # seconds # # Configuration of the retry options for the client requests based on `urlib3.utils.Retry`. # # How many connection-related errors to retry on. # These are errors raised before the request is sent to the remote server, # which we assume has not triggered the server to process the request. RETRY_ON_CONNECTION_ERRORS = 3 # times # How many times to retry on read errors. # These errors are raised after the request was sent to the server, so the request may have side-effects. RETRY_ON_READ_ERRORS = 2 # How many redirects to perform. Limit this to avoid infinite redirect loops. # A redirect is a HTTP response with a status code 301, 302, 303, 307 or 30 RETRY_ON_REDIRECT_ERRORS = 1 # Total number of retries to allow. Takes precedence over other counts RETRY_TOTAL = 10 # A backoff factor to apply between attempts after the second try (most errors are resolved # immediately by a second try without a delay). urllib3 will sleep for: # {backoff factor} * (2 ** ({number of total retries} - 1)) seconds. # If the backoff_factor is 0.1, then sleep() will sleep for [0.0s, 0.2s, 0.4s, …] between retries. 
RETRY_BACKOFF_FACTOR = 0.8 # Batching of parts when a large number of parts are requested at once PARTS_BATCH_LIMIT = 100 # number of parts # # API Paths and API Extra Parameters # API_PATH = { 'activities': 'api/activities.json', 'activity': 'api/activities/{activity_id}.json', 'activity_export': 'api/activities/{activity_id}/export', 'activity_move': 'api/activities/{activity_id}/move_activity', 'activities_bulk_clone': 'api/activities/bulk_clone', 'activities_bulk_update': 'api/activities/bulk_update', 'widgets_config': 'api/widget_config.json', 'widget_config': 'api/widget_config/{widget_config_id}.json', 'services': 'api/services.json', 'service': 'api/services/{service_id}.json', 'service_execute': 'api/services/{service_id}/execute', 'service_upload': 'api/services/{service_id}/upload', 'service_download': 'api/services/{service_id}/download', 'service_executions': 'api/service_executions.json', 'service_execution': 'api/service_executions/{service_execution_id}.json', 'service_execution_terminate': 'api/service_executions/{service_execution_id}/terminate', 'service_execution_notebook_url': 'api/service_executions/{service_execution_id}/notebook_url', 'service_execution_log': 'api/service_executions/{service_execution_id}/log', 'users': 'api/users.json', 'user_current': 'api/users/get_current_user', 'teams': 'api/teams.json', 'team': 'api/teams/{team_id}.json', 'team_add_members': 'api/teams/{team_id}/add_members', 'team_remove_members': 'api/teams/{team_id}/remove_members', 'versions': 'api/versions.json', 'associations': 'api/associations.json', # PIM3 'scope': 'api/v3/scopes/{scope_id}.json', 'scope_add_member': 'api/v3/scopes/{scope_id}/add_member', 'scope_remove_member': 'api/v3/scopes/{scope_id}/remove_member', 'scope_add_manager': 'api/v3/scopes/{scope_id}/add_manager', 'scope_remove_manager': 'api/v3/scopes/{scope_id}/remove_manager', 'scope_add_leadmember': 'api/v3/scopes/{scope_id}/add_leadmember', 'scope_remove_leadmember': 
'api/v3/scopes/{scope_id}/remove_leadmember', 'scope_add_supervisor': 'api/v3/scopes/{scope_id}/add_supervisor', 'scope_remove_supervisor': 'api/v3/scopes/{scope_id}/remove_supervisor', 'scopes': 'api/v3/scopes.json', 'scopes_clone': 'api/v3/scopes/clone', 'parts': 'api/v3/parts.json', 'parts_new_instance': 'api/v3/parts/new_instance', 'parts_create_child_model': 'api/v3/parts/create_child_model', 'parts_create_proxy_model': 'api/v3/parts/create_proxy_model', 'parts_clone_model': 'api/v3/parts/clone_model', 'parts_clone_instance': 'api/v3/parts/clone_instance', 'parts_bulk_create': '/api/v3/parts/bulk_create_part_instances', 'parts_bulk_delete': '/api/v3/parts/bulk_delete_part_instances', 'parts_export': 'api/v3/parts/export', 'part': 'api/v3/parts/{part_id}.json', 'properties': 'api/v3/properties.json', 'properties_bulk_update': 'api/v3/properties/bulk_update', 'properties_create_model': 'api/v3/properties/create_model', 'property': 'api/v3/properties/{property_id}.json', 'property_upload': 'api/v3/properties/{property_id}/upload', 'property_download': 'api/v3/properties/{property_id}/download', 'widgets': 'api/widgets.json', 'widget': 'api/widgets/{widget_id}.json', 'widget_clear_associations': 'api/widgets/{widget_id}/clear_associations.json', 'widget_remove_associations': 'api/widgets/{widget_id}/remove_associations.json', 'widgets_update_associations': 'api/widgets/bulk_update_associations.json', 'widget_update_associations': 'api/widgets/{widget_id}/update_associations.json', 'widgets_set_associations': 'api/widgets/bulk_set_associations.json', 'widget_set_associations': 'api/widgets/{widget_id}/set_associations.json', 'widgets_bulk_create': 'api/widgets/bulk_create', 'widgets_bulk_delete': 'api/widgets/bulk_delete', 'widgets_bulk_update': 'api/widgets/bulk_update', 'widgets_schemas': 'api/widgets/schemas', 'notifications': 'api/v3/notifications.json', 'notification': 'api/v3/notifications/{notification_id}.json', 'notification_share_activity_link': 
'api/v3/notifications/share_activity_link', 'notification_share_activity_pdf': 'api/v3/notifications/share_activity_pdf', 'banners': 'api/v3/banners.json', 'banner': 'api/v3/banners/{banner_id}.json', 'banner_active': 'api/v3/banners/active.json', 'expiring_downloads': 'api/downloads.json', 'expiring_download': 'api/downloads/{download_id}.json', 'expiring_download_download': 'api/downloads/{download_id}/download', 'expiring_download_upload': 'api/downloads/{download_id}/upload', 'contexts': 'api/v3/contexts/contexts.json', 'context': 'api/v3/contexts/contexts/{context_id}.json', 'context_link_activities': 'api/v3/contexts/contexts/{context_id}/link_activities', 'context_unlink_activities': 'api/v3/contexts/contexts/{context_id}/unlink_activities', # 'feature_collections': 'api/v3/contexts/feature_collections.json', # 'feature_collection': 'api/v3/contexts/feature_collections/{context_id}.json', # 'time_periods': 'api/v3/contexts/time_periods.json', # 'time_period': 'api/v3/contexts/time_periods/{context_id}.json' } API_QUERY_PARAM_ALL_FIELDS = {'fields': '__all__'} API_EXTRA_PARAMS = { 'activity': {'fields': ",".join( ['id', 'name', 'ref', 'description', 'created_at', 'updated_at', 'activity_type', 'classification', 'tags', 'progress', 'assignees_ids', 'start_date', 'due_date', 'status', 'parent_id', 'scope_id', 'customization', 'activity_options'])}, 'activities': {'fields': ",".join( ['id', 'name', 'ref', 'description', 'created_at', 'updated_at', 'activity_type', 'classification', 'tags', 'progress', 'assignees_ids', 'start_date', 'due_date', 'status', 'parent_id', 'scope_id', 'customization', 'activity_options'])}, 'banner': {'fields': ",".join( ['id', 'text', 'icon', 'is_active', 'active_from', 'active_until', 'url', 'created_at', 'updated_at'])}, 'banners': {'fields': ",".join( ['id', 'text', 'icon', 'is_active', 'active_from', 'active_until', 'url', 'created_at', 'updated_at'])}, 'scope': {'fields': ",".join( ['id', 'name', 'ref', 'text', 'created_at', 
'updated_at', 'start_date', 'due_date', 'status', 'category', 'progress', 'members', 'team', 'tags', 'scope_options', 'team_id_name', 'workflow_root_id', 'catalog_root_id', 'app_root_id', 'product_model_id', 'product_instance_id', 'catalog_model_id', 'catalog_instance_id', ])}, 'scopes': {'fields': ",".join( ['id', 'name', 'ref', 'text', 'created_at', 'updated_at', 'start_date', 'due_date', 'status', 'category', 'progress', 'members', 'team', 'tags', 'scope_options', 'team_id_name', 'workflow_root_id', 'catalog_root_id', 'app_root_id', 'product_model_id', 'product_instance_id', 'catalog_model_id', 'catalog_instance_id', ])}, 'part': {'fields': ",".join( ['id', 'name', 'ref', 'description', 'created_at', 'updated_at', 'properties', 'category', 'classification', 'parent_id', 'multiplicity', 'value_options', 'property_type', 'value', 'output', 'order', 'part_id', 'scope_id', 'model_id', 'proxy_source_id_name', 'unit'])}, 'parts': {'fields': ",".join( ['id', 'name', 'ref', 'description', 'created_at', 'updated_at', 'properties', 'category', 'classification', 'parent_id', 'multiplicity', 'value_options', 'property_type', 'value', 'output', 'order', 'part_id', 'scope_id', 'model_id', 'proxy_source_id_name', 'unit'])}, 'properties': {'fields': ",".join( ['id', 'name', 'ref', 'created_at', 'updated_at', 'model_id', 'part_id', 'order', 'scope_id', 'category', 'property_type', 'value', 'value_options', 'output', 'description', 'unit'])}, 'property': {'fields': ",".join( ['id', 'name', 'ref', 'created_at', 'updated_at', 'model_id', 'part_id', 'order', 'scope_id', 'category', 'property_type', 'value', 'value_options', 'output', 'description', 'unit'])}, 'service': {'fields': ",".join( ['id', 'name', 'ref', 'created_at', 'updated_at', 'script_version', 'script_type', 'script_file_name', 'description', 'env_version', 'scope', 'run_as', 'trusted', 'verified_on', 'verification_results'])}, 'services': {'fields': ",".join( ['id', 'name', 'ref', 'created_at', 'updated_at', 
'script_version', 'script_type', 'script_file_name', 'description', 'env_version', 'scope', 'run_as', 'trusted', 'verified_on', 'verification_results'])}, 'widgets': {'fields': ",".join( ['id', 'name', 'ref', 'created_at', 'updated_at', 'title', 'widget_type', 'meta', 'order', 'activity_id', 'parent_id', 'progress', 'has_subwidgets', 'scope_id'])}, 'widget': {'fields': ",".join( ['id', 'name', 'ref', 'created_at', 'updated_at', 'title', 'widget_type', 'meta', 'order', 'activity_id', 'parent_id', 'progress', 'has_subwidgets', 'scope_id'])}, 'notifications': {'fields': ",".join( ['id', 'subject', 'status', 'message', 'team', 'created_at', 'options', 'updated_at'])}, 'notification': {'fields': ",".join( ['id', 'subject', 'status', 'message', 'team', 'created_at', 'options', 'updated_at'])}, 'expiring_downloads': {'fields': ",".join( ['id', 'created_at', 'updated_at', 'expires_at', 'expires_in', 'content'])}, 'context': {'fields': ",".join( ['id', 'name', 'ref', 'created_at', 'updated_at', 'description', 'tags', 'context_type', 'activities', 'scope', 'options', 'feature_collection', 'start_date', 'due_date'])}, 'contexts': {'fields': ",".join( ['id', 'name', 'ref', 'created_at', 'updated_at', 'description', 'tags', 'context_type', 'activities', 'scope', 'options', 'feature_collection', 'start_date', 'due_date'])} }
{ "repo_name": "KE-works/pykechain", "path": "pykechain/defaults.py", "copies": "1", "size": "11391", "license": "apache-2.0", "hash": -6533789744990129000, "line_mean": 58.0103626943, "line_max": 116, "alpha_frac": 0.6396522961, "autogenerated": false, "ratio": 3.2577231121281462, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9384728427316147, "avg_score": 0.00252939618239992, "num_lines": 193 }
"""All pytest-django fixtures""" from __future__ import with_statement import os import warnings import pytest from . import live_server_helper from .db_reuse import (monkey_patch_creation_for_db_reuse, monkey_patch_creation_for_db_suffix) from .django_compat import is_django_unittest from .lazy_django import get_django_version, skip_if_no_django __all__ = ['_django_db_setup', 'db', 'transactional_db', 'admin_user', 'django_user_model', 'django_username_field', 'client', 'admin_client', 'rf', 'settings', 'live_server', '_live_server_helper'] # ############### Internal Fixtures ################ @pytest.fixture(scope='session') def _django_db_setup(request, _django_test_environment, _django_cursor_wrapper): """Session-wide database setup, internal to pytest-django""" skip_if_no_django() from .compat import setup_databases, teardown_databases # xdist if hasattr(request.config, 'slaveinput'): db_suffix = request.config.slaveinput['slaveid'] else: db_suffix = None monkey_patch_creation_for_db_suffix(db_suffix) _handle_south() if request.config.getvalue('nomigrations'): _disable_native_migrations() db_args = {} with _django_cursor_wrapper: if (request.config.getvalue('reuse_db') and not request.config.getvalue('create_db')): if get_django_version() >= (1, 8): db_args['keepdb'] = True else: monkey_patch_creation_for_db_reuse() # Create the database db_cfg = setup_databases(verbosity=pytest.config.option.verbose, interactive=False, **db_args) def teardown_database(): with _django_cursor_wrapper: teardown_databases(db_cfg) if not request.config.getvalue('reuse_db'): request.addfinalizer(teardown_database) def _django_db_fixture_helper(transactional, request, _django_cursor_wrapper): if is_django_unittest(request): return if not transactional and 'live_server' in request.funcargnames: # Do nothing, we get called with transactional=True, too. 
return django_case = None _django_cursor_wrapper.enable() request.addfinalizer(_django_cursor_wrapper.disable) if transactional: from django import get_version if get_version() >= '1.5': from django.test import TransactionTestCase as django_case else: # Django before 1.5 flushed the DB during setUp. # Use pytest-django's old behavior with it. def flushdb(): """Flush the database and close database connections""" # Django does this by default *before* each test # instead of after. from django.db import connections from django.core.management import call_command for db in connections: call_command('flush', interactive=False, database=db, verbosity=pytest.config.option.verbose) for conn in connections.all(): conn.close() request.addfinalizer(flushdb) else: from django.test import TestCase as django_case if django_case: case = django_case(methodName='__init__') case._pre_setup() request.addfinalizer(case._post_teardown) def _handle_south(): from django.conf import settings # NOTE: Django 1.7 does not have `management._commands` anymore, which # is used by South's `patch_for_test_db_setup` and the code below. if 'south' not in settings.INSTALLED_APPS or get_django_version() > (1, 7): return from django.core import management try: # if `south` >= 0.7.1 we can use the test helper from south.management.commands import patch_for_test_db_setup except ImportError: # if `south` < 0.7.1 make sure its migrations are disabled management.get_commands() management._commands['syncdb'] = 'django.core' else: # Monkey-patch south.hacks.django_1_0.SkipFlushCommand to load # initial data. # Ref: http://south.aeracode.org/ticket/1395#comment:3 import south.hacks.django_1_0 from django.core.management.commands.flush import ( Command as FlushCommand) class SkipFlushCommand(FlushCommand): def handle_noargs(self, **options): # Reinstall the initial_data fixture. from django.core.management import call_command # `load_initial_data` got introduces with Django 1.5. 
load_initial_data = options.get('load_initial_data', None) if load_initial_data or load_initial_data is None: # Reinstall the initial_data fixture. call_command('loaddata', 'initial_data', **options) # no-op to avoid calling flush return south.hacks.django_1_0.SkipFlushCommand = SkipFlushCommand patch_for_test_db_setup() def _disable_native_migrations(): from django.conf import settings from .migrations import DisableMigrations settings.MIGRATION_MODULES = DisableMigrations() # ############### User visible fixtures ################ @pytest.fixture(scope='function') def db(request, _django_db_setup, _django_cursor_wrapper): """Require a django test database This database will be setup with the default fixtures and will have the transaction management disabled. At the end of the test the outer transaction that wraps the test itself will be rolled back to undo any changes to the database (in case the backend supports transactions). This is more limited than the ``transactional_db`` resource but faster. If both this and ``transactional_db`` are requested then the database setup will behave as only ``transactional_db`` was requested. """ if 'transactional_db' in request.funcargnames \ or 'live_server' in request.funcargnames: request.getfuncargvalue('transactional_db') else: _django_db_fixture_helper(False, request, _django_cursor_wrapper) @pytest.fixture(scope='function') def transactional_db(request, _django_db_setup, _django_cursor_wrapper): """Require a django test database with transaction support This will re-initialise the django database for each test and is thus slower than the normal ``db`` fixture. If you want to use the database with transactions you must request this resource. If both this and ``db`` are requested then the database setup will behave as only ``transactional_db`` was requested. 
""" _django_db_fixture_helper(True, request, _django_cursor_wrapper) @pytest.fixture() def client(): """A Django test client instance.""" skip_if_no_django() from django.test.client import Client return Client() @pytest.fixture() def django_user_model(db): """The class of Django's user model.""" try: from django.contrib.auth import get_user_model except ImportError: assert get_django_version() < (1, 5) from django.contrib.auth.models import User as UserModel else: UserModel = get_user_model() return UserModel @pytest.fixture() def django_username_field(django_user_model): """The fieldname for the username used with Django's user model.""" try: return django_user_model.USERNAME_FIELD except AttributeError: assert get_django_version() < (1, 5) return 'username' @pytest.fixture() def admin_user(db, django_user_model, django_username_field): """A Django admin user. This uses an existing user with username "admin", or creates a new one with password "password". """ UserModel = django_user_model username_field = django_username_field try: user = UserModel._default_manager.get(**{username_field: 'admin'}) except UserModel.DoesNotExist: extra_fields = {} if username_field != 'username': extra_fields[username_field] = 'admin' user = UserModel._default_manager.create_superuser( 'admin', 'admin@example.com', 'password', **extra_fields) return user @pytest.fixture() def admin_client(db, admin_user): """A Django test client logged in as an admin user.""" from django.test.client import Client client = Client() client.login(username=admin_user.username, password='password') return client @pytest.fixture() def rf(): """RequestFactory instance""" skip_if_no_django() from django.test.client import RequestFactory return RequestFactory() class MonkeyPatchWrapper(object): def __init__(self, monkeypatch, wrapped_object): super(MonkeyPatchWrapper, self).__setattr__('monkeypatch', monkeypatch) super(MonkeyPatchWrapper, self).__setattr__('wrapped_object', wrapped_object) def 
__getattr__(self, attr): return getattr(self.wrapped_object, attr) def __setattr__(self, attr, value): self.monkeypatch.setattr(self.wrapped_object, attr, value, raising=False) def __delattr__(self, attr): self.monkeypatch.delattr(self.wrapped_object, attr) @pytest.fixture() def settings(monkeypatch): """A Django settings object which restores changes after the testrun""" skip_if_no_django() from django.conf import settings as django_settings return MonkeyPatchWrapper(monkeypatch, django_settings) @pytest.fixture(scope='session') def live_server(request): """Run a live Django server in the background during tests The address the server is started from is taken from the --liveserver command line option or if this is not provided from the DJANGO_LIVE_TEST_SERVER_ADDRESS environment variable. If neither is provided ``localhost:8081,8100-8200`` is used. See the Django documentation for it's full syntax. NOTE: If the live server needs database access to handle a request your test will have to request database access. Furthermore when the tests want to see data added by the live-server (or the other way around) transactional database access will be needed as data inside a transaction is not shared between the live server and test code. Static assets will be served for all versions of Django. Except for django >= 1.7, if ``django.contrib.staticfiles`` is not installed. 
""" skip_if_no_django() addr = request.config.getvalue('liveserver') if not addr: addr = os.getenv('DJANGO_LIVE_TEST_SERVER_ADDRESS') if not addr: addr = os.getenv('DJANGO_TEST_LIVE_SERVER_ADDRESS') if addr: warnings.warn('Please use DJANGO_LIVE_TEST_SERVER_ADDRESS' ' instead of DJANGO_TEST_LIVE_SERVER_ADDRESS.', DeprecationWarning) if not addr: addr = 'localhost:8081,8100-8200' server = live_server_helper.LiveServer(addr) request.addfinalizer(server.stop) return server @pytest.fixture(autouse=True, scope='function') def _live_server_helper(request): """Helper to make live_server work, internal to pytest-django. This helper will dynamically request the transactional_db fixture for a test which uses the live_server fixture. This allows the server and test to access the database without having to mark this explicitly which is handy since it is usually required and matches the Django behaviour. The separate helper is required since live_server can not request transactional_db directly since it is session scoped instead of function-scoped. """ if 'live_server' in request.funcargnames: request.getfuncargvalue('transactional_db')
{ "repo_name": "tomviner/pytest-django", "path": "pytest_django/fixtures.py", "copies": "1", "size": "12082", "license": "bsd-3-clause", "hash": 4425786446175451600, "line_mean": 33.3238636364, "line_max": 79, "alpha_frac": 0.6482370468, "autogenerated": false, "ratio": 4.228911445572279, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 352 }
"""All pytest-django fixtures""" from __future__ import with_statement import os import pytest from . import live_server_helper from .django_compat import is_django_unittest from .pytest_compat import getfixturevalue from .lazy_django import get_django_version, skip_if_no_django __all__ = ['django_db_setup', 'db', 'transactional_db', 'admin_user', 'django_user_model', 'django_username_field', 'client', 'admin_client', 'rf', 'settings', 'live_server', '_live_server_helper'] @pytest.fixture(scope='session') def django_db_modify_db_settings_xdist_suffix(request): skip_if_no_django() from django.conf import settings for db_settings in settings.DATABASES.values(): try: test_name = db_settings['TEST']['NAME'] except KeyError: test_name = None if not test_name: if db_settings['ENGINE'] == 'django.db.backends.sqlite3': return ':memory:' else: test_name = 'test_{}'.format(db_settings['NAME']) # Put a suffix like _gw0, _gw1 etc on xdist processes xdist_suffix = getattr(request.config, 'slaveinput', {}).get('slaveid') if test_name != ':memory:' and xdist_suffix is not None: test_name = '{}_{}'.format(test_name, xdist_suffix) db_settings.setdefault('TEST', {}) db_settings['TEST']['NAME'] = test_name @pytest.fixture(scope='session') def django_db_modify_db_settings(django_db_modify_db_settings_xdist_suffix): skip_if_no_django() @pytest.fixture(scope='session') def django_db_use_migrations(request): return not request.config.getvalue('nomigrations') @pytest.fixture(scope='session') def django_db_keepdb(request): return (request.config.getvalue('reuse_db') and not request.config.getvalue('create_db')) @pytest.fixture(scope='session') def django_db_setup( request, django_test_environment, django_db_blocker, django_db_use_migrations, django_db_keepdb, django_db_modify_db_settings, ): """Top level fixture to ensure test databases are available""" from .compat import setup_databases, teardown_databases setup_databases_args = {} if not django_db_use_migrations: 
_disable_native_migrations() if django_db_keepdb: if get_django_version() >= (1, 8): setup_databases_args['keepdb'] = True else: # Django 1.7 compatibility from .db_reuse import monkey_patch_creation_for_db_reuse with django_db_blocker.unblock(): monkey_patch_creation_for_db_reuse() with django_db_blocker.unblock(): db_cfg = setup_databases( verbosity=pytest.config.option.verbose, interactive=False, **setup_databases_args ) def teardown_database(): with django_db_blocker.unblock(): teardown_databases( db_cfg, verbosity=pytest.config.option.verbose, ) if not django_db_keepdb: request.addfinalizer(teardown_database) def _django_db_fixture_helper(transactional, request, django_db_blocker): if is_django_unittest(request): return if not transactional and 'live_server' in request.funcargnames: # Do nothing, we get called with transactional=True, too. return django_db_blocker.unblock() request.addfinalizer(django_db_blocker.restore) if transactional: from django.test import TransactionTestCase as django_case else: from django.test import TestCase as django_case test_case = django_case(methodName='__init__') test_case._pre_setup() request.addfinalizer(test_case._post_teardown) def _disable_native_migrations(): from django.conf import settings from .migrations import DisableMigrations settings.MIGRATION_MODULES = DisableMigrations() # ############### User visible fixtures ################ @pytest.fixture(scope='function') def db(request, django_db_setup, django_db_blocker): """Require a django test database This database will be setup with the default fixtures and will have the transaction management disabled. At the end of the test the outer transaction that wraps the test itself will be rolled back to undo any changes to the database (in case the backend supports transactions). This is more limited than the ``transactional_db`` resource but faster. 
If both this and ``transactional_db`` are requested then the database setup will behave as only ``transactional_db`` was requested. """ if 'transactional_db' in request.funcargnames \ or 'live_server' in request.funcargnames: getfixturevalue(request, 'transactional_db') else: _django_db_fixture_helper(False, request, django_db_blocker) @pytest.fixture(scope='function') def transactional_db(request, django_db_setup, django_db_blocker): """Require a django test database with transaction support This will re-initialise the django database for each test and is thus slower than the normal ``db`` fixture. If you want to use the database with transactions you must request this resource. If both this and ``db`` are requested then the database setup will behave as only ``transactional_db`` was requested. """ _django_db_fixture_helper(True, request, django_db_blocker) @pytest.fixture() def client(): """A Django test client instance.""" skip_if_no_django() from django.test.client import Client return Client() @pytest.fixture() def django_user_model(db): """The class of Django's user model.""" from django.contrib.auth import get_user_model return get_user_model() @pytest.fixture() def django_username_field(django_user_model): """The fieldname for the username used with Django's user model.""" return django_user_model.USERNAME_FIELD @pytest.fixture() def admin_user(db, django_user_model, django_username_field): """A Django admin user. This uses an existing user with username "admin", or creates a new one with password "password". 
""" UserModel = django_user_model username_field = django_username_field try: user = UserModel._default_manager.get(**{username_field: 'admin'}) except UserModel.DoesNotExist: extra_fields = {} if username_field != 'username': extra_fields[username_field] = 'admin' user = UserModel._default_manager.create_superuser( 'admin', 'admin@example.com', 'password', **extra_fields) return user @pytest.fixture() def admin_client(db, admin_user): """A Django test client logged in as an admin user.""" from django.test.client import Client client = Client() client.login(username=admin_user.username, password='password') return client @pytest.fixture() def rf(): """RequestFactory instance""" skip_if_no_django() from django.test.client import RequestFactory return RequestFactory() class SettingsWrapper(object): _to_restore = [] def __delattr__(self, attr): from django.test import override_settings override = override_settings() override.enable() from django.conf import settings delattr(settings, attr) self._to_restore.append(override) def __setattr__(self, attr, value): from django.test import override_settings override = override_settings(**{ attr: value }) override.enable() self._to_restore.append(override) def __getattr__(self, item): from django.conf import settings return getattr(settings, item) def finalize(self): for override in reversed(self._to_restore): override.disable() del self._to_restore[:] @pytest.yield_fixture() def settings(): """A Django settings object which restores changes after the testrun""" skip_if_no_django() wrapper = SettingsWrapper() yield wrapper wrapper.finalize() @pytest.fixture(scope='session') def live_server(request): """Run a live Django server in the background during tests The address the server is started from is taken from the --liveserver command line option or if this is not provided from the DJANGO_LIVE_TEST_SERVER_ADDRESS environment variable. If neither is provided ``localhost:8081,8100-8200`` is used. 
See the Django documentation for it's full syntax. NOTE: If the live server needs database access to handle a request your test will have to request database access. Furthermore when the tests want to see data added by the live-server (or the other way around) transactional database access will be needed as data inside a transaction is not shared between the live server and test code. Static assets will be automatically served when ``django.contrib.staticfiles`` is available in INSTALLED_APPS. """ skip_if_no_django() import django addr = (request.config.getvalue('liveserver') or os.getenv('DJANGO_LIVE_TEST_SERVER_ADDRESS')) if addr and django.VERSION >= (1, 11) and ':' in addr: request.config.warn('D001', 'Specifying a live server port is not supported ' 'in Django 1.11. This will be an error in a future ' 'pytest-django release.') if not addr: if django.VERSION < (1, 11): addr = 'localhost:8081,8100-8200' else: addr = 'localhost' server = live_server_helper.LiveServer(addr) request.addfinalizer(server.stop) return server @pytest.fixture(autouse=True, scope='function') def _live_server_helper(request): """Helper to make live_server work, internal to pytest-django. This helper will dynamically request the transactional_db fixture for a test which uses the live_server fixture. This allows the server and test to access the database without having to mark this explicitly which is handy since it is usually required and matches the Django behaviour. The separate helper is required since live_server can not request transactional_db directly since it is session scoped instead of function-scoped. """ if 'live_server' in request.funcargnames: getfixturevalue(request, 'transactional_db')
{ "repo_name": "vicky2135/lucious", "path": "oscar/lib/python2.7/site-packages/pytest_django/fixtures.py", "copies": "1", "size": "10509", "license": "bsd-3-clause", "hash": -1304098908836022500, "line_mean": 29.8181818182, "line_max": 85, "alpha_frac": 0.6620039966, "autogenerated": false, "ratio": 4.1002731174405, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00007030377122900253, "num_lines": 341 }
"""All pytest fixtures.""" import boto3 from collections import namedtuple import os import pytest from shapely import wkt import shutil import uuid import yaml from mapchete.cli.default.serve import create_app SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) TESTDATA_DIR = os.path.join(SCRIPT_DIR, "testdata") TEMP_DIR = os.path.join(TESTDATA_DIR, "tmp") S3_TEMP_DIR = "s3://mapchete-test/tmp/" + uuid.uuid4().hex ExampleConfig = namedtuple("ExampleConfig", ("path", "dict")) # flask test app for mapchete serve @pytest.fixture def app(dem_to_hillshade, cleantopo_br, geobuf, geojson, mp_tmpdir): """Dummy Flask app.""" return create_app( mapchete_files=[ dem_to_hillshade.path, cleantopo_br.path, geojson.path, geobuf.path, ], zoom=None, bounds=None, single_input_file=None, mode="overwrite", debug=True, ) # temporary directory for I/O tests @pytest.fixture(autouse=True) def mp_tmpdir(): """Setup and teardown temporary directory.""" shutil.rmtree(TEMP_DIR, ignore_errors=True) os.makedirs(TEMP_DIR) yield TEMP_DIR shutil.rmtree(TEMP_DIR, ignore_errors=True) # temporary directory for I/O tests @pytest.fixture def mp_s3_tmpdir(): """Setup and teardown temporary directory.""" def _cleanup(): for obj in ( boto3.resource("s3") .Bucket(S3_TEMP_DIR.split("/")[2]) .objects.filter(Prefix="/".join(S3_TEMP_DIR.split("/")[-2:])) ): obj.delete() _cleanup() yield S3_TEMP_DIR _cleanup() @pytest.fixture def wkt_geom(): """Example WKT geometry.""" return "Polygon ((2.8125 11.25, 2.8125 14.0625, 0 14.0625, 0 11.25, 2.8125 11.25))" # example files @pytest.fixture def http_raster(): """Fixture for HTTP raster.""" return "https://ungarj.github.io/mapchete_testdata/tiled_data/raster/cleantopo/1/0/0.tif" @pytest.fixture def http_tiledir(): """Fixture for HTTP TileDirectory.""" return "https://ungarj.github.io/mapchete_testdata/tiled_data/raster/cleantopo/" @pytest.fixture def s2_band(): """ Fixture for Sentinel-2 raster band. 
Original file: s3://sentinel-s2-l1c/tiles/33/T/WN/2016/4/3/0/B02.jp2 """ return os.path.join(TESTDATA_DIR, "s2_band.tif") @pytest.fixture def s2_band_jp2(): """ Fixture for Sentinel-2 raster band. Original file: s3://sentinel-s2-l1c/tiles/33/T/WN/2016/4/3/0/B02.jp2 """ return os.path.join(TESTDATA_DIR, "s2_band.jp2") @pytest.fixture def s2_band_remote(): """ Fixture for remote file on S3 bucket. """ return "s3://mapchete-test/4band_test.tif" @pytest.fixture def s3_metadata_json(): """ Fixture for s3://mapchete-test/metadata.json. """ return "s3://mapchete-test/metadata.json" @pytest.fixture def http_metadata_json(): """ Fixture for https://ungarj.github.io/mapchete_testdata/tiled_data/raster/cleantopo/metadata.json. """ return "https://ungarj.github.io/mapchete_testdata/tiled_data/raster/cleantopo/metadata.json" @pytest.fixture def old_style_metadata_json(): """ Fixture for old_style_metadata.json. """ return os.path.join(TESTDATA_DIR, "old_style_metadata.json") @pytest.fixture def old_geodetic_shape_metadata_json(): """ Fixture for old_geodetic_shape_metadata.json. 
""" return os.path.join(TESTDATA_DIR, "old_geodetic_shape_metadata.json") @pytest.fixture def landpoly(): """Fixture for landpoly.geojson.""" return os.path.join(TESTDATA_DIR, "landpoly.geojson") @pytest.fixture def landpoly_3857(): """Fixture for landpoly_3857.geojson""" return os.path.join(TESTDATA_DIR, "landpoly_3857.geojson") @pytest.fixture def aoi_br_geojson(): """Fixture for aoi_br.geojson""" return os.path.join(TESTDATA_DIR, "aoi_br.geojson") @pytest.fixture def geometrycollection(): """Fixture for geometrycollection.geojson""" return wkt.loads( "GEOMETRYCOLLECTION (LINESTRING (-100.9423828125 78.75, -100.8984375 78.75), LINESTRING (-100.2392578125 78.75, -99.9755859375 78.75), POLYGON ((-101.25 78.9697265625, -101.25 79.013671875, -101.2060546875 79.013671875, -101.2060546875 78.9697265625, -100.986328125 78.9697265625, -100.986328125 78.92578125, -101.0302734375 78.92578125, -101.0302734375 78.8818359375, -101.07421875 78.8818359375, -101.1181640625 78.8818359375, -101.1181640625 78.837890625, -101.162109375 78.837890625, -101.2060546875 78.837890625, -101.2060546875 78.7939453125, -100.9423828125 78.7939453125, -100.9423828125 78.75, -101.25 78.75, -101.25 78.9697265625)), POLYGON ((-100.8984375 78.75, -100.8984375 78.7939453125, -100.5908203125 78.7939453125, -100.546875 78.7939453125, -100.546875 78.837890625, -100.3271484375 78.837890625, -100.3271484375 78.7939453125, -100.2392578125 78.7939453125, -100.2392578125 78.75, -100.8984375 78.75)))" ) @pytest.fixture def cleantopo_br_tif(): """Fixture for cleantopo_br.tif""" return os.path.join(TESTDATA_DIR, "cleantopo_br.tif") @pytest.fixture def cleantopo_tl_tif(): """Fixture for cleantopo_tl.tif""" return os.path.join(TESTDATA_DIR, "cleantopo_tl.tif") @pytest.fixture def dummy1_3857_tif(): """Fixture for dummy1_3857.tif""" return os.path.join(TESTDATA_DIR, "dummy1_3857.tif") @pytest.fixture def dummy1_tif(): """Fixture for dummy1.tif""" return os.path.join(TESTDATA_DIR, "dummy1.tif") @pytest.fixture def 
dummy2_tif(): """Fixture for dummy2.tif""" return os.path.join(TESTDATA_DIR, "dummy2.tif") @pytest.fixture def invalid_tif(): """Fixture for invalid.tif""" return os.path.join(TESTDATA_DIR, "invalid.tif") @pytest.fixture def invalid_geojson(): """Fixture for invalid.geojson""" return os.path.join(TESTDATA_DIR, "invalid.geojson") @pytest.fixture def execute_kwargs_py(): """Fixture for execute_kwargs.py""" return os.path.join(TESTDATA_DIR, "execute_kwargs.py") @pytest.fixture def write_rasterfile_tags_py(): """Fixture for write_rasterfile_tags.py""" return os.path.join(TESTDATA_DIR, "write_rasterfile_tags.py") @pytest.fixture def import_error_py(): """Fixture for import_error.py""" return os.path.join(TESTDATA_DIR, "import_error.py") @pytest.fixture def malformed_py(): """Fixture for malformed.py""" return os.path.join(TESTDATA_DIR, "malformed.py") @pytest.fixture def syntax_error_py(): """Fixture for syntax_error.py""" return os.path.join(TESTDATA_DIR, "syntax_error.py") @pytest.fixture def execute_params_error_py(): """Fixture for execute_params_error.py""" return os.path.join(TESTDATA_DIR, "execute_params_error.py") @pytest.fixture def process_error_py(): """Fixture for process_error.py""" return os.path.join(TESTDATA_DIR, "process_error.py") @pytest.fixture def output_error_py(): """Fixture for output_error.py""" return os.path.join(TESTDATA_DIR, "output_error.py") @pytest.fixture def old_style_process_py(): """Fixture for old_style_process.py""" return os.path.join(TESTDATA_DIR, "old_style_process.py") # example mapchete configurations @pytest.fixture def custom_grid(): """Fixture for custom_grid.mapchete.""" path = os.path.join(TESTDATA_DIR, "custom_grid.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def deprecated_params(): """Fixture for deprecated_params.mapchete.""" path = os.path.join(TESTDATA_DIR, "deprecated_params.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def 
abstract_input(): """Fixture for abstract_input.mapchete.""" path = os.path.join(TESTDATA_DIR, "abstract_input.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def files_zooms(): """Fixture for files_zooms.mapchete.""" path = os.path.join(TESTDATA_DIR, "files_zooms.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def file_groups(): """Fixture for file_groups.mapchete.""" path = os.path.join(TESTDATA_DIR, "file_groups.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def baselevels(): """Fixture for baselevels.mapchete.""" path = os.path.join(TESTDATA_DIR, "baselevels.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def baselevels_output_buffer(): """Fixture for baselevels_output_buffer.mapchete.""" path = os.path.join(TESTDATA_DIR, "baselevels_output_buffer.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def baselevels_custom_nodata(): """Fixture for baselevels_custom_nodata.mapchete.""" path = os.path.join(TESTDATA_DIR, "baselevels_custom_nodata.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def mapchete_input(): """Fixture for mapchete_input.mapchete.""" path = os.path.join(TESTDATA_DIR, "mapchete_input.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def dem_to_hillshade(): """Fixture for dem_to_hillshade.mapchete.""" path = os.path.join(TESTDATA_DIR, "dem_to_hillshade.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def files_bounds(): """Fixture for files_bounds.mapchete.""" path = os.path.join(TESTDATA_DIR, "files_bounds.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def example_mapchete(): """Fixture for example.mapchete.""" path = os.path.join(SCRIPT_DIR, "example.mapchete") return 
ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def zoom_mapchete(): """Fixture for zoom.mapchete.""" path = os.path.join(TESTDATA_DIR, "zoom.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def minmax_zoom(): """Fixture for minmax_zoom.mapchete.""" path = os.path.join(TESTDATA_DIR, "minmax_zoom.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def cleantopo_tl(): """Fixture for cleantopo_tl.mapchete.""" path = os.path.join(TESTDATA_DIR, "cleantopo_tl.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def cleantopo_br(): """Fixture for cleantopo_br.mapchete.""" path = os.path.join(TESTDATA_DIR, "cleantopo_br.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def cleantopo_remote(): """Fixture for cleantopo_remote.mapchete.""" path = os.path.join(TESTDATA_DIR, "cleantopo_remote.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def cleantopo_br_tiledir(): """Fixture for cleantopo_br_tiledir.mapchete.""" path = os.path.join(TESTDATA_DIR, "cleantopo_br_tiledir.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def cleantopo_br_tiledir_mercator(): """Fixture for cleantopo_br_tiledir_mercator.mapchete.""" path = os.path.join(TESTDATA_DIR, "cleantopo_br_tiledir_mercator.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def cleantopo_br_mercator(): """Fixture for cleantopo_br_mercator.mapchete.""" path = os.path.join(TESTDATA_DIR, "cleantopo_br_mercator.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def geojson(): """Fixture for geojson.mapchete.""" path = os.path.join(TESTDATA_DIR, "geojson.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def geojson_s3(): """Fixture for 
geojson.mapchete with updated output path.""" path = os.path.join(TESTDATA_DIR, "geojson.mapchete") config = _dict_from_mapchete(path) config["output"].update(path=S3_TEMP_DIR) return ExampleConfig(path=None, dict=config) @pytest.fixture def geobuf(): """Fixture for geobuf.mapchete.""" path = os.path.join(TESTDATA_DIR, "geobuf.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def geobuf_s3(): """Fixture for geobuf.mapchete with updated output path.""" path = os.path.join(TESTDATA_DIR, "geobuf.mapchete") config = _dict_from_mapchete(path) config["output"].update(path=S3_TEMP_DIR) return ExampleConfig(path=None, dict=config) @pytest.fixture def flatgeobuf(): """Fixture for flatgeobuf.mapchete.""" path = os.path.join(TESTDATA_DIR, "flatgeobuf.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def flatgeobuf_s3(): """Fixture for flatgeobuf.mapchete with updated output path.""" path = os.path.join(TESTDATA_DIR, "flatgeobuf.mapchete") config = _dict_from_mapchete(path) config["output"].update(path=S3_TEMP_DIR) return ExampleConfig(path=None, dict=config) @pytest.fixture def geojson_tiledir(): """Fixture for geojson_tiledir.mapchete.""" path = os.path.join(TESTDATA_DIR, "geojson_tiledir.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def process_module(): """Fixture for process_module.mapchete""" path = os.path.join(TESTDATA_DIR, "process_module.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def gtiff_s3(): """Fixture for gtiff_s3.mapchete.""" path = os.path.join(TESTDATA_DIR, "gtiff_s3.mapchete") config = _dict_from_mapchete(path) config["output"].update(path=S3_TEMP_DIR) return ExampleConfig(path=None, dict=config) @pytest.fixture def output_single_gtiff(): """Fixture for output_single_gtiff.mapchete.""" path = os.path.join(TESTDATA_DIR, "output_single_gtiff.mapchete") return ExampleConfig(path=path, 
dict=_dict_from_mapchete(path)) @pytest.fixture def output_single_gtiff_cog(): """Fixture for output_single_gtiff_cog.mapchete.""" path = os.path.join(TESTDATA_DIR, "output_single_gtiff_cog.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def aoi_br(): """Fixture for aoi_br.mapchete.""" path = os.path.join(TESTDATA_DIR, "aoi_br.mapchete") return ExampleConfig(path=path, dict=_dict_from_mapchete(path)) @pytest.fixture def s3_example_tile(gtiff_s3): """Example tile for fixture.""" return (5, 15, 32) # helper functions def _dict_from_mapchete(path): config = yaml.safe_load(open(path).read()) config.update(config_dir=os.path.dirname(path)) return config
{ "repo_name": "ungarj/mapchete", "path": "test/conftest.py", "copies": "1", "size": "14819", "license": "mit", "hash": 7750986273821854000, "line_mean": 27.7189922481, "line_max": 930, "alpha_frac": 0.6940414333, "autogenerated": false, "ratio": 2.9834910408697404, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.417753247416974, "avg_score": null, "num_lines": null }
__all__ = ['PythonCAPISubProgram'] import sys from parser.api import TypeDecl, TypeStmt, Module from wrapper_base import * from py_wrap_type import * class PythonCAPISubProgram(WrapperBase): """ Fortran subprogram hooks. """ header_template_f77 = '''\ #define %(name)s_f F_FUNC(%(name)s, %(NAME)s) ''' extern_template_f77 = '''\ extern void %(name)s_f(%(ctype_args_f_clist)s); ''' objdecl_template_doc = '''\ static char %(cname)s__doc[] = ""; ''' module_method_template = '''\ {"%(pyname)s", (PyCFunction)%(cname)s, METH_VARARGS | METH_KEYWORDS, %(cname)s__doc},''' capi_code_template = '''\ static PyObject* %(cname)s(PyObject *capi_self, PyObject *capi_args, PyObject *capi_keywds) { PyObject * volatile capi_buildvalue = NULL; volatile int f2py_success = 1; %(decl_list)s static char *capi_kwlist[] = {%(kw_clist+optkw_clist+extrakw_clist+["NULL"])s}; if (PyArg_ParseTupleAndKeywords(capi_args,capi_keywds, "%(pyarg_format_elist)s", %(["capi_kwlist"]+pyarg_obj_clist)s)) { %(frompyobj_list)s %(call_list)s f2py_success = !PyErr_Occurred(); if (f2py_success) { %(pyobjfrom_list)s capi_buildvalue = Py_BuildValue("%(return_format_elist)s" %(return_obj_clist)s); %(clean_pyobjfrom_list)s } %(clean_call_list)s %(clean_frompyobj_list)s } return capi_buildvalue; } ''' header_template_module = ''' #define %(name)s_f (*%(name)s_func_ptr) #define %(init_func)s_f F_FUNC(%(init_func)s, %(INIT_FUNC)s) ''' typedef_template_module = ''' typedef void (*%(name)s_functype)(%(ctype_args_f_clist)s); typedef void (*%(init_func)s_c_functype)(%(name)s_functype); ''' extern_template_module = '''\ extern void %(init_func)s_f(%(init_func)s_c_functype); static %(name)s_functype %(name)s_func_ptr; ''' objdecl_template_module = ''' ''' fortran_code_template_module = ''' subroutine %(init_func)s(init_func_c) use %(mname)s external init_func_c call init_func_c(%(name)s) end ''' c_code_template_module = ''' static void %(init_func)s_c(%(name)s_functype func_ptr) { %(name)s_func_ptr = func_ptr; } ''' 
module_init_template_module = ''' %(init_func)s_f(%(init_func)s_c); ''' def __init__(self, parent, block): WrapperBase.__init__(self) self.name = name = pyname = block.name self.cname = cname = '%s_%s' % (parent.cname,name) defined = parent.defined_capi_codes if cname in defined: return defined.append(cname) self.info('Generating interface for %s %s: %s' % (parent.modulename, block.__class__.__name__, cname)) self.parent = parent if pyname.startswith('f2pywrap_'): pyname = pyname[9:] self.pyname = pyname self.header_template = '' self.extern_template = '' self.module_init_template = '' self.typedef_template = '' self.c_code_template = '' self.objdecl_template = '' self.fortran_code_template = '' WrapperCPPMacro(parent, 'F_FUNC') if isinstance(block.parent, Module): self.mname = block.parent.name self.init_func = '%s_init' % (name) self.typedef_template += self.typedef_template_module self.header_template += self.header_template_module self.fortran_code_template += self.fortran_code_template_module self.module_init_template += self.module_init_template_module self.objdecl_template += self.objdecl_template_module self.c_code_template += self.c_code_template_module self.extern_template += self.extern_template_module else: self.extern_template += self.extern_template_f77 self.header_template += self.header_template_f77 self.objdecl_template += self.objdecl_template_doc self.decl_list = [] self.kw_list = [] self.optkw_list = [] self.extrakw_list = [] self.pyarg_format_list = [] self.pyarg_obj_list = [] self.frompyobj_list = [] self.call_list = [] self.pyobjfrom_list = [] self.return_format_list = [] self.return_obj_list = [] self.buildvalue_list = [] self.clean_pyobjfrom_list = [] self.clean_call_list = [] self.clean_frompyobj_list = [] args_f = [] extra_args_f = [] ctype_args_f = [] extra_ctype_args_f = [] argindex = -1 for argname in block.args: argindex += 1 var = block.a.variables[argname] typedecl = var.get_typedecl() PythonCAPIType(parent, typedecl) ti = 
PyTypeInterface(typedecl) if var.is_intent_in(): self.kw_list.append('"%s"' % (argname)) if var.is_scalar(): if isinstance(typedecl, TypeStmt): if var.is_intent_in(): self.pyarg_format_list.append('O&') self.pyarg_obj_list.append('\npyobj_to_%s_inplace, &%s' % (ti.ctype, argname)) else: self.frompyobj_list.append('%s = (%s*)pyobj_from_%s(NULL);' % (argname,ti.otype,ti.ctype)) if not var.is_intent_out(): self.clean_frompyobj_list.append('Py_DECREF(%s);' % (argname)) self.decl_list.append('%s* %s = NULL;' % (ti.otype, argname)) args_f.append('%s->data' % (argname)) # is_scalar ctype_args_f.append(ti.ctype) else: if var.is_intent_in(): self.pyarg_format_list.append('O&') self.pyarg_obj_list.append('\npyobj_to_%s, &%s' % (ti.ctype, argname)) assert not isinstance(typedecl, TypeDecl) if ti.ctype=='f2py_string0': if not var.is_intent_in(): assert not var.is_intent_out(),'intent(out) not implemented for "%s"' % (var) self.decl_list.append('%s %s = {NULL,0};' % (ti.ctype, argname)) args_f.append('%s.data' % argname) # is_scalar ctype_args_f.append('char*') extra_ctype_args_f.append('int') extra_args_f.append('%s.len' % argname) self.clean_frompyobj_list.append(\ 'if (%s.len) free(%s.data);' % (argname,argname)) else: self.decl_list.append('%s %s;' % (ti.ctype, argname)) args_f.append('&'+argname) # is_scalar ctype_args_f.append(ti.ctype+'*') if var.is_intent_out(): # and is_scalar if isinstance(typedecl, TypeStmt): self.return_format_list.append('N') self.return_obj_list.append('\n%s' % (argname)) else: self.return_format_list.append('O&') self.return_obj_list.append('\npyobj_from_%s, &%s' % (ti.ctype, argname)) else: print `ti,var.dimension,var.bounds` assert var.is_scalar(),'array support not implemented: "%s"' % (var) self.call_list.append('%s_f(%s);' % (name,', '.join(args_f+extra_args_f))) self.ctype_args_f_list = ctype_args_f + extra_ctype_args_f if not self.ctype_args_f_list: self.ctype_args_f_list.append('void') self.clean_pyobjfrom_list.reverse() 
self.clean_call_list.reverse() self.clean_frompyobj_list.reverse() if self.return_obj_list: self.return_obj_list.insert(0,'') parent.apply_templates(self) return
{ "repo_name": "santisiri/popego", "path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/f2py/lib/py_wrap_subprogram.py", "copies": "1", "size": "7933", "license": "bsd-3-clause", "hash": -5116902610030664000, "line_mean": 36.7761904762, "line_max": 114, "alpha_frac": 0.5371234085, "autogenerated": false, "ratio": 3.464192139737991, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4501315548237991, "avg_score": null, "num_lines": null }
__all__ = ['PythonCAPIType', 'PyTypeInterface'] from wrapper_base import * from parser.api import CHAR_BIT, Module, declaration_type_spec, \ TypeDecl, TypeStmt, Subroutine, Function, Integer, Real,\ DoublePrecision, Complex, DoubleComplex, Logical, Character, \ Byte class PyTypeInterface: def __init__(self, typedecl): if isinstance(typedecl, TypeStmt): typedecl = typedecl.get_type_decl(typedecl.name) self._typedecl = typedecl if isinstance(typedecl, TypeDecl): self.name = name = typedecl.name tname = 'f2py_type_%s_' % (name) else: if isinstance(typedecl,(Integer,Byte)): tname = 'npy_int' elif isinstance(typedecl,(Real, DoublePrecision)): tname = 'npy_float' elif isinstance(typedecl,(Complex, DoubleComplex)): tname = 'npy_complex' elif isinstance(typedecl,Logical): tname = 'f2py_bool' elif isinstance(typedecl,Character): tname = 'f2py_string' else: raise NotImplementedError,`typedecl.__class__` bitsize = typedecl.get_bit_size() self.ctype = ctype = '%s%s' % (tname,bitsize) self.bits = bitsize self.bytes = bitsize / CHAR_BIT if isinstance(typedecl, TypeDecl): self.otype = '%sObject' % (ctype) self.ftype = 'TYPE(%s)' % (name) return def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self._typedecl) def __str__(self): s = [] for k,v in self.__dict__.items(): if k.startswith('_'): continue s.append('%s=%s' % (k,v)) return 'PyTypeInterface(%s)' % (', '.join(s)) class PythonCAPIType(WrapperBase): """ Fortran type hooks. 
""" def __init__(self, parent, typedecl): WrapperBase.__init__(self) if isinstance(typedecl, tuple(declaration_type_spec)): if isinstance(typedecl, TypeStmt): type_decl = typedecl.get_type_decl(typedecl.name) assert type_decl is not None,"%s %s" % (typedecl,typedecl.name) PythonCAPIDerivedType(parent, type_decl) else: PythonCAPIIntrinsicType(parent, typedecl) elif isinstance(typedecl, TypeDecl): PythonCAPIDerivedType(parent, typedecl) else: raise NotImplementedError,`self.__class__,typedecl.__class__` return class PythonCAPIIntrinsicType(WrapperBase): """ Fortran intrinsic type hooks. """ capi_code_template_scalar = ''' static PyObject* pyobj_from_%(ctype)s(%(ctype)s* value) { PyObject* obj = PyArrayScalar_New(%(Cls)s); #if defined(F2PY_DEBUG_PYOBJ_TOFROM) fprintf(stderr,"pyobj_from_%(ctype)s(value=%%"%(CTYPE)s_FMT")\\n",*value); #endif if (obj==NULL) /* TODO: set exception */ return NULL; PyArrayScalar_ASSIGN(obj,%(Cls)s,*value); return obj; } static int pyobj_to_%(ctype)s(PyObject *obj, %(ctype)s* value) { int return_value = 0; #if defined(F2PY_DEBUG_PYOBJ_TOFROM) fprintf(stderr,"pyobj_to_%(ctype)s(type=%%s)\\n",PyString_AS_STRING(PyObject_Repr(PyObject_Type(obj)))); #endif if (obj==NULL) ; else if (PyArray_IsScalar(obj,%(Cls)s)) { *value = PyArrayScalar_VAL(obj,%(Cls)s); return_value = 1; } else if (PySequence_Check(obj)) { if (PySequence_Size(obj)==1) return_value = pyobj_to_%(ctype)s(PySequence_GetItem(obj,0),value); } else { PyObject* sc = Py%(Cls)sArrType_Type.tp_new( &Py%(Cls)sArrType_Type,Py_BuildValue("(O)",obj),NULL); if (sc==NULL) ; else if (PyArray_IsScalar(sc, Generic)) return_value = pyobj_to_%(ctype)s(sc,value); else return_value = pyobj_to_%(ctype)s(PyArray_ScalarFromObject(sc),value); } if (!return_value && !PyErr_Occurred()) { PyObject* r = PyString_FromString("Failed to convert "); PyString_ConcatAndDel(&r, PyObject_Repr(PyObject_Type(obj))); PyString_ConcatAndDel(&r, PyString_FromString(" to C %(ctype)s")); PyErr_SetObject(PyExc_TypeError,r); } 
#if defined(F2PY_DEBUG_PYOBJ_TOFROM) if (PyErr_Occurred()) { if (return_value) fprintf(stderr,"pyobj_to_%(ctype)s:INCONSISTENCY with return_value=%%d and PyErr_Occurred()=%%p\\n",return_value, PyErr_Occurred()); else fprintf(stderr,"pyobj_to_%(ctype)s: PyErr_Occurred()=%%p\\n", PyErr_Occurred()); } else { if (return_value) fprintf(stderr,"pyobj_to_%(ctype)s: value=%%"%(CTYPE)s_FMT"\\n", *value); else fprintf(stderr,"pyobj_to_%(ctype)s:INCONSISTENCY with return_value=%%d and PyErr_Occurred()=%%p\\n",return_value, PyErr_Occurred()); } #endif return return_value; } ''' capi_code_template_complex_scalar = ''' static PyObject* pyobj_from_%(ctype)s(%(ctype)s* value) { PyObject* obj = PyArrayScalar_New(%(Cls)s); #if defined(F2PY_DEBUG_PYOBJ_TOFROM) fprintf(stderr,"pyobj_from_%(ctype)s(value=(%%"%(FCTYPE)s_FMT",%%"%(FCTYPE)s_FMT"))\\n",value->real, value->imag); #endif if (obj==NULL) /* TODO: set exception */ return NULL; PyArrayScalar_ASSIGN(obj,%(Cls)s,*value); return obj; } static int pyobj_to_%(ctype)s(PyObject *obj, %(ctype)s* value) { int return_value = 0; #if defined(F2PY_DEBUG_PYOBJ_TOFROM) fprintf(stderr,"pyobj_to_%(ctype)s(type=%%s)\\n",PyString_AS_STRING(PyObject_Repr(PyObject_Type(obj)))); #endif if (obj==NULL) ; else if (PyArray_IsScalar(obj,%(Cls)s)) { value->real = PyArrayScalar_VAL(obj,%(Cls)s).real; value->imag = PyArrayScalar_VAL(obj,%(Cls)s).imag; return_value = 1; } else if (PySequence_Check(obj)) { if (PySequence_Size(obj)==1) return_value = pyobj_to_%(ctype)s(PySequence_GetItem(obj,0),value); else if (PySequence_Size(obj)==2) { return_value = pyobj_to_%(fctype)s(PySequence_GetItem(obj,0),&(value->real)) && pyobj_to_%(fctype)s(PySequence_GetItem(obj,1),&(value->imag)); } } else { PyObject* sc = Py%(Cls)sArrType_Type.tp_new( &Py%(Cls)sArrType_Type,Py_BuildValue("(O)",obj),NULL); if (sc==NULL) ; else if (PyArray_IsScalar(sc, Generic)) return_value = pyobj_to_%(ctype)s(sc,value); else return_value = pyobj_to_%(ctype)s(PyArray_ScalarFromObject(sc),value); 
} if (!return_value && !PyErr_Occurred()) { PyObject* r = PyString_FromString("Failed to convert "); PyString_ConcatAndDel(&r, PyObject_Repr(PyObject_Type(obj))); PyString_ConcatAndDel(&r, PyString_FromString(" to C %(ctype)s")); PyErr_SetObject(PyExc_TypeError,r); } #if defined(F2PY_DEBUG_PYOBJ_TOFROM) if (PyErr_Occurred()) { if (return_value) fprintf(stderr,"pyobj_to_%(ctype)s:INCONSISTENCY with return_value=%%d and PyErr_Occurred()=%%p\\n",return_value, PyErr_Occurred()); else fprintf(stderr,"pyobj_to_%(ctype)s: PyErr_Occurred()=%%p\\n", PyErr_Occurred()); } else { if (return_value) fprintf(stderr,"pyobj_to_%(ctype)s: value=(%%"%(FCTYPE)s_FMT",%%"%(FCTYPE)s_FMT")\\n", value->real, value->imag); else fprintf(stderr,"pyobj_to_%(ctype)s:INCONSISTENCY with return_value=%%d and PyErr_Occurred()=%%p\\n",return_value, PyErr_Occurred()); } #endif return return_value; } ''' capi_code_template_logical_scalar = ''' static PyObject* pyobj_from_%(ctype)s(%(ctype)s* value) { #if defined(F2PY_DEBUG_PYOBJ_TOFROM) fprintf(stderr,"pyobj_from_%(ctype)s(value=%%"%(ICTYPE)s_FMT")\\n",*value); #endif if (*value) { PyArrayScalar_RETURN_TRUE; } else { PyArrayScalar_RETURN_FALSE; } } static int pyobj_to_%(ctype)s(PyObject *obj, %(ctype)s* value) { int return_value = 0; #if defined(F2PY_DEBUG_PYOBJ_TOFROM) fprintf(stderr,"pyobj_to_%(ctype)s(type=%%s)\\n",PyString_AS_STRING(PyObject_Repr(PyObject_Type(obj)))); #endif if (obj==NULL) ; else if (PyArray_IsScalar(obj,Bool)) { *value = PyArrayScalar_VAL(obj,Bool); return_value = 1; } else { switch (PyObject_IsTrue(obj)) { case 0: *value = 0; return_value = 1; break; case -1: break; default: *value = 1; return_value = 1; } } if (!return_value && !PyErr_Occurred()) { PyObject* r = PyString_FromString("Failed to convert "); PyString_ConcatAndDel(&r, PyObject_Repr(PyObject_Type(obj))); PyString_ConcatAndDel(&r, PyString_FromString(" to C %(ctype)s")); PyErr_SetObject(PyExc_TypeError,r); } #if defined(F2PY_DEBUG_PYOBJ_TOFROM) if (PyErr_Occurred()) 
{ if (return_value) fprintf(stderr,"pyobj_to_%(ctype)s:INCONSISTENCY with return_value=%%d and PyErr_Occurred()=%%p\\n",return_value, PyErr_Occurred()); else fprintf(stderr,"pyobj_to_%(ctype)s: PyErr_Occurred()=%%p\\n", PyErr_Occurred()); } else { if (return_value) fprintf(stderr,"pyobj_to_%(ctype)s: value=%%"%(ICTYPE)s_FMT"\\n", *value); else fprintf(stderr,"pyobj_to_%(ctype)s:INCONSISTENCY with return_value=%%d and PyErr_Occurred()=%%p\\n",return_value, PyErr_Occurred()); } #endif return return_value; } ''' capi_code_template_string_scalar = ''' static PyObject* pyobj_from_%(ctype)s(%(ctype)s* value) { #if defined(F2PY_DEBUG_PYOBJ_TOFROM) fprintf(stderr,"pyobj_from_%(ctype)s(value->data=\'%%s\')\\n",value->data); #endif PyArray_Descr* descr = PyArray_DescrNewFromType(NPY_STRING); descr->elsize = %(bytes)s; PyObject* obj = PyArray_Scalar(value->data, descr, NULL); if (obj==NULL) /* TODO: set exception */ return NULL; return obj; } static int pyobj_to_%(ctype)s(PyObject *obj, %(ctype)s* value) { int return_value = 0; #if defined(F2PY_DEBUG_PYOBJ_TOFROM) fprintf(stderr,"pyobj_to_%(ctype)s(type=%%s)\\n",PyString_AS_STRING(PyObject_Repr(PyObject_Type(obj)))); #endif if (PyString_Check(obj)) { int s = PyString_GET_SIZE(obj); memset(value->data, (int)\' \',%(bytes)s); return_value = !! 
strncpy(value->data,PyString_AS_STRING(obj),%(bytes)s); if (return_value && s<%(bytes)s) { memset(value->data + s, (int)\' \',%(bytes)s-s); } } else { return_value = pyobj_to_%(ctype)s(PyObject_Str(obj), value); } if (!return_value && !PyErr_Occurred()) { PyObject* r = PyString_FromString("Failed to convert "); PyString_ConcatAndDel(&r, PyObject_Repr(PyObject_Type(obj))); PyString_ConcatAndDel(&r, PyString_FromString(" to C %(ctype)s")); PyErr_SetObject(PyExc_TypeError,r); } #if defined(F2PY_DEBUG_PYOBJ_TOFROM) if (PyErr_Occurred()) { if (return_value) fprintf(stderr,"pyobj_to_%(ctype)s:INCONSISTENCY with return_value=%%d and PyErr_Occurred()=%%p\\n",return_value, PyErr_Occurred()); else fprintf(stderr,"pyobj_to_%(ctype)s: PyErr_Occurred()=%%p\\n", PyErr_Occurred()); } else { if (return_value) fprintf(stderr,"pyobj_to_%(ctype)s: value->data=\'%%s\'\\n", value->data); else fprintf(stderr,"pyobj_to_%(ctype)s:INCONSISTENCY with return_value=%%d and PyErr_Occurred()=%%p\\n",return_value, PyErr_Occurred()); } #endif return return_value; } ''' capi_code_template_string0_scalar = ''' static PyObject* pyobj_from_%(ctype)s(%(ctype)s* value) { #if defined(F2PY_DEBUG_PYOBJ_TOFROM) fprintf(stderr,"pyobj_from_%(ctype)s(value->len=%%d, value->data=\'%%s\')\\n",value->len, value->data); #endif PyArray_Descr* descr = PyArray_DescrNewFromType(NPY_STRING); descr->elsize = value->len; PyObject* obj = PyArray_Scalar(value->data, descr, NULL); if (obj==NULL) /* TODO: set exception */ return NULL; return obj; } static int pyobj_to_%(ctype)s(PyObject *obj, %(ctype)s* value) { int return_value = 0; #if defined(F2PY_DEBUG_PYOBJ_TOFROM) fprintf(stderr,"pyobj_to_%(ctype)s(type=%%s)\\n",PyString_AS_STRING(PyObject_Repr(PyObject_Type(obj)))); #endif if (PyString_Check(obj)) { value->len = PyString_GET_SIZE(obj); value->data = malloc(value->len*sizeof(char)); return_value = !! 
strncpy(value->data,PyString_AS_STRING(obj),value->len); } else { return_value = pyobj_to_%(ctype)s(PyObject_Str(obj), value); } if (!return_value && !PyErr_Occurred()) { PyObject* r = PyString_FromString("Failed to convert "); PyString_ConcatAndDel(&r, PyObject_Repr(PyObject_Type(obj))); PyString_ConcatAndDel(&r, PyString_FromString(" to C %(ctype)s")); PyErr_SetObject(PyExc_TypeError,r); } #if defined(F2PY_DEBUG_PYOBJ_TOFROM) if (PyErr_Occurred()) { if (return_value) fprintf(stderr,"pyobj_to_%(ctype)s:INCONSISTENCY with return_value=%%d and PyErr_Occurred()=%%p\\n",return_value, PyErr_Occurred()); else fprintf(stderr,"pyobj_to_%(ctype)s: PyErr_Occurred()=%%p\\n", PyErr_Occurred()); } else { if (return_value) fprintf(stderr,"pyobj_to_%(ctype)s: value->len=%%d, value->data=\'%%s\'\\n", value->len, value->data); else fprintf(stderr,"pyobj_to_%(ctype)s:INCONSISTENCY with return_value=%%d and PyErr_Occurred()=%%p\\n",return_value, PyErr_Occurred()); } #endif return return_value; } ''' def __init__(self, parent, typedecl): WrapperBase.__init__(self) self.name = name = typedecl.name ti = PyTypeInterface(typedecl) self.ctype = ctype = ti.ctype defined = parent.defined_types if ctype in defined: return defined.append(ctype) self.info('Generating interface for %s: %s' % (typedecl.__class__.__name__, ctype)) self.parent = parent if isinstance(typedecl, (Integer,Byte,Real,DoublePrecision)): self.Cls = ctype[4].upper() + ctype[5:] self.capi_code_template = self.capi_code_template_scalar elif isinstance(typedecl, (Complex,DoubleComplex)): self.Cls = ctype[4].upper() + ctype[5:] PythonCAPIIntrinsicType(parent, typedecl.get_part_typedecl()) ti1 = PyTypeInterface(typedecl.get_part_typedecl()) self.fctype = ti1.ctype self.capi_code_template = self.capi_code_template_complex_scalar elif isinstance(typedecl, Logical): self.ictype = 'npy_int%s' % (typedecl.get_bit_size()) self.header_template = '#define %(ctype)s %(ictype)s' self.capi_code_template = 
self.capi_code_template_logical_scalar elif isinstance(typedecl, Character): self.bits = bits = typedecl.get_bit_size() if bits: self.bytes = bits/CHAR_BIT self.header_template = ''' #include <string.h> typedef struct { char data[%(bytes)s]; } %(ctype)s; ''' self.capi_code_template = self.capi_code_template_string_scalar else: self.header_template = ''' #include <string.h> typedef struct { char* data; size_t len; } %(ctype)s; ''' self.capi_code_template = self.capi_code_template_string0_scalar else: raise NotImplementedError,`name,ctype` parent.apply_templates(self) return class PythonCAPIDerivedType(WrapperBase): """ Fortran 90 derived type hooks. """ header_template_wrapper = '''\ #define %(otype)s_Check(obj) \\ PyObject_TypeCheck((PyObject*)obj, &%(otype)sType) #define %(init_func)s_f \\ F_FUNC(%(init_func)s,%(INIT_FUNC)s) ''' typedef_template_wrapper = '''\ typedef void * %(ctype)s; typedef struct { PyObject_HEAD %(ptrstruct_list)s %(ctype)s data; } %(otype)s; typedef void (*%(init_func)s_c_functype)(%(init_func_c_ctype_arg_clist)s); ''' typedef_template_importer = '''\ typedef void * %(ctype)s; typedef struct { PyObject_HEAD %(ptrstruct_list)s %(ctype)s data; } %(otype)s; typedef int (*pyobj_to_%(ctype)s_inplace_functype)(PyObject*, %(otype)s** ); typedef int (*pyobj_to_%(ctype)s_functype)(PyObject*, %(otype)s* ); typedef PyObject* (*pyobj_from_%(ctype)s_functype)(%(ctype)s*); #define %(otype)sType (*(PyTypeObject *)PyArray_API[0]) #define pyobj_from_%(ctype)s ((pyobj_from_%(ctype)s_functype)PyArray_API[1]) #define pyobj_to_%(ctype)s_inplace ((pyobj_to_%(ctype)s_inplace_functype)PyArray_API[2]) ''' extern_template_wrapper = '''\ static PyTypeObject %(otype)sType; extern void %(init_func)s_f(%(init_func)s_c_functype, void*, %(ctype)s); ''' objdecl_template_wrapper = '''\ static PyMethodDef %(otype)s_methods[] = { %(type_method_list)s {NULL} /* Sentinel */ }; static PyGetSetDef %(otype)s_getseters[] = { %(type_getseters_list)s {NULL} /* Sentinel */ }; static 
PyTypeObject %(otype)sType = { PyObject_HEAD_INIT(NULL) 0, /*ob_size*/ "%(modulename)s.%(name)s", /*tp_name*/ sizeof(%(otype)s), /*tp_basicsize*/ 0, /*tp_itemsize*/ (destructor)%(otype)s_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ %(otype)s_repr, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash */ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/ "Fortran derived type %(name)s objects", /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ %(otype)s_methods, /* tp_methods */ 0 /*%(otype)s_members*/, /* tp_members */ %(otype)s_getseters, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc)%(otype)s_init, /* tp_init */ 0, /* tp_alloc */ %(otype)s_new, /* tp_new */ }; void *F2PY_%(otype)s_API[] = { (void *) &%(otype)sType, (void *) pyobj_from_%(ctype)s, (void *) pyobj_to_%(ctype)s_inplace }; ''' objdecl_template_importer = '''\ static void **F2PY_%(otype)s_API; ''' module_init_template_wrapper = '''\ if (PyType_Ready(&%(otype)sType) < 0) goto capi_err; PyModule_AddObject(f2py_module, "%(name)s", (PyObject *)&%(otype)sType); { PyObject* c_api = PyCObject_FromVoidPtr((void *)F2PY_%(otype)s_API, NULL); PyModule_AddObject(f2py_module, "_%(NAME)s_API", c_api); if (PyErr_Occurred()) goto capi_err; } ''' module_init_template_importer = '''\ { PyObject *c_api = NULL; PyObject *wrappermodule = PyImport_ImportModule("%(wrappermodulename)s"); if (wrappermodule == NULL) goto capi_%(name)s_err; c_api = PyObject_GetAttrString(wrappermodule, "_%(NAME)s_API"); if (c_api == NULL) {Py_DECREF(wrappermodule); goto capi_%(name)s_err;} if (PyCObject_Check(c_api)) { F2PY_%(otype)s_API = (void **)PyCObject_AsVoidPtr(c_api); } Py_DECREF(c_api); 
Py_DECREF(wrappermodule); if (F2PY_%(otype)s_API != NULL) goto capi_%(name)s_ok; capi_%(name)s_err: PyErr_Print(); PyErr_SetString(PyExc_ImportError, "%(wrappermodulename)s failed to import"); return; capi_%(name)s_ok: c_api = PyCObject_FromVoidPtr((void *)F2PY_%(otype)s_API, NULL); PyModule_AddObject(f2py_module, "_%(NAME)s_API", c_api); if (PyErr_Occurred()) goto capi_err; } ''' c_code_template_wrapper = '''\ static void %(init_func)s_c( %(init_func_c_arg_clist)s) { %(init_func_c_body_list)s } ''' capi_code_template_wrapper = '''\ static void %(otype)s_dealloc(%(otype)s* self) { if (self->data) PyMem_Free(self->data); self->ob_type->tp_free((PyObject*)self); } static int pyobj_to_%(ctype)s_inplace(PyObject *obj, %(otype)s** value_ptr) { int return_value = 0; #if defined(F2PY_DEBUG_PYOBJ_TOFROM) fprintf(stderr,"pyobj_to_%(ctype)s(type=%%s)\\n",PyString_AS_STRING(PyObject_Repr(PyObject_Type(obj)))); #endif if (%(otype)s_Check(obj)) { *value_ptr = (%(otype)s*)obj; return_value = 1; } #if defined(F2PY_DEBUG_PYOBJ_TOFROM) fprintf(stderr,"pyobj_to_%(ctype)s: return_value=%%d, PyErr_Occurred()=%%p\\n", return_value, PyErr_Occurred()); #endif return return_value; } static int pyobj_to_%(ctype)s(PyObject *obj, %(ctype)s* value_ptr) { int return_value = 0; #if defined(F2PY_DEBUG_PYOBJ_TOFROM) fprintf(stderr,"pyobj_to_%(ctype)s(type=%%s)\\n",PyString_AS_STRING(PyObject_Repr(PyObject_Type(obj)))); #endif if (%(otype)s_Check(obj)) { if (!memcpy(value_ptr,((%(otype)s *)obj)->data, %(bytes)s)) { PyErr_SetString(PyExc_MemoryError, "failed to copy %(name)s instance memory to %(ctype)s object."); } else { return_value = 1; } } #if defined(F2PY_DEBUG_PYOBJ_TOFROM) fprintf(stderr,"pyobj_to_%(ctype)s: return_value=%%d, PyErr_Occurred()=%%p\\n", return_value, PyErr_Occurred()); #endif return return_value; } static PyObject* pyobj_from_%(ctype)s(%(ctype)s* value_ptr) { %(otype)s* obj = (%(otype)s*)(%(otype)sType.tp_alloc(&%(otype)sType, 0)); if (obj == NULL) return NULL; obj->data = 
PyMem_Malloc(%(bytes)s); if (obj->data == NULL) { Py_DECREF(obj); return PyErr_NoMemory(); } if (value_ptr) { if (!memcpy(obj->data, value_ptr, %(bytes)s)) { PyErr_SetString(PyExc_MemoryError, "failed to copy %(ctype)s object memory to %(name)s instance."); } } %(init_func)s_f(%(init_func)s_c, obj, obj->data); return (PyObject*)obj; } static PyObject * %(otype)s_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { return pyobj_from_%(ctype)s(NULL); } static int %(otype)s_init(%(otype)s *self, PyObject *capi_args, PyObject *capi_kwds) { int return_value = 0; #if defined(F2PY_DEBUG_PYOBJ_TOFROM) fprintf(stderr,"%(otype)s_init()\\n"); #endif if (!PyArg_ParseTuple(capi_args,"%(attr_format_elist)s" %(attr_init_clist)s)) return_value = -1; #if defined(F2PY_DEBUG_PYOBJ_TOFROM) fprintf(stderr,"%(otype)s_init: return_value=%%d, PyErr_Occurred()=%%p\\n", return_value, PyErr_Occurred()); #endif return return_value; } static PyObject * %(otype)s_as_tuple(%(otype)s * self) { return Py_BuildValue("%(as_tuple_format_elist)s" %(as_tuple_arg_clist)s); } static PyObject * %(otype)s_repr(PyObject * self) { PyObject* r = PyString_FromString("%(name)s("); PyString_ConcatAndDel(&r, PyObject_Repr(%(otype)s_as_tuple((%(otype)s*)self))); PyString_ConcatAndDel(&r, PyString_FromString(")")); return r; } %(getset_func_list)s ''' fortran_code_template_wrapper = '''\ subroutine %(init_func)s(init_func_c, self, obj) %(use_stmt_list)s %(type_decl_list)s external init_func_c ! 
self is %(otype)s external self %(ftype)s obj call init_func_c(%(init_func_f_arg_clist)s) end ''' #module_method_template = '''''' _defined = [] def __init__(self, parent, typedecl): WrapperBase.__init__(self) ti = PyTypeInterface(typedecl) self.ctype = ctype = ti.ctype defined = parent.defined_types if ctype in defined: return defined.append(ctype) implement_wrappers = True if isinstance(typedecl.parent,Module) and typedecl.parent.name!=parent.modulename: implement_wrappers = False self.info('Using api for %s.%s: %s' % (parent.modulename, typedecl.name, ctype)) self.wrappermodulename = typedecl.parent.name else: self.info('Generating interface for %s.%s: %s' % (parent.modulename, typedecl.name, ctype)) parent.isf90 = True self.parent = parent self.name = name = typedecl.name self.otype = otype = ti.otype self.ctype = ctype = ti.ctype self.ctype_ptrs = self.ctype + '_ptrs' self.ftype = ti.ftype self.bytes = bytes = ti.bytes if not implement_wrappers: self.typedef_template = self.typedef_template_importer self.objdecl_template = self.objdecl_template_importer self.module_init_template = self.module_init_template_importer else: self.header_template = self.header_template_wrapper self.typedef_template = self.typedef_template_wrapper self.extern_template = self.extern_template_wrapper self.objdecl_template = self.objdecl_template_wrapper self.module_init_template = self.module_init_template_wrapper self.c_code_template = self.c_code_template_wrapper self.capi_code_template = self.capi_code_template_wrapper self.fortran_code_template = self.fortran_code_template_wrapper WrapperCPPMacro(parent, 'F_FUNC') self.init_func_f_arg_list = ['self'] self.init_func_c_arg_list = ['%s *self' % (otype)] self.init_func_c_ctype_arg_list = ['%s *' % (otype)] self.init_func_c_body_list = [] self.ptrstruct_list = [] self.attr_decl_list = [] self.attr_format_list = [] self.attr_init_list = [] self.as_tuple_format_list = [] self.as_tuple_arg_list = [] self.getset_func_list = [] 
self.type_getseters_list = [] for n in typedecl.a.component_names: v = typedecl.a.components[n] t = v.get_typedecl() ti1 = PyTypeInterface(t) PythonCAPIType(parent, t) ct = ti1.ctype parent.add(t) self.ptrstruct_list.append('%s* %s_ptr;' % (ct, n)) self.init_func_f_arg_list.append('obj %% %s' % (n)) self.init_func_c_arg_list.append('\n%s * %s_ptr' % (ct, n)) self.init_func_c_ctype_arg_list.append('\n%s *' % (ct)) self.init_func_c_body_list.append('''\ if (!((void*)%(n)s_ptr >= self->data && (void*)%(n)s_ptr < self->data + %(bytes)s )) fprintf(stderr,"INCONSISTENCY IN %(name)s WRAPPER: " "self->data=%%p <= %(n)s_ptr=%%p < self->data+%(bytes)s=%%p\\n", self->data, %(n)s_ptr, self->data + %(bytes)s); self->%(n)s_ptr = %(n)s_ptr; ''' % (locals())) self.attr_format_list.append('O&') self.attr_init_list.append('\npyobj_to_%s, self->%s_ptr' % (ct,n)) self.as_tuple_format_list.append('O&') self.as_tuple_arg_list.append('\npyobj_from_%s, self->%s_ptr' % (ct, n)) self.getset_func_list.append('''\ static PyObject * %(otype)s_get_%(n)s(%(otype)s *self, void *closure) { return pyobj_from_%(ct)s(self->%(n)s_ptr); } static int %(otype)s_set_%(n)s(%(otype)s *self, PyObject *value, void *closure) { if (value == NULL) { PyErr_SetString(PyExc_TypeError, "Cannot delete %(name)s attribute %(n)s"); return -1; } if (pyobj_to_%(ct)s(value, self->%(n)s_ptr)) return 0; return -1; } ''' % (locals())) self.type_getseters_list.append('{"%(n)s",(getter)%(otype)s_get_%(n)s, (setter)%(otype)s_set_%(n)s,\n "component %(n)s",NULL},' % (locals())) if self.attr_init_list: self.attr_init_list.insert(0,'') if self.as_tuple_arg_list: self.as_tuple_arg_list.insert(0,'') self.init_func = self.ctype + '_init' self.type_method_list = [] self.type_method_list.append('{"as_tuple",(PyCFunction)%(otype)s_as_tuple,METH_NOARGS,\n "Return %(name)s components as tuple."},' % (self.__dict__)) self.use_stmt_list = [] self.type_decl_list = [] if isinstance(typedecl.parent, Module): self.use_stmt_list.append('use %s' % 
(typedecl.parent.name)) elif isinstance(typedecl.parent, (Subroutine, Function)): self.type_decl_list.append(typedecl.asfix()) else: raise NotImplementedError,'types declared in '+typedecl.parent.__class__.__name__ parent.apply_templates(self) return
{ "repo_name": "santisiri/popego", "path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/f2py/lib/py_wrap_type.py", "copies": "1", "size": "28411", "license": "bsd-3-clause", "hash": 3007326061185917400, "line_mean": 36.7304116866, "line_max": 157, "alpha_frac": 0.5922354018, "autogenerated": false, "ratio": 3.111829134720701, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4204064536520701, "avg_score": null, "num_lines": null }
"""All Python code for Five Weird Python Tricks presentation.""" # 1 # Tuple Unpacking tup = ('a', 'b', 'c') a, b, c = tup print(a) print(b) print(c) # Should be called sequence unpacking a, b, c = ['a', 'b', 'c'] print(a) print(b) print(c) a, b, c = 'abc' print(a) print(b) print(c) # Useful in many situations # Example: you expect a certain number of items from str.split() try: request = 'GET /login.html HTTP/1.1' method, uri, protocol = request.split() except ValueError: print('400 Bad Request!') # 2 # Iterate over a dictionary dictionary = {'a': 0, 'b': 1, 'c': 2, 'd': 3} # Not So Good: for key in dictionary: value = dictionary[key] print('{}: {}'.format(key, value)) # Much Better: for key, value in dictionary.items(): print('{}: {}'.format(key, value)) print(dictionary.items()) # 3 # Iterating over an ordered sequence (list, tuple, string, etc.) a_list = [5, 2, ('x', 'y'), 7.7, 9, 'c', 10, 2, 'a'] # Really Bad: for index in range(len(a_list)): print(a_list[index]) # Better: for item in a_list: print(item) # But what if you need the index AND the item? # Still Really Bad: for index in range(len(a_list)): print('Item at index {} is {}'.format(index, a_list[index])) # Best Ever: for index, item in enumerate(a_list): print('Item at index {} is {}'.format(index, item)) # 4 # Multi-Assignment a = 'a' b = 'b' # How could you swap these values? Use an intermediate? c = a a = b b = c print('a = {}'.format(a)) print('b = {}'.format(b)) # But why use 3 lines when you can use only one? a, b = b, a print('a = {}'.format(a)) print('b = {}'.format(b)) # Useful for swapping items in a list: a_list = [1, 2, 3, 4] a_list[0], a_list[1] = a_list[1], a_list[0] print(a_list) # Or even reversing a Linked List in one line! def reverse_linked_list(linked_list): """Reverse a Linked List and return it.""" node1 = linked_list.head node2 = linked_list.head.next while node2.next: node2.next, node2, node1 = node1, node2.next, node2 return node2 # 5. 
# Bools are Ints are Bools # OK: print('a string' * 2) # Not OK: try: print('a string' + 2) except TypeError: print('Not OK!') # What about bools? print(1 + False) print(1 + True) # *record scratch* Whaaaaat? print(10 * True) print(10 * False) # Bools are ints are bools are ints! # How is this useful? # Counting things based on a boolean property list_of_nums = [7, 2, 0, 3, 99, 11] insert_num = 10 insertion_index = sum(num < insert_num for num in list_of_nums) print(insertion_index) # Adding a string or not for n in range(16): divides_by_3 = n % 3 == 0 # Boolean statement divides_by_5 = n % 5 == 0 # Boolean statement fizz = 'Fizz' * divides_by_3 # str * bool buzz = 'Buzz' * divides_by_5 # str * bool fizzbuzz = fizz + buzz print(fizzbuzz or n) # One line for your mind: for n in range(16): print('Fizz' * (n % 3 == 0) + 'Buzz' * (n % 5 == 0) or n)
{ "repo_name": "WillWeatherford/five-weird-python-tricks", "path": "five_weird_python_tricks.py", "copies": "1", "size": "2995", "license": "mit", "hash": 2432657241896384500, "line_mean": 15.3661202186, "line_max": 64, "alpha_frac": 0.6103505843, "autogenerated": false, "ratio": 2.6909254267744833, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.3801276011074483, "avg_score": null, "num_lines": null }
# NOTE: 'python_to_notebook' used to be listed in __all__ but is not defined
# anywhere in this module, which made `from <module> import *` raise
# AttributeError. Only names that actually exist are exported.
__all__ = ['Notebook']

import json
import copy
import warnings

# Skeleton notebook in JSON format (nbformat v3); every new Notebook starts
# from a deep copy of this, with a single "%matplotlib inline" code cell.
skeleton_nb = """{
    "metadata": {
    "name":""
    },
    "nbformat": 3,
    "nbformat_minor": 0,
    "worksheets": [
        {
         "cells": [
            {
             "cell_type": "code",
             "collapsed": false,
             "input": [
                "%matplotlib inline"
             ],
             "language": "python",
             "metadata": {},
             "outputs": []
            }
           ],
         "metadata": {}
        }
    ]
}"""


class Notebook(object):
    """
    Notebook object for building an IPython notebook cell-by-cell.
    """

    def __init__(self):
        # Prototype for a code cell; deep-copied on every add_cell('code').
        self.cell_code = {
            'cell_type': 'code',
            'collapsed': False,
            'input': [
                '# Code Goes Here'
            ],
            'language': 'python',
            'metadata': {},
            'outputs': []
        }

        # Prototype for a markdown cell.
        self.cell_md = {
            'cell_type': 'markdown',
            'metadata': {},
            'source': [
                'Markdown Goes Here'
            ]
        }

        self.template = json.loads(skeleton_nb)
        # Maps the nbformat content key to its cell prototype, and the
        # user-facing cell_type argument to that content key.
        self.cell_type = {'input': self.cell_code, 'source': self.cell_md}
        self.valuetype_to_celltype = {'code': 'input', 'markdown': 'source'}

    def add_cell(self, value, cell_type='code'):
        """Add a notebook cell.

        Parameters
        ----------
        value : str
            Cell content.
        cell_type : {'code', 'markdown'}
            Type of content (default is 'code').
        """
        if cell_type in ['markdown', 'code']:
            key = self.valuetype_to_celltype[cell_type]
            cells = self.template['worksheets'][0]['cells']
            cells.append(copy.deepcopy(self.cell_type[key]))
            # assign value to the last cell
            cells[-1][key] = value
        else:
            warnings.warn('Ignoring unsupported cell type (%s)' % cell_type)

    def json(self):
        """Return a JSON representation of the notebook.

        Returns
        -------
        str
            JSON notebook.
        """
        return json.dumps(self.template, indent=2)


def test_notebook_basic():
    # A fresh Notebook must round-trip to exactly the skeleton.
    nb = Notebook()
    assert(json.loads(nb.json()) == json.loads(skeleton_nb))


def test_notebook_add():
    # Cells are appended after the initial "%matplotlib inline" cell
    # (hence indices 1 and 2 below).
    nb = Notebook()

    str1 = 'hello world'
    str2 = 'f = lambda x: x * x'

    nb.add_cell(str1, cell_type='markdown')
    nb.add_cell(str2, cell_type='code')

    d = json.loads(nb.json())
    cells = d['worksheets'][0]['cells']
    values = [c['input'] if c['cell_type'] == 'code' else c['source']
              for c in cells]

    assert values[1] == str1
    assert values[2] == str2

    assert cells[1]['cell_type'] == 'markdown'
    assert cells[2]['cell_type'] == 'code'


if __name__ == "__main__":
    import numpy.testing as npt
    npt.run_module_suite()
{ "repo_name": "bennlich/scikit-image", "path": "doc/ext/notebook.py", "copies": "44", "size": "3042", "license": "bsd-3-clause", "hash": 8867192661948211000, "line_mean": 23.1428571429, "line_max": 76, "alpha_frac": 0.4779750164, "autogenerated": false, "ratio": 3.945525291828794, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": null, "num_lines": null }
__all__ = ['PythonWrapperModule']

import re
import os
import sys

from parser.api import *
from wrapper_base import *
from py_wrap_type import *
from py_wrap_subprogram import *


class PythonWrapperModule(WrapperBase):
    # Collects wrapper fragments for one extension module and renders the
    # final C source (main_template) and Fortran glue (main_fortran_template).
    # add() walks parsed Fortran blocks and delegates to the PythonCAPI*
    # helpers, which append text to the *_list attributes named in
    # self.list_names; apply_attributes() interpolates them into the
    # templates.  NOTE: this is Python 2 code (backtick repr, old raise form).

    # C extension-module skeleton; each %(..._list)s slot is filled from the
    # correspondingly named list attribute.
    main_template = '''\
#ifdef __cplusplus
extern \"C\" {
#endif
#include "Python.h"
#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API
#include "numpy/arrayobject.h"
#include "numpy/arrayscalars.h"

%(header_list)s

%(typedef_list)s

%(extern_list)s

%(c_code_list)s

%(capi_code_list)s

%(objdecl_list)s

static PyObject *f2py_module;

static PyMethodDef f2py_module_methods[] = {
  %(module_method_list)s
  {NULL,NULL,0,NULL}
};

PyMODINIT_FUNC init%(modulename)s(void) {
  f2py_module = Py_InitModule("%(modulename)s", f2py_module_methods);
  import_array();
  if (PyErr_Occurred()) {
    PyErr_SetString(PyExc_ImportError, "failed to load array module.");
    goto capi_err;
  }
%(module_init_list)s
  return;
capi_err:
  if (!PyErr_Occurred()) {
    PyErr_SetString(PyExc_RuntimeError, "failed to initialize %(modulename)s module.");
  }
  return;
}
#ifdef __cplusplus
}
#endif
'''

    # Fortran glue source; a plain concatenation of collected fragments.
    main_fortran_template = '''\
%(fortran_code_list)s
'''

    def __init__(self, modulename):
        # Initialise all fragment accumulators empty; helpers registered via
        # add() append to them as blocks are processed.
        WrapperBase.__init__(self)
        self.modulename = modulename
        self.cname = 'f2py_' + modulename

        # Registries used by the PythonCAPI* helpers to avoid emitting the
        # same support code twice.
        self.defined_cpp_code = []
        self.defined_c_code = []
        self.defined_types = []
        self.defined_capi_codes = []

        self.header_list = []
        self.typedef_list = []
        self.extern_list = []
        self.objdecl_list = []
        self.c_code_list = []
        self.capi_code_list = []

        self.module_method_list = []
        self.module_init_list = []

        self.fortran_code_list = []

        # Names of the list attributes interpolated into the templates.
        self.list_names = ['header', 'typedef', 'extern', 'objdecl',
                           'c_code','capi_code','module_method','module_init',
                           'fortran_code']
        # Set to True once any Fortran-90-only construct (module, derived
        # type) is encountered.
        self.isf90 = False
        return

    def add(self, block):
        # Dispatch on the parsed block's type and hand it to the matching
        # wrapper generator; recurses into containers (source files, modules).
        if isinstance(block, BeginSource):
            for name, moduleblock in block.a.module.items():
                self.add(moduleblock)
            #for name, subblock in block.a.external_subprogram.items():
            #    self.add(subblock)
        elif isinstance(block, Subroutine):
            PythonCAPISubProgram(self, block)
        elif isinstance(block, Function):
            # Functions are wrapped indirectly: a Fortran subroutine wrapper
            # is generated for the function, then that wrapper is exposed.
            fcode = block.subroutine_wrapper_code()
            self.fortran_code_list.append(fcode)
            wrapper_block = block.subroutine_wrapper()
            PythonCAPISubProgram(self, wrapper_block)
        elif isinstance(block, Module):
            self.isf90 = True
            for name,declblock in block.a.type_decls.items():
                self.add(declblock)
            for name,subblock in block.a.module_subprogram.items():
                self.add(subblock)
        elif isinstance(block, tuple([TypeDecl]+declaration_type_spec)):
            if isinstance(block, (TypeDecl, TypeStmt)):
                self.isf90 = True
            PythonCAPIType(self, block)
        else:
            # Python 2 raise syntax with backtick repr of the class name.
            raise NotImplementedError,`block.__class__.__name__`
        return

    def c_code(self):
        # Render the complete C extension source.
        return self.apply_attributes(self.main_template)

    def fortran_code(self):
        # Render the accompanying Fortran glue source.
        return self.apply_attributes(self.main_fortran_template)
{ "repo_name": "santisiri/popego", "path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/f2py/lib/py_wrap.py", "copies": "1", "size": "3347", "license": "bsd-3-clause", "hash": -8553142543168349000, "line_mean": 25.1484375, "line_max": 87, "alpha_frac": 0.6038243203, "autogenerated": false, "ratio": 3.5380549682875264, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4641879288587526, "avg_score": null, "num_lines": null }
# Fixed: __all__ was the bare string 'qkids_score', which `from ... import *`
# iterates character-by-character and fails on; it must be a list/tuple.
__all__ = ['qkids_score']

from athlib import parse_hms, normalize_event_code, PAT_RUN
from typing import Union


def qkids_score(competition_type: str, event: str, perf: Union[str, int, float]) -> int:
    """Return the QuadKids score (clamped to 10..100) for one performance.

    :param competition_type: competition name or table code (e.g.
        'QuadKids Primary' or 'QKPRI'); spaces and case are ignored.
    :param event: event code, normalized via athlib (e.g. '75', 'LJ', '4x100').
    :param perf: the performance -- a time (h:m:s string or seconds) for
        track events, a numeric distance for jumps/throws.
    :raises ValueError: if the competition type or the event has no table row.
    """
    # Normalise the competition name, then map friendly names to table codes.
    competition_type = competition_type.replace(' ', '').upper()
    if competition_type in _compTypeMap:
        competition_type = _compTypeMap[competition_type]
    table = _qkidsTables.get(competition_type, None)
    if not table:
        raise ValueError('cannot find QuadKids Table for competition type %r' % competition_type)
    event = normalize_event_code(event)
    row = table.get(event, None)
    if not row:
        # Fixed duplicated word in the original message ('find find').
        raise ValueError('cannot find data for QuadKids Table[%r][%r]' % (competition_type, event))
    # row[0] is the per-point step, row[1] the 10-point baseline performance.
    # For track events (PAT_RUN) lower is better, so the delta is inverted.
    delta = float((row[1] - parse_hms(perf)) if PAT_RUN.match(event) else (float(perf) - row[1]))
    # 1e-6 guards against float representation error before int() truncates.
    v = int(1e-6 + delta / row[0] + 10)
    return max(10, min(v, 100))


# start qkids tables created by qkids-process.py Thu Feb 27 16:47:32 2020
_qkidsTables = {
    'QKWL': {
        '75': [0.1, 16.0, 7.0],
        '600': [1, 180, 90],
        'LJ': [0.03, 2.2, 4.9],
        'OT': [0.5, 5.0, 50.0],
        '4x100': [0.5, 94.0, 49.0]
    },
    'QKWLU13': {
        '70H': [0.1, 20.0, 11.0],
        '800': [1, 230, 140],
        'LJ': [0.04, 2.0, 5.6],
        'SP': [0.1, 2.0, 11.0],
        '4x100': [0.5, 94.0, 49.0]
    },
    'QKSEC': {
        '100': [0.1, 19.5, 10.5],
        '800': [1, 230, 140],
        'LJ': [0.05, 2.0, 6.5],
        'OT': [0.6, 6.0, 60.0],
        '4x100': [0.5, 94.0, 49.0]
    },
    'QKPRI': {
        '75': [0.1, 16.0, 7.0],
        '600': [1, 180, 90],
        'SLJ': [0.02, 1.02, 2.82],
        'OT': [0.5, 5.0, 50.0],
        '4x100': [0.5, 94.0, 49.0]
    },
    'QKSTA': {
        '50': [0.1, 12.0, 3.0],
        '400': [1, 125, 35],
        'SLJ': [0.03, 0.75, 3.0],
        'OT': [0.5, 2.5, 47.5],
        '4x100': [0.5, 94.0, 49.0]
    },
    'QKCLUB': {
        '75': [0.1, 16.0, 7.0],
        '600': [1, 180, 90],
        'LJ': [0.03, 2.2, 4.9],
        'OT': [0.5, 5.0, 50.0],
        '4x100': [0.5, 94.0, 49.0]
    },
    'QKCLU13': {
        '100': [0.1, 19.5, 10.5],
        '800': [1, 230, 140],
        'LJ': [0.05, 2.0, 6.5],
        'OT': [0.6, 6.0, 60.0],
        '4x100': [0.5, 94.0, 49.0]
    },
    'QKCLU9': {
        '50': [0.1, 12.0, 3.0],
        '400': [1, 130, 40],
        'SLJ': [0.02, 0.9, 2.7],
        'OT': [0.5, 0.0, 45.0],
        '4x100': [0.5, 94.0, 49.0]
    },
    'QKPRE': {
        '50': [0.1, 13.0, 4.0],
        '300': [1, 135, 45],
        'SLJ': [0.03, 0.5, 3.2],
        'OT': [0.5, 0.0, 45.0],
        '4x100': [0.5, 99.0, 54.0]
    }
}
# '75H' is scored identically to '70H' in the U13 Wessex League table.
_qkidsTables['QKWLU13']['75H'] = _qkidsTables['QKWLU13']['70H']

# Friendly competition names (spaces removed, upper-cased) -> table codes.
_compTypeMap = {'WESSEXLEAGUE': 'QKWL',
                'WESSEXLEAGUE(U13)': 'QKWLU13',
                'QUADKIDSSECONDARY': 'QKSEC',
                'QUADKIDSPRIMARY': 'QKPRI',
                'QUADKIDSSTART': 'QKSTA',
                'QUADKIDSCLUB': 'QKCLUB',
                'QUADKIDSCLUBU13': 'QKCLU13',
                'QUADKIDSCLUBU9': 'QKCLU9',
                'QUADKIDSPRE-START': 'QKPRE'}
# end qkids tables
{ "repo_name": "openath/athlib", "path": "athlib/qkids_score.py", "copies": "1", "size": "2803", "license": "apache-2.0", "hash": 1763166600404240600, "line_mean": 29.4673913043, "line_max": 103, "alpha_frac": 0.5073135926, "autogenerated": false, "ratio": 2.10119940029985, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8086444359994305, "avg_score": 0.004413726581109013, "num_lines": 92 }
__all__ = ['qList_to_pd_series', 'qtable_to_dataframe', 'qTempList_to_pd_tseries', 'get_q_status', 'convert_qdata']

from collections import OrderedDict

import pandas
import numpy

from qpython.qcollection import QTable, QKeyedTable, QDictionary, QList, QTemporalList
from qpython.qtype import qnull, QMONTH, QDATE, QDATETIME, QMINUTE, QSECOND, QTIME, QTIMESTAMP, QTIMESPAN, QNULLMAP

# Offsets from the Unix epoch (1970-01-01) to the kdb+ epoch (2000-01-01),
# expressed in each temporal resolution used below.
_EPOCH_QMONTH = numpy.timedelta64(360, 'M')
_EPOCH_QDATE = numpy.timedelta64(10957, 'D')
_EPOCH_QDATETIME = numpy.timedelta64(946684800000, 'ms')
_EPOCH_TIMESTAMP = numpy.timedelta64(946684800000000000, 'ns')

# q null singletons per temporal type. NOTE(review): these names are not
# referenced below (null handling goes through QNULLMAP); presumably kept
# for external use or reference -- confirm before removing.
_QMONTH_NULL = qnull(QMONTH)
_QDATE_NULL = qnull(QDATE)
_QDATETIME_NULL = qnull(QDATETIME)
_QMINUTE_NULL = qnull(QMINUTE)
_QSECOND_NULL = qnull(QSECOND)
_QTIME_NULL = qnull(QTIME)
_QTIMESTAMP_NULL = qnull(QTIMESTAMP)
_QTIMESPAN_NULL = qnull(QTIMESPAN)


def qtable_to_dataframe(q_table):
    """
    Converts a QTable into a pandas.DataFrame with type level conversions
    occurring as well:
        dates -> datetime64 w/ qnulls replaced with NaT's
        times -> timedelta64 w/ qnulls replaced with NaT's
        other types -> qnulls replaced with NaN's

    >>> data = q('trade:([]date:`date$();time:`time$();sym:`symbol$();price:`float$();size:`int$())')
    >>> data = q('`trade insert(2000.01.01;00:00:00.000;`a;10.75;100)')
    >>> data = q('`trade insert(0Nd;0Nt;`;0n;0N)')
    >>> data = q('trade')
    >>> print qtable_to_dataframe(data)
            date    time  sym  price  size
    0 2000-01-01  0 days    a  10.75   100
    1        NaT     NaT  NaN    NaN   NaN

    >>> data = q('kt:(flip (enlist `eid)!enlist 0n 1002)!flip `name`iq!(`Dent`Beeblebrox;98 42)')
    >>> data = q('kt')
    >>> print qtable_to_dataframe(data)
                name  iq
    eid
    NaN         Dent  98
    1002  Beeblebrox  42

    >>> data = q('ktc:([lname:`Dent``Prefect; fname:`Arthur`Zaphod`]; iq:98 42 126)')
    >>> data = q('ktc')
    >>> print qtable_to_dataframe(data)
                    iq
    lname   fname
    Dent    Arthur   98
            Zaphod   42
    Prefect          126

    :param q_table: Input qTable
    :return: pandas.DataFrame
    :raises ValueError: Only QTable and QKeyedTable are supported
    """
    if isinstance(q_table, QTable):
        cols = _qtable_to_series_odict(q_table)
        df = pandas.DataFrame(cols)
    elif isinstance(q_table, QKeyedTable) or isinstance(q_table, QDictionary):
        # Convert value columns first, then key columns on top.
        cols = _qtable_to_series_odict(q_table.values)
        cols.update(_qtable_to_series_odict(q_table.keys))
        # For now, this seems to be the best option for dealing with keyed tables...
        # Basically you stuff all of the columns into the main frame and set the
        # keyed columns to be the index. This allows for the trivial addition of
        # multiple same level indexes
        df = pandas.DataFrame(cols).set_index([i for i in q_table.keys.dtype.names])
    else:
        raise ValueError('Only QTable and QKeyedTable are supported')
    # Converted Dataframe
    return df


def _qtable_to_series_odict(q_table):
    # Converted Series Columns
    """
    Utility function for qtable_to_dataframe

    This has no type awareness for QKeyedTables vs QTables -- assumes
    everything is a QTable

    :param q_table: Input Table
    :return: OrderedDict of converted columns
    """
    cols = OrderedDict()
    for col in q_table.dtype.names:
        q_type = q_table.meta[col]
        # Temporal columns get epoch/NaT handling; everything else only
        # needs qnull -> NaN replacement.
        if q_type in [QMINUTE, QSECOND, QTIME, QTIMESPAN, QMONTH, QDATE, QDATETIME, QTIMESTAMP]:
            cols[col] = qTempList_to_pd_tseries(q_table[col], q_type)
        else:
            cols[col] = qList_to_pd_series(q_table[col], q_type)
    return cols


def qTempList_to_pd_tseries(q_list, q_type):
    """
    Returns new converted pandas.TimeSeries using q_list's data.

    qnulls will be replaced with NaT

    This function may take any q temporal type as an input and as such will
    return the appropriate series of either timedelta64's or datetime64's

    :param q_list: Input Data
    :param q_type: Input Data's q_type
    :return: pandas.TimeSeries
    :raise TypeError: input datatype was not a qtemporal
    """
    # Absolute types (timestamp/date/month/datetime) carry an epoch offset;
    # duration types (time/minute/second/timespan) leave offset as None.
    offset = None
    if q_type == QTIMESTAMP:
        res = 'ns'
        offset = _EPOCH_TIMESTAMP
    elif q_type == QDATE:
        res = 'D'
        offset = _EPOCH_QDATE
    elif q_type == QMONTH:
        res = 'M'
        offset = _EPOCH_QMONTH
    elif q_type == QDATETIME:
        res = 'ms'
        offset = _EPOCH_QDATETIME
    elif q_type == QTIME:
        res = 'ms'
    elif q_type == QMINUTE:
        res = 'm'
    elif q_type == QSECOND:
        res = 's'
    elif q_type == QTIMESPAN:
        res = 'ns'
    else:
        raise TypeError("invalid q_type submitted: {}".format(q_type))
    # An offset means the target is an absolute datetime64; otherwise the
    # values are durations (timedelta64).
    if offset:
        dtype = 'datetime64[{}]'.format(res)
        null_val = numpy.datetime64('NaT')
    else:
        null_val = numpy.timedelta64('NaT')
        dtype = 'timedelta64[{}]'.format(res)
    # QNULLMAP[q_type][2] is used here as a callable that flags null entries.
    null_func = QNULLMAP[q_type][2]
    nulls = null_func(q_list)
    out = numpy.empty_like(q_list, dtype=dtype)
    # Convert non-null entries, shift absolute types to the Unix epoch,
    # then stamp NaT into the null slots.
    out[~nulls] = q_list[~nulls].astype(dtype)
    if offset:
        out[~nulls] += offset
    out[nulls] = null_val
    # NOTE(review): pandas.TimeSeries is a legacy (pre-0.20) alias of
    # pandas.Series; this module targets an old pandas -- confirm before
    # upgrading the dependency.
    return pandas.TimeSeries(data=out)


def qList_to_pd_series(q_list, q_type):
    """
    Returns a new pandas.Series from q_list with values converted from
    qnulls to NaN

    :param q_list: Input data
    :param q_type: q_list's original q_type
    :return: pandas.Series w/ nan's for qnulls
    """
    # QNULLMAP[q_type][1] is used here as the null value for this q type.
    null = QNULLMAP[q_type][1]
    return pandas.Series(data=q_list).replace(null, numpy.NaN)


def get_q_status(q_conn):
    # Snapshot of the connection's state as (label, value-string) pairs,
    # ready for display.
    status = (
        ('Is Connected', str(q_conn.is_connected())),
        ('Protocol Version', str(q_conn.protocol_version)),
        ('Host', str(q_conn.host)),
        ('Port', str(q_conn.port)),
        ('Timeout', str(q_conn.timeout))
    )
    return status


def convert_qdata(data):
    # Render an arbitrary q result as an HTML fragment: tables become a
    # full HTML table, everything else is wrapped in <samp>.
    if isinstance(data, QTable) or isinstance(data, QKeyedTable) or isinstance(data, QDictionary):
        html = qtable_to_html(data)
    elif isinstance(data, QList):
        html = "<samp>{}</samp>".format(str(data.tolist()))
    elif isinstance(data, QTemporalList):
        html = "<samp>{}</samp>".format(str(convert_qtemporal(data)))
    else:
        html = "<samp>{}</samp>".format(str(data))
    return html


def convert_qtemporal(data):
    # Stringify the raw value of each QTemporal element for display.
    sane = [str(i.raw) for i in data]
    return sane


def qtable_to_html(q_table):
    # Cap output at 100 rows and swap pandas' default table attributes for
    # Bootstrap classes.
    html = qtable_to_dataframe(q_table).to_html(
        max_rows=100,
        escape=False).replace('border="1" class="dataframe"',
                              'class="table table-striped"')
    return html
{ "repo_name": "buckie/flask-kdb", "path": "flask_kdb/utils.py", "copies": "1", "size": "6618", "license": "mit", "hash": -7368448609734003000, "line_mean": 31.9303482587, "line_max": 115, "alpha_frac": 0.6287398005, "autogenerated": false, "ratio": 3.2173067574137093, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4346046557913709, "avg_score": null, "num_lines": null }
__all__ = ("QueryFieldList",) class QueryFieldList: """Object that handles combinations of .only() and .exclude() calls""" ONLY = 1 EXCLUDE = 0 def __init__( self, fields=None, value=ONLY, always_include=None, _only_called=False ): """The QueryFieldList builder :param fields: A list of fields used in `.only()` or `.exclude()` :param value: How to handle the fields; either `ONLY` or `EXCLUDE` :param always_include: Any fields to always_include eg `_cls` :param _only_called: Has `.only()` been called? If so its a set of fields otherwise it performs a union. """ self.value = value self.fields = set(fields or []) self.always_include = set(always_include or []) self._id = None self._only_called = _only_called self.slice = {} def __add__(self, f): if isinstance(f.value, dict): for field in f.fields: self.slice[field] = f.value if not self.fields: self.fields = f.fields elif not self.fields: self.fields = f.fields self.value = f.value self.slice = {} elif self.value is self.ONLY and f.value is self.ONLY: self._clean_slice() if self._only_called: self.fields = self.fields.union(f.fields) else: self.fields = f.fields elif self.value is self.EXCLUDE and f.value is self.EXCLUDE: self.fields = self.fields.union(f.fields) self._clean_slice() elif self.value is self.ONLY and f.value is self.EXCLUDE: self.fields -= f.fields self._clean_slice() elif self.value is self.EXCLUDE and f.value is self.ONLY: self.value = self.ONLY self.fields = f.fields - self.fields self._clean_slice() if "_id" in f.fields: self._id = f.value if self.always_include: if self.value is self.ONLY and self.fields: if sorted(self.slice.keys()) != sorted(self.fields): self.fields = self.fields.union(self.always_include) else: self.fields -= self.always_include if getattr(f, "_only_called", False): self._only_called = True return self def __bool__(self): return bool(self.fields) def as_dict(self): field_list = {field: self.value for field in self.fields} if self.slice: field_list.update(self.slice) if self._id 
is not None: field_list["_id"] = self._id return field_list def reset(self): self.fields = set() self.slice = {} self.value = self.ONLY def _clean_slice(self): if self.slice: for field in set(self.slice.keys()) - self.fields: del self.slice[field]
{ "repo_name": "MongoEngine/mongoengine", "path": "mongoengine/queryset/field_list.py", "copies": "1", "size": "2964", "license": "mit", "hash": -4427907386710486500, "line_mean": 32.6818181818, "line_max": 82, "alpha_frac": 0.5448717949, "autogenerated": false, "ratio": 3.9838709677419355, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00013691128148959474, "num_lines": 88 }
__all__ = ('QueryFieldList',)


class QueryFieldList(object):
    """Object that handles combinations of .only() and .exclude() calls.

    Python 2 variant (note ``__nonzero__`` rather than ``__bool__``).
    Instances combine with ``+``, folding the right operand's field spec
    into the left one according to the ONLY/EXCLUDE rules below.
    """

    ONLY = 1
    EXCLUDE = 0

    def __init__(self, fields=None, value=ONLY, always_include=None,
                 _only_called=False):
        """The QueryFieldList builder

        :param fields: A list of fields used in `.only()` or `.exclude()`
        :param value: How to handle the fields; either `ONLY` or `EXCLUDE`
        :param always_include: Any fields to always_include eg `_cls`
        :param _only_called: Has `.only()` been called?  If so its a set of
            fields otherwise it performs a union.
        """
        self.value = value
        self.fields = set(fields or [])
        self.always_include = set(always_include or [])
        self._id = None
        self._only_called = _only_called
        self.slice = {}

    def __add__(self, f):
        # A dict value is a $slice projection: record it per field.
        if isinstance(f.value, dict):
            for field in f.fields:
                self.slice[field] = f.value
            if not self.fields:
                self.fields = f.fields
        # Nothing accumulated yet: adopt the incoming spec wholesale.
        elif not self.fields:
            self.fields = f.fields
            self.value = f.value
            self.slice = {}
        # ONLY + ONLY: union if .only() was called before, else replace.
        elif self.value is self.ONLY and f.value is self.ONLY:
            self._clean_slice()
            if self._only_called:
                self.fields = self.fields.union(f.fields)
            else:
                self.fields = f.fields
        # EXCLUDE + EXCLUDE: exclusions accumulate.
        elif self.value is self.EXCLUDE and f.value is self.EXCLUDE:
            self.fields = self.fields.union(f.fields)
            self._clean_slice()
        # ONLY + EXCLUDE: drop the newly excluded fields.
        elif self.value is self.ONLY and f.value is self.EXCLUDE:
            self.fields -= f.fields
            self._clean_slice()
        # EXCLUDE + ONLY: switch to ONLY, keeping just the new fields.
        elif self.value is self.EXCLUDE and f.value is self.ONLY:
            self.value = self.ONLY
            self.fields = f.fields - self.fields
            self._clean_slice()

        if '_id' in f.fields:
            self._id = f.value

        if self.always_include:
            if self.value is self.ONLY and self.fields:
                # Don't force fields in when the projection is slices only.
                if sorted(self.slice.keys()) != sorted(self.fields):
                    self.fields = self.fields.union(self.always_include)
            else:
                self.fields -= self.always_include

        if getattr(f, '_only_called', False):
            self._only_called = True
        return self

    def __nonzero__(self):
        # Py2 truth value: non-empty when any fields are tracked.
        return bool(self.fields)

    def as_dict(self):
        # Render the projection as a MongoDB field-spec dict; per-field
        # slices override the plain include/exclude marker.
        field_list = dict((field, self.value) for field in self.fields)
        if self.slice:
            field_list.update(self.slice)
        if self._id is not None:
            field_list['_id'] = self._id
        return field_list

    def reset(self):
        # Clear back to an empty ONLY projection.
        self.fields = set([])
        self.slice = {}
        self.value = self.ONLY

    def _clean_slice(self):
        # Drop slice entries whose field left the projection.
        if self.slice:
            for field in set(self.slice.keys()) - self.fields:
                del self.slice[field]
{ "repo_name": "yarneo/mongoengine", "path": "mongoengine/queryset/field_list.py", "copies": "28", "size": "2968", "license": "mit", "hash": 8796353404320243000, "line_mean": 33.9176470588, "line_max": 89, "alpha_frac": 0.5485175202, "autogenerated": false, "ratio": 3.9785522788203753, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0002724623986140641, "num_lines": 85 }
__all__ = ['Query'] class Query: ''' This class represents a Query object. ''' __slots__ = ['mglo', 'crender', 'ctx', 'extra'] def __init__(self): self.mglo = None #: Internal representation for debug purposes only. self.crender = None #: ConditionalRender: Can be used in a ``with`` statement. self.ctx = None #: The context this object belongs to self.extra = None #: Any - Attribute for storing user defined objects raise TypeError() def __repr__(self): return '<Query>' def __enter__(self): self.mglo.begin() return self def __exit__(self, *args): self.mglo.end() @property def samples(self) -> int: ''' int: The number of samples passed. ''' return self.mglo.samples @property def primitives(self) -> int: ''' int: The number of primitives generated. ''' return self.mglo.primitives @property def elapsed(self) -> int: ''' int: The time elapsed in nanoseconds. ''' return self.mglo.elapsed
{ "repo_name": "cprogrammer1994/ModernGL", "path": "moderngl/query.py", "copies": "1", "size": "1152", "license": "mit", "hash": -4318670939890444000, "line_mean": 22.04, "line_max": 87, "alpha_frac": 0.5373263889, "autogenerated": false, "ratio": 4.189090909090909, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.522641729799091, "avg_score": null, "num_lines": null }
__all__ = ['QueryResult', 'StatementQueryResult', 'AgentQueryResult']

from typing import Iterable as TypeIterable, Iterable, Union as TypeUnion


class QueryResult(object):
    """The generic result of a query.

    This class standardizes the results of queries to the readonly database.

    Parameters
    ----------
    results : Iterable
        The results of the query keyed by unique IDs (mk_hash for PA
        Statements, IDs for Raw Statements, etc.)
    limit : int
        The limit that was applied to this query.
    offset : int
        The offset that was applied to this query.
    offset_comp : int
        The next offset that would be appropriate if this is a paging query.
    evidence_counts : dict
        The count of evidence for each element.
    belief_scores : dict
        The belief score of each element.
    source_counts : dict
        The source counts for each element.
    query_json : dict
        A description of the query that was used.
    result_type : str
        The type of the result, e.g. 'agent', 'relation', etc.

    Attributes
    ----------
    results : Iterable
        The results of the query keyed by unique IDs (mk_hash for PA
        Statements, IDs for Raw Statements, etc.)
    limit : int
        The limit that was applied to this query.
    next_offset : int
        The next offset that would be appropriate if this is a paging query.
    evidence_counts : dict
        The count of evidence for each element.
    belief_scores : dict
        The belief score of each element.
    source_counts : dict
        The source counts for each element.
    query_json : dict
        A description of the query that was used.
    """
    def __init__(self, results: TypeIterable, limit: int, offset: int,
                 offset_comp: int, evidence_counts: dict,
                 belief_scores: dict, source_counts: dict, query_json: dict,
                 result_type: str):
        if not isinstance(results, Iterable) or isinstance(results, str):
            raise ValueError("Input `results` is expected to be an iterable, "
                             "and not a string.")
        self.results = results
        self.evidence_counts = evidence_counts
        self.belief_scores = belief_scores
        # Aggregates derived from the per-element counts/scores.
        self.total_evidence = sum(self.evidence_counts.values())
        if self.belief_scores:
            self.max_belief = max(self.belief_scores.values())
        else:
            self.max_belief = None
        self.source_counts = source_counts
        self.limit = limit
        self.offset = offset
        self.result_type = result_type
        self.offset_comp = offset_comp
        # If fewer rows came back than the limit, there is no next page.
        if limit is None or offset_comp < limit:
            self.next_offset = None
        else:
            self.next_offset = (0 if offset is None else offset) + offset_comp
        self.query_json = query_json

    @classmethod
    def empty(cls, empty_res, limit, offset, query_json, result_type):
        """Build a result with no content, preserving the query metadata."""
        return cls(empty_res, limit, offset, 0, {}, {}, {}, query_json,
                   result_type)

    @classmethod
    def from_json(cls, json_dict) \
            -> TypeUnion['QueryResult', 'StatementQueryResult',
                         'AgentQueryResult']:
        """Reconstruct a result object of the appropriate subclass."""
        # Build a StatementQueryResult or AgentQueryResult if appropriate
        if json_dict['result_type'] == 'statements':
            return StatementQueryResult.from_json(json_dict)
        elif json_dict['result_type'] == 'agents':
            return AgentQueryResult.from_json(json_dict)

        return cls._parse_json(json_dict)

    @classmethod
    def _parse_json(cls, json_dict):
        # Filter out some calculated values.
        next_offset = json_dict.pop('next_offset', None)
        total_evidence = json_dict.pop('total_evidence', None)

        # Build the class
        nc = cls(**json_dict)

        # Convert result keys into integers, if appropriate
        if nc.result_type in ['statements', 'interactions']:
            nc.results = {int(k): v for k, v in nc.results.items()}

        if nc.result_type in ['statements', 'interactions', 'hashes']:
            nc.evidence_counts = {int(k): v
                                  for k, v in nc.evidence_counts.items()}
            nc.belief_scores = {int(k): v
                                for k, v in nc.belief_scores.items()}
            nc.source_counts = {int(k): v
                                for k, v in nc.source_counts.items()}

        # Check calculated values.
        if nc.next_offset is None:
            nc.next_offset = next_offset
        else:
            assert nc.next_offset == next_offset, "Offsets don't match."
        assert nc.total_evidence == total_evidence,\
            "Evidence counts don't match."

        return nc

    def json(self) -> dict:
        """Return the JSON representation of the results."""
        if not isinstance(self.results, dict) \
                and not isinstance(self.results, list):
            json_results = list(self.results)
        elif isinstance(self.results, dict):
            # JSON object keys must be strings.
            json_results = {str(k): v for k, v in self.results.items()}
        else:
            json_results = self.results
        return {'results': json_results, 'limit': self.limit,
                'offset': self.offset, 'next_offset': self.next_offset,
                'query_json': self.query_json,
                'evidence_counts': self.evidence_counts,
                'belief_scores': self.belief_scores,
                'source_counts': self.source_counts,
                'total_evidence': self.total_evidence,
                'result_type': self.result_type,
                'offset_comp': self.offset_comp}


class StatementQueryResult(QueryResult):
    """The result of a query to retrieve Statements.

    This class encapsulates the results of a search for statements in the
    database. This standardizes the results of such searches.

    Parameters
    ----------
    results : dict
        The results of the query.
    limit : int
        The content limit that was used in making the query.
    offset : int
        The offset that was used in making the query.
    evidence_counts : dict
        The count of evidence available for each element.
    belief_scores : dict
        The score for each element.
    returned_evidence : int
        The count of evidence that was returned in this query.
    source_counts : dict
        The counts of evidence from each source for each element.
    query_json : dict
        The JSON representation of the query that was used.

    Attributes
    ----------
    results : dict
        The results of the query keyed by unique IDs (mk_hash for PA
        Statements, IDs for Raw Statements, etc.)
    limit : int
        The limit that was applied to this query.
    query_json : dict
        A description of the query that was used.
    """
    def __init__(self, results: dict, limit: int, offset: int,
                 evidence_counts: dict, belief_scores: dict,
                 returned_evidence: int, source_counts: dict,
                 query_json: dict):
        super(StatementQueryResult, self).__init__(results, limit,
                                                   offset, len(results),
                                                   evidence_counts,
                                                   belief_scores,
                                                   source_counts, query_json,
                                                   'statements')
        self.returned_evidence = returned_evidence

    @classmethod
    def empty(cls, limit: int, offset: int, query_json: dict):
        """Build an empty statement result for the given query."""
        return cls({}, limit, offset, {}, {}, 0, {}, query_json)

    def json(self) -> dict:
        """Get the JSON dump of the results."""
        json_dict = super(StatementQueryResult, self).json()
        json_dict.update({'returned_evidence': self.returned_evidence})
        return json_dict

    @classmethod
    def from_json(cls, json_dict):
        """Reconstruct a StatementQueryResult from its JSON dump."""
        json_dict = json_dict.copy()
        result_type = json_dict.pop('result_type')
        # `offset_comp` is re-derived from len(results) by __init__.
        json_dict.pop('offset_comp', None)
        if result_type != 'statements':
            raise ValueError(f'Invalid result type {result_type} for this '
                             f'result class {cls}')
        nc = super(StatementQueryResult, cls)._parse_json(json_dict)
        return nc

    def statements(self) -> list:
        """Get a list of Statements from the results."""
        # Imported lazily so this module can be loaded (and its JSON
        # round-tripping used) without the full INDRA statement machinery.
        from indra.statements import stmts_from_json
        assert isinstance(self.results, dict), "Results must be a dictionary."
        return stmts_from_json(list(self.results.values()))


class AgentQueryResult(QueryResult):
    """The result of a query for agent JSONs."""
    def __init__(self, results: dict, limit: int, offset: int, num_rows: int,
                 complexes_covered: set, evidence_counts: dict,
                 belief_scores: dict, source_counts: dict, query_json: dict):
        super(AgentQueryResult, self).__init__(results, limit, offset,
                                               num_rows, evidence_counts,
                                               belief_scores, source_counts,
                                               query_json, 'agents')
        self.complexes_covered = complexes_covered

    @classmethod
    def empty(cls, limit, offset, query_json):
        """Build an empty agent result for the given query."""
        return cls({}, limit, offset, 0, set(), {}, {}, {}, query_json)

    def json(self) -> dict:
        json_dict = super(AgentQueryResult, self).json()
        # Hashes may exceed JSON-safe integer range, so dump them as strings.
        json_dict['complexes_covered'] = [str(h)
                                          for h in self.complexes_covered]
        return json_dict

    @classmethod
    def from_json(cls, json_dict):
        """Reconstruct an AgentQueryResult from its JSON dump."""
        json_dict = json_dict.copy()
        result_type = json_dict.pop('result_type')
        if result_type != 'agents':
            raise ValueError(f'Invalid result type {result_type} for this '
                             f'result class {cls}')
        # Fix: json() serializes the constructor's `num_rows` under the
        # generic 'offset_comp' key; map it back so `cls(**json_dict)` does
        # not fail on an unexpected keyword.
        json_dict['num_rows'] = json_dict.pop('offset_comp')
        nc = super(AgentQueryResult, cls)._parse_json(json_dict)
        # Hashes were serialized as strings; restore them to an int set.
        nc.complexes_covered = {int(h) for h in nc.complexes_covered}
        # Fix: the reconstructed object must be returned (previously this
        # method fell off the end and returned None).
        return nc
{ "repo_name": "johnbachman/indra", "path": "indra/sources/indra_db_rest/query_results.py", "copies": "3", "size": "9836", "license": "bsd-2-clause", "hash": 6001363268759823000, "line_mean": 39.9833333333, "line_max": 81, "alpha_frac": 0.5905856039, "autogenerated": false, "ratio": 4.162505289885739, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00035945498343872324, "num_lines": 240 }
__all__ = ['query'] from In.db.database_controller import * #from In.db.database_model_wrapper import * from In.db.db_engine_base import * from In.db.query import * @IN.register('sql', type = 'theme_format') class SQL(In.themer.TEXT): '''SQL Theme fotmat class ''' @IN.register('DBException', type = 'exception_handler') class DBException(Exception): '''Base Exception for all database related errors. ''' #def handle_exception(self, context): #pass @IN.register('DBConnectionFailedException', type = 'exception_handler') class DBConnectionFailedException(DBException): '''Exception DBConnectFailedException. ''' @IN.register('DBObjectUnknownTypeException', type = 'exception_handler') class DBObjectUnknownTypeException(DBException): '''Exception DBObjectUnknownTypeException. ''' @IN.register('DBEngineInitializationException', type = 'exception_handler') class DBEngineInitializationException(DBException): '''Exception DBEngineInitializationException. ''' class DBTableUnknownTypeException(DBException): '''Exception DBTableUnknownTypeException. ''' class DBArgumentException(DBException): '''Exception DBArgumentException. ''' class DBColumnNotFoundException(DBException): '''Exception DBColumnNotFoundException. ''' @IN.register def register(): return { 'class_members' : { 'db_table' : { '__query_builder__' : { 'name' : '__query_builder__', 'instance' : 'instance', # class, instance, perobject, whether the member is instance or class }, '__db_controller__' : { 'name' : '__db_controller__', 'instance' : 'instance', # class, instance, perobject, whether the member is instance or class }, }, }, } # default columns columns = { 'id' : { 'type' : 'bigserial', }, 'type' : { 'type' : 'varchar', 'length' : 64, }, 'status' : { 'type' : 'smallint', }, 'created' : { 'type' : 'timestamp', }, 'changed' : { 'type' : 'timestamp', }, }
{ "repo_name": "vinoth3v/In", "path": "In/db/__init__.py", "copies": "1", "size": "2014", "license": "apache-2.0", "hash": -8627469080494509000, "line_mean": 22.5609756098, "line_max": 99, "alpha_frac": 0.6569016882, "autogenerated": false, "ratio": 3.425170068027211, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4582071756227211, "avg_score": null, "num_lines": null }
__all__ = ['Query', 'Tag', 'And', 'Or', 'Not'] class Query(object): """ Taxon is queried by ``tuple`` objects that represent the syntax tree of the query. These tuple queries can become unwieldy quickly, so the query DSL is provided as the main way to query Taxon. All subclasses of ``Query`` implement a ``freeze`` method which builds the tuple representation of the query. """ def __init__(self, expr): self.expr = expr self.children = [] def __and__(self, other): return And(self, other) def __or__(self, other): return Or(self, other) def __invert__(self): return Not(self) @classmethod def coerce(self, expr): "Returns the ``Query`` representation of the expression." if isinstance(expr, Query): return expr elif isinstance(expr, basestring): return Tag(expr) else: raise TypeError("Expected %s or string, got %s" % (Query.__name__, expr)) def freeze(self): "Returns a hashable representation of the query expression." return (self.op, tuple(c.freeze() for c in self.children)) class Tag(Query): "Returns the items with the specified tag." def freeze(self): return ("tag", [self.expr]) class And(Query): "Returns the items matched by all ``Query`` expressions." op = "and" def __init__(self, *exprs): self.children = [Query.coerce(e) for e in exprs] class Or(Query): "Returns the items matched by any or all of the ``Query`` expressions." op = "or" def __init__(self, *exprs): self.children = [Query.coerce(e) for e in exprs] class Not(Query): "Returns the items **not** matched by any of the ``Query`` expressions." def __init__(self, expr): self.expr = Query.coerce(expr) def freeze(self): return ("not", tuple([self.expr.freeze()]))
{ "repo_name": "jdp/taxon", "path": "taxon/query.py", "copies": "1", "size": "1928", "license": "mit", "hash": 850787598349529200, "line_mean": 25.4109589041, "line_max": 85, "alpha_frac": 0.5985477178, "autogenerated": false, "ratio": 3.9508196721311477, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00015928639694170118, "num_lines": 73 }
"""Queue classes for coordinating asyncio producer/consumer coroutines."""

__all__ = ('Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty')

import collections
import heapq
import warnings

from . import events
from . import locks


class QueueEmpty(Exception):
    """Raised when Queue.get_nowait() is called on an empty Queue."""
    pass


class QueueFull(Exception):
    """Raised when the Queue.put_nowait() method is called on a full Queue."""
    pass


class Queue:
    """A queue, useful for coordinating producer and consumer coroutines.

    If maxsize is less than or equal to zero, the queue size is infinite. If it
    is an integer greater than 0, then "await put()" will block when the
    queue reaches maxsize, until an item is removed by get().

    Unlike the standard library Queue, you can reliably know this Queue's size
    with qsize(), since your single-threaded asyncio application won't be
    interrupted between calling qsize() and doing an operation on the Queue.
    """

    def __init__(self, maxsize=0, *, loop=None):
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
            # Passing an explicit loop is on its way out across asyncio.
            warnings.warn("The loop argument is deprecated since Python 3.8, "
                          "and scheduled for removal in Python 3.10.",
                          DeprecationWarning, stacklevel=2)
        self._maxsize = maxsize

        # Futures.
        self._getters = collections.deque()
        # Futures.
        self._putters = collections.deque()
        self._unfinished_tasks = 0
        self._finished = locks.Event(loop=loop)
        self._finished.set()
        self._init(maxsize)

    # These three are overridable in subclasses.

    def _init(self, maxsize):
        self._queue = collections.deque()

    def _get(self):
        return self._queue.popleft()

    def _put(self, item):
        self._queue.append(item)

    # End of the overridable methods.

    def _wakeup_next(self, waiters):
        # Wake up the next waiter (if any) that isn't cancelled.
        while waiters:
            waiter = waiters.popleft()
            if not waiter.done():
                waiter.set_result(None)
                break

    def __repr__(self):
        return f'<{type(self).__name__} at {id(self):#x} {self._format()}>'

    def __str__(self):
        return f'<{type(self).__name__} {self._format()}>'

    def _format(self):
        result = f'maxsize={self._maxsize!r}'
        if getattr(self, '_queue', None):
            result += f' _queue={list(self._queue)!r}'
        if self._getters:
            result += f' _getters[{len(self._getters)}]'
        if self._putters:
            result += f' _putters[{len(self._putters)}]'
        if self._unfinished_tasks:
            result += f' tasks={self._unfinished_tasks}'
        return result

    def qsize(self):
        """Number of items in the queue."""
        return len(self._queue)

    @property
    def maxsize(self):
        """Number of items allowed in the queue."""
        return self._maxsize

    def empty(self):
        """Return True if the queue is empty, False otherwise."""
        return not self._queue

    def full(self):
        """Return True if there are maxsize items in the queue.

        Note: if the Queue was initialized with maxsize=0 (the default),
        then full() is never True.
        """
        if self._maxsize <= 0:
            return False
        else:
            return self.qsize() >= self._maxsize

    async def put(self, item):
        """Put an item into the queue.

        Put an item into the queue. If the queue is full, wait until a free
        slot is available before adding item.
        """
        while self.full():
            putter = self._loop.create_future()
            self._putters.append(putter)
            try:
                await putter
            # Bare except on purpose: the waiter must be cleaned up on *any*
            # exception (including CancelledError) and it is re-raised below.
            except:
                putter.cancel()  # Just in case putter is not done yet.
                try:
                    # Clean self._putters from canceled putters.
                    self._putters.remove(putter)
                except ValueError:
                    # The putter could be removed from self._putters by a
                    # previous get_nowait call.
                    pass
                if not self.full() and not putter.cancelled():
                    # We were woken up by get_nowait(), but can't take
                    # the call.  Wake up the next in line.
                    self._wakeup_next(self._putters)
                raise
        return self.put_nowait(item)

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        If no free slot is immediately available, raise QueueFull.
        """
        if self.full():
            raise QueueFull
        self._put(item)
        self._unfinished_tasks += 1
        self._finished.clear()
        self._wakeup_next(self._getters)

    async def get(self):
        """Remove and return an item from the queue.

        If queue is empty, wait until an item is available.
        """
        while self.empty():
            getter = self._loop.create_future()
            self._getters.append(getter)
            try:
                await getter
            # Bare except on purpose: see the matching clause in put().
            except:
                getter.cancel()  # Just in case getter is not done yet.
                try:
                    # Clean self._getters from canceled getters.
                    self._getters.remove(getter)
                except ValueError:
                    # The getter could be removed from self._getters by a
                    # previous put_nowait call.
                    pass
                if not self.empty() and not getter.cancelled():
                    # We were woken up by put_nowait(), but can't take
                    # the call.  Wake up the next in line.
                    self._wakeup_next(self._getters)
                raise
        return self.get_nowait()

    def get_nowait(self):
        """Remove and return an item from the queue.

        Return an item if one is immediately available, else raise QueueEmpty.
        """
        if self.empty():
            raise QueueEmpty
        item = self._get()
        self._wakeup_next(self._putters)
        return item

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by queue consumers. For each get() used to fetch a task, a
        subsequent call to task_done() tells the queue that the processing
        on the task is complete.

        If a join() is currently blocking, it will resume when all items have
        been processed (meaning that a task_done() call was received for every
        item that had been put() into the queue).

        Raises ValueError if called more times than there were items placed in
        the queue.
        """
        if self._unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self._unfinished_tasks -= 1
        if self._unfinished_tasks == 0:
            self._finished.set()

    async def join(self):
        """Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer calls task_done() to
        indicate that the item was retrieved and all work on it is complete.
        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        if self._unfinished_tasks > 0:
            await self._finished.wait()


class PriorityQueue(Queue):
    """A subclass of Queue; retrieves entries in priority order (lowest first).

    Entries are typically tuples of the form: (priority number, data).
    """

    def _init(self, maxsize):
        self._queue = []

    def _put(self, item, heappush=heapq.heappush):
        heappush(self._queue, item)

    def _get(self, heappop=heapq.heappop):
        return heappop(self._queue)


class LifoQueue(Queue):
    """A subclass of Queue that retrieves most recently added entries first."""

    def _init(self, maxsize):
        self._queue = []

    def _put(self, item):
        self._queue.append(item)

    def _get(self):
        return self._queue.pop()
{ "repo_name": "batermj/algorithm-challenger", "path": "code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/asyncio/queues.py", "copies": "10", "size": "8223", "license": "apache-2.0", "hash": -3466556441225962000, "line_mean": 32.0240963855, "line_max": 79, "alpha_frac": 0.5727836556, "autogenerated": false, "ratio": 4.348492860920148, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9921276516520148, "avg_score": null, "num_lines": null }
"""Queue classes for coordinating asyncio producer/consumer coroutines."""

__all__ = ('Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty')

import collections
import heapq

from . import events
from . import locks


class QueueEmpty(Exception):
    """Raised when Queue.get_nowait() is called on an empty Queue."""
    pass


class QueueFull(Exception):
    """Raised when the Queue.put_nowait() method is called on a full Queue."""
    pass


class Queue:
    """A queue, useful for coordinating producer and consumer coroutines.

    If maxsize is less than or equal to zero, the queue size is infinite. If it
    is an integer greater than 0, then "await put()" will block when the
    queue reaches maxsize, until an item is removed by get().

    Unlike the standard library Queue, you can reliably know this Queue's size
    with qsize(), since your single-threaded asyncio application won't be
    interrupted between calling qsize() and doing an operation on the Queue.
    """

    def __init__(self, maxsize=0, *, loop=None):
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        self._maxsize = maxsize

        # Futures.
        self._getters = collections.deque()
        # Futures.
        self._putters = collections.deque()
        self._unfinished_tasks = 0
        self._finished = locks.Event(loop=self._loop)
        self._finished.set()
        self._init(maxsize)

    # These three are overridable in subclasses.

    def _init(self, maxsize):
        self._queue = collections.deque()

    def _get(self):
        return self._queue.popleft()

    def _put(self, item):
        self._queue.append(item)

    # End of the overridable methods.

    def _wakeup_next(self, waiters):
        # Wake up the next waiter (if any) that isn't cancelled.
        while waiters:
            waiter = waiters.popleft()
            if not waiter.done():
                waiter.set_result(None)
                break

    def __repr__(self):
        return f'<{type(self).__name__} at {id(self):#x} {self._format()}>'

    def __str__(self):
        return f'<{type(self).__name__} {self._format()}>'

    def _format(self):
        result = f'maxsize={self._maxsize!r}'
        if getattr(self, '_queue', None):
            result += f' _queue={list(self._queue)!r}'
        if self._getters:
            result += f' _getters[{len(self._getters)}]'
        if self._putters:
            result += f' _putters[{len(self._putters)}]'
        if self._unfinished_tasks:
            result += f' tasks={self._unfinished_tasks}'
        return result

    def qsize(self):
        """Number of items in the queue."""
        return len(self._queue)

    @property
    def maxsize(self):
        """Number of items allowed in the queue."""
        return self._maxsize

    def empty(self):
        """Return True if the queue is empty, False otherwise."""
        return not self._queue

    def full(self):
        """Return True if there are maxsize items in the queue.

        Note: if the Queue was initialized with maxsize=0 (the default),
        then full() is never True.
        """
        if self._maxsize <= 0:
            return False
        else:
            return self.qsize() >= self._maxsize

    async def put(self, item):
        """Put an item into the queue.

        Put an item into the queue. If the queue is full, wait until a free
        slot is available before adding item.
        """
        while self.full():
            putter = self._loop.create_future()
            self._putters.append(putter)
            try:
                await putter
            # Bare except on purpose: the waiter must be cleaned up on *any*
            # exception (including CancelledError) and it is re-raised below.
            except:
                putter.cancel()  # Just in case putter is not done yet.
                try:
                    # Clean self._putters from canceled putters.
                    self._putters.remove(putter)
                except ValueError:
                    # The putter could be removed from self._putters by a
                    # previous get_nowait call.
                    pass
                if not self.full() and not putter.cancelled():
                    # We were woken up by get_nowait(), but can't take
                    # the call.  Wake up the next in line.
                    self._wakeup_next(self._putters)
                raise
        return self.put_nowait(item)

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        If no free slot is immediately available, raise QueueFull.
        """
        if self.full():
            raise QueueFull
        self._put(item)
        self._unfinished_tasks += 1
        self._finished.clear()
        self._wakeup_next(self._getters)

    async def get(self):
        """Remove and return an item from the queue.

        If queue is empty, wait until an item is available.
        """
        while self.empty():
            getter = self._loop.create_future()
            self._getters.append(getter)
            try:
                await getter
            # Bare except on purpose: see the matching clause in put().
            except:
                getter.cancel()  # Just in case getter is not done yet.
                try:
                    # Clean self._getters from canceled getters.
                    self._getters.remove(getter)
                except ValueError:
                    # The getter could be removed from self._getters by a
                    # previous put_nowait call.
                    pass
                if not self.empty() and not getter.cancelled():
                    # We were woken up by put_nowait(), but can't take
                    # the call.  Wake up the next in line.
                    self._wakeup_next(self._getters)
                raise
        return self.get_nowait()

    def get_nowait(self):
        """Remove and return an item from the queue.

        Return an item if one is immediately available, else raise QueueEmpty.
        """
        if self.empty():
            raise QueueEmpty
        item = self._get()
        self._wakeup_next(self._putters)
        return item

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by queue consumers. For each get() used to fetch a task, a
        subsequent call to task_done() tells the queue that the processing
        on the task is complete.

        If a join() is currently blocking, it will resume when all items have
        been processed (meaning that a task_done() call was received for every
        item that had been put() into the queue).

        Raises ValueError if called more times than there were items placed in
        the queue.
        """
        if self._unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self._unfinished_tasks -= 1
        if self._unfinished_tasks == 0:
            self._finished.set()

    async def join(self):
        """Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer calls task_done() to
        indicate that the item was retrieved and all work on it is complete.
        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        if self._unfinished_tasks > 0:
            await self._finished.wait()


class PriorityQueue(Queue):
    """A subclass of Queue; retrieves entries in priority order (lowest first).

    Entries are typically tuples of the form: (priority number, data).
    """

    def _init(self, maxsize):
        self._queue = []

    def _put(self, item, heappush=heapq.heappush):
        heappush(self._queue, item)

    def _get(self, heappop=heapq.heappop):
        return heappop(self._queue)


class LifoQueue(Queue):
    """A subclass of Queue that retrieves most recently added entries first."""

    def _init(self, maxsize):
        self._queue = []

    def _put(self, item):
        self._queue.append(item)

    def _get(self):
        return self._queue.pop()
{ "repo_name": "int19h/PTVS", "path": "Python/Product/Miniconda/Miniconda3-x64/Lib/asyncio/queues.py", "copies": "11", "size": "8003", "license": "apache-2.0", "hash": -4651763129073597000, "line_mean": 31.6653061224, "line_max": 79, "alpha_frac": 0.5730351118, "autogenerated": false, "ratio": 4.316612729234088, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00040816326530612246, "num_lines": 245 }
"""All queue-related views."""
from .forms import InquiryForm
from .forms import PromotionForm
from .logic import get_inquiry_for_asker
from .logic import maybe_promote_current_user
from .logic import update_context_with_queue_config
from flask import abort
from flask import Blueprint
from flask import g
from flask import redirect
from flask import request
from quupod.forms import choicify
from quupod.models import Inquiry
from quupod.models import Participant
from quupod.models import User
from quupod.models import Queue
from quupod.utils import emitQueueInfo
from quupod.utils import emitQueuePositions
from quupod.views import current_user
from quupod.views import render
from quupod.views import url_for

# Blueprint for all per-queue routes; the queue is selected by the
# <queue_url> prefix and resolved in pull_queue_url below.
queue = Blueprint(
    'queue',
    __name__,
    url_prefix='/<string:queue_url>',
    template_folder='templates')


@queue.url_defaults
def add_queue_url(endpoint: str, values: dict) -> None:
    """Add information to every URL build."""
    values.setdefault('queue_url', getattr(g, 'queue_url', None))


@queue.url_value_preprocessor
def pull_queue_url(endpoint: str, values: dict) -> None:
    """Extract information from the queue URL."""
    g.queue_url = values.pop('queue_url')
    g.queue = Queue.query.filter_by(url=g.queue_url).one_or_none()
    if not g.queue:
        abort(404)


def render_queue(template: str, *args, **context) -> str:
    """Special rendering for queue.

    Applies any pending auto-promotion for the current user and injects the
    queue's configuration into the template context.
    """
    maybe_promote_current_user()
    update_context_with_queue_config(context)
    context.setdefault('queue', g.queue)
    return render(template, *args, **context)


#########
# QUEUE #
#########


@queue.route('/')
def home() -> str:
    """List all unresolved inquiries for the homepage."""
    # Staff members land on the admin dashboard instead.
    if current_user().can('help'):
        return redirect(url_for('admin.home'))
    return render_queue(
        'landing.html',
        num_inquiries=Inquiry.get_num_unresolved(),
        ttr=g.queue.ttr())


@queue.route('/promote/<string:role_name>', methods=['POST', 'GET'])
@queue.route('/promote')
def promote(role_name: str=None) -> str:
    """Promote the user accessing this page."""
    if not current_user().is_authenticated:
        abort(401, 'You need to be logged in to promote an account!')
    part = Participant.get_from_user(current_user())
    # Guard: a queue must always retain at least one owner.
    if part and part.role.name == 'Owner' and g.queue.get_num_owners() <= 1:
        abort(401, 'You cannot demote yourself from owner until another owner'
              ' has been added.')
    if not role_name:
        return render_queue(
            'roles.html',
            title='Promotion Form',
            message='Welcome. Please select a role below.',
            roles=g.queue.get_roles_for_promotion())
    form = PromotionForm(request.form)
    # A role code of '*' means the role is open — no code entry required.
    if request.method == 'POST' or g.queue.get_code_for_role(role_name) == '*':
        if not g.queue.is_promotion_valid(role_name, request.form['code']):
            form.errors.setdefault('code', []).append('Incorrect code.')
            return render_queue(
                'form.html',
                form=form,
                submit='Promote',
                back=url_for('queue.promote'))
        Participant.update_or_create(current_user(), role_name)
        return render_queue(
            'confirm.html',
            title='Promotion Success',
            message='You have been promoted to %s' % role_name,
            action='Onward',
            url=url_for('admin.home'))
    return render_queue(
        'form.html',
        form=form,
        submit='Promote',
        back=url_for('queue.promote'))


########
# FLOW #
########


@queue.route('/request', methods=['POST', 'GET'])
def inquiry() -> str:
    """Place a new request.

    This request which may be authored by either a system user or an
    anonymous user.
    """
    user = current_user()
    if not user.is_authenticated and \
            g.queue.setting(name='require_login').enabled:
        return render_queue(
            'confirm.html',
            title='Login Required',
            message='Login to add an inquiry, and start using this queue.')
    form = InquiryForm(request.form, obj=user)
    # Per-queue cap on how many open requests one person may hold.
    n = int(g.queue.setting(name='max_requests').value)
    if User.get_num_current_requests(request.form.get('name', None)) >= n:
        if not current_user().is_authenticated:
            # NOTE(review): the string literal on the next line is a
            # standalone expression statement (a no-op), so the second half
            # of this message is never appended — likely needs parentheses
            # or a line continuation. Confirm intent before changing.
            message = 'If you haven\'t submitted a request, try'
            ' logging in and re-requesting.'
        else:
            message = 'Would you like to cancel your oldest request?'
        return render_queue(
            'confirm.html',
            title='Oops',
            message='Looks like you\'ve reached the maximum number of times '
            'you can add yourself to the queue at once (<code>%d</code>). '
            '%s' % (n, message),
            action='Cancel Oldest Request',
            url=url_for('queue.cancel'))
    form.location.choices = choicify(g.queue.setting('locations').value)
    form.category.choices = choicify(g.queue.setting('inquiry_types').value)
    if request.method == 'POST' and form.validate() and \
            g.queue.is_valid_assignment(request, form):
        inquiry = Inquiry(**request.form).update(queue_id=g.queue.id)
        if current_user().is_authenticated:
            inquiry.owner_id = current_user().id
        inquiry.save()
        # Notify listeners that the queue contents changed.
        emitQueueInfo(g.queue)
        return redirect(url_for('queue.waiting', inquiry_id=inquiry.id))
    return render_queue(
        'form.html',
        form=form,
        title='Request Help',
        submit='Request Help')


@queue.route('/cancel/<int:inquiry_id>')
@queue.route('/cancel')
def cancel(inquiry_id: int=None) -> str:
    """Cancel placed request."""
    inquiry = get_inquiry_for_asker(inquiry_id)
    if inquiry.is_owned_by_current_user():
        inquiry.close()
    else:
        abort(401, 'You cannot cancel another user\'s request. This incident'
              ' has been logged.')
    # Broadcast updated positions and queue stats to connected clients.
    emitQueuePositions(inquiry)
    emitQueueInfo(inquiry.queue)
    return redirect(url_for('queue.home'))


@queue.route('/waiting/<int:inquiry_id>')
@queue.route('/waiting')
def waiting(inquiry_id: int=None) -> str:
    """Screen shown after user has placed request and is waiting."""
    inquiry = get_inquiry_for_asker(inquiry_id)
    return render_queue(
        'waiting.html',
        position=inquiry.current_position(),
        group=inquiry.get_similar_inquiries(),
        inquiry=inquiry,
        details='Location: %s, Assignment: %s, Problem: %s, Request: %s' % (
            inquiry.location,
            inquiry.assignment,
            inquiry.problem,
            inquiry.to_local('created_at').created_at.humanize()))


################
# LOGIN/LOGOUT #
################


@queue.route('/login', methods=['POST', 'GET'])
def login() -> str:
    """Login using globally defined login procedure."""
    from quupod.public.views import login
    return login(
        home=url_for('queue.home', _external=True),
        login=url_for('queue.login', _external=True))


@queue.route('/logout')
def logout() -> str:
    """Logout using globally defined logout procedure."""
    from quupod.public.views import logout
    return logout(home=url_for('queue.home', _external=True))
{ "repo_name": "alvinwan/quuupod", "path": "quupod/queue/views.py", "copies": "2", "size": "7189", "license": "apache-2.0", "hash": -3741760306865125400, "line_mean": 33.0710900474, "line_max": 79, "alpha_frac": 0.6277646404, "autogenerated": false, "ratio": 3.6161971830985915, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5243961823498592, "avg_score": null, "num_lines": null }
"""All random generators for Terrain class.""" from terrain import Terrain import random import abc import math class TerrainGenerator(object): """Abstract noise generator that makes a Terrain with heights produced from noise.""" __metaclass__ = abc.ABCMeta @abc.abstractmethod def __call__(self, *args, **kwargs): """Generate a Terrain with heights corresponding to noise. Returns: Terrain: New Terrain with heights corresponding to noise. """ class DiamondSquareGenerator(TerrainGenerator): """Terrain generator that used diamond-square algorithm.""" def __init__(self, amp_from_freq): """ Args: amp_from_freq (function): Function that converts frequency to maximum amplitude. """ self.amp_from_freq = amp_from_freq def __call__(self, side_exp): """Generate a Terrain with heights corresponding to noise. Used diamond-square algorithm, with frequency of noise at each step doubling. Colored_noise will use the frequency to generate the offset noise. Width and length are equal, and length must be of form 2**n + 1 (n >= 0). Args: side_exp (int): Exponent of side length. Length of side is 2**side_exp + 1. Returns: Terrain: New Terrain with heights corresponding to noise. """ side_len = (2 ** side_exp) + 1 ter = Terrain(side_len, side_len) return self._divide(self._initialize_corners(ter, 0.5), side_len-1) def _initialize_corners(self, terrain, init_val): """Initialize corner values of terrain. Args: terrain (Terrain): Terrain to initialize edges of. init_val (float): Initial value to set all corners to, between 0 and 1. Returns: Terrain: Terrain with corners set to init_val. """ terrain[0, 0] = init_val terrain[0, terrain.length-1] = init_val terrain[terrain.width-1, 0] = init_val terrain[terrain.width-1, terrain.length-1] = init_val return terrain def _divide(self, terrain, square_len): """Divide terrain into squares and process each square recursively. Goes through each square, altering midpoint. After this, go through each diamond, altering edges. 
(Altering constitutes setting the value to an average of adjacent values and adding a noise offset.) Once all are done, halve size of each square and do _divide again on half-size squares. Once square size goes below 1, finish. Args: terrain (Terrain): Terrain to manipulate. Must have corners initialized. square_len (int): Current length of one side of a square. Returns: Terrain: New terrain with generated values. """ half = square_len / 2 if half < 1: return terrain else: # loop through all squares for y in range(half, terrain.length, square_len): for x in range(half, terrain.width, square_len): terrain = self._update_square(terrain, x, y, square_len) # loop through all diamonds for y in range(0, terrain.length, half): for x in range((y + half) % square_len, terrain.width, square_len): terrain = self._update_diamond(terrain, x, y, square_len) return self._divide(terrain, half) def _update_square(self, terrain, x, y, square_len): """Update the midpoint of a square. Midpoint becomes average of square corners plus a random offset determined by noise. Args: terrain (Terrain): Terrain to update. x (int): X coordinate of center of square. y (int): Y coordinate of center of square. square_len (int): Length of one side of square. Returns: Terrain: New terrain with updated square center. """ half_len = square_len / 2 # Impossible to attempt to access neighbours out of terrain bounds mean_height = sum([terrain[x - half_len, y - half_len], terrain[x - half_len, y + half_len], terrain[x + half_len, y - half_len], terrain[x + half_len, y + half_len]]) / 4.0 frequency = terrain.length / square_len offset = (random.random() - 0.5) * self.amp_from_freq(frequency) if not 0 <= mean_height + offset <= 1: if mean_height + offset > 1: terrain[x, y] = 1 else: terrain[x, y] = 0 else: terrain[x, y] = mean_height + offset return terrain def _update_diamond(self, terrain, x, y, diamond_len): """Update the midpoint of a diamond. 
Midpoint becomes average of diamond corners plus a random offset determined by noise. Args: terrain (Terrain): Terrain to update. x (int): X coordinate of center of diamond. y (int): Y coordinate of center of diamond. diamond_len (int): Length of one corner of diamond to other. Returns: Terrain: New terrain with updated square center. """ half_len = diamond_len / 2 # If on edge of terrain, only access 3 neighbours to avoid leaving terrain bounds neighbours = [] if x != 0: neighbours.append(terrain[x - half_len, y]) if y != 0: neighbours.append(terrain[x, y - half_len]) if x != terrain.width - 1: neighbours.append(terrain[x + half_len, y]) if y != terrain.length - 1: neighbours.append(terrain[x, y + half_len]) mean_height = sum(neighbours) / float(len(neighbours)) frequency = terrain.length / diamond_len offset = (random.random() - 0.5) * self.amp_from_freq(frequency) if not 0 <= mean_height + offset <= 1: if mean_height + offset > 1: terrain[x, y] = 1 else: terrain[x, y] = 0 else: terrain[x, y] = mean_height + offset return terrain class RedNoiseGenerator(DiamondSquareGenerator): """Diamond square terrain generator with red noise (amplitude = 1 / (frequency^2)).""" def __new__(cls, *args, **kwargs): return DiamondSquareGenerator(lambda f: f ** -2) class PinkNoiseGenerator(DiamondSquareGenerator): """Diamond square terrain generator with pink noise (amplitude = 1 / frequency).""" def __new__(cls, *args, **kwargs): return DiamondSquareGenerator(lambda f: f ** -1) class WhiteNoiseGenerator(DiamondSquareGenerator): """Diamond square terrain generator with white noise (amplitude = 1).""" def __new__(cls, *args, **kwargs): return DiamondSquareGenerator(lambda f: 1) class BlueNoiseGenerator(DiamondSquareGenerator): """Diamond square terrain generator with blue noise (amplitude = frequency).""" def __new__(cls, *args, **kwargs): return DiamondSquareGenerator(lambda f: f) class VioletNoiseGenerator(DiamondSquareGenerator): """Diamond square terrain generator with violet 
noise (amplitude = frequency^2).""" def __new__(cls, *args, **kwargs): return DiamondSquareGenerator(lambda f: f ** 2) class PerlinGenerator(TerrainGenerator): """Terrain generator that uses Perlin noise algorithm.""" def __init__(self, square_len, width_in_squares, length_in_squares): """ Args: square_len (int): Length of one side of a square in Perlin noise grid. Is > 0. width_in_squares (int): Width of generated terrain in grid squares. Is > 0. length_in_squares (int): Length of generated terrain in grid squares. Is > 0. """ self._square_len = square_len self._width_in_squares = width_in_squares self._length_in_squares = length_in_squares self._linearly_interpolated = False self._init_gradients(1) def _init_gradients(self, vec_magnitude): """Initialize all gradient vectors to be in random directions with the same magnitude. Args: vec_magnitude (float): Magnitude of all gradient vectors. """ self._grad_vecs = [[(0, 0) for _ in range(self._width_in_squares+1)] for _ in range(self._length_in_squares+1)] """list[list[tuple(float, float)]]: Grid of gradient vectors.""" for x in range(self._width_in_squares+1): for y in range(self._length_in_squares+1): x_val = (random.random() - 0.5) * 2 * vec_magnitude y_val = math.sqrt(vec_magnitude**2 - x_val**2) * random.choice([1, -1]) self._grad_vecs[y][x] = (x_val, y_val) def __call__(self, linearly_interpolated=False): """Generate terrain via Perlin noise. Args: linearly_interpolated (bool): Whether to linearly interpolate values or use cubic function. Returns: Terrain: Generated terrain. """ self._linearly_interpolated = bool(linearly_interpolated) terr = Terrain(self._square_len * self._width_in_squares, self._square_len * self._length_in_squares) for x in range(terr.width): for y in range(terr.length): terr[x, y] = self._get_noise_at(x, y) return terr def _get_noise_at(self, x, y): """Get perlin noise at a point in terrain. 
Does this by choosing a random gradient vector for each grid corner (done at initialization) and taking their dot products with the displacement vectors to each point in the grid. The generated values are then interpolated between based on distance to each corner from the desired point. Args: x (int): X coordinate of requested point. y (int): Y coordinate of requested point. Returns: float: Height of point on terrain, between 0 and 1 inclusive. """ grid_x = x / float(self._square_len) # X value within grid of gradient vectors grid_y = y / float(self._square_len) # Y value within grid of gradient vectors left_x, right_x, upper_y, lower_y = self._get_corners(grid_x, grid_y) x_weight = grid_x - left_x y_weight = grid_y - upper_y # ul = upper left, lr = lower right, etc. ul_influence_val = self._get_influence_val(left_x, upper_y, grid_x, grid_y) ur_influence_val = self._get_influence_val(right_x, upper_y, grid_x, grid_y) ll_influence_val = self._get_influence_val(left_x, lower_y, grid_x, grid_y) lr_influence_val = self._get_influence_val(right_x, lower_y, grid_x, grid_y) # Interpolate between top two and bottom two influence vals, then interpolate between them using y_weight upper_influence_val = self._interpolate_between(ul_influence_val, ur_influence_val, x_weight) lower_influence_val = self._interpolate_between(ll_influence_val, lr_influence_val, x_weight) interpolated_val = self._interpolate_between(upper_influence_val, lower_influence_val, y_weight) # Normalize interpolated_val to be between 0 and 1, return as height # Can range from 0.5 to -0.5, add 0.5 to achieve proper result height = interpolated_val + 0.5 # Some margin of error, ensure is still between 0 and 1 return round(height) if not 0 <= height <= 1 else height def _get_corners(self, x, y): """Get coordinates of corners around point in gradients grid. Args: x (float): X coordinate of point in gradient grid. y (float): Y coordinate of point in gradient grid. 
Returns: tuple(int, int, int, int): Tuple of left x, right x, upper y and lower y. """ left_x = (int(x)-1) if x == self._width_in_squares else int(x) right_x = int(x) if x == self._width_in_squares else (int(x) + 1) upper_y = (int(y)-1) if y == self._length_in_squares else int(y) lower_y = int(y) if y == self._length_in_squares else (int(y) + 1) return left_x, right_x, upper_y, lower_y def _get_influence_val(self, vec_x, vec_y, x, y): """Get influence value from a corner on grid for a point. This value is the dot product of the displacement and gradient vectors at that corner. Four of these for all four corners surrounding a point will be interpolated between to get the point's height. Args: vec_x (int): X coordinate of corner to get gradient and displacement vectors from. vec_y (int): Y coordinate of corner to get gradient and displacement vectors from. x (float): X coordinate of point to get influence value for, normalized to be within gradients grid. y (float): Y coordinate of point to get influence value for, normalized to be within gradients grid. Returns: float: Influence value of corner (vec_x, vec_y) for point (x, y). """ disp_x = x - vec_x disp_y = y - vec_y grad_x, grad_y = self._grad_vecs[vec_y][vec_x] return grad_x*disp_x + grad_y*disp_y def _interpolate_between(self, val0, val1, weight): """Interpolate between two values given a weight. Will be linear if self._linearly_interpolated is True, or via a smooth function otherwise. Args: val0 (float): First value to interpolate from. val1 (float): Second value to interpolate from. weight (float): Weighting of interpolation. Is between 0 and 1; 0 means == val0, 1 means == val1. Returns: float: Result of interpolation between val0 and val1. """ if self._linearly_interpolated: return (1 - weight)*val0 + weight*val1 else: return self._smoothen_weight(1 - weight)*val0 + self._smoothen_weight(weight)*val1 @staticmethod def _smoothen_weight(x): return 6*(x**5) - 15*(x**4) + 10*(x**3)
{ "repo_name": "jackromo/RandTerrainPy", "path": "randterrainpy/terraingen.py", "copies": "1", "size": "14157", "license": "mit", "hash": -2359723875979347500, "line_mean": 38.9915254237, "line_max": 119, "alpha_frac": 0.6079677898, "autogenerated": false, "ratio": 3.8680327868852458, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4976000576685246, "avg_score": null, "num_lines": null }
__all__ = ['ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_', 's_', 'index_exp', 'ix_', 'ndenumerate','ndindex', 'fill_diagonal','diag_indices','diag_indices_from'] import sys import numpy.core.numeric as _nx from numpy.core.numeric import ( asarray, ScalarType, array, alltrue, cumprod, arange ) from numpy.core.numerictypes import find_common_type import math import function_base import numpy.matrixlib as matrix from function_base import diff from numpy.lib._compiled_base import ravel_multi_index, unravel_index from numpy.lib.stride_tricks import as_strided makemat = matrix.matrix def ix_(*args): """ Construct an open mesh from multiple sequences. This function takes N 1-D sequences and returns N outputs with N dimensions each, such that the shape is 1 in all but one dimension and the dimension with the non-unit shape value cycles through all N dimensions. Using `ix_` one can quickly construct index arrays that will index the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``. Parameters ---------- args : 1-D sequences Returns ------- out : tuple of ndarrays N arrays with N dimensions each, with N the number of input sequences. Together these arrays form an open mesh. See Also -------- ogrid, mgrid, meshgrid Examples -------- >>> a = np.arange(10).reshape(2, 5) >>> a array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) >>> ixgrid = np.ix_([0,1], [2,4]) >>> ixgrid (array([[0], [1]]), array([[2, 4]])) >>> ixgrid[0].shape, ixgrid[1].shape ((2, 1), (1, 2)) >>> a[ixgrid] array([[2, 4], [7, 9]]) """ out = [] nd = len(args) baseshape = [1]*nd for k in range(nd): new = _nx.asarray(args[k]) if (new.ndim != 1): raise ValueError("Cross index must be 1 dimensional") if issubclass(new.dtype.type, _nx.bool_): new = new.nonzero()[0] baseshape[k] = len(new) new = new.reshape(tuple(baseshape)) out.append(new) baseshape[k] = 1 return tuple(out) class nd_grid(object): """ Construct a multi-dimensional "meshgrid". 
``grid = nd_grid()`` creates an instance which will return a mesh-grid when indexed. The dimension and number of the output arrays are equal to the number of indexing dimensions. If the step length is not a complex number, then the stop is not inclusive. However, if the step length is a **complex number** (e.g. 5j), then the integer part of its magnitude is interpreted as specifying the number of points to create between the start and stop values, where the stop value **is inclusive**. If instantiated with an argument of ``sparse=True``, the mesh-grid is open (or not fleshed out) so that only one-dimension of each returned argument is greater than 1. Parameters ---------- sparse : bool, optional Whether the grid is sparse or not. Default is False. Notes ----- Two instances of `nd_grid` are made available in the NumPy namespace, `mgrid` and `ogrid`:: mgrid = nd_grid(sparse=False) ogrid = nd_grid(sparse=True) Users should use these pre-defined instances instead of using `nd_grid` directly. Examples -------- >>> mgrid = np.lib.index_tricks.nd_grid() >>> mgrid[0:5,0:5] array([[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4, 4, 4, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]]) >>> mgrid[-1:1:5j] array([-1. , -0.5, 0. , 0.5, 1. 
]) >>> ogrid = np.lib.index_tricks.nd_grid(sparse=True) >>> ogrid[0:5,0:5] [array([[0], [1], [2], [3], [4]]), array([[0, 1, 2, 3, 4]])] """ def __init__(self, sparse=False): self.sparse = sparse def __getitem__(self,key): try: size = [] typ = int for k in range(len(key)): step = key[k].step start = key[k].start if start is None: start=0 if step is None: step=1 if isinstance(step, complex): size.append(int(abs(step))) typ = float else: size.append(math.ceil((key[k].stop - start)/(step*1.0))) if isinstance(step, float) or \ isinstance(start, float) or \ isinstance(key[k].stop, float): typ = float if self.sparse: nn = map(lambda x,t: _nx.arange(x, dtype=t), size, \ (typ,)*len(size)) else: nn = _nx.indices(size, typ) for k in range(len(size)): step = key[k].step start = key[k].start if start is None: start=0 if step is None: step=1 if isinstance(step, complex): step = int(abs(step)) if step != 1: step = (key[k].stop - start)/float(step-1) nn[k] = (nn[k]*step+start) if self.sparse: slobj = [_nx.newaxis]*len(size) for k in range(len(size)): slobj[k] = slice(None,None) nn[k] = nn[k][slobj] slobj[k] = _nx.newaxis return nn except (IndexError, TypeError): step = key.step stop = key.stop start = key.start if start is None: start = 0 if isinstance(step, complex): step = abs(step) length = int(step) if step != 1: step = (key.stop-start)/float(step-1) stop = key.stop+step return _nx.arange(0, length,1, float)*step + start else: return _nx.arange(start, stop, step) def __getslice__(self,i,j): return _nx.arange(i,j) def __len__(self): return 0 mgrid = nd_grid(sparse=False) ogrid = nd_grid(sparse=True) mgrid.__doc__ = None # set in numpy.add_newdocs ogrid.__doc__ = None # set in numpy.add_newdocs class AxisConcatenator(object): """ Translates slice objects to concatenation along an axis. For detailed documentation on usage, see `r_`. 
""" def _retval(self, res): if self.matrix: oldndim = res.ndim res = makemat(res) if oldndim == 1 and self.col: res = res.T self.axis = self._axis self.matrix = self._matrix self.col = 0 return res def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): self._axis = axis self._matrix = matrix self.axis = axis self.matrix = matrix self.col = 0 self.trans1d = trans1d self.ndmin = ndmin def __getitem__(self,key): trans1d = self.trans1d ndmin = self.ndmin if isinstance(key, str): frame = sys._getframe().f_back mymat = matrix.bmat(key,frame.f_globals,frame.f_locals) return mymat if type(key) is not tuple: key = (key,) objs = [] scalars = [] arraytypes = [] scalartypes = [] for k in range(len(key)): scalar = False if type(key[k]) is slice: step = key[k].step start = key[k].start stop = key[k].stop if start is None: start = 0 if step is None: step = 1 if isinstance(step, complex): size = int(abs(step)) newobj = function_base.linspace(start, stop, num=size) else: newobj = _nx.arange(start, stop, step) if ndmin > 1: newobj = array(newobj,copy=False,ndmin=ndmin) if trans1d != -1: newobj = newobj.swapaxes(-1,trans1d) elif isinstance(key[k],str): if k != 0: raise ValueError("special directives must be the " "first entry.") key0 = key[0] if key0 in 'rc': self.matrix = True self.col = (key0 == 'c') continue if ',' in key0: vec = key0.split(',') try: self.axis, ndmin = \ [int(x) for x in vec[:2]] if len(vec) == 3: trans1d = int(vec[2]) continue except: raise ValueError("unknown special directive") try: self.axis = int(key[k]) continue except (ValueError, TypeError): raise ValueError("unknown special directive") elif type(key[k]) in ScalarType: newobj = array(key[k],ndmin=ndmin) scalars.append(k) scalar = True scalartypes.append(newobj.dtype) else: newobj = key[k] if ndmin > 1: tempobj = array(newobj, copy=False, subok=True) newobj = array(newobj, copy=False, subok=True, ndmin=ndmin) if trans1d != -1 and tempobj.ndim < ndmin: k2 = ndmin-tempobj.ndim if (trans1d < 0): 
trans1d += k2 + 1 defaxes = range(ndmin) k1 = trans1d axes = defaxes[:k1] + defaxes[k2:] + \ defaxes[k1:k2] newobj = newobj.transpose(axes) del tempobj objs.append(newobj) if not scalar and isinstance(newobj, _nx.ndarray): arraytypes.append(newobj.dtype) # Esure that scalars won't up-cast unless warranted final_dtype = find_common_type(arraytypes, scalartypes) if final_dtype is not None: for k in scalars: objs[k] = objs[k].astype(final_dtype) res = _nx.concatenate(tuple(objs),axis=self.axis) return self._retval(res) def __getslice__(self,i,j): res = _nx.arange(i,j) return self._retval(res) def __len__(self): return 0 # separate classes are used here instead of just making r_ = concatentor(0), # etc. because otherwise we couldn't get the doc string to come out right # in help(r_) class RClass(AxisConcatenator): """ Translates slice objects to concatenation along the first axis. This is a simple way to build up arrays quickly. There are two use cases. 1. If the index expression contains comma separated arrays, then stack them along their first axis. 2. If the index expression contains slice notation or scalars then create a 1-D array with a range indicated by the slice notation. If slice notation is used, the syntax ``start:stop:step`` is equivalent to ``np.arange(start, stop, step)`` inside of the brackets. However, if ``step`` is an imaginary number (i.e. 100j) then its integer portion is interpreted as a number-of-points desired and the start and stop are inclusive. In other words ``start:stop:stepj`` is interpreted as ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets. After expansion of slice notation, all comma separated sequences are concatenated together. Optional character strings placed as the first element of the index expression can be used to change the output. The strings 'r' or 'c' result in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) matrix is produced. 
If the result is 1-D and 'c' is specified, then a N x 1 (column) matrix is produced. If the result is 2-D then both provide the same matrix result. A string integer specifies which axis to stack multiple comma separated arrays along. A string of two comma-separated integers allows indication of the minimum number of dimensions to force each entry into as the second integer (the axis to concatenate along is still the first integer). A string with three comma-separated integers allows specification of the axis to concatenate along, the minimum number of dimensions to force the entries to, and which axis should contain the start of the arrays which are less than the specified number of dimensions. In other words the third integer allows you to specify where the 1's should be placed in the shape of the arrays that have their shapes upgraded. By default, they are placed in the front of the shape tuple. The third argument allows you to specify where the start of the array should be instead. Thus, a third argument of '0' would place the 1's at the end of the array shape. Negative integers specify where in the new shape tuple the last dimension of upgraded arrays should be placed, so the default is '-1'. Parameters ---------- Not a function, so takes no parameters Returns ------- A concatenated ndarray or matrix. See Also -------- concatenate : Join a sequence of arrays together. c_ : Translates slice objects to concatenation along the second axis. Examples -------- >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] array([1, 2, 3, 0, 0, 4, 5, 6]) >>> np.r_[-1:1:6j, [0]*3, 5, 6] array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ]) String integers specify the axis to concatenate along or the minimum number of dimensions to force entries into. 
>>> a = np.array([[0, 1, 2], [3, 4, 5]]) >>> np.r_['-1', a, a] # concatenate along last axis array([[0, 1, 2, 0, 1, 2], [3, 4, 5, 3, 4, 5]]) >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2 array([[1, 2, 3], [4, 5, 6]]) >>> np.r_['0,2,0', [1,2,3], [4,5,6]] array([[1], [2], [3], [4], [5], [6]]) >>> np.r_['1,2,0', [1,2,3], [4,5,6]] array([[1, 4], [2, 5], [3, 6]]) Using 'r' or 'c' as a first string argument creates a matrix. >>> np.r_['r',[1,2,3], [4,5,6]] matrix([[1, 2, 3, 4, 5, 6]]) """ def __init__(self): AxisConcatenator.__init__(self, 0) r_ = RClass() class CClass(AxisConcatenator): """ Translates slice objects to concatenation along the second axis. This is short-hand for ``np.r_['-1,2,0', index expression]``, which is useful because of its common occurrence. In particular, arrays will be stacked along their last axis after being upgraded to at least 2-D with 1's post-pended to the shape (column vectors made out of 1-D arrays). For detailed documentation, see `r_`. Examples -------- >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] array([[1, 2, 3, 0, 0, 4, 5, 6]]) """ def __init__(self): AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) c_ = CClass() class ndenumerate(object): """ Multidimensional index iterator. Return an iterator yielding pairs of array coordinates and values. Parameters ---------- a : ndarray Input array. See Also -------- ndindex, flatiter Examples -------- >>> a = np.array([[1, 2], [3, 4]]) >>> for index, x in np.ndenumerate(a): ... print index, x (0, 0) 1 (0, 1) 2 (1, 0) 3 (1, 1) 4 """ def __init__(self, arr): self.iter = asarray(arr).flat def next(self): """ Standard iterator method, returns the index tuple and array value. Returns ------- coords : tuple of ints The indices of the current iteration. val : scalar The array element of the current iteration. 
""" return self.iter.coords, self.iter.next() def __iter__(self): return self class ndindex(object): """ An N-dimensional iterator object to index arrays. Given the shape of an array, an `ndindex` instance iterates over the N-dimensional index of the array. At each iteration a tuple of indices is returned, the last dimension is iterated over first. Parameters ---------- `*args` : ints The size of each dimension of the array. See Also -------- ndenumerate, flatiter Examples -------- >>> for index in np.ndindex(3, 2, 1): ... print index (0, 0, 0) (0, 1, 0) (1, 0, 0) (1, 1, 0) (2, 0, 0) (2, 1, 0) """ def __init__(self, *shape): x = as_strided(_nx.zeros(1), shape=shape, strides=_nx.zeros_like(shape)) self._it = _nx.nditer(x, flags=['multi_index'], order='C') def __iter__(self): return self def ndincr(self): """ Increment the multi-dimensional index by one. This method is for backward compatibility only: do not use. """ self.next() def next(self): """ Standard iterator method, updates the index and returns the index tuple. Returns ------- val : tuple of ints Returns a tuple containing the indices of the current iteration. """ self._it.next() return self._it.multi_index # You can do all this with slice() plus a few special objects, # but there's a lot to remember. This version is simpler because # it uses the standard array indexing syntax. # # Written by Konrad Hinsen <hinsen@cnrs-orleans.fr> # last revision: 1999-7-23 # # Cosmetic changes by T. Oliphant 2001 # # class IndexExpression(object): """ A nicer way to build up index tuples for arrays. .. note:: Use one of the two predefined instances `index_exp` or `s_` rather than directly using `IndexExpression`. For any index combination, including slicing and axis insertion, ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any array `a`. 
However, ``np.index_exp[indices]`` can be used anywhere in Python code and returns a tuple of slice objects that can be used in the construction of complex index expressions. Parameters ---------- maketuple : bool If True, always returns a tuple. See Also -------- index_exp : Predefined instance that always returns a tuple: `index_exp = IndexExpression(maketuple=True)`. s_ : Predefined instance without tuple conversion: `s_ = IndexExpression(maketuple=False)`. Notes ----- You can do all this with `slice()` plus a few special objects, but there's a lot to remember and this version is simpler because it uses the standard array indexing syntax. Examples -------- >>> np.s_[2::2] slice(2, None, 2) >>> np.index_exp[2::2] (slice(2, None, 2),) >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]] array([2, 4]) """ def __init__(self, maketuple): self.maketuple = maketuple def __getitem__(self, item): if self.maketuple and type(item) != tuple: return (item,) else: return item index_exp = IndexExpression(maketuple=True) s_ = IndexExpression(maketuple=False) # End contribution from Konrad. # The following functions complement those in twodim_base, but are # applicable to N-dimensions. def fill_diagonal(a, val, wrap=False): """Fill the main diagonal of the given array of any dimensionality. For an array `a` with ``a.ndim > 2``, the diagonal is the list of locations with indices ``a[i, i, ..., i]`` all identical. This function modifies the input array in-place, it does not return a value. Parameters ---------- a : array, at least 2-D. Array whose diagonal is to be filled, it gets modified in-place. val : scalar Value to be written on the diagonal, its type must be compatible with that of the array a. wrap: bool For tall matrices in NumPy version up to 1.6.2, the diagonal "wrapped" after N columns. You can have this behavior with this option. This affect only tall matrices. See also -------- diag_indices, diag_indices_from Notes ----- .. 
versionadded:: 1.4.0 This functionality can be obtained via `diag_indices`, but internally this version uses a much faster implementation that never constructs the indices and uses simple slicing. Examples -------- >>> a = np.zeros((3, 3), int) >>> np.fill_diagonal(a, 5) >>> a array([[5, 0, 0], [0, 5, 0], [0, 0, 5]]) The same function can operate on a 4-D array: >>> a = np.zeros((3, 3, 3, 3), int) >>> np.fill_diagonal(a, 4) We only show a few blocks for clarity: >>> a[0, 0] array([[4, 0, 0], [0, 0, 0], [0, 0, 0]]) >>> a[1, 1] array([[0, 0, 0], [0, 4, 0], [0, 0, 0]]) >>> a[2, 2] array([[0, 0, 0], [0, 0, 0], [0, 0, 4]]) # tall matrices no wrap >>> a = np.zeros((5, 3),int) >>> fill_diagonal(a, 4) array([[4, 0, 0], [0, 4, 0], [0, 0, 4], [0, 0, 0], [0, 0, 0]]) # tall matrices wrap >>> a = np.zeros((5, 3),int) >>> fill_diagonal(a, 4) array([[4, 0, 0], [0, 4, 0], [0, 0, 4], [0, 0, 0], [4, 0, 0]]) # wide matrices >>> a = np.zeros((3, 5),int) >>> fill_diagonal(a, 4) array([[4, 0, 0, 0, 0], [0, 4, 0, 0, 0], [0, 0, 4, 0, 0]]) """ if a.ndim < 2: raise ValueError("array must be at least 2-d") end = None if a.ndim == 2: # Explicit, fast formula for the common case. For 2-d arrays, we # accept rectangular ones. step = a.shape[1] + 1 #This is needed to don't have tall matrix have the diagonal wrap. if not wrap: end = a.shape[1] * a.shape[1] else: # For more than d=2, the strided formula is only valid for arrays with # all dimensions equal, so we check first. if not alltrue(diff(a.shape)==0): raise ValueError("All dimensions of input must be of equal length") step = 1 + (cumprod(a.shape[:-1])).sum() # Write the value out into the diagonal. a.flat[:end:step] = val def diag_indices(n, ndim=2): """ Return the indices to access the main diagonal of an array. This returns a tuple of indices that can be used to access the main diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape (n, n, ..., n). 
For ``a.ndim = 2`` this is the usual diagonal, for ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]`` for ``i = [0..n-1]``. Parameters ---------- n : int The size, along each dimension, of the arrays for which the returned indices can be used. ndim : int, optional The number of dimensions. See also -------- diag_indices_from Notes ----- .. versionadded:: 1.4.0 Examples -------- Create a set of indices to access the diagonal of a (4, 4) array: >>> di = np.diag_indices(4) >>> di (array([0, 1, 2, 3]), array([0, 1, 2, 3])) >>> a = np.arange(16).reshape(4, 4) >>> a array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) >>> a[di] = 100 >>> a array([[100, 1, 2, 3], [ 4, 100, 6, 7], [ 8, 9, 100, 11], [ 12, 13, 14, 100]]) Now, we create indices to manipulate a 3-D array: >>> d3 = np.diag_indices(2, 3) >>> d3 (array([0, 1]), array([0, 1]), array([0, 1])) And use it to set the diagonal of an array of zeros to 1: >>> a = np.zeros((2, 2, 2), dtype=np.int) >>> a[d3] = 1 >>> a array([[[1, 0], [0, 0]], [[0, 0], [0, 1]]]) """ idx = arange(n) return (idx,) * ndim def diag_indices_from(arr): """ Return the indices to access the main diagonal of an n-dimensional array. See `diag_indices` for full details. Parameters ---------- arr : array, at least 2-D See Also -------- diag_indices Notes ----- .. versionadded:: 1.4.0 """ if not arr.ndim >= 2: raise ValueError("input array must be at least 2-d") # For more than d=2, the strided formula is only valid for arrays with # all dimensions equal, so we check first. if not alltrue(diff(arr.shape) == 0): raise ValueError("All dimensions of input must be of equal length") return diag_indices(arr.shape[0], arr.ndim)
{ "repo_name": "dwf/numpy", "path": "numpy/lib/index_tricks.py", "copies": "2", "size": "25566", "license": "bsd-3-clause", "hash": -593190225711470800, "line_mean": 29.4719904648, "line_max": 80, "alpha_frac": 0.5335210827, "autogenerated": false, "ratio": 3.750880281690141, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5284401364390141, "avg_score": null, "num_lines": null }
__all__ = ['RawBot']


class RawBot(object):
    '''Minimal bot that forwards the raw server state untouched.

    Subclass it and override the three hooks below; the underscore-prefixed
    wrappers are what the game loop actually calls:

    - ``start()``: invoked once when the game begins.
    - ``move()``: invoked each time the server asks for a move.
    - ``end()``: invoked once after the game is over.

    Attributes:
        id (int): identifier of this bot's hero, taken from the first state.
        state (dict): the latest raw state received from the server.
    '''

    id = None
    state = None

    def _start(self, state):
        '''Record the initial state, remember our hero id, then call start().'''
        self.state = state
        self.id = state['hero']['id']
        self.start()

    def _move(self, state):
        '''Store the latest state and delegate the decision to move().'''
        self.state = state
        return self.move()

    def _end(self):
        '''Delegate end-of-game handling to end().'''
        self.end()

    def start(self):
        '''Hook: the game has started. Override in subclasses.'''
        pass

    def move(self):
        '''Hook: the game wants a move. Override in subclasses.'''
        pass

    def end(self):
        '''Hook: the game has finished. Override in subclasses.'''
        pass
{ "repo_name": "renatopp/vindinium-python", "path": "vindinium/bots/raw_bot.py", "copies": "1", "size": "1071", "license": "mit", "hash": -5875034517699699000, "line_mean": 23.3409090909, "line_max": 70, "alpha_frac": 0.5536881419, "autogenerated": false, "ratio": 4.25, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.003558310376492195, "num_lines": 44 }
"""Simple header-prefixed file IO for dense ndarrays and sparse matrices.

Python 2 module (uses the ``print`` statement, ``dict.iteritems`` and the
``file`` builtin). Files start with a small text header (a line count
followed by ``key=value`` lines) and then the payload in 'basic', 'ascii'
or 'binary' form.
"""
__all__ = ['read_array','write_array','read_header']

#from scipy.io import read_array,write_array

from scipy import shape,rank
import numpy
import scipy

import sys

class ArrayIOException(Exception):
    """Base exception for this module; carries a free-form message."""
    def __init__(self,msg=''):
        self.msg = msg
    def __str__(self):
        return self.msg

class PyIOFileFormatError_placeholder(object):
    pass

class FileFormatError(ArrayIOException):
    """Raised when a file's header or layout cannot be parsed."""
    pass

class ArrayHeader(dict):
    """A dict of header fields that can serialize itself to the text header."""
    def tostring(self):
        # NOTE: mutates self by stamping a version field before serializing.
        self['version'] = '1.0'
        # First line is the number of key=value lines that follow.
        output = str(len(self)) + '\n'
        for key,value in self.iteritems():
            output += key
            output += '='
            output += str(value)
            output += '\n'
        return output


#---user functions---#000000#FFFFFF-------------------------------------------------------
def read_array(fid):
    """Read an ndarray or sparse matrix from an open file or filename.

    Dispatches on the ``format`` header field ('basic'/'ascii'/'binary')
    and, for non-basic formats, on the ``type`` field ('ndarray'/'sparse').

    Supported sparse formats: csr_matrix, csc_matrix, coo_matrix.
    ndarray IO uses ndarray.tofile() / numpy.fromfile().
    """
    if type(fid) is not file:
        fid = open(fid)

    header = read_header(fid)

    try:
        format = header['format']
    except:
        raise FileFormatError('File format unspecified in file')

    if format not in ['','basic','ascii','binary']:
        raise FileFormatError('Unknown format: ['+format+']')

    if format == 'basic':
        return read_basic(fid,header)
    else:
        try:
            array_type = header['type']
        except KeyError:
            raise FileFormatError('Array type unspecified in file: ['+fid.name+']')

        if array_type == 'ndarray':
            return read_ndarray(fid,header)
        elif array_type == 'sparse':
            return read_sparse(fid,header)
        else:
            raise FileFormatError('Unknown array type: ['+array_type+']')


def write_array(fid,A,format='binary'):
    """Write an ndarray or sparse matrix to a file.

    ``format`` is one of:
      basic  - most human readable; rank 1 or 2 dense arrays only
      ascii  - somewhat human readable; dense and sparse
      binary - fastest; dense and sparse; data stored LittleEndian
    """
    if format not in ['basic','ascii','binary']:
        raise ArrayIOException('Unknown format: ['+format+']')

    if type(fid) is not file:
        fid = open(fid,'wb')

    if type(A) is numpy.ndarray:
        A = numpy.ascontiguousarray(A)  #strided arrays break in write
        if format == 'basic':
            if rank(A) > 2:
                raise ArrayIOException('basic format only works for rank 1 or 2 arrays')
            write_basic(fid,A)
        else:
            write_ndarray(fid,A,format)
    elif scipy.sparse.isspmatrix(A):
        if format not in ['ascii','binary']:
            raise ArrayIOException('sparse matrices require ascii or binary format')
        write_sparse(fid,A,format)
    else:
        try:
            # NOTE(review): `asarray` is not imported at module level here —
            # presumably meant to be numpy.asarray; verify against the
            # original pydec sources. The bare except below hides the
            # resulting NameError as a conversion failure.
            A = asarray(A)
            if format == 'basic':
                if rank(A) > 2:
                    raise ArrayIOException('basic format only works for rank 1 or 2 arrays')
                write_basic(fid,A)
            else:
                write_ndarray(fid,A,format)
        except:
            raise ArrayIOException('Unknown data type and unable to convert to numpy.ndarray')


def read_header(fid):
    """Read the text header of an array file into an ArrayHeader dict.

    Expects the first line to be the number of ``key=value`` lines that
    follow; raises FileFormatError on malformed lines.
    """
    if type(fid) is not file:
        fid = open(fid)

    first_line = fid.readline()
    try:
        numlines = int(first_line)
    except:
        print 'firstline error: '+first_line
        raise ArrayIOException()
    #numlines = int(fid.readline())

    header = ArrayHeader()
    for i in range(numlines):
        line = fid.readline().rstrip()
        parts = line.split('=')
        if len(parts) != 2:
            raise FileFormatError('File header error: line #'+str(i)+' ['+line+']')
        header[parts[0]] = parts[1]

    return header


#---basic---#000000#FFFFFF------------------------------------------------------
def basic_header(A):
    """Build the minimal header (dims, dtype) for the 'basic' format."""
    header = ArrayHeader()
    header['dims'] = ','.join(map(str,A.shape))
    header['dtype'] = A.dtype.name
    return header

def read_basic(fid,header):
    """Read a 2-d array stored in the whitespace-separated 'basic' format."""
    try:
        dimensions = map(int,header['dims'].split(','))
    except:
        raise FileFormatError('Unable to determine dims')
    try:
        dtype = numpy.typeDict[header['dtype']]
    except:
        raise FileFormatError('Unable to determine dtype')

    if len(dimensions) != 2:
        raise FileFormatError('basic format only supports 2d arrays')
    if min(dimensions) < 1:
        raise FileFormatError('all dimensions must be positive')

    return numpy.fromfile(fid,dtype=dtype,count=numpy.prod(dimensions),sep=' ').reshape(dimensions)

def write_basic(fid,A):
    """Write A row-by-row in the 'basic' format (one text row per line)."""
    A = numpy.atleast_2d(A)  #force 1d arrays to 2d
    header = basic_header(A)
    header['format'] = 'basic'
    fid.write(header.tostring())
    for row in A:
        row.tofile(fid,sep=' ',format='%.16g')
        fid.write('\n')


#---ndarray---#000000#FFFFFF-------------------------------------------------
def ndarray_header(A):
    """Build the header (type, rank, dims, dtype) for an ndarray payload."""
    header = ArrayHeader()
    header['type'] = 'ndarray'
    header['rank'] = rank(A)
    header['dims'] = ','.join(map(str,A.shape))
    header['dtype'] = A.dtype.name
    return header

def read_ndarray(fid,header):
    """Read a dense ndarray payload in 'ascii' or 'binary' form."""
    try:
        # shadows scipy's rank() inside this function; intentional-looking
        # but worth noting for maintainers.
        rank = int(header['rank'])
    except:
        raise FileFormatError('Unable to determine rank')
    try:
        dims = map(int,header['dims'].split(','))
    except:
        raise FileFormatError('Unable to determine dims')
    try:
        dtype = numpy.typeDict[header['dtype']]
    except:
        raise FileFormatError('Unable to determine dtype')
    try:
        format = header['format']
    except:
        raise FileFormatError('Unable to determine format')

    if len(dims) != rank or min(dims) < 0:
        raise FileFormatError('Invalid dims')

    # NOTE(review): `sep` is computed but never used; the branches below
    # pass the separator literally instead.
    if format == 'ascii':
        sep = ' '
    else:
        sep = ''

    if format == 'ascii':
        return numpy.fromfile(fid,dtype=dtype,count=numpy.prod(dims),sep=' ').reshape(dims)
    else:
        # Binary payloads are stored little-endian; swap on big-endian hosts.
        A = numpy.fromfile(fid,dtype=dtype,count=numpy.prod(dims),sep='').reshape(dims)
        if sys.byteorder == 'big':
            A = A.byteswap(True)  #in-place swap
        return A

def write_ndarray(fid,A,format):
    """Write a dense ndarray payload in 'ascii' or 'binary' form."""
    header = ndarray_header(A)
    header['format'] = format
    fid.write(header.tostring())

    if format == 'binary':
        # Always emit little-endian bytes regardless of host byte order.
        if sys.byteorder == 'little':
            A.tofile(fid)
        else:
            A.byteswap().tofile(fid)
    elif format == 'ascii':
        A.tofile(fid,sep=' ',format='%.16g')
        if A.size > 0:
            fid.write('\n')  #only introduce newline when something has been written
    else:
        raise ArrayIOException('Unknown file format: ['+format+']')


#---sparse---#000000#FFFFFF-----------------------------------------------------
supported_sparse_formats = ['csr','csc','coo']

def sparse_header(A):
    """Build the header (type, sptype, dims) for a sparse-matrix payload."""
    header = ArrayHeader()
    header['type'] = 'sparse'
    header['sptype'] = A.format
    header['dims'] = ','.join(map(str,A.shape))
    return header

def read_sparse(fid,header):
    """Read a sparse matrix stored as consecutive embedded arrays.

    csr/csc store (data, indices, indptr); coo stores (data, row, col).
    Each component is itself a full read_array() record in the same file.
    """
    try:
        dims = map(int,header['dims'].split(','))
    except:
        raise FileFormatError('Unable to determine dims')
    try:
        # NOTE(review): assigned but unused; the branches below re-read
        # header['sptype'] directly.
        format = header['sptype']
    except:
        raise FileFormatError('Unable to determine sparse format')

    if len(dims) != 2 or min(dims) < 1:
        raise FileFormatError('Invalid dims')

    if header['sptype'] not in supported_sparse_formats:
        raise ArrayIOException('Only '+str(supported_sparse_formats)+' are supported')

    if header['sptype'] == 'csr':
        data   = read_array(fid)
        colind = read_array(fid)
        indptr = read_array(fid)
        return scipy.sparse.csr_matrix((data,colind,indptr),dims)
    elif header['sptype'] == 'csc':
        data   = read_array(fid)
        rowind = read_array(fid)
        indptr = read_array(fid)
        return scipy.sparse.csc_matrix((data,rowind,indptr),dims)
    elif header['sptype'] == 'coo':
        data = read_array(fid)
        row  = read_array(fid)
        col  = read_array(fid)
        return scipy.sparse.coo_matrix((data,(row,col)),dims)

def write_sparse(fid,A,format):
    """Write a sparse matrix as a sparse header plus component arrays."""
    if A.format not in supported_sparse_formats:
        raise ArrayIOException('Only '+str(supported_sparse_formats)+' are supported')
    header = sparse_header(A)
    header['format'] = format
    fid.write(header.tostring())

    if A.format == 'csr':
        write_array(fid,A.data,format)
        write_array(fid,A.indices,format)
        write_array(fid,A.indptr,format)
    elif A.format == 'csc':
        write_array(fid,A.data,format)
        write_array(fid,A.indices,format)
        write_array(fid,A.indptr,format)
    elif A.format == 'coo':
        write_array(fid,A.data,format)
        write_array(fid,A.row,format)
        write_array(fid,A.col,format)
    else:
        # NOTE(review): unreachable given the guard above, and `false` is
        # not a Python name — would raise NameError if ever hit; presumably
        # meant `assert False`.
        assert(false)
{ "repo_name": "wangregoon/pydec", "path": "pydec/io/arrayio.py", "copies": "6", "size": "9348", "license": "bsd-3-clause", "hash": -3908098348429430000, "line_mean": 31.1237113402, "line_max": 136, "alpha_frac": 0.5708172871, "autogenerated": false, "ratio": 3.937657961246841, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.039153319804840984, "num_lines": 291 }
"""Reader for Gadget-2 N-body snapshot files (Python 2 module).

Gadget-2 snapshots are unformatted-Fortran style: every block is wrapped in
4-byte record-length markers, which is why the code seeks 4 bytes before and
after each block.
"""
__all__ = ['readGadgetSnapshot', 'GadgetHeader']

import numpy as np
import struct
from collections import namedtuple

# struct format string for the fixed 256-byte Gadget header (the named
# fields below; the header is padded to 256 bytes on disk).
__GadgetHeader_fmt = '6I6dddii6Iiiddddii6Ii'

GadgetHeader = namedtuple('GadgetHeader', \
        'npart mass time redshift flag_sfr flag_feedback npartTotal flag_cooling num_files BoxSize Omega0 OmegaLambda HubbleParam flag_age flag_metals NallHW flag_entr_ics')

def readGadgetSnapshot(filename, read_pos=False, read_vel=False, read_id=False,\
        read_mass=False, print_header=False, single_type=-1, lgadget=False):
    """
    This function reads the Gadget-2 snapshot file.

    Parameters
    ----------
    filename : str
        path to the input file
    read_pos : bool, optional
        Whether to read the positions or not. Default is false.
    read_vel : bool, optional
        Whether to read the velocities or not. Default is false.
    read_id : bool, optional
        Whether to read the particle IDs or not. Default is false.
    read_mass : bool, optional
        Whether to read the masses or not. Default is false.
    print_header : bool, optional
        Whether to print out the header or not. Default is false.
    single_type : int, optional
        Set to -1 (default) to read in all particle types.
        Set to 0--5 to read in only the corresponding particle type.
    lgadget : bool, optional
        Set to True if the particle file comes from l-gadget.
        Default is false.

    Returns
    -------
    ret : tuple
        A tuple of the requested data.
        The first item in the returned tuple is always the header.
        The header is in the GadgetHeader namedtuple format.
    """
    # Block order on disk: positions, velocities, IDs, masses.
    blocks_to_read = (read_pos, read_vel, read_id, read_mass)
    ret = []
    with open(filename, 'rb') as f:
        f.seek(4, 1)  # skip leading Fortran record marker of the header block
        h = list(struct.unpack(__GadgetHeader_fmt, \
                f.read(struct.calcsize(__GadgetHeader_fmt))))
        if lgadget:
            # l-gadget stores the (single-type) particle count differently;
            # move the count into the high-word slot and force type 1.
            h[30] = 0
            h[31] = h[18]
            h[18] = 0
            single_type = 1
        h = tuple(h)
        # Regroup the flat tuple into the namedtuple's array-valued fields.
        header = GadgetHeader._make((h[0:6],) + (h[6:12],) + h[12:16] \
                + (h[16:22],) + h[22:30] + (h[30:36],) + h[36:])
        if print_header:
            print header
        if not any(blocks_to_read):
            return header
        ret.append(header)
        # Header block is padded to 256 bytes; skip padding + trailing marker.
        f.seek(256 - struct.calcsize(__GadgetHeader_fmt), 1)
        f.seek(4, 1)
        #
        # Per-type count of particles whose mass is stored in the mass block
        # (types with a nonzero header mass have no per-particle masses).
        mass_npart = [0 if m else n for m, n in zip(header.mass, header.npart)]
        if single_type not in range(6):
            single_type = -1
        #
        for i, b in enumerate(blocks_to_read):
            if i < 2:
                # positions / velocities: 3 float32 per particle
                fmt = np.dtype(np.float32)
                item_per_part = 3
                npart = header.npart
            elif i==2:
                # IDs: 64-bit when l-gadget or high-word counts are present
                fmt = np.dtype(np.uint64) if lgadget or any(header.NallHW) \
                        else np.dtype(np.uint32)
                item_per_part = 1
                npart = header.npart
            elif i==3:
                # masses: only particles without a fixed header mass
                fmt = np.dtype(np.float32)
                if sum(mass_npart) == 0:
                    ret.append(np.array([], fmt))
                    break
                item_per_part = 1
                npart = mass_npart
            size_per_part = item_per_part*fmt.itemsize
            #
            f.seek(4, 1)  # leading record marker of this block
            if not b:
                # caller does not want this block: skip it wholesale
                f.seek(sum(npart)*size_per_part, 1)
            else:
                if single_type > -1:
                    # skip the earlier particle types within the block
                    f.seek(sum(npart[:single_type])*size_per_part, 1)
                    npart_this = npart[single_type]
                else:
                    npart_this = sum(npart)
                data = np.fromstring(f.read(npart_this*size_per_part), fmt)
                if item_per_part > 1:
                    data.shape = (npart_this, item_per_part)
                ret.append(data)
                # stop early if nothing later is requested (leaves the file
                # positioned mid-block, which is fine since we return next)
                if not any(blocks_to_read[i+1:]):
                    break
                if single_type > -1:
                    # skip the remaining particle types within the block
                    f.seek(sum(npart[single_type+1:])*size_per_part, 1)
            f.seek(4, 1)  # trailing record marker of this block
    #
    return tuple(ret)

if __name__ == '__main__':
    # CLI usage: print the header of each snapshot file given on argv.
    from sys import argv
    for f in argv[1:]:
        h = readGadgetSnapshot(f, print_header=True)
{ "repo_name": "drphilmarshall/Delaunay", "path": "readGadgetSnapshot.py", "copies": "1", "size": "4211", "license": "mit", "hash": 2784907580651161000, "line_mean": 35.6173913043, "line_max": 173, "alpha_frac": 0.5357397293, "autogenerated": false, "ratio": 3.6395851339671563, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9617496633788869, "avg_score": 0.01156564589565745, "num_lines": 115 }
"""Job records for the p4a build cloud, backed by Redis.

``r`` is the shared Redis client and ``config`` the site configuration,
both imported from ``web.config``. Jobs are stored as flat key/value pairs
under ``job:<uid>:<field>`` and logs under ``log:<uid>:<field>``.
"""
__all__ = ('read_job', 'read_logs')

import subprocess
from os.path import dirname, join, realpath

from web.config import config, r


class QueryDict(dict):
    # taken from kivy
    # A dict whose keys are also readable/writable as attributes.
    def __getattr__(self, attr):
        try:
            return self.__getitem__(attr)
        except KeyError:
            try:
                return super(QueryDict, self).__getattr__(attr)
            except AttributeError:
                raise KeyError(attr)

    def __setattr__(self, attr, value):
        self.__setitem__(attr, value)


class JobObj(QueryDict):
    """One build job; fields come straight from the Redis hash-like keys."""

    @property
    def directory(self):
        # Per-job working directory under <package root>/jobs/<uid>.
        return realpath(join(dirname(__file__), '..', 'jobs', self.uid))

    @property
    def data_fn(self):
        # Uploaded payload; extension is stored in the job record.
        return join(self.directory, 'data%s' % self.data_ext)

    @property
    def icon_fn(self):
        return join(self.directory, 'icon.png')

    @property
    def presplash_fn(self):
        return join(self.directory, 'presplash.png')

    @property
    def apk_fn(self):
        return join(self.directory, self.apk)

    def notify(self):
        """Email every recipient the build result via the system `mail` command.

        No-op when the job has no recipients. Redis values are strings, hence
        the ``== '1'`` comparison for the failure flag.
        """
        if len(self.emails) == 0:
            return
        status = 'failed build' if self.is_failed == '1' else 'finished'
        subject = '[p4a] Build %s, version %s %s' % (
            self.package_title, self.package_version, status)
        if self.is_failed == '1':
            content = ('Hi,\n\nYour package %s failed to build.\n\n'
                       'Informations: %s\n\nP4A Build Cloud.') % (
                               self.package_title,
                               '%s/job/%s' % (config.get('www', 'baseurl'),self.uid))
        else:
            content = ('Hi,\n\nYour package %s is available.\n\n'
                       'APK: %s\nInformations: %s\n\nEnjoy,\n\nP4A Build Cloud.') % (
                               self.package_title,
                               '%s/download/%s/%s' % (config.get('www', 'baseurl'),
                                   self.uid, self.apk),
                               '%s/job/%s' % (config.get('www', 'baseurl'),self.uid))
        # One `mail` process per address; the body is piped on stdin.
        for email in self.emails.split():
            cmd = ['mail', '-s', subject, '-a', 'From: p4a-noreply@kivy.org',
                   email]
            p = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE)
            p.stdin.write(content)
            p.stdin.close()
            p.communicate()


def read_entry(basekey, cls=QueryDict, *keyswanted):
    """Collect all Redis keys under ``basekey`` into a ``cls`` instance.

    Field names are the key suffixes after ``basekey``. When ``keyswanted``
    is given, only those fields are fetched; missing ones are preset to None.
    """
    keys = r.keys('%s*' % basekey)
    entry = cls()
    for key in keyswanted:
        entry[key] = None
    for key in keys:
        skey = key[len(basekey):]
        if keyswanted and skey not in keyswanted:
            continue
        entry[skey] = r.get(key)
    return entry


def read_logs(uid):
    """Return the log values stored for a job uid."""
    return read_entry('log:%s:' % uid).values()


def read_job(uid, *keys):
    """Load a JobObj for ``uid``, or None if no such job exists in Redis."""
    if not r.keys('job:%s' % uid):
        return None
    job = read_entry('job:%s:' % uid, JobObj, *keys)
    job['uid'] = uid
    return job
{ "repo_name": "kivy/p4a-cloud", "path": "master/web/job.py", "copies": "1", "size": "2861", "license": "mit", "hash": 7344058314927456000, "line_mean": 29.4361702128, "line_max": 82, "alpha_frac": 0.5354771059, "autogenerated": false, "ratio": 3.58072590738423, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9548111236453083, "avg_score": 0.013618355366229334, "num_lines": 94 }
"""XML-based mesh IO for pydec (Python 2 module).

A mesh is stored as a small XML index file whose child elements name the
data arrays, each saved in a sibling file via ``pydec.io.arrayio``.
"""
__all__ = ['read_mesh','write_mesh']

from pydec.mesh import simplicial_mesh
import pydec.io.arrayio

#from xml.dom.ext import PrettyPrint
from xml.dom import minidom

import os


class PyMeshException(Exception):
    """Base exception for mesh IO errors."""
    pass

class PyMeshIOException(PyMeshException):
    """Raised when a mesh file cannot be parsed."""
    pass

# Registry mapping the XML 'type' attribute to the mesh class and back.
mesh_str_type_pairs = [('simplicial_mesh',simplicial_mesh)]
mesh_str_to_type = dict(mesh_str_type_pairs)
mesh_type_to_str = dict([(t,s) for (s,t) in mesh_str_type_pairs])


def read_arrays(node_list,filepath):
    """Load each child element's referenced array file; return name->data."""
    array_dict = dict()

    for node in node_list:
        file_node = node.getElementsByTagName('file')[0]
        file_name = str(file_node.attributes['name'].value)
        # Array files are stored relative to the XML file's directory.
        file_name = os.path.join(filepath,file_name)
        data = pydec.io.arrayio.read_array(file_name)
        array_dict[str(node.nodeName)] = data

    return array_dict


def read_mesh(fid):
    """
    Read a mesh from a given open file or filename.

    Examples:
        my_mesh = read_mesh('torus.xml')
            or
        fid = open('torus.xml')
        my_mesh = read_mesh(fid)
    """
    if type(fid) is not file:
        fid = open(fid)

    xmldoc = minidom.parse(fid)
    mesh_node = xmldoc.firstChild
    if mesh_node.tagName != 'mesh':
        raise PyMeshIOException('Invalid XML root node')

    (filepath,filename) = os.path.split(fid.name)
    # Only element children name data arrays (skip text/whitespace nodes).
    children = [child for child in xmldoc.firstChild.childNodes if child.nodeType == child.ELEMENT_NODE]
    array_dict = read_arrays(children,filepath)

    # Default to simplicial_mesh when the file predates the type attribute.
    if mesh_node.hasAttribute('type'):
        mesh_str = str(mesh_node.attributes['type'].value)
    else:
        mesh_str = 'simplicial_mesh'

    mesh_type = mesh_str_to_type[mesh_str]
    return mesh_type(array_dict)


def write_mesh(fid,mesh,format='binary'):
    """
    Write a mesh to a given file or filename.

    ``format`` is forwarded to arrayio.write_array for each data array
    ('basic', 'ascii' or 'binary').

    Examples:
        write_mesh('torus.xml',my_mesh)
            or
        write_mesh('torus.xml',my_mesh,format='ascii')
            or
        fid = open('torus.xml')
        write_mesh(fid,my_mesh,format='basic')
    """
    if type(fid) is not file:
        fid = open(fid,'w')

    (filepath,filename) = os.path.split(fid.name)
    # Data files share the XML file's basename: <basename>.<key>
    basename = filename.split('.')[0]

    xmldoc = minidom.Document()
    mesh_node = xmldoc.appendChild(xmldoc.createElement('mesh'))
    mesh_node.setAttribute('type',mesh_type_to_str[type(mesh)])

    # The mesh behaves as a dict of name -> array (py2 iteritems).
    for key,value in mesh.iteritems():
        data_filename = basename + '.' + key

        data_node = mesh_node.appendChild(xmldoc.createElement(key))
        data_file_node = data_node.appendChild(xmldoc.createElement('file'))
        data_file_node.setAttribute('name',data_filename)

        pydec.io.arrayio.write_array(os.path.join(filepath,data_filename),value,format)

    xmldoc.writexml(fid,indent='',addindent='\t',newl='\n')
{ "repo_name": "pkuwwt/pydec", "path": "pydec/io/meshio.py", "copies": "6", "size": "2831", "license": "bsd-3-clause", "hash": -8700871506209128000, "line_mean": 24.0530973451, "line_max": 104, "alpha_frac": 0.6287530908, "autogenerated": false, "ratio": 3.4356796116504853, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.03790410917305179, "num_lines": 113 }
__all__ = [ "reconstruct" ] import numpy as np import _irtk import image from registration import RigidTransformation def reconstruct( slice_list, transformation_list, stack_ids, mask, thickness, iterations=1 ): """ No registration is performed if there is only one iteration. """ # if mask is None: # mask = image.ones( template_header, dtype='float32' ) stack_ids = np.array( stack_ids, dtype='int32' ) thickness = np.array( thickness, dtype='float64' ) n = len(slice_list) nb_pixels = 0 for img in slice_list: nb_pixels += img.nb_pixels() pixelData = np.zeros( nb_pixels, dtype='float32' ) pixelSize = np.zeros( 4*n, dtype='float64' ) xAxis = np.zeros( 3*n, dtype='float64' ) yAxis = np.zeros( 3*n, dtype='float64' ) zAxis = np.zeros( 3*n, dtype='float64' ) origin = np.zeros( 4*n, dtype='float64' ) dim = np.zeros( 4*n, dtype='int32' ) tx = np.zeros( n, dtype='float64' ) ty = np.zeros( n, dtype='float64' ) tz = np.zeros( n, dtype='float64' ) rx = np.zeros( n, dtype='float64' ) ry = np.zeros( n, dtype='float64' ) rz = np.zeros( n, dtype='float64' ) offset = 0 for i, img in enumerate(slice_list): pixelData[offset:offset+img.nb_pixels()] = img.flatten() offset += img.nb_pixels() pixelSize[4*i:4*(i+1)] = img.header['pixelSize'] xAxis[3*i:3*(i+1)] = img.header['orientation'][0] yAxis[3*i:3*(i+1)] = img.header['orientation'][1] zAxis[3*i:3*(i+1)] = img.header['orientation'][2] origin[4*i:4*(i+1)] = img.header['origin'] dim[4*i:4*(i+1)] = img.header['dim'] tx[i] = transformation_list[i].tx ty[i] = transformation_list[i].ty tz[i] = transformation_list[i].tz rx[i] = transformation_list[i].rx ry[i] = transformation_list[i].ry rz[i] = transformation_list[i].rz #_irtk.write_list( pixelData, pixelSize, xAxis, yAxis, zAxis, origin, dim, n ) reconstructed = _irtk.reconstruct( pixelData, pixelSize, xAxis, yAxis, zAxis, origin, dim, n, stack_ids, iterations, tx, ty, tz, rx, ry, rz, thickness, mask.get_data('float32','cython'), mask.get_header() ) new_transformations = [] for i in 
xrange(n): new_transformations.append( RigidTransformation( tx=tx[i], ty=ty[i], tz=tz[i], rx=rx[i], ry=ry[i], rz=rz[i] ) ) return ( image.Image(reconstructed, mask.get_header()), new_transformations )
{ "repo_name": "BioMedIA/IRTK", "path": "wrapping/cython/irtk/ext/reconstruction.py", "copies": "5", "size": "3174", "license": "apache-2.0", "hash": 6594072053911882000, "line_mean": 35.4827586207, "line_max": 90, "alpha_frac": 0.470384373, "autogenerated": false, "ratio": 3.703617269544924, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.020074838129160444, "num_lines": 87 }
__all__ = ['ReferenceFrame', 'Vector', 'Dyadic', 'dynamicsymbols', 'MechanicsStrPrinter', 'MechanicsPrettyPrinter', 'MechanicsLatexPrinter'] from sympy import (Matrix, Symbol, sin, cos, eye, trigsimp, diff, sqrt, sympify, expand, zeros, Derivative, Function, symbols, Add, solve) from sympy.core import C from sympy.core.function import UndefinedFunction from sympy.core.numbers import Zero from sympy.printing.conventions import split_super_sub from sympy.printing.latex import LatexPrinter from sympy.printing.pretty.pretty import PrettyPrinter from sympy.printing.pretty.stringpict import prettyForm, stringPict from sympy.printing.str import StrPrinter from sympy.utilities import group class Dyadic(object): """A Dyadic object. See: http://en.wikipedia.org/wiki/Dyadic_tensor Kane, T., Levinson, D. Dynamics Theory and Applications. 1985 McGraw-Hill A more powerful way to represent a rigid body's inertia. While it is more complex, by choosing Dyadic components to be in body fixed basis vectors, the resulting matrix is equivalent to the inertia tensor. """ def __init__(self, inlist): """ Just like Vector's init, you shouldn't call this. Stores a Dyadic as a list of lists; the inner list has the measure number and the two unit vectors; the outerlist holds each unique unit vector pair. """ self.args = [] while len(inlist) != 0: added = 0 for i, v in enumerate(self.args): if ((str(inlist[0][1]) == str(self.args[i][1])) and (str(inlist[0][2]) == str(self.args[i][2]))): self.args[i] = (self.args[i][0] + inlist[0][0], inlist[0][1], inlist[0][2]) inlist.remove(inlist[0]) added = 1 break if added != 1: self.args.append(inlist[0]) inlist.remove(inlist[0]) i = 0 # This code is to remove empty parts from the list while i < len(self.args): if ((self.args[i][0] == 0) | (self.args[i][1] == 0) | (self.args[i][2] == 0)): self.args.remove(self.args[i]) i -= 1 i += 1 def __add__(self, other): """The add operator for Dyadic. 
""" if isinstance(other, (int, type(Zero()))): if other == 0: return self self._check_dyadic(other) return Dyadic(self.args + other.args) def __and__(self, other): """The inner product operator for a Dyadic and a Dyadic or Vector. Parameters ========== other : Dyadic or Vector The other Dyadic or Vector to take the inner product with Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, outer >>> N = ReferenceFrame('N') >>> D1 = outer(N.x, N.y) >>> D2 = outer(N.y, N.y) >>> D1.dot(D2) (N.x|N.y) >>> D1.dot(N.y) N.x """ if isinstance(other, (int, type(Zero()))): if other == 0: return 0 ol = 0 if isinstance(other, Dyadic): for i, v in enumerate(self.args): for i2, v2 in enumerate(other.args): ol += v[0] * v2[0] * (v[2] & v2[1]) * (v[1] | v2[2]) elif isinstance(other, Vector): for i, v in enumerate(self.args): ol += v[0] * v[1] * (v[2] & other) else: raise TypeError('Need to supply a Vector or Dyadic') return ol def __div__(self, other): """Divides the Dyadic by a sympifyable expression. """ return self.__mul__(1 / other) __truediv__ = __div__ def __eq__(self, other): """Tests for equality. Is currently weak; needs stronger comparison testing """ if isinstance(other, (int, type(Zero()))): if (other == 0) & (self.args == []): return True elif other == 0: return False self._check_dyadic(other) return set(self.args) == set(other.args) def __mul__(self, other): """Multiplies the Dyadic by a sympifyable expression. 
Parameters ========== other : Sympafiable The scalar to multiply this Dyadic with Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, outer >>> N = ReferenceFrame('N') >>> d = outer(N.x, N.x) >>> 5 * d 5*(N.x|N.x) """ newlist = [v for v in self.args] for i, v in enumerate(newlist): newlist[i] = (sympify(other) * newlist[i][0], newlist[i][1], newlist[i][2]) return Dyadic(newlist) def __ne__(self, other): return not self.__eq__(other) def __neg__(self): return self * -1 def _latex(self, printer=None): ar = self.args # just to shorten things if len(ar) == 0: return str(0) ol = [] # output list, to be concatenated to a string mlp = MechanicsLatexPrinter() for i, v in enumerate(ar): # if the coef of the dyadic is 1, we skip the 1 if ar[i][0] == 1: ol.append(' + ' + mlp.doprint(ar[i][1]) + r"\otimes " + mlp.doprint(ar[i][2])) # if the coef of the dyadic is -1, we skip the 1 elif ar[i][0] == -1: ol.append(' - ' + mlp.doprint(ar[i][1]) + r"\otimes " + mlp.doprint(ar[i][2])) # If the coefficient of the dyadic is not 1 or -1, # we might wrap it in parentheses, for readability. 
elif ar[i][0] != 0: arg_str = mlp.doprint(ar[i][0]) if isinstance(ar[i][0], Add): arg_str = '(%s)' % arg_str if arg_str.startswith('-'): arg_str = arg_str[1:] str_start = ' - ' else: str_start = ' + ' ol.append(str_start + arg_str + r" " + mlp.doprint(ar[i][1]) + r"\otimes " + mlp.doprint(ar[i][2])) outstr = ''.join(ol) if outstr.startswith(' + '): outstr = outstr[3:] elif outstr.startswith(' '): outstr = outstr[1:] return outstr def _pretty(self, printer=None): e = self class Fake(object): baseline = 0 def render(self, *args, **kwargs): self = e ar = self.args # just to shorten things mpp = MechanicsPrettyPrinter() if len(ar) == 0: return unicode(0) ol = [] # output list, to be concatenated to a string for i, v in enumerate(ar): # if the coef of the dyadic is 1, we skip the 1 if ar[i][0] == 1: ol.append(u" + " + mpp.doprint(ar[i][1]) + u"\u2a02 " + mpp.doprint(ar[i][2])) # if the coef of the dyadic is -1, we skip the 1 elif ar[i][0] == -1: ol.append(u" - " + mpp.doprint(ar[i][1]) + u"\u2a02 " + mpp.doprint(ar[i][2])) # If the coefficient of the dyadic is not 1 or -1, # we might wrap it in parentheses, for readability. 
elif ar[i][0] != 0: arg_str = mpp.doprint(ar[i][0]) if isinstance(ar[i][0], Add): arg_str = u"(%s)" % arg_str if arg_str.startswith(u"-"): arg_str = arg_str[1:] str_start = u" - " else: str_start = u" + " ol.append(str_start + arg_str + u" " + mpp.doprint(ar[i][1]) + u"\u2a02 " + mpp.doprint(ar[i][2])) outstr = u"".join(ol) if outstr.startswith(u" + "): outstr = outstr[3:] elif outstr.startswith(" "): outstr = outstr[1:] return outstr return Fake() def __rand__(self, other): """The inner product operator for a Vector or Dyadic, and a Dyadic This is for: Vector dot Dyadic Parameters ========== other : Vector The vector we are dotting with Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, dot, outer >>> N = ReferenceFrame('N') >>> d = outer(N.x, N.x) >>> dot(N.x, d) N.x """ if isinstance(other, (int, type(Zero()))): if other == 0: return 0 ol = 0 if isinstance(other, Vector): for i, v in enumerate(self.args): ol += v[0] * v[2] * (v[1] & other) else: raise TypeError('Need to supply a Vector or Dyadic') return ol def __rsub__(self, other): return (-1 * self) + other def __rxor__(self, other): """For a cross product in the form: Vector x Dyadic Parameters ========== other : Vector The Vector that we are crossing this Dyadic with Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, outer, cross >>> N = ReferenceFrame('N') >>> d = outer(N.x, N.x) >>> cross(N.y, d) - (N.z|N.x) """ if isinstance(other, (int, type(Zero()))): if (other == 0): return 0 self._check_vector(other) ol = 0 for i, v in enumerate(self.args): ol += v[0] * ((other ^ v[1]) | v[2]) return ol def __str__(self, printer=None): """Printing method. 
""" ar = self.args # just to shorten things if len(ar) == 0: return str(0) ol = [] # output list, to be concatenated to a string for i, v in enumerate(ar): # if the coef of the dyadic is 1, we skip the 1 if ar[i][0] == 1: ol.append(' + (' + str(ar[i][1]) + '|' + str(ar[i][2]) + ')') # if the coef of the dyadic is -1, we skip the 1 elif ar[i][0] == -1: ol.append(' - (' + str(ar[i][1]) + '|' + str(ar[i][2]) + ')') # If the coefficient of the dyadic is not 1 or -1, # we might wrap it in parentheses, for readability. elif ar[i][0] != 0: arg_str = MechanicsStrPrinter().doprint(ar[i][0]) if isinstance(ar[i][0], Add): arg_str = "(%s)" % arg_str if arg_str[0] == '-': arg_str = arg_str[1:] str_start = ' - ' else: str_start = ' + ' ol.append(str_start + arg_str + '*(' + str(ar[i][1]) + '|' + str(ar[i][2]) + ')') outstr = ''.join(ol) if outstr.startswith(' + '): outstr = outstr[3:] elif outstr.startswith(' '): outstr = outstr[1:] return outstr def __sub__(self, other): """The subtraction operator. """ return self.__add__(other * -1) def __xor__(self, other): """For a cross product in the form: Dyadic x Vector. 
Parameters ========== other : Vector The Vector that we are crossing this Dyadic with Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, outer, cross >>> N = ReferenceFrame('N') >>> d = outer(N.x, N.x) >>> cross(d, N.y) (N.x|N.z) """ if isinstance(other, (int, type(Zero()))): if (other == 0): return 0 self._check_vector(other) ol = 0 for i, v in enumerate(self.args): ol += v[0] * (v[1] | (v[2] ^ other)) return ol def _check_frame(self, other): if not isinstance(other, ReferenceFrame): raise TypeError('A ReferenceFrame must be supplied') def _check_dyadic(self, other): if isinstance(other, (int, type(Zero()))): if other == 0: return if not isinstance(other, Dyadic): raise TypeError('A Dyadic must be supplied') def _check_vector(self, other): if isinstance(other, (int, type(Zero()))): if other == 0: return if not isinstance(other, Vector): raise TypeError('A Vector must be supplied') _sympystr = __str__ _sympyrepr = _sympystr __repr__ = __str__ __radd__ = __add__ __rmul__ = __mul__ def express(self, frame1, frame2=None): """Expresses this Dyadic in alternate frame(s) The first frame is the list side expression, the second frame is the right side; if Dyadic is in form A.x|B.y, you can express it in two different frames. If no second frame is given, the Dyadic is expressed in only one frame. 
Parameters ========== frame1 : ReferenceFrame The frame to express the left side of the Dyadic in frame2 : ReferenceFrame If provided, the frame to express the right side of the Dyadic in Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, outer, dynamicsymbols >>> N = ReferenceFrame('N') >>> q = dynamicsymbols('q') >>> B = N.orientnew('B', 'Axis', [q, N.z]) >>> d = outer(N.x, N.x) >>> d.express(B, N) cos(q)*(B.x|N.x) - sin(q)*(B.y|N.x) """ if frame2 == None: frame2 = frame1 self._check_frame(frame1) self._check_frame(frame2) ol = 0 for i, v in enumerate(self.args): ol += v[0] * (v[1].express(frame1) | v[2].express(frame2)) return ol def dt(self, frame): """Take the time derivative of this Dyadic in a frame. Parameters ========== frame : ReferenceFrame The frame to take the time derivative in Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, outer, dynamicsymbols >>> N = ReferenceFrame('N') >>> q = dynamicsymbols('q') >>> B = N.orientnew('B', 'Axis', [q, N.z]) >>> d = outer(N.x, N.x) >>> d.dt(B) - q'*(N.y|N.x) - q'*(N.x|N.y) """ self._check_frame(frame) t = dynamicsymbols._t ol = 0 for i, v in enumerate(self.args): ol += (v[0].diff(t) * (v[1] | v[2])) ol += (v[0] * (v[1].dt(frame) | v[2])) ol += (v[0] * (v[1] | v[2].dt(frame))) return ol def subs(self, dictin): """Substituion on the Dyadic, with a dict. Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame >>> from sympy import Symbol >>> N = ReferenceFrame('N') >>> s = Symbol('s') >>> a = s * (N.x|N.x) >>> a.subs({s: 2}) 2*(N.x|N.x) """ return sum([ Dyadic( [ (v[0].subs(dictin), v[1], v[2]) ]) for v in self.args]) dot = __and__ cross = __xor__ class ReferenceFrame(object): """A reference frame in classical mechanics. ReferenceFrame is a class used to represent a reference frame in classical mechanics. It has a standard basis of three unit vectors in the frame's x, y, and z directions. 
It also can have a rotation relative to a parent frame; this rotation is defined by a direction cosine matrix relating this frame's basis vectors to the parent frame's basis vectors. It can also have an angular velocity vector, defined in another frame. """ def __init__(self, name, indices=None, latexs=None): """ReferenceFrame initialization method. A ReferenceFrame has a set of orthonormal basis vectors, along with orientations relative to other ReferenceFrames and angular velocities relative to other ReferenceFrames. Parameters ========== indices : list (of strings) If custom indices are desired for console, pretty, and LaTeX printing, supply three as a list. The basis vectors can then be accessed with the get_item method. latexs : list (of strings) If custom names are desired for LaTeX printing of each basis vector, supply the names here in a list. Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, mlatex >>> N = ReferenceFrame('N') >>> N.x N.x >>> O = ReferenceFrame('O', ('1', '2', '3')) >>> O.x O['1'] >>> O['1'] O['1'] >>> P = ReferenceFrame('P', latexs=('A1', 'A2', 'A3')) >>> mlatex(P.x) 'A1' """ if not isinstance(name, (str, unicode)): raise TypeError('Need to supply a valid name') # The if statements below are for custom printing of basis-vectors for # each frame. 
# First case, when custom indices are supplied if indices != None: if not isinstance(indices, (tuple, list)): raise TypeError('Supply the indices as a list') if len(indices) != 3: raise ValueError('Supply 3 indices') for i in indices: if not isinstance(i, (str, unicode)): raise TypeError('Indices must be strings') self.str_vecs = [(name + '[\'' + indices[0] + '\']'), (name + '[\'' + indices[1] + '\']'), (name + '[\'' + indices[2] + '\']')] self.pretty_vecs = [(u"\033[94m\033[1m" + name.lower() + u"_" + indices[0] + u"\033[0;0m\x1b[0;0m"), (u"\033[94m\033[1m" + name.lower() + u"_" + indices[1] + u"\033[0;0m\x1b[0;0m"), (u"\033[94m\033[1m" + name.lower() + u"_" + indices[2] + u"\033[0;0m\x1b[0;0m")] self.latex_vecs = [(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(), indices[0])), (r"\mathbf{\hat{%s}_{%s}}" % (name.lower(), indices[1])), (r"\mathbf{\hat{%s}_{%s}}" % (name.lower(), indices[2]))] self.indices = indices # Second case, when no custom indices are supplied else: self.str_vecs = [(name + '.x'), (name + '.y'), (name + '.z')] self.pretty_vecs = [(u"\033[94m\033[1m" + name.lower() + u"_x\033[0;0m\x1b[0;0m"), (u"\033[94m\033[1m" + name.lower() + u"_y\033[0;0m\x1b[0;0m"), (u"\033[94m\033[1m" + name.lower() + u"_z\033[0;0m\x1b[0;0m")] self.latex_vecs = [(r"\mathbf{\hat{%s}_x}" % name.lower()), (r"\mathbf{\hat{%s}_y}" % name.lower()), (r"\mathbf{\hat{%s}_z}" % name.lower())] self.indices = ['x', 'y', 'z'] # Different step, for custom latex basis vectors if latexs != None: if not isinstance(latexs, (tuple, list)): raise TypeError('Supply the indices as a list') if len(latexs) != 3: raise ValueError('Supply 3 indices') for i in latexs: if not isinstance(i, (str, unicode)): raise TypeError('Latex entries must be strings') self.latex_vecs = latexs self.name = name self._dcm_dict = {} self._ang_vel_dict = {} self._ang_acc_dict = {} self._dlist = [self._dcm_dict, self._ang_vel_dict, self._ang_acc_dict] self._cur = 0 self._x = Vector([(Matrix([1, 0, 0]), self)]) self._y = 
Vector([(Matrix([0, 1, 0]), self)]) self._z = Vector([(Matrix([0, 0, 1]), self)]) def __getitem__(self, ind): """Returns basis vector for the provided index (index being an str)""" if not isinstance(ind, (str, unicode)): raise TypeError('Supply a valid str for the index') if self.indices[0] == ind: return self.x if self.indices[1] == ind: return self.y if self.indices[2] == ind: return self.z else: raise ValueError('Not a defined index') def __iter__(self): return iter([self.x, self.y, self.z]) def __str__(self): """Returns the name of the frame. """ return self.name __repr__ = __str__ def _check_frame(self, other): if not isinstance(other, ReferenceFrame): raise TypeError('A ReferenceFrame must be supplied') def _check_vector(self, other): if isinstance(other, (int, type(Zero()))): if other == 0: return if not isinstance(other, Vector): raise TypeError('A Vector must be supplied') def _dict_list(self, other, num): """Creates a list from self to other using _dcm_dict. """ outlist = [[self]] oldlist = [[]] while outlist != oldlist: oldlist = outlist[:] for i, v in enumerate(outlist): templist = v[-1]._dlist[num].keys() for i2, v2 in enumerate(templist): if not v.__contains__(v2): littletemplist = v + [v2] if not outlist.__contains__(littletemplist): outlist.append(littletemplist) for i, v in enumerate(oldlist): if v[-1] != other: outlist.remove(v) outlist.sort(key = len) if len(outlist) != 0: return outlist[0] raise ValueError('No Connecting Path found between ' + self.name + ' and ' + other.name) def _w_diff_dcm(self, otherframe): """Angular velocity from time differentiating the DCM. 
""" dcm2diff = self.dcm(otherframe) diffed = dcm2diff.diff(dynamicsymbols._t) angvelmat = diffed * dcm2diff.T w1 = trigsimp(expand(angvelmat[7]), recursive=True) w2 = trigsimp(expand(angvelmat[2]), recursive=True) w3 = trigsimp(expand(angvelmat[3]), recursive=True) return -Vector([(Matrix([w1, w2, w3]), self)]) def ang_acc_in(self, otherframe): """Returns the angular acceleration Vector of the ReferenceFrame. Effectively returns the Vector: ^N alpha ^B which represent the angular acceleration of B in N, where B is self, and N is otherframe. Parameters ========== otherframe : ReferenceFrame The ReferenceFrame which the angular acceleration is returned in. Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, Vector >>> N = ReferenceFrame('N') >>> A = ReferenceFrame('A') >>> V = 10 * N.x >>> A.set_ang_acc(N, V) >>> A.ang_acc_in(N) 10*N.x """ self._check_frame(otherframe) if otherframe in self._ang_acc_dict: return self._ang_acc_dict[otherframe] else: return self.ang_vel_in(otherframe).dt(otherframe) def ang_vel_in(self, otherframe): """Returns the angular velocity Vector of the ReferenceFrame. Effectively returns the Vector: ^N omega ^B which represent the angular velocity of B in N, where B is self, and N is otherframe. Parameters ========== otherframe : ReferenceFrame The ReferenceFrame which the angular velocity is returned in. Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, Vector >>> N = ReferenceFrame('N') >>> A = ReferenceFrame('A') >>> V = 10 * N.x >>> A.set_ang_vel(N, V) >>> A.ang_vel_in(N) 10*N.x """ self._check_frame(otherframe) flist = self._dict_list(otherframe, 1) outvec = 0 for i in range(len(flist) - 1): outvec += flist[i]._ang_vel_dict[flist[i + 1]] return outvec def dcm(self, otherframe): """The direction cosine matrix between frames. This gives the DCM between this frame and the otherframe. The format is N.xyz = N.dcm(B) * B.xyz A SymPy Matrix is returned. 
Parameters ========== otherframe : ReferenceFrame The otherframe which the DCM is generated to. Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, Vector >>> from sympy import symbols >>> q1 = symbols('q1') >>> N = ReferenceFrame('N') >>> A = N.orientnew('A', 'Axis', [q1, N.x]) >>> N.dcm(A) [1, 0, 0] [0, cos(q1), -sin(q1)] [0, sin(q1), cos(q1)] """ self._check_frame(otherframe) flist = self._dict_list(otherframe, 0) outdcm = eye(3) for i in range(len(flist) - 1): outdcm = outdcm * flist[i + 1]._dcm_dict[flist[i]] return outdcm def orient(self, parent, rot_type, amounts, rot_order=''): """Defines the orientation of this frame relative to a parent frame. Supported orientation types are Body, Space, Quaternion, Axis. Examples show correct usage. Parameters ========== parent : ReferenceFrame The frame that this ReferenceFrame will have its orientation matrix defined in relation to. rot_type : str The type of orientation matrix that is being created. amounts : list OR value The quantities that the orientation matrix will be defined by. rot_order : str If applicable, the order of a series of rotations. Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, Vector >>> from sympy import symbols >>> q0, q1, q2, q3, q4 = symbols('q0 q1 q2 q3 q4') >>> N = ReferenceFrame('N') >>> B = ReferenceFrame('B') Now we have a choice of how to implement the orientation. First is Body. Body orientation takes this reference frame through three successive simple rotations. Acceptable rotation orders are of length 3, expressed in XYZ or 123, and cannot have a rotation about about an axis twice in a row. >>> B.orient(N, 'Body', [q1, q2, q3], '123') >>> B.orient(N, 'Body', [q1, q2, 0], 'ZXZ') >>> B.orient(N, 'Body', [0, 0, 0], 'XYX') Next is Space. Space is like Body, but the rotations are applied in the opposite order. >>> B.orient(N, 'Space', [q1, q2, q3], '312') Next is Quaternion. 
This orients the new ReferenceFrame with Quaternions, defined as a finite rotation about lambda, a unit vector, by some amount theta. This orientation is described by four parameters: q0 = cos(theta/2) q1 = lambda_x sin(theta/2) q2 = lambda_y sin(theta/2) q3 = lambda_z sin(theta/2) Quaternion does not take in a rotation order. >>> B.orient(N, 'Quaternion', [q0, q1, q2, q3]) Last is Axis. This is a rotation about an arbitrary, non-time-varying axis by some angle. The axis is supplied as a Vector. This is how simple rotations are defined. >>> B.orient(N, 'Axis', [q1, N.x + 2 * N.y]) """ self._check_frame(parent) amounts = list(amounts) for i, v in enumerate(amounts): if not isinstance(v, Vector): amounts[i] = sympify(v) def _rot(axis, angle): """DCM for simple axis 1,2,or 3 rotations. """ if axis == 1: return Matrix([[1, 0, 0], [0, cos(angle), -sin(angle)], [0, sin(angle), cos(angle)]]) elif axis == 2: return Matrix([[cos(angle), 0, sin(angle)], [0, 1, 0], [-sin(angle), 0, cos(angle)]]) elif axis == 3: return Matrix([[cos(angle), -sin(angle), 0], [sin(angle), cos(angle), 0], [0, 0, 1]]) approved_orders = ('123', '231', '312', '132', '213', '321', '121', '131', '212', '232', '313', '323', '') rot_order = str(rot_order).upper() # Now we need to make sure XYZ = 123 rot_type = rot_type.upper() rot_order = [i.replace('X', '1') for i in rot_order] rot_order = [i.replace('Y', '2') for i in rot_order] rot_order = [i.replace('Z', '3') for i in rot_order] rot_order = ''.join(rot_order) if not rot_order in approved_orders: raise TypeError('The supplied order is not an approved type') parent_orient = [] if rot_type == 'AXIS': if not rot_order == '': raise TypeError('Axis orientation takes no rotation order') if not (isinstance(amounts, (list, tuple)) & (len(amounts) == 2)): raise TypeError('Amounts are a list or tuple of length 2') theta = amounts[0] axis = amounts[1] self._check_vector(axis) if not axis.dt(parent) == 0: raise ValueError('Axis cannot be time-varying') axis = 
axis.express(parent).normalize() axis = axis.args[0][0] parent_orient = ((eye(3) - axis * axis.T) * cos(theta) + Matrix([[0, -axis[2], axis[1]],[axis[2], 0, -axis[0]], [-axis[1], axis[0], 0]]) * sin(theta) + axis * axis.T) elif rot_type == 'QUATERNION': if not rot_order == '': raise TypeError('Quaternion orientation takes no rotation order') if not (isinstance(amounts, (list, tuple)) & (len(amounts) == 4)): raise TypeError('Amounts are a list or tuple of length 4') q0, q1, q2, q3 = amounts parent_orient = (Matrix([[q0 ** 2 + q1 ** 2 - q2 ** 2 - q3 ** 2, 2 * (q1 * q2 - q0 * q3), 2 * (q0 * q2 + q1 * q3)], [2 * (q1 * q2 + q0 * q3), q0 ** 2 - q1 ** 2 + q2 **2 - q3 ** 2, 2 * (q2 * q3 - q0 * q1)], [2 * (q1 * q3 - q0 * q2), 2 * (q0 * q1 + q2 * q3), q0 ** 2 - q1 ** 2 - q2 ** 2 + q3 ** 2]])) elif rot_type == 'BODY': if not (len(amounts) == 3 & len(rot_order) == 3): raise TypeError('Body orientation takes 3 values & 3 orders') a1 = int(rot_order[0]) a2 = int(rot_order[1]) a3 = int(rot_order[2]) parent_orient = (_rot(a1, amounts[0]) * _rot(a2, amounts[1]) * _rot(a3, amounts[2])) elif rot_type == 'SPACE': if not (len(amounts) == 3 & len(rot_order) == 3): raise TypeError('Space orientation takes 3 values & 3 orders') a1 = int(rot_order[0]) a2 = int(rot_order[1]) a3 = int(rot_order[2]) parent_orient = (_rot(a3, amounts[2]) * _rot(a2, amounts[1]) * _rot(a1, amounts[0])) else: raise NotImplementedError('That is not an implemented rotation') self._dcm_dict.update({parent: parent_orient}) parent._dcm_dict.update({self: parent_orient.T}) if rot_type == 'QUATERNION': t = dynamicsymbols._t q0, q1, q2, q3 = amounts q0d = diff(q0, t) q1d = diff(q1, t) q2d = diff(q2, t) q3d = diff(q3, t) w1 = 2 * (q1d * q0 + q2d * q3 - q3d * q2 - q0d * q1) w2 = 2 * (q2d * q0 + q3d * q1 - q1d * q3 - q0d * q2) w3 = 2 * (q3d * q0 + q1d * q2 - q2d * q1 - q0d * q3) wvec = Vector([(Matrix([w1, w2, w3]), self)]) elif rot_type == 'AXIS': thetad = (amounts[0]).diff(dynamicsymbols._t) wvec = thetad * 
amounts[1].express(parent).normalize() else: try: from sympy.polys.polyerrors import CoercionFailed from sympy.physics.mechanics.functions import kinematic_equations q1, q2, q3 = amounts u1, u2, u3 = dynamicsymbols('u1, u2, u3') templist = kinematic_equations([u1, u2, u3], [q1, q2, q3], rot_type, rot_order) templist = [expand(i) for i in templist] td = solve(templist, [u1, u2, u3]) u1 = expand(td[u1]) u2 = expand(td[u2]) u3 = expand(td[u3]) wvec = u1 * self.x + u2 * self.y + u3 * self.z except (CoercionFailed, AssertionError): wvec = self._w_diff_dcm(parent) self._ang_vel_dict.update({parent: wvec}) parent._ang_vel_dict.update({self: -wvec}) def orientnew(self, newname, rot_type, amounts, rot_order='', indices=None, latexs=None): """Creates a new ReferenceFrame oriented with respect to this Frame. See ReferenceFrame.orient() for acceptable rotation types, amounts, and orders. Parent is going to be self. Parameters ========== newname : str The name for the new ReferenceFrame rot_type : str The type of orientation matrix that is being created. amounts : list OR value The quantities that the orientation matrix will be defined by. rot_order : str If applicable, the order of a series of rotations. Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, Vector >>> from sympy import symbols >>> q1 = symbols('q1') >>> N = ReferenceFrame('N') >>> A = N.orientnew('A', 'Axis', [q1, N.x]) .orient() documentation:\n ======================== """ newframe = ReferenceFrame(newname, indices, latexs) newframe.orient(self, rot_type, amounts, rot_order) return newframe orientnew.__doc__ += orient.__doc__ def set_ang_acc(self, otherframe, value): """Define the angular acceleration Vector in a ReferenceFrame. Defines the angular acceleration of this ReferenceFrame, in another. Angular acceleration can be defined with respect to multiple different ReferenceFrames. Care must be taken to not create loops which are inconsistent. 
Parameters ========== otherframe : ReferenceFrame A ReferenceFrame to define the angular acceleration in value : Vector The Vector representing angular acceleration Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, Vector >>> N = ReferenceFrame('N') >>> A = ReferenceFrame('A') >>> V = 10 * N.x >>> A.set_ang_acc(N, V) >>> A.ang_acc_in(N) 10*N.x """ self._check_vector(value) self._check_frame(otherframe) self._ang_acc_dict.update({otherframe: value}) otherframe._ang_acc_dict.update({self: -value}) def set_ang_vel(self, otherframe, value): """Define the angular velocity vector in a ReferenceFrame. Defines the angular velocity of this ReferenceFrame, in another. Angular velocity can be defined with respect to multiple different ReferenceFrames. Care must be taken to not create loops which are inconsistent. Parameters ========== otherframe : ReferenceFrame A ReferenceFrame to define the angular velocity in value : Vector The Vector representing angular velocity Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, Vector >>> N = ReferenceFrame('N') >>> A = ReferenceFrame('A') >>> V = 10 * N.x >>> A.set_ang_vel(N, V) >>> A.ang_vel_in(N) 10*N.x """ self._check_vector(value) self._check_frame(otherframe) self._ang_vel_dict.update({otherframe: value}) otherframe._ang_vel_dict.update({self: -value}) @property def x(self): """The basis Vector for the ReferenceFrame, in the x direction. """ return self._x @property def y(self): """The basis Vector for the ReferenceFrame, in the y direction. """ return self._y @property def z(self): """The basis Vector for the ReferenceFrame, in the z direction. """ return self._z class Vector(object): """The class used to define vectors. It along with ReferenceFrame are the building blocks of describing a classical mechanics system in PyDy. 
    Attributes
    ==========
    simp : Boolean
        Let certain methods use trigsimp on their outputs

    """

    # class-wide switch: when True, dot/express results are run through
    # trigsimp before being returned
    simp = False

    def __init__(self, inlist):
        """This is the constructor for the Vector class.  You shouldn't be
        calling this, it should only be used by other functions. You should be
        treating Vectors like you would with if you were doing the math by
        hand, and getting the first 3 from the standard basis vectors from a
        ReferenceFrame.

        """
        # self.args is a list of (measure-number Matrix, ReferenceFrame)
        # pairs; incoming entries that share a frame are merged by adding
        # their measure-number matrices
        self.args = []
        while len(inlist) != 0:
            added = 0
            for i, v in enumerate(self.args):
                if inlist[0][1] == self.args[i][1]:
                    self.args[i] = (self.args[i][0] +
                                    inlist[0][0], inlist[0][1])
                    inlist.remove(inlist[0])
                    added = 1
                    break
            if added != 1:
                self.args.append(inlist[0])
                inlist.remove(inlist[0])
        i = 0
        # This code is to remove empty frames from the list
        while i < len(self.args):
            if self.args[i][0] == Matrix([0, 0, 0]):
                self.args.remove(self.args[i])
                i -= 1
            i += 1

    def __hash__(self):
        # hash on the (matrix, frame) term list so equal Vectors hash alike
        return hash(tuple(self.args))

    def __add__(self, other):
        """The add operator for Vector. """
        # the scalar 0 acts as the additive identity
        if isinstance(other, (int, type(Zero()))):
            if (other == 0):
                return self
        self._check_vector(other)
        # Vector.__init__ merges terms that share a frame
        return Vector(self.args + other.args)

    def __and__(self, other):
        """Dot product of two vectors.

        Returns a scalar, the dot product of the two Vectors

        Parameters
        ==========
        other : Vector
            The Vector which we are dotting with

        Examples
        ========

        >>> from sympy.physics.mechanics import ReferenceFrame, Vector, dot
        >>> from sympy import symbols
        >>> q1 = symbols('q1')
        >>> N = ReferenceFrame('N')
        >>> dot(N.x, N.x)
        1
        >>> dot(N.x, N.y)
        0
        >>> A = N.orientnew('A', 'Axis', [q1, N.x])
        >>> dot(N.y, A.y)
        cos(q1)

        """
        # Dyadic handles Vector & Dyadic itself (rank-reducing case)
        if isinstance(other, Dyadic):
            return NotImplemented
        if isinstance(other, (int, type(Zero()))):
            if (other == 0):
                return 0
        self._check_vector(other)
        out = 0
        for i, v1 in enumerate(self.args):
            for j, v2 in enumerate(other.args):
                # v2.T * dcm * v1 is a 1x1 Matrix; [0] extracts the scalar
                out += ((v2[0].T) * (v2[1].dcm(v1[1])) * (v1[0]))[0]
        if Vector.simp == True:
            return trigsimp(sympify(out), recursive=True)
        else:
            return sympify(out)

    def __div__(self, other):
        """This uses mul and inputs self and 1 divided by other. """
        return self.__mul__(1 / other)

    __truediv__ = __div__

    def __eq__(self, other):
        """Tests for equality.

        It is very import to note that this is only as good as the SymPy
        equality test; False does not always mean they are not equivalent
        Vectors.
        If other is 0, and self is empty, returns True.
        If other is 0 and self is not empty, returns False.
        If none of the above, only accepts other as a Vector.

        """
        if isinstance(other, (int, type(Zero()))):
            if other == 0:
                if self.args == []:
                    return True
                else:
                    return False
        # compare component-wise in the first frame appearing in self.args
        frame = self.args[0][1]
        for v in frame:
            if expand((self - other) & v) != 0:
                return False
        return True

    def __mul__(self, other):
        """Multiplies the Vector by a sympifyable expression.
Parameters ========== other : Sympifyable The scalar to multiply this Vector with Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, Vector >>> from sympy import Symbol >>> N = ReferenceFrame('N') >>> b = Symbol('b') >>> V = 10 * b * N.x >>> print V 10*b*N.x """ newlist = [v for v in self.args] for i, v in enumerate(newlist): newlist[i] = (sympify(other) * newlist[i][0], newlist[i][1]) return Vector(newlist) def __ne__(self, other): return not self.__eq__(other) def __neg__(self): return self * -1 def __or__(self, other): """Outer product between two Vectors. A rank increasing operation, which returns a Dyadic from two Vectors Parameters ========== other : Vector The Vector to take the outer product with Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, outer >>> N = ReferenceFrame('N') >>> outer(N.x, N.x) (N.x|N.x) """ if isinstance(other, (int, type(Zero()))): if (other == 0): return 0 ol = 0 for i, v in enumerate(self.args): for i2, v2 in enumerate(other.args): # it looks this way because if we are in the same frame and # use the enumerate function on the same frame in a nested # fashion, then bad things happen ol += Dyadic([(v[0][0] * v2[0][0], v[1].x, v2[1].x)]) ol += Dyadic([(v[0][0] * v2[0][1], v[1].x, v2[1].y)]) ol += Dyadic([(v[0][0] * v2[0][2], v[1].x, v2[1].z)]) ol += Dyadic([(v[0][1] * v2[0][0], v[1].y, v2[1].x)]) ol += Dyadic([(v[0][1] * v2[0][1], v[1].y, v2[1].y)]) ol += Dyadic([(v[0][1] * v2[0][2], v[1].y, v2[1].z)]) ol += Dyadic([(v[0][2] * v2[0][0], v[1].z, v2[1].x)]) ol += Dyadic([(v[0][2] * v2[0][1], v[1].z, v2[1].y)]) ol += Dyadic([(v[0][2] * v2[0][2], v[1].z, v2[1].z)]) return ol def _latex(self, printer=None): """Latex Printing method. 
""" ar = self.args # just to shorten things if len(ar) == 0: return str(0) ol = [] # output list, to be concatenated to a string for i, v in enumerate(ar): for j in 0, 1, 2: # if the coef of the basis vector is 1, we skip the 1 if ar[i][0][j] == 1: ol.append(' + ' + ar[i][1].latex_vecs[j]) # if the coef of the basis vector is -1, we skip the 1 elif ar[i][0][j] == -1: ol.append(' - ' + ar[i][1].latex_vecs[j]) elif ar[i][0][j] != 0: # If the coefficient of the basis vector is not 1 or -1; # also, we might wrap it in parentheses, for readability. arg_str = MechanicsStrPrinter().doprint(ar[i][0][j]) if isinstance(ar[i][0][j], Add): arg_str = "(%s)"%arg_str if arg_str[0] == '-': arg_str = arg_str[1:] str_start = ' - ' else: str_start = ' + ' ol.append(str_start + arg_str + '*' + ar[i][1].latex_vecs[j]) outstr = ''.join(ol) if outstr.startswith(' + '): outstr = outstr[3:] elif outstr.startswith(' '): outstr = outstr[1:] return outstr def _pretty(self, printer=None): """Pretty Printing method. """ e = self class Fake(object): baseline = 0 def render(self, *args, **kwargs): self = e ar = self.args # just to shorten things if len(ar) == 0: return unicode(0) ol = [] # output list, to be concatenated to a string for i, v in enumerate(ar): for j in 0, 1, 2: # if the coef of the basis vector is 1, we skip the 1 if ar[i][0][j] == 1: ol.append(u" + " + ar[i][1].pretty_vecs[j]) # if the coef of the basis vector is -1, we skip the 1 elif ar[i][0][j] == -1: ol.append(u" - " + ar[i][1].pretty_vecs[j]) elif ar[i][0][j] != 0: # If the basis vector coeff is not 1 or -1, # we might wrap it in parentheses, for readability. 
arg_str = (MechanicsPrettyPrinter().doprint( ar[i][0][j])) if isinstance(ar[i][0][j], Add): arg_str = u"(%s)"%arg_str if arg_str[0] == u"-": arg_str = arg_str[1:] str_start = u" - " else: str_start = u" + " ol.append(str_start + arg_str + '*' + ar[i][1].pretty_vecs[j]) outstr = u"".join(ol) if outstr.startswith(u" + "): outstr = outstr[3:] elif outstr.startswith(" "): outstr = outstr[1:] return outstr return Fake() def __ror__(self, other): """Outer product between two Vectors. A rank increasing operation, which returns a Dyadic from two Vectors Parameters ========== other : Vector The Vector to take the outer product with Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, outer >>> N = ReferenceFrame('N') >>> outer(N.x, N.x) (N.x|N.x) """ if isinstance(other, (int, type(Zero()))): if (other == 0): return 0 ol = 0 for i, v in enumerate(other.args): for i2, v2 in enumerate(self.args): # it looks this way because if we are in the same frame and # use the enumerate function on the same frame in a nested # fashion, then bad things happen ol += Dyadic([(v[0][0] * v2[0][0], v[1].x, v2[1].x)]) ol += Dyadic([(v[0][0] * v2[0][1], v[1].x, v2[1].y)]) ol += Dyadic([(v[0][0] * v2[0][2], v[1].x, v2[1].z)]) ol += Dyadic([(v[0][1] * v2[0][0], v[1].y, v2[1].x)]) ol += Dyadic([(v[0][1] * v2[0][1], v[1].y, v2[1].y)]) ol += Dyadic([(v[0][1] * v2[0][2], v[1].y, v2[1].z)]) ol += Dyadic([(v[0][2] * v2[0][0], v[1].z, v2[1].x)]) ol += Dyadic([(v[0][2] * v2[0][1], v[1].z, v2[1].y)]) ol += Dyadic([(v[0][2] * v2[0][2], v[1].z, v2[1].z)]) return ol def __rsub__(self, other): return (-1 * self) + other def __str__(self, printer=None): """Printing method. 
""" ar = self.args # just to shorten things if len(ar) == 0: return str(0) ol = [] # output list, to be concatenated to a string for i, v in enumerate(ar): for j in 0, 1, 2: # if the coef of the basis vector is 1, we skip the 1 if ar[i][0][j] == 1: ol.append(' + ' + ar[i][1].str_vecs[j]) # if the coef of the basis vector is -1, we skip the 1 elif ar[i][0][j] == -1: ol.append(' - ' + ar[i][1].str_vecs[j]) elif ar[i][0][j] != 0: # If the coefficient of the basis vector is not 1 or -1; # also, we might wrap it in parentheses, for readability. arg_str = MechanicsStrPrinter().doprint(ar[i][0][j]) if isinstance(ar[i][0][j], Add): arg_str = "(%s)"%arg_str if arg_str[0] == '-': arg_str = arg_str[1:] str_start = ' - ' else: str_start = ' + ' ol.append(str_start + arg_str + '*' + ar[i][1].str_vecs[j]) outstr = ''.join(ol) if outstr.startswith(' + '): outstr = outstr[3:] elif outstr.startswith(' '): outstr = outstr[1:] return outstr def __sub__(self, other): """The subraction operator. """ return self.__add__(other * -1) def __xor__(self, other): """The cross product operator for two Vectors. Returns a Vector, expressed in the same ReferenceFrames as self. Parameters ========== other : Vector The Vector which we are crossing with Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, Vector >>> from sympy import symbols >>> q1 = symbols('q1') >>> N = ReferenceFrame('N') >>> N.x ^ N.y N.z >>> A = N.orientnew('A', 'Axis', [q1, N.x]) >>> A.x ^ N.y N.z >>> N.y ^ A.x - sin(q1)*A.y - cos(q1)*A.z """ if isinstance(other, Dyadic): return NotImplemented if isinstance(other, (int, type(Zero()))): if (other == 0): return self * 0 self._check_vector(other) def _det(mat): """This is needed as a little method for to find the determinant of a list in python; needs to work for a 3x3 list. SymPy's Matrix won't take in Vector, so need a custom function. You shouldn't be calling this. 
""" return (mat[0][0] * (mat[1][1] * mat[2][2] - mat[1][2] * mat[2][1]) + mat[0][1] * (mat[1][2] * mat[2][0] - mat[1][0] * mat[2][2]) + mat[0][2] * (mat[1][0] * mat[2][1] - mat[1][1] * mat[2][0])) outvec = Vector([]) ar = other.args # For brevity for i, v in enumerate(ar): tempx = v[1].x tempy = v[1].y tempz = v[1].z tempm = ([[tempx, tempy, tempz], [self & tempx, self & tempy, self & tempz], [Vector([ar[i]]) & tempx, Vector([ar[i]]) & tempy, Vector([ar[i]]) & tempz]]) outvec += _det(tempm) return outvec def _check_frame(self, other): if not isinstance(other, ReferenceFrame): raise TypeError('A ReferenceFrame must be supplied') def _check_vector(self, other): if isinstance(other, (int, type(Zero()))): if other == 0: return if not isinstance(other, Vector): raise TypeError('A Vector must be supplied') _sympystr = __str__ _sympyrepr = _sympystr __repr__ = __str__ __radd__ = __add__ __rand__ = __and__ __rmul__ = __mul__ def dot(self, other): return self & other dot.__doc__ = __and__.__doc__ def cross(self, other): return self ^ other cross.__doc__ = __xor__.__doc__ def outer(self, other): return self | other outer.__doc__ = __or__.__doc__ def diff(self, wrt, otherframe): """Takes the partial derivative, with respect to a value, in a frame. Returns a Vector. Parameters ========== wrt : Symbol What the partial derivative is taken with respect to. otherframe : ReferenceFrame The ReferenceFrame that the partial derivative is taken in. 
Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, Vector, dynamicsymbols >>> from sympy import Symbol >>> Vector.simp = True >>> t = Symbol('t') >>> q1 = dynamicsymbols('q1') >>> N = ReferenceFrame('N') >>> A = N.orientnew('A', 'Axis', [q1, N.y]) >>> A.x.diff(t, N) - q1'*A.z """ wrt = sympify(wrt) self._check_frame(otherframe) outvec = 0 for i,v in enumerate(self.args): if v[1] == otherframe: outvec += Vector([(v[0].diff(wrt), otherframe)]) else: if otherframe.dcm(v[1]).diff(wrt) == zeros(3, 3): d = v[0].diff(wrt) outvec += Vector([(d, v[1])]) else: d = (Vector([v]).express(otherframe)).args[0][0].diff(wrt) outvec += Vector([(d, otherframe)]).express(v[1]) return outvec def dt(self, otherframe): """Returns the time derivative of the Vector in a ReferenceFrame. Returns a Vector which is the time derivative of the self Vector, taken in frame otherframe. Parameters ========== otherframe : ReferenceFrame The ReferenceFrame that the partial derivative is taken in. Examples ======== >>> from sympy.physics.mechanics import ReferenceFrame, Vector, dynamicsymbols >>> from sympy import Symbol >>> q1 = Symbol('q1') >>> u1 = dynamicsymbols('u1') >>> N = ReferenceFrame('N') >>> A = N.orientnew('A', 'Axis', [q1, N.x]) >>> v = u1 * N.x >>> A.set_ang_vel(N, 10*A.x) >>> A.x.dt(N) == 0 True >>> v.dt(N) u1'*N.x """ outvec = 0 self._check_frame(otherframe) for i, v in enumerate(self.args): if v[1] == otherframe: outvec += Vector([(v[0].diff(dynamicsymbols._t), otherframe)]) else: outvec += (Vector([v]).dt(v[1]) + (v[1].ang_vel_in(otherframe) ^ Vector([v]))) return outvec def express(self, otherframe): """Returns a vector, expressed in the other frame. A new Vector is returned, equalivalent to this Vector, but its components are all defined in only the otherframe. 
        Parameters
        ==========
        otherframe : ReferenceFrame
            The frame for this Vector to be described in

        Examples
        ========

        >>> from sympy.physics.mechanics import ReferenceFrame, Vector, dynamicsymbols
        >>> q1 = dynamicsymbols('q1')
        >>> N = ReferenceFrame('N')
        >>> A = N.orientnew('A', 'Axis', [q1, N.y])
        >>> A.x.express(N)
        cos(q1)*N.x - sin(q1)*N.z

        """
        self._check_frame(otherframe)
        # start from a copy of self; every component not already in
        # otherframe is rotated into it (via the DCM) and the original
        # term is subtracted out
        outvec = Vector(self.args + [])
        for i, v in enumerate(self.args):
            if v[1] != otherframe:
                temp = otherframe.dcm(v[1]) * v[0]
                for i2, v2 in enumerate(temp):
                    if Vector.simp == True:
                        temp[i2] = trigsimp(v2, recursive=True)
                    else:
                        temp[i2] = v2
                outvec += Vector([(temp, otherframe)])
                outvec -= Vector([v])
        return outvec

    def subs(self, dictin):
        """Substitution on the Vector, with a dict.

        Examples
        ========

        >>> from sympy.physics.mechanics import ReferenceFrame
        >>> from sympy import Symbol
        >>> N = ReferenceFrame('N')
        >>> s = Symbol('s')
        >>> a = N.x * s
        >>> a.subs({s: 2})
        2*N.x

        """
        # substitute into each measure-number matrix, frame by frame
        ov = 0
        for i, v in enumerate(self.args):
            ov += Vector([(v[0].subs(dictin), v[1])])
        return ov

    def magnitude(self):
        """Returns the magnitude (Euclidean norm) of self."""
        return sqrt(self & self)

    def normalize(self):
        """Returns a Vector of magnitude 1, codirectional with self."""
        return Vector(self.args + []) / self.magnitude()


class MechanicsStrPrinter(StrPrinter):
    """String Printer for mechanics. """

    def _print_Derivative(self, e):
        # time derivatives of undefined functions (dynamicsymbols) print
        # with one derivative suffix (dynamicsymbols._str, e.g. q1') per
        # differentiation; everything else falls back to the default printer
        t = dynamicsymbols._t
        if (bool(sum([i == t for i in e.variables])) &
                isinstance(type(e.args[0]), UndefinedFunction)):
            ol = str(e.args[0].func)
            for i, v in enumerate(e.variables):
                ol += dynamicsymbols._str
            return ol
        else:
            return StrPrinter().doprint(e)

    def _print_Function(self, e):
        # undefined functions of t print without the '(t)' argument
        t = dynamicsymbols._t
        if isinstance(type(e), UndefinedFunction):
            return StrPrinter().doprint(e).replace("(%s)" % t, '')
        return e.func.__name__ + "(%s)" % self.stringify(e.args, ", ")


class MechanicsLatexPrinter(LatexPrinter):
    """Latex Printer for mechanics.
""" def _print_Function(self, expr, exp=None): func = expr.func.__name__ t = dynamicsymbols._t if hasattr(self, '_print_' + func): return getattr(self, '_print_' + func)(expr, exp) elif isinstance(type(expr), UndefinedFunction) and (expr.args == (t,)): name, sup, sub = split_super_sub(func) if len(sup) != 0: sup = r"^{%s}" % "".join(sup) else: sup = r"" if len(sub) != 0: sub = r"_{%s}" % "".join(sub) else: sub = r"" return r"%s" % (name + sup + sub) else: args = [ str(self._print(arg)) for arg in expr.args ] # How inverse trig functions should be displayed, formats are: # abbreviated: asin, full: arcsin, power: sin^-1 inv_trig_style = self._settings['inv_trig_style'] # If we are dealing with a power-style inverse trig function inv_trig_power_case = False # If it is applicable to fold the argument brackets can_fold_brackets = self._settings['fold_func_brackets'] and \ len(args) == 1 and \ not self._needs_function_brackets(expr.args[0]) inv_trig_table = ["asin", "acos", "atan", "acot"] # If the function is an inverse trig function, handle the style if func in inv_trig_table: if inv_trig_style == "abbreviated": func = func elif inv_trig_style == "full": func = "arc" + func[1:] elif inv_trig_style == "power": func = func[1:] inv_trig_power_case = True # Can never fold brackets if we're raised to a power if exp is not None: can_fold_brackets = False if inv_trig_power_case: name = r"\operatorname{%s}^{-1}" % func elif exp is not None: name = r"\operatorname{%s}^{%s}" % (func, exp) else: name = r"\operatorname{%s}" % func if can_fold_brackets: name += r"%s" else: name += r"\left(%s\right)" if inv_trig_power_case and exp is not None: name += r"^{%s}" % exp return name % ",".join(args) def _print_Derivative(self, expr): expr = Derivative(expr) t = dynamicsymbols._t syms = list(reversed(expr.variables)) dots = 0 while len(syms) > 0: if syms[-1] == t: syms.pop() dots += 1 else: break base = self._print(expr.expr) if dots == 1: base = r"\dot{%s}" % self._print(expr.expr) if 
dots == 2: base = r"\ddot{%s}" % self._print(expr.expr) if dots == 3: base = r"\dddot{%s}" % self._print(expr.expr) expr = Derivative(expr.expr, *syms) dim = len(expr.variables) if dim == 1: tex = r"\frac{\partial}{\partial %s}" % \ self._print(expr.variables[0]) else: multiplicity, i, tex = [], 1, "" current = expr.variables[0] for symbol in expr.variables[1:]: if symbol == current: i = i + 1 else: multiplicity.append((current, i)) current, i = symbol, 1 else: multiplicity.append((current, i)) for x, i in multiplicity: if i == 1: tex += r"\partial %s" % self._print(x) else: tex += r"\partial^{%s} %s" % (i, self._print(x)) tex = r"\frac{\partial^{%s}}{%s} " % (dim, tex) if isinstance(expr.expr, C.AssocOp): return r"%s\left(%s\right)" % (tex, base) else: return r"%s %s" % (tex, base) class MechanicsPrettyPrinter(PrettyPrinter): """Pretty Printer for mechanics. """ def _print_Derivative(self, deriv): # XXX use U('PARTIAL DIFFERENTIAL') here ? t = dynamicsymbols._t dots = 0 can_break = True syms = list(reversed(deriv.variables)) x = None while len(syms) > 0: if syms[-1] == t: syms.pop() dots += 1 else: break f = prettyForm(binding=prettyForm.FUNC, *self._print(deriv.expr)) if not (isinstance(type(deriv.expr), UndefinedFunction) and (deriv.expr.args == (t,))): dots = 0 can_break = False f = prettyForm(binding=prettyForm.FUNC, *self._print(deriv.expr).parens()) if dots == 0: dots = u"" elif dots == 1: dots = u"\u0307" elif dots == 2: dots = u"\u0308" elif dots == 3: dots = u"\u20db" elif dots == 4: dots = u"\u20dc" uni_subs = [u"\u2080", u"\u2081", u"\u2082", u"\u2083", u"\u2084", u"\u2085", u"\u2086", u"\u2087", u"\u2088", u"\u2089", u"\u208a", u"\u208b", u"\u208c", u"\u208d", u"\u208e", u"\u208f", u"\u2090", u"\u2091", u"\u2092", u"\u2093", u"\u2094", u"\u2095", u"\u2096", u"\u2097", u"\u2098", u"\u2099", u"\u209a", u"\u209b", u"\u209c", u"\u209d", u"\u209e", u"\u209f"] fpic = f.__dict__['picture'] funi = f.__dict__['unicode'] ind = len(funi) val = "" for i in 
uni_subs: cur_ind = funi.find(i) if (cur_ind != -1) and (cur_ind < ind): ind = cur_ind val = i if ind == len(funi): funi += dots else: funi = funi.replace(val, dots + val) if f.__dict__['picture'] == [f.__dict__['unicode']]: fpic = [funi] f.__dict__['picture'] = fpic f.__dict__['unicode'] = funi if (len(syms)) == 0 and can_break: return f for sym, num in group(syms, multiple=False): s = self._print(sym) ds = prettyForm(*s.left('d')) if num > 1: ds = ds**prettyForm(str(num)) if x is None: x = ds else: x = prettyForm(*x.right(' ')) x = prettyForm(*x.right(ds)) pform = prettyForm('d') if len(syms) > 1: pform = pform**prettyForm(str(len(syms))) pform = prettyForm(*pform.below(stringPict.LINE, x)) pform.baseline = pform.baseline + 1 pform = prettyForm(*stringPict.next(pform, f)) return pform def _print_Function(self, e): t = dynamicsymbols._t # XXX works only for applied functions func = e.func args = e.args func_name = func.__name__ prettyFunc = self._print(C.Symbol(func_name)) prettyArgs = prettyForm(*self._print_seq(args).parens()) # If this function is an Undefined function of t, it is probably a # dynamic symbol, so we'll skip the (t). The rest of the code is # identical to the normal PrettyPrinter code if isinstance(func, UndefinedFunction) and (args == (t,)): pform = prettyForm(binding=prettyForm.FUNC, *stringPict.next(prettyFunc)) else: pform = prettyForm(binding=prettyForm.FUNC, *stringPict.next(prettyFunc, prettyArgs)) # store pform parts so it can be reassembled e.g. when powered pform.prettyFunc = prettyFunc pform.prettyArgs = prettyArgs return pform def dynamicsymbols(names, level=0): """Uses symbols and Function for functions of time. Creates a SymPy UndefinedFunction, which is then initialized as a function of a variable, the default being Symbol('t'). 
Parameters ========== names : str Names of the dynamic symbols you want to create; works the same way as inputs to symbols level : int Level of differentiation of the returned function; d/dt once of t, twice of t, etc. Examples ======= >>> from sympy.physics.mechanics import dynamicsymbols >>> from sympy import diff, Symbol >>> q1 = dynamicsymbols('q1') >>> q1 q1(t) >>> diff(q1, Symbol('t')) Derivative(q1(t), t) """ esses = symbols(names, cls=Function) try: esses = [i.__call__(dynamicsymbols._t) for i in list(esses)] ol = esses for i in range(level): ol = [] for j, v in enumerate(esses): ol.append(diff(v, dynamicsymbols._t)) esses = ol return list(ol) except TypeError: esses = esses.__call__(dynamicsymbols._t) for i in range(level): esses = diff(esses, dynamicsymbols._t) return esses dynamicsymbols._t = Symbol('t') dynamicsymbols._str = '\''
{ "repo_name": "ichuang/sympy", "path": "sympy/physics/mechanics/essential.py", "copies": "1", "size": "68156", "license": "bsd-3-clause", "hash": -4581322892797499400, "line_mean": 33.5093670886, "line_max": 86, "alpha_frac": 0.4827161218, "autogenerated": false, "ratio": 3.8412895226286423, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4824005644428643, "avg_score": null, "num_lines": null }
__all__ = ['register', 'sharedPackages', 'reloadSharedPackage', 'reloadSharedPackages'] from panda3d.core import Filename, VirtualFileSystem, VirtualFileMountSystem, OFileStream, copyStream from direct.stdpy.file import open import sys import marshal import imp import types # The sharedPackages dictionary lists all of the "shared packages", # special Python packages that automatically span multiple directories # via magic in the VFSImporter. You can make a package "shared" # simply by adding its name into this dictionary (and then calling # reloadSharedPackages() if it's already been imported). # When a package name is in this dictionary at import time, *all* # instances of the package are located along sys.path, and merged into # a single Python module with a __path__ setting that represents the # union. Thus, you can have a direct.showbase.foo in your own # application, and loading it won't shadow the system # direct.showbase.ShowBase which is in a different directory on disk. sharedPackages = {} vfs = VirtualFileSystem.getGlobalPtr() compiledExtensions = [ 'pyc', 'pyo' ] if not __debug__: # In optimized mode, we prefer loading .pyo files over .pyc files. # We implement that by reversing the extension names. compiledExtensions = [ 'pyo', 'pyc' ] class VFSImporter: """ This class serves as a Python importer to support loading Python .py and .pyc/.pyo files from Panda's Virtual File System, which allows loading Python source files from mounted .mf files (among other places). """ def __init__(self, path): if isinstance(path, Filename): self.dir_path = Filename(path) else: self.dir_path = Filename.fromOsSpecific(path) def find_module(self, fullname, path = None): if path is None: dir_path = self.dir_path else: dir_path = path #print >>sys.stderr, "find_module(%s), dir_path = %s" % (fullname, dir_path) basename = fullname.split('.')[-1] path = Filename(dir_path, basename) # First, look for Python files. 
filename = Filename(path) filename.setExtension('py') vfile = vfs.getFile(filename, True) if vfile: return VFSLoader(dir_path, vfile, filename, desc=('.py', 'U', imp.PY_SOURCE)) # If there's no .py file, but there's a .pyc file, load that # anyway. for ext in compiledExtensions: filename = Filename(path) filename.setExtension(ext) vfile = vfs.getFile(filename, True) if vfile: return VFSLoader(dir_path, vfile, filename, desc=('.'+ext, 'rb', imp.PY_COMPILED)) # Look for a C/C++ extension module. for desc in imp.get_suffixes(): if desc[2] != imp.C_EXTENSION: continue filename = Filename(path + desc[0]) vfile = vfs.getFile(filename, True) if vfile: return VFSLoader(dir_path, vfile, filename, desc=desc) # Finally, consider a package, i.e. a directory containing # __init__.py. filename = Filename(path, '__init__.py') vfile = vfs.getFile(filename, True) if vfile: return VFSLoader(dir_path, vfile, filename, packagePath=path, desc=('.py', 'U', imp.PY_SOURCE)) for ext in compiledExtensions: filename = Filename(path, '__init__.' + ext) vfile = vfs.getFile(filename, True) if vfile: return VFSLoader(dir_path, vfile, filename, packagePath=path, desc=('.'+ext, 'rb', imp.PY_COMPILED)) #print >>sys.stderr, "not found." return None class VFSLoader: """ The second part of VFSImporter, this is created for a particular .py file or directory. """ def __init__(self, dir_path, vfile, filename, desc, packagePath=None): self.dir_path = dir_path self.timestamp = None if vfile: self.timestamp = vfile.getTimestamp() self.filename = filename self.desc = desc self.packagePath = packagePath def load_module(self, fullname, loadingShared = False): #print >>sys.stderr, "load_module(%s), dir_path = %s, filename = %s" % (fullname, self.dir_path, self.filename) if self.desc[2] == imp.PY_FROZEN: return self._import_frozen_module(fullname) if self.desc[2] == imp.C_EXTENSION: return self._import_extension_module(fullname) # Check if this is a child of a shared package. 
if not loadingShared and self.packagePath and '.' in fullname: parentname = fullname.rsplit('.', 1)[0] if parentname in sharedPackages: # It is. That means it's a shared package too. parent = sys.modules[parentname] path = getattr(parent, '__path__', None) importer = VFSSharedImporter() sharedPackages[fullname] = True loader = importer.find_module(fullname, path = path) assert loader return loader.load_module(fullname) code = self._read_code() if not code: raise ImportError('No Python code in %s' % (fullname)) mod = sys.modules.setdefault(fullname, imp.new_module(fullname)) mod.__file__ = self.filename.toOsSpecific() mod.__loader__ = self if self.packagePath: mod.__path__ = [self.packagePath.toOsSpecific()] #print >> sys.stderr, "loaded %s, path = %s" % (fullname, mod.__path__) exec(code, mod.__dict__) return sys.modules[fullname] def getdata(self, path): path = Filename(self.dir_path, Filename.fromOsSpecific(path)) vfile = vfs.getFile(path) if not vfile: raise IOError("Could not find '%s'" % (path)) return vfile.readFile(True) def is_package(self, fullname): return bool(self.packagePath) def get_code(self, fullname): return self._read_code() def get_source(self, fullname): return self._read_source() def get_filename(self, fullname): return self.filename.toOsSpecific() def _read_source(self): """ Returns the Python source for this file, if it is available, or None if it is not. May raise IOError. """ if self.desc[2] == imp.PY_COMPILED or \ self.desc[2] == imp.C_EXTENSION: return None filename = Filename(self.filename) filename.setExtension('py') filename.setText() return open(self.filename, self.desc[1]).read() def _import_extension_module(self, fullname): """ Loads the binary shared object as a Python module, and returns it. """ vfile = vfs.getFile(self.filename, False) # We can only import an extension module if it already exists on # disk. This means if it's a truly virtual file that has no # on-disk equivalent, we have to write it to a temporary file # first. 
if hasattr(vfile, 'getMount') and \ isinstance(vfile.getMount(), VirtualFileMountSystem): # It's a real file. filename = self.filename elif self.filename.exists(): # It's a virtual file, but it's shadowing a real file in # the same directory. Assume they're the same, and load # the real one. filename = self.filename else: # It's a virtual file with no real-world existence. Dump # it to disk. TODO: clean up this filename. filename = Filename.temporary('', self.filename.getBasenameWoExtension(), '.' + self.filename.getExtension(), type = Filename.TDso) filename.setExtension(self.filename.getExtension()) filename.setBinary() sin = vfile.openReadFile(True) sout = OFileStream() if not filename.openWrite(sout): raise IOError if not copyStream(sin, sout): raise IOError vfile.closeReadFile(sin) del sout module = imp.load_module(fullname, None, filename.toOsSpecific(), self.desc) module.__file__ = self.filename.toOsSpecific() return module def _import_frozen_module(self, fullname): """ Imports the frozen module without messing around with searching any more. """ #print >>sys.stderr, "importing frozen %s" % (fullname) module = imp.load_module(fullname, None, fullname, ('', '', imp.PY_FROZEN)) # Workaround for bug in Python 2. if getattr(module, '__path__', None) == fullname: module.__path__ = [] return module def _read_code(self): """ Returns the Python compiled code object for this file, if it is available, or None if it is not. May raise IOError, ValueError, SyntaxError, or a number of other errors generated by the low-level system. """ if self.desc[2] == imp.PY_COMPILED: # It's a pyc file; just read it directly. pycVfile = vfs.getFile(self.filename, False) if pycVfile: return self._loadPyc(pycVfile, None) raise IOError('Could not read %s' % (self.filename)) elif self.desc[2] == imp.C_EXTENSION: return None # It's a .py file (or an __init__.py file; same thing). Read # the .pyc file if it is available and current; otherwise read # the .py file and compile it. 
t_pyc = None for ext in compiledExtensions: pycFilename = Filename(self.filename) pycFilename.setExtension(ext) pycVfile = vfs.getFile(pycFilename, False) if pycVfile: t_pyc = pycVfile.getTimestamp() break code = None if t_pyc and t_pyc >= self.timestamp: try: code = self._loadPyc(pycVfile, self.timestamp) except ValueError: code = None if not code: source = self._read_source() filename = Filename(self.filename) filename.setExtension('py') code = self._compile(filename, source) return code def _loadPyc(self, vfile, timestamp): """ Reads and returns the marshal data from a .pyc file. Raises ValueError if there is a problem. """ code = None data = vfile.readFile(True) if data[:4] != imp.get_magic(): raise ValueError("Bad magic number in %s" % (vfile)) if sys.version_info >= (3, 0): t = int.from_bytes(data[4:8], 'little') data = data[12:] else: t = ord(data[4]) + (ord(data[5]) << 8) + \ (ord(data[6]) << 16) + (ord(data[7]) << 24) data = data[8:] if not timestamp or t == timestamp: return marshal.loads(data) else: raise ValueError("Timestamp wrong on %s" % (vfile)) def _compile(self, filename, source): """ Compiles the Python source code to a code object and attempts to write it to an appropriate .pyc file. May raise SyntaxError or other errors generated by the compiler. 
""" if source and source[-1] != '\n': source = source + '\n' code = compile(source, filename.toOsSpecific(), 'exec') # try to cache the compiled code pycFilename = Filename(filename) pycFilename.setExtension(compiledExtensions[0]) try: f = open(pycFilename.toOsSpecific(), 'wb') except IOError: pass else: f.write(imp.get_magic()) if sys.version_info >= (3, 0): f.write((self.timestamp & 0xffffffff).to_bytes(4, 'little')) f.write(b'\0\0\0\0') else: f.write(chr(self.timestamp & 0xff) + chr((self.timestamp >> 8) & 0xff) + chr((self.timestamp >> 16) & 0xff) + chr((self.timestamp >> 24) & 0xff)) f.write(marshal.dumps(code)) f.close() return code class VFSSharedImporter: """ This is a special importer that is added onto the meta_path list, so that it is called before sys.path is traversed. It uses special logic to load one of the "shared" packages, by searching the entire sys.path for all instances of this shared package, and merging them. """ def __init__(self): pass def find_module(self, fullname, path = None, reload = False): #print >>sys.stderr, "shared find_module(%s), path = %s" % (fullname, path) if fullname not in sharedPackages: # Not a shared package; fall back to normal import. return None if path is None: path = sys.path excludePaths = [] if reload: # If reload is true, we are simply reloading the module, # looking for new paths to add. mod = sys.modules[fullname] excludePaths = getattr(mod, '_vfs_shared_path', None) if excludePaths is None: # If there isn't a _vfs_shared_path symbol already, # the module must have been loaded through # conventional means. Try to guess which path it was # found on. 
d = self.getLoadedDirname(mod) excludePaths = [d] loaders = [] for dir in path: if dir in excludePaths: continue importer = sys.path_importer_cache.get(dir, None) if importer is None: try: importer = VFSImporter(dir) except ImportError: continue sys.path_importer_cache[dir] = importer try: loader = importer.find_module(fullname) if not loader: continue except ImportError: continue loaders.append(loader) if not loaders: return None return VFSSharedLoader(loaders, reload = reload) def getLoadedDirname(self, mod): """ Returns the directory name that the indicated conventionally-loaded module must have been loaded from. """ if not getattr(mod, '__file__', None): return None fullname = mod.__name__ dirname = Filename.fromOsSpecific(mod.__file__).getDirname() parentname = None basename = fullname if '.' in fullname: parentname, basename = fullname.rsplit('.', 1) path = None if parentname: parent = sys.modules[parentname] path = parent.__path__ if path is None: path = sys.path for dir in path: pdir = str(Filename.fromOsSpecific(dir)) if pdir + '/' + basename == dirname: # We found it! return dir # Couldn't figure it out. return None class VFSSharedLoader: """ The second part of VFSSharedImporter, this imports a list of packages and combines them. """ def __init__(self, loaders, reload): self.loaders = loaders self.reload = reload def load_module(self, fullname): #print >>sys.stderr, "shared load_module(%s), loaders = %s" % (fullname, map(lambda l: l.dir_path, self.loaders)) mod = None message = None path = [] vfs_shared_path = [] if self.reload: mod = sys.modules[fullname] path = mod.__path__ or [] if path == fullname: # Work around Python bug setting __path__ of frozen modules. 
path = [] vfs_shared_path = getattr(mod, '_vfs_shared_path', []) for loader in self.loaders: try: mod = loader.load_module(fullname, loadingShared = True) except ImportError: etype, evalue, etraceback = sys.exc_info() print("%s on %s: %s" % (etype.__name__, fullname, evalue)) if not message: message = '%s: %s' % (fullname, evalue) continue for dir in getattr(mod, '__path__', []): if dir not in path: path.append(dir) if mod is None: # If all of them failed to load, raise ImportError. raise ImportError(message) # If at least one of them loaded successfully, return the # union of loaded modules. mod.__path__ = path mod.__package__ = fullname # Also set this special symbol, which records that this is a # shared package, and also lists the paths we have already # loaded. mod._vfs_shared_path = vfs_shared_path + [l.dir_path for l in self.loaders] return mod _registered = False def register(): """ Register the VFSImporter on the path_hooks, if it has not already been registered, so that future Python import statements will vector through here (and therefore will take advantage of Panda's virtual file system). """ global _registered if not _registered: _registered = True sys.path_hooks.insert(0, VFSImporter) sys.meta_path.insert(0, VFSSharedImporter()) # Blow away the importer cache, so we'll come back through the # VFSImporter for every folder in the future, even those # folders that previously were loaded directly. sys.path_importer_cache = {} def reloadSharedPackage(mod): """ Reloads the specific module as a shared package, adding any new directories that might have appeared on the search path. """ fullname = mod.__name__ path = None if '.' in fullname: parentname = fullname.rsplit('.', 1)[0] parent = sys.modules[parentname] path = parent.__path__ importer = VFSSharedImporter() loader = importer.find_module(fullname, path = path, reload = True) if loader: loader.load_module(fullname) # Also force any child packages to become shared packages, if # they aren't already. 
for basename, child in list(mod.__dict__.items()): if isinstance(child, types.ModuleType): childname = child.__name__ if childname == fullname + '.' + basename and \ hasattr(child, '__path__') and \ childname not in sharedPackages: sharedPackages[childname] = True reloadSharedPackage(child) def reloadSharedPackages(): """ Walks through the sharedPackages list, and forces a reload of any modules on that list that have already been loaded. This allows new directories to be added to the search path. """ #print >> sys.stderr, "reloadSharedPackages, path = %s, sharedPackages = %s" % (sys.path, sharedPackages.keys()) # Sort the list, just to make sure parent packages are reloaded # before child packages are. for fullname in sorted(sharedPackages.keys()): mod = sys.modules.get(fullname, None) if not mod: continue reloadSharedPackage(mod)
{ "repo_name": "grimfang/panda3d", "path": "direct/src/showbase/VFSImporter.py", "copies": "10", "size": "19654", "license": "bsd-3-clause", "hash": 5164668734121830000, "line_mean": 35.8742964353, "line_max": 121, "alpha_frac": 0.5729113666, "autogenerated": false, "ratio": 4.270751847023034, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0037874953787947643, "num_lines": 533 }
__all__ = ['register', 'sharedPackages', 'reloadSharedPackage', 'reloadSharedPackages'] from panda3d._core import Filename, VirtualFileSystem, VirtualFileMountSystem, OFileStream, copyStream import sys import marshal import imp import types import __builtin__ # The sharedPackages dictionary lists all of the "shared packages", # special Python packages that automatically span multiple directories # via magic in the VFSImporter. You can make a package "shared" # simply by adding its name into this dictionary (and then calling # reloadSharedPackages() if it's already been imported). # When a package name is in this dictionary at import time, *all* # instances of the package are located along sys.path, and merged into # a single Python module with a __path__ setting that represents the # union. Thus, you can have a direct.showbase.foo in your own # application, and loading it won't shadow the system # direct.showbase.ShowBase which is in a different directory on disk. sharedPackages = {} vfs = VirtualFileSystem.getGlobalPtr() compiledExtensions = [ 'pyc', 'pyo' ] if not __debug__: # In optimized mode, we prefer loading .pyo files over .pyc files. # We implement that by reversing the extension names. compiledExtensions = [ 'pyo', 'pyc' ] class VFSImporter: """ This class serves as a Python importer to support loading Python .py and .pyc/.pyo files from Panda's Virtual File System, which allows loading Python source files from mounted .mf files (among other places). """ def __init__(self, path): if isinstance(path, Filename): self.dir_path = Filename(path) else: self.dir_path = Filename.fromOsSpecific(path) def find_module(self, fullname, path = None): if path is None: dir_path = self.dir_path else: dir_path = path #print >>sys.stderr, "find_module(%s), dir_path = %s" % (fullname, dir_path) basename = fullname.split('.')[-1] path = Filename(dir_path, basename) # First, look for Python files. 
filename = Filename(path) filename.setExtension('py') vfile = vfs.getFile(filename, True) if vfile: return VFSLoader(dir_path, vfile, filename, desc=('.py', 'U', imp.PY_SOURCE)) # If there's no .py file, but there's a .pyc file, load that # anyway. for ext in compiledExtensions: filename = Filename(path) filename.setExtension(ext) vfile = vfs.getFile(filename, True) if vfile: return VFSLoader(dir_path, vfile, filename, desc=('.'+ext, 'rb', imp.PY_COMPILED)) # Look for a C/C++ extension module. for desc in imp.get_suffixes(): if desc[2] != imp.C_EXTENSION: continue filename = Filename(path + desc[0]) vfile = vfs.getFile(filename, True) if vfile: return VFSLoader(dir_path, vfile, filename, desc=desc) # Finally, consider a package, i.e. a directory containing # __init__.py. filename = Filename(path, '__init__.py') vfile = vfs.getFile(filename, True) if vfile: return VFSLoader(dir_path, vfile, filename, packagePath=path, desc=('.py', 'U', imp.PY_SOURCE)) for ext in compiledExtensions: filename = Filename(path, '__init__.' + ext) vfile = vfs.getFile(filename, True) if vfile: return VFSLoader(dir_path, vfile, filename, packagePath=path, desc=('.'+ext, 'rb', imp.PY_COMPILED)) #print >>sys.stderr, "not found." return None class VFSLoader: """ The second part of VFSImporter, this is created for a particular .py file or directory. """ def __init__(self, dir_path, vfile, filename, desc, packagePath=None): self.dir_path = dir_path self.timestamp = None if vfile: self.timestamp = vfile.getTimestamp() self.filename = filename self.desc = desc self.packagePath = packagePath def load_module(self, fullname, loadingShared = False): #print >>sys.stderr, "load_module(%s), dir_path = %s, filename = %s" % (fullname, self.dir_path, self.filename) if self.desc[2] == imp.PY_FROZEN: return self._import_frozen_module(fullname) if self.desc[2] == imp.C_EXTENSION: return self._import_extension_module(fullname) # Check if this is a child of a shared package. 
if not loadingShared and self.packagePath and '.' in fullname: parentname = fullname.rsplit('.', 1)[0] if parentname in sharedPackages: # It is. That means it's a shared package too. parent = sys.modules[parentname] path = getattr(parent, '__path__', None) importer = VFSSharedImporter() sharedPackages[fullname] = True loader = importer.find_module(fullname, path = path) assert loader return loader.load_module(fullname) code = self._read_code() if not code: raise ImportError('No Python code in %s' % (fullname)) mod = sys.modules.setdefault(fullname, imp.new_module(fullname)) mod.__file__ = self.filename.toOsSpecific() mod.__loader__ = self if self.packagePath: mod.__path__ = [self.packagePath.toOsSpecific()] #print >> sys.stderr, "loaded %s, path = %s" % (fullname, mod.__path__) exec(code, mod.__dict__) return sys.modules[fullname] def getdata(self, path): path = Filename(self.dir_path, Filename.fromOsSpecific(path)) vfile = vfs.getFile(path) if not vfile: raise IOError("Could not find '%s'" % (path)) return vfile.readFile(True) def is_package(self, fullname): return bool(self.packagePath) def get_code(self, fullname): return self._read_code() def get_source(self, fullname): return self._read_source() def get_filename(self, fullname): return self.filename.toOsSpecific() def _read_source(self): """ Returns the Python source for this file, if it is available, or None if it is not. May raise IOError. """ if self.desc[2] == imp.PY_COMPILED or \ self.desc[2] == imp.C_EXTENSION: return None filename = Filename(self.filename) filename.setExtension('py') filename.setText() vfile = vfs.getFile(filename) if not vfile: raise IOError("Could not find '%s'" % (filename)) return vfile.readFile(True) def _import_extension_module(self, fullname): """ Loads the binary shared object as a Python module, and returns it. """ vfile = vfs.getFile(self.filename, False) # We can only import an extension module if it already exists on # disk. 
This means if it's a truly virtual file that has no # on-disk equivalent, we have to write it to a temporary file # first. if hasattr(vfile, 'getMount') and \ isinstance(vfile.getMount(), VirtualFileMountSystem): # It's a real file. filename = self.filename elif self.filename.exists(): # It's a virtual file, but it's shadowing a real file in # the same directory. Assume they're the same, and load # the real one. filename = self.filename else: # It's a virtual file with no real-world existence. Dump # it to disk. TODO: clean up this filename. filename = Filename.temporary('', self.filename.getBasenameWoExtension(), '.' + self.filename.getExtension(), type = Filename.TDso) filename.setExtension(self.filename.getExtension()) filename.setBinary() sin = vfile.openReadFile(True) sout = OFileStream() if not filename.openWrite(sout): raise IOError if not copyStream(sin, sout): raise IOError vfile.closeReadFile(sin) del sout module = imp.load_module(fullname, None, filename.toOsSpecific(), self.desc) module.__file__ = self.filename.toOsSpecific() return module def _import_frozen_module(self, fullname): """ Imports the frozen module without messing around with searching any more. """ #print >>sys.stderr, "importing frozen %s" % (fullname) module = imp.load_module(fullname, None, fullname, ('', '', imp.PY_FROZEN)) # Workaround for bug in Python 2. if getattr(module, '__path__', None) == fullname: module.__path__ = [] return module def _read_code(self): """ Returns the Python compiled code object for this file, if it is available, or None if it is not. May raise IOError, ValueError, SyntaxError, or a number of other errors generated by the low-level system. """ if self.desc[2] == imp.PY_COMPILED: # It's a pyc file; just read it directly. 
pycVfile = vfs.getFile(self.filename, False) if pycVfile: return self._loadPyc(pycVfile, None) raise IOError('Could not read %s' % (self.filename)) elif self.desc[2] == imp.C_EXTENSION: return None # It's a .py file (or an __init__.py file; same thing). Read # the .pyc file if it is available and current; otherwise read # the .py file and compile it. t_pyc = None for ext in compiledExtensions: pycFilename = Filename(self.filename) pycFilename.setExtension(ext) pycVfile = vfs.getFile(pycFilename, False) if pycVfile: t_pyc = pycVfile.getTimestamp() break code = None if t_pyc and t_pyc >= self.timestamp: try: code = self._loadPyc(pycVfile, self.timestamp) except ValueError: code = None if not code: source = self._read_source() filename = Filename(self.filename) filename.setExtension('py') code = self._compile(filename, source) return code def _loadPyc(self, vfile, timestamp): """ Reads and returns the marshal data from a .pyc file. Raises ValueError if there is a problem. """ code = None data = vfile.readFile(True) if data[:4] != imp.get_magic(): raise ValueError("Bad magic number in %s" % (vfile)) if sys.version_info >= (3, 0): t = int.from_bytes(data[4:8], 'little') data = data[12:] else: t = ord(data[4]) + (ord(data[5]) << 8) + \ (ord(data[6]) << 16) + (ord(data[7]) << 24) data = data[8:] if not timestamp or t == timestamp: return marshal.loads(data) else: raise ValueError("Timestamp wrong on %s" % (vfile)) def _compile(self, filename, source): """ Compiles the Python source code to a code object and attempts to write it to an appropriate .pyc file. May raise SyntaxError or other errors generated by the compiler. 
""" if source and source[-1] != '\n': source = source + '\n' code = __builtin__.compile(source, filename.toOsSpecific(), 'exec') # try to cache the compiled code pycFilename = Filename(filename) pycFilename.setExtension(compiledExtensions[0]) try: f = open(pycFilename.toOsSpecific(), 'wb') except IOError: pass else: f.write(imp.get_magic()) if sys.version_info >= (3, 0): f.write((self.timestamp & 0xffffffff).to_bytes(4, 'little')) f.write(b'\0\0\0\0') else: f.write(chr(self.timestamp & 0xff) + chr((self.timestamp >> 8) & 0xff) + chr((self.timestamp >> 16) & 0xff) + chr((self.timestamp >> 24) & 0xff)) f.write(marshal.dumps(code)) f.close() return code class VFSSharedImporter: """ This is a special importer that is added onto the meta_path list, so that it is called before sys.path is traversed. It uses special logic to load one of the "shared" packages, by searching the entire sys.path for all instances of this shared package, and merging them. """ def __init__(self): pass def find_module(self, fullname, path = None, reload = False): #print >>sys.stderr, "shared find_module(%s), path = %s" % (fullname, path) if fullname not in sharedPackages: # Not a shared package; fall back to normal import. return None if path is None: path = sys.path excludePaths = [] if reload: # If reload is true, we are simply reloading the module, # looking for new paths to add. mod = sys.modules[fullname] excludePaths = getattr(mod, '_vfs_shared_path', None) if excludePaths is None: # If there isn't a _vfs_shared_path symbol already, # the module must have been loaded through # conventional means. Try to guess which path it was # found on. 
d = self.getLoadedDirname(mod) excludePaths = [d] loaders = [] for dir in path: if dir in excludePaths: continue importer = sys.path_importer_cache.get(dir, None) if importer is None: try: importer = VFSImporter(dir) except ImportError: continue sys.path_importer_cache[dir] = importer try: loader = importer.find_module(fullname) if not loader: continue except ImportError: continue loaders.append(loader) if not loaders: return None return VFSSharedLoader(loaders, reload = reload) def getLoadedDirname(self, mod): """ Returns the directory name that the indicated conventionally-loaded module must have been loaded from. """ if not getattr(mod, '__file__', None): return None fullname = mod.__name__ dirname = Filename.fromOsSpecific(mod.__file__).getDirname() parentname = None basename = fullname if '.' in fullname: parentname, basename = fullname.rsplit('.', 1) path = None if parentname: parent = sys.modules[parentname] path = parent.__path__ if path is None: path = sys.path for dir in path: pdir = str(Filename.fromOsSpecific(dir)) if pdir + '/' + basename == dirname: # We found it! return dir # Couldn't figure it out. return None class VFSSharedLoader: """ The second part of VFSSharedImporter, this imports a list of packages and combines them. """ def __init__(self, loaders, reload): self.loaders = loaders self.reload = reload def load_module(self, fullname): #print >>sys.stderr, "shared load_module(%s), loaders = %s" % (fullname, map(lambda l: l.dir_path, self.loaders)) mod = None message = None path = [] vfs_shared_path = [] if self.reload: mod = sys.modules[fullname] path = mod.__path__ or [] if path == fullname: # Work around Python bug setting __path__ of frozen modules. 
path = [] vfs_shared_path = getattr(mod, '_vfs_shared_path', []) for loader in self.loaders: try: mod = loader.load_module(fullname, loadingShared = True) except ImportError: etype, evalue, etraceback = sys.exc_info() print "%s on %s: %s" % (etype.__name__, fullname, evalue) if not message: message = '%s: %s' % (fullname, evalue) continue for dir in getattr(mod, '__path__', []): if dir not in path: path.append(dir) if mod is None: # If all of them failed to load, raise ImportError. raise ImportError(message) # If at least one of them loaded successfully, return the # union of loaded modules. mod.__path__ = path mod.__package__ = fullname # Also set this special symbol, which records that this is a # shared package, and also lists the paths we have already # loaded. mod._vfs_shared_path = vfs_shared_path + [l.dir_path for l in self.loaders] return mod _registered = False def register(): """ Register the VFSImporter on the path_hooks, if it has not already been registered, so that future Python import statements will vector through here (and therefore will take advantage of Panda's virtual file system). """ global _registered if not _registered: _registered = True sys.path_hooks.insert(0, VFSImporter) sys.meta_path.insert(0, VFSSharedImporter()) # Blow away the importer cache, so we'll come back through the # VFSImporter for every folder in the future, even those # folders that previously were loaded directly. sys.path_importer_cache = {} def reloadSharedPackage(mod): """ Reloads the specific module as a shared package, adding any new directories that might have appeared on the search path. """ fullname = mod.__name__ path = None if '.' in fullname: parentname = fullname.rsplit('.', 1)[0] parent = sys.modules[parentname] path = parent.__path__ importer = VFSSharedImporter() loader = importer.find_module(fullname, path = path, reload = True) if loader: loader.load_module(fullname) # Also force any child packages to become shared packages, if # they aren't already. 
for basename, child in mod.__dict__.items(): if isinstance(child, types.ModuleType): childname = child.__name__ if childname == fullname + '.' + basename and \ hasattr(child, '__path__') and \ childname not in sharedPackages: sharedPackages[childname] = True reloadSharedPackage(child) def reloadSharedPackages(): """ Walks through the sharedPackages list, and forces a reload of any modules on that list that have already been loaded. This allows new directories to be added to the search path. """ #print >> sys.stderr, "reloadSharedPackages, path = %s, sharedPackages = %s" % (sys.path, sharedPackages.keys()) # Sort the list, just to make sure parent packages are reloaded # before child packages are. for fullname in sorted(sharedPackages.keys()): mod = sys.modules.get(fullname, None) if not mod: continue reloadSharedPackage(mod)
{ "repo_name": "Wilee999/panda3d", "path": "direct/src/showbase/VFSImporter.py", "copies": "1", "size": "19746", "license": "bsd-3-clause", "hash": -5230235690328038000, "line_mean": 35.8395522388, "line_max": 121, "alpha_frac": 0.5723184442, "autogenerated": false, "ratio": 4.27125243348475, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0037661191290915283, "num_lines": 536 }
__all__ = ['register', 'sharedPackages', 'reloadSharedPackage', 'reloadSharedPackages'] from panda3d.core import Filename, VirtualFileSystem, VirtualFileMountSystem, OFileStream, copyStream import sys import marshal import imp import types import __builtin__ # The sharedPackages dictionary lists all of the "shared packages", # special Python packages that automatically span multiple directories # via magic in the VFSImporter. You can make a package "shared" # simply by adding its name into this dictionary (and then calling # reloadSharedPackages() if it's already been imported). # When a package name is in this dictionary at import time, *all* # instances of the package are located along sys.path, and merged into # a single Python module with a __path__ setting that represents the # union. Thus, you can have a direct.showbase.foo in your own # application, and loading it won't shadow the system # direct.showbase.ShowBase which is in a different directory on disk. sharedPackages = {} vfs = VirtualFileSystem.getGlobalPtr() # Possible file types. FTPythonSource = 0 FTPythonCompiled = 1 FTExtensionModule = 2 FTFrozenModule = 3 compiledExtensions = [ 'pyc', 'pyo' ] if not __debug__: # In optimized mode, we prefer loading .pyo files over .pyc files. # We implement that by reversing the extension names. compiledExtensions = [ 'pyo', 'pyc' ] class VFSImporter: """ This class serves as a Python importer to support loading Python .py and .pyc/.pyo files from Panda's Virtual File System, which allows loading Python source files from mounted .mf files (among other places). """ def __init__(self, path): self.dir_path = Filename.fromOsSpecific(path) def find_module(self, fullname, path = None): if path is None: dir_path = self.dir_path else: dir_path = path #print >>sys.stderr, "find_module(%s), dir_path = %s" % (fullname, dir_path) basename = fullname.split('.')[-1] path = Filename(dir_path, basename) # First, look for Python files. 
filename = Filename(path) filename.setExtension('py') vfile = vfs.getFile(filename, True) if vfile: return VFSLoader(dir_path, vfile, filename, FTPythonSource) # If there's no .py file, but there's a .pyc file, load that # anyway. for ext in compiledExtensions: filename = Filename(path) filename.setExtension(ext) vfile = vfs.getFile(filename, True) if vfile: return VFSLoader(dir_path, vfile, filename, FTPythonCompiled) # Look for a C/C++ extension module. for desc in imp.get_suffixes(): if desc[2] != imp.C_EXTENSION: continue filename = Filename(path) filename.setExtension(desc[0][1:]) vfile = vfs.getFile(filename, True) if vfile: return VFSLoader(dir_path, vfile, filename, FTExtensionModule, desc = desc) # Finally, consider a package, i.e. a directory containing # __init__.py. filename = Filename(path, '__init__.py') vfile = vfs.getFile(filename, True) if vfile: return VFSLoader(dir_path, vfile, filename, FTPythonSource, packagePath = path) for ext in compiledExtensions: filename = Filename(path, '__init__.' + ext) vfile = vfs.getFile(filename, True) if vfile: return VFSLoader(dir_path, vfile, filename, FTPythonCompiled, packagePath = path) #print >>sys.stderr, "not found." return None class VFSLoader: """ The second part of VFSImporter, this is created for a particular .py file or directory. """ def __init__(self, dir_path, vfile, filename, fileType, desc = None, packagePath = None): self.dir_path = dir_path self.timestamp = None if vfile: self.timestamp = vfile.getTimestamp() self.filename = filename self.fileType = fileType self.desc = desc self.packagePath = packagePath def load_module(self, fullname, loadingShared = False): #print >>sys.stderr, "load_module(%s), dir_path = %s, filename = %s" % (fullname, self.dir_path, self.filename) if self.fileType == FTFrozenModule: return self._import_frozen_module(fullname) if self.fileType == FTExtensionModule: return self._import_extension_module(fullname) # Check if this is a child of a shared package. 
if not loadingShared and self.packagePath and '.' in fullname: parentname = fullname.rsplit('.', 1)[0] if parentname in sharedPackages: # It is. That means it's a shared package too. parent = sys.modules[parentname] path = getattr(parent, '__path__', None) importer = VFSSharedImporter() sharedPackages[fullname] = True loader = importer.find_module(fullname, path = path) assert loader return loader.load_module(fullname) code = self._read_code() if not code: raise ImportError, 'No Python code in %s' % (fullname) mod = sys.modules.setdefault(fullname, imp.new_module(fullname)) mod.__file__ = self.filename.toOsSpecific() mod.__loader__ = self if self.packagePath: mod.__path__ = [self.packagePath.toOsSpecific()] #print >> sys.stderr, "loaded %s, path = %s" % (fullname, mod.__path__) exec(code, mod.__dict__) return sys.modules[fullname] def getdata(self, path): path = Filename(self.dir_path, Filename.fromOsSpecific(path)) vfile = vfs.getFile(path) if not vfile: raise IOError return vfile.readFile(True) def is_package(self, fullname): return bool(self.packagePath) def get_code(self, fullname): return self._read_code() def get_source(self, fullname): return self._read_source() def get_filename(self, fullname): return self.filename.toOsSpecific() def _read_source(self): """ Returns the Python source for this file, if it is available, or None if it is not. May raise IOError. """ if self.fileType == FTPythonCompiled or \ self.fileType == FTExtensionModule: return None filename = Filename(self.filename) filename.setExtension('py') filename.setText() vfile = vfs.getFile(filename) if not vfile: raise IOError return vfile.readFile(True) def _import_extension_module(self, fullname): """ Loads the binary shared object as a Python module, and returns it. """ vfile = vfs.getFile(self.filename, False) # We can only import an extension module if it already exists on # disk. 
This means if it's a truly virtual file that has no # on-disk equivalent, we have to write it to a temporary file # first. if hasattr(vfile, 'getMount') and \ isinstance(vfile.getMount(), VirtualFileMountSystem): # It's a real file. filename = self.filename elif self.filename.exists(): # It's a virtual file, but it's shadowing a real file in # the same directory. Assume they're the same, and load # the real one. filename = self.filename else: # It's a virtual file with no real-world existence. Dump # it to disk. TODO: clean up this filename. filename = Filename.temporary('', self.filename.getBasenameWoExtension(), '.' + self.filename.getExtension(), type = Filename.TDso) filename.setExtension(self.filename.getExtension()) filename.setBinary() sin = vfile.openReadFile(True) sout = OFileStream() if not filename.openWrite(sout): raise IOError if not copyStream(sin, sout): raise IOError vfile.closeReadFile(sin) del sout module = imp.load_module(fullname, None, filename.toOsSpecific(), self.desc) module.__file__ = self.filename.toOsSpecific() return module def _import_frozen_module(self, fullname): """ Imports the frozen module without messing around with searching any more. """ #print >>sys.stderr, "importing frozen %s" % (fullname) module = imp.load_module(fullname, None, fullname, ('', '', imp.PY_FROZEN)) return module def _read_code(self): """ Returns the Python compiled code object for this file, if it is available, or None if it is not. May raise IOError, ValueError, SyntaxError, or a number of other errors generated by the low-level system. """ if self.fileType == FTPythonCompiled: # It's a pyc file; just read it directly. pycVfile = vfs.getFile(self.filename, False) if pycVfile: return self._loadPyc(pycVfile, None) raise IOError, 'Could not read %s' % (self.filename) elif self.fileType == FTExtensionModule: return None # It's a .py file (or an __init__.py file; same thing). 
Read # the .pyc file if it is available and current; otherwise read # the .py file and compile it. t_pyc = None for ext in compiledExtensions: pycFilename = Filename(self.filename) pycFilename.setExtension(ext) pycVfile = vfs.getFile(pycFilename, False) if pycVfile: t_pyc = pycVfile.getTimestamp() break code = None if t_pyc and t_pyc >= self.timestamp: try: code = self._loadPyc(pycVfile, self.timestamp) except ValueError: code = None if not code: source = self._read_source() filename = Filename(self.filename) filename.setExtension('py') code = self._compile(filename, source) return code def _loadPyc(self, vfile, timestamp): """ Reads and returns the marshal data from a .pyc file. Raises ValueError if there is a problem. """ code = None data = vfile.readFile(True) if data[:4] == imp.get_magic(): t = ord(data[4]) + (ord(data[5]) << 8) + \ (ord(data[6]) << 16) + (ord(data[7]) << 24) if not timestamp or t == timestamp: code = marshal.loads(data[8:]) else: raise ValueError, 'Timestamp wrong on %s' % (vfile) else: raise ValueError, 'Bad magic number in %s' % (vfile) return code def _compile(self, filename, source): """ Compiles the Python source code to a code object and attempts to write it to an appropriate .pyc file. May raise SyntaxError or other errors generated by the compiler. 
""" if source and source[-1] != '\n': source = source + '\n' code = __builtin__.compile(source, filename.toOsSpecific(), 'exec') # try to cache the compiled code pycFilename = Filename(filename) pycFilename.setExtension(compiledExtensions[0]) try: f = open(pycFilename.toOsSpecific(), 'wb') except IOError: pass else: f.write('\0\0\0\0') f.write(chr(self.timestamp & 0xff) + chr((self.timestamp >> 8) & 0xff) + chr((self.timestamp >> 16) & 0xff) + chr((self.timestamp >> 24) & 0xff)) f.write(marshal.dumps(code)) f.flush() f.seek(0, 0) f.write(imp.get_magic()) f.close() return code class VFSSharedImporter: """ This is a special importer that is added onto the meta_path list, so that it is called before sys.path is traversed. It uses special logic to load one of the "shared" packages, by searching the entire sys.path for all instances of this shared package, and merging them. """ def __init__(self): pass def find_module(self, fullname, path = None, reload = False): #print >>sys.stderr, "shared find_module(%s), path = %s" % (fullname, path) if fullname not in sharedPackages: # Not a shared package; fall back to normal import. return None if path is None: path = sys.path excludePaths = [] if reload: # If reload is true, we are simply reloading the module, # looking for new paths to add. mod = sys.modules[fullname] excludePaths = getattr(mod, '_vfs_shared_path', None) if excludePaths is None: # If there isn't a _vfs_shared_path symbol already, # the module must have been loaded through # conventional means. Try to guess which path it was # found on. 
d = self.getLoadedDirname(mod) excludePaths = [d] loaders = [] for dir in path: if dir in excludePaths: continue importer = sys.path_importer_cache.get(dir, None) if importer is None: try: importer = VFSImporter(dir) except ImportError: continue sys.path_importer_cache[dir] = importer try: loader = importer.find_module(fullname) if not loader: continue except ImportError: continue loaders.append(loader) if not loaders: return None return VFSSharedLoader(loaders, reload = reload) def getLoadedDirname(self, mod): """ Returns the directory name that the indicated conventionally-loaded module must have been loaded from. """ if not hasattr(mod, __file__) or mod.__file__ is None: return None fullname = mod.__name__ dirname = Filename.fromOsSpecific(mod.__file__).getDirname() parentname = None basename = fullname if '.' in fullname: parentname, basename = fullname.rsplit('.', 1) path = None if parentname: parent = sys.modules[parentname] path = parent.__path__ if path is None: path = sys.path for dir in path: pdir = str(Filename.fromOsSpecific(dir)) if pdir + '/' + basename == dirname: # We found it! return dir # Couldn't figure it out. return None class VFSSharedLoader: """ The second part of VFSSharedImporter, this imports a list of packages and combines them. 
""" def __init__(self, loaders, reload): self.loaders = loaders self.reload = reload def load_module(self, fullname): #print >>sys.stderr, "shared load_module(%s), loaders = %s" % (fullname, map(lambda l: l.dir_path, self.loaders)) mod = None message = None path = [] vfs_shared_path = [] if self.reload: mod = sys.modules[fullname] path = mod.__path__ or [] vfs_shared_path = getattr(mod, '_vfs_shared_path', []) for loader in self.loaders: try: mod = loader.load_module(fullname, loadingShared = True) except ImportError: etype, evalue, etraceback = sys.exc_info() print "%s on %s: %s" % (etype.__name__, fullname, evalue) if not message: message = '%s: %s' % (fullname, evalue) continue for dir in getattr(mod, '__path__', []): if dir not in path: path.append(dir) if mod is None: # If all of them failed to load, raise ImportError. raise ImportError, message # If at least one of them loaded successfully, return the # union of loaded modules. mod.__path__ = path # Also set this special symbol, which records that this is a # shared package, and also lists the paths we have already # loaded. mod._vfs_shared_path = vfs_shared_path + [l.dir_path for l in self.loaders] return mod _registered = False def register(): """ Register the VFSImporter on the path_hooks, if it has not already been registered, so that future Python import statements will vector through here (and therefore will take advantage of Panda's virtual file system). """ global _registered if not _registered: _registered = True sys.path_hooks.insert(0, VFSImporter) sys.meta_path.insert(0, VFSSharedImporter()) # Blow away the importer cache, so we'll come back through the # VFSImporter for every folder in the future, even those # folders that previously were loaded directly. sys.path_importer_cache = {} def reloadSharedPackage(mod): """ Reloads the specific module as a shared package, adding any new directories that might have appeared on the search path. """ fullname = mod.__name__ path = None if '.' 
in fullname: parentname = fullname.rsplit('.', 1)[0] parent = sys.modules[parentname] path = parent.__path__ importer = VFSSharedImporter() loader = importer.find_module(fullname, path = path, reload = True) if loader: loader.load_module(fullname) # Also force any child packages to become shared packages, if # they aren't already. for basename, child in mod.__dict__.items(): if isinstance(child, types.ModuleType): childname = child.__name__ if childname == fullname + '.' + basename and \ hasattr(child, '__path__') and \ childname not in sharedPackages: sharedPackages[childname] = True reloadSharedPackage(child) def reloadSharedPackages(): """ Walks through the sharedPackages list, and forces a reload of any modules on that list that have already been loaded. This allows new directories to be added to the search path. """ #print >> sys.stderr, "reloadSharedPackages, path = %s, sharedPackages = %s" % (sys.path, sharedPackages.keys()) for fullname in sharedPackages.keys(): mod = sys.modules.get(fullname, None) if not mod: continue reloadSharedPackage(mod)
{ "repo_name": "sctigercat1/panda3d", "path": "direct/src/showbase/VFSImporter.py", "copies": "3", "size": "19101", "license": "bsd-3-clause", "hash": 2246397522527933000, "line_mean": 35.3828571429, "line_max": 121, "alpha_frac": 0.5791843359, "autogenerated": false, "ratio": 4.308820212046019, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6388004547946019, "avg_score": null, "num_lines": null }
__all__ = ["RegistMixin"]

import peewee


class RegistMixin:
    """Mixin that collects model classes inside the class so they can all be
    initialized in one place.
    """
    # Maps a registered model's class name to the model class itself.
    _regist_classes = {}

    @classmethod
    def regist(clz, target):
        """Decorator that registers a model class.

        It can also be called directly, e.g. to register the auto-generated
        through-model of a many-to-many relation::

            NoteUserThrough = User.roles.get_through_model()
            AioOrm.regist(NoteUserThrough)
        """
        clz._regist_classes[target.__name__] = target
        return target

    def create_tables(self, **kwargs):
        """Create database tables for every registered model after the server
        starts.

        Keyword arguments may map a model name to a list of rows used to seed
        that table when it is still empty.
        """
        app = self.app

        @app.listener('after_server_start')
        async def creat_db(app, loop):
            for name, target in type(self)._regist_classes.items():
                print('create table', name)
                try:
                    await target.create_table()
                except peewee.InternalError as ie:
                    # Most likely the table already exists; log and continue.
                    print(str(ie))
                else:
                    print('create table', name, 'done!')
                    # Seed the table with the supplied initial rows, but only
                    # when rows were provided and the table is empty.
                    if kwargs.get(name) and (await target.select().count()) == 0:
                        print(name, 'insert original data')
                        iq = target.insert_many(kwargs.get(name))
                        try:
                            result = await iq.execute()
                        except Exception as e:
                            print(name, 'insert original data error')
                            print(str(e))
                        else:
                            if result:
                                print(name, 'insert original data succeed')
                            else:
                                print(name, 'insert original data failed')
{ "repo_name": "Sanic-Extensions/sanic-aioorm", "path": "sanic_aioorm/regist_mixin.py", "copies": "1", "size": "1983", "license": "mit", "hash": -2546417015517599000, "line_mean": 35.66, "line_max": 81, "alpha_frac": 0.4637206765, "autogenerated": false, "ratio": 3.875264270613108, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4838984947113108, "avg_score": null, "num_lines": null }
__all__ = ['registry_status']

import gevent.monkey
gevent.monkey.patch_all()

import cache
import config
import gevent
import socket
import storage
import sys
import toolkit

from .app import app

_config = config.load()


def redis_status():
    """Round-trip a random key through redis; '' means healthy."""
    # Lazily (re)initialize the redis connection if it is missing.
    if not cache.redis_conn:
        cache.init()
    if not cache.redis_conn:
        return {'redis': 'unconfigured'}
    probe_key = toolkit.gen_random_string()
    probe_value = toolkit.gen_random_string()
    err = ''
    try:
        cache.redis_conn.setex(probe_key, 5, probe_value)
        roundtrip = cache.redis_conn.get(probe_key)
    except Exception:
        err = str(sys.exc_info()[1])
    else:
        if roundtrip != probe_value:
            err = 'Set value is different from what was received'
    return {'redis': err}


def storage_status():
    """Round-trip a random key through the storage driver; '' means healthy."""
    err = ''
    try:
        driver = storage.load(_config.storage)
        probe_key = toolkit.gen_random_string()
        probe_value = toolkit.gen_random_string()
        driver.put_content(probe_key, probe_value)
        read_back = driver.get_content(probe_key)
        driver.remove(probe_key)
        if probe_value != read_back:
            err = 'Set value is different from what was received'
    except Exception as e:
        err = str(e)
    return {'storage': err}


@app.route('/_status')
@app.route('/v1/_status')
def registry_status():
    """Health-check endpoint: probes redis and storage concurrently and
    returns 200 when both pass, 503 with failure details otherwise."""
    retval = {
        'services': ['redis', 'storage'],
        'failures': {},
        'host': socket.gethostname(),
    }
    code = 200
    # Run both probes concurrently; the joinall timeout bounds the request.
    greenlets = [gevent.spawn(check) for check in [redis_status, storage_status]]
    gevent.joinall(greenlets, timeout=10)
    for greenlet, service in zip(greenlets, retval['services']):
        try:
            result = greenlet.get()
        except Exception as e:
            retval['failures'][service] = str(e)
            code = 503
        else:
            if result[service] != '':
                retval['failures'][service] = result[service]
                code = 503
    return toolkit.response(retval, code=code)
{ "repo_name": "alephcloud/docker-registry", "path": "registry/status.py", "copies": "9", "size": "1910", "license": "apache-2.0", "hash": 1936035347766647000, "line_mean": 26.6811594203, "line_max": 72, "alpha_frac": 0.6041884817, "autogenerated": false, "ratio": 3.7896825396825395, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8893871021382539, "avg_score": null, "num_lines": null }
__all__ = ['regular_cube_complex']

import scipy

from pydec.mesh import regular_cube_mesh
from cube_array import cube_array_boundary


class regular_cube_complex(list):
    """
    Represents the (cubical cell) complex for a regular_cube_mesh.

    The complex itself is a list: element i holds the data for the
    i-dimensional cubes (a cube_array and its boundary operator).
    """
    class complex_data:
        # Simple record type: each level stores .cube_array and .boundary.
        pass

    def __init__(self, mesh):
        if not isinstance(mesh, regular_cube_mesh):
            raise ValueError('expected a regular_cube_mesh')

        self.mesh = mesh
        self.__construct_hierarchy()

        # Vertex coordinates, as floats, taken from the 0-cubes.
        self.vertices = self[0].cube_array.astype(float)

        #self.__test()

    def __repr__(self):
        output = ""
        output += "regular_cube_complex:\n"
        output += "  Shape:   " + str(self.mesh.bitmap.shape) + "\n"
        output += "  Complex:\n"
        for i in reversed(range(len(self))):
            output += "   %10d: %2d-D cubes\n" % (self[i].cube_array.shape[0], i)
        return output

    def __test(self):
        # Sanity check: the composition of consecutive boundary
        # operators must vanish (d o d == 0).
        for prev, next in zip(self[:-1], self[1:]):
            assert((prev.boundary * next.boundary).nnz == 0)

    def chain_complex(self):
        """Return the list of boundary operators, one per dimension."""
        return [lvl.boundary for lvl in self]

    def cochain_complex(self):
        """Return the coboundary operators (transposed boundaries), with a
        trailing zero map out of the top dimension."""
        return [lvl.boundary.T.tocsr() for lvl in self[1:]] + \
               [scipy.sparse.csr_matrix((1, self[-1].cube_array.shape[0]), dtype='int8')]

    def complex_dimension(self):
        return self.mesh.dimension()

    def embedding_dimension(self):
        return self.mesh.dimension()

    def __construct_hierarchy(self):
        # One complex_data entry per dimension 0..d.
        for i in range(self.complex_dimension() + 1):
            self.append(self.complex_data())

        # Top-dimensional cubes come straight from the mesh; lower
        # dimensions are derived by repeatedly taking faces.
        self[-1].cube_array = self.mesh.cube_array()

        for i in reversed(range(self.complex_dimension())):
            faces, boundary = cube_array_boundary(self[i + 1].cube_array, i + 1)
            self[i].cube_array = faces
            self[i + 1].boundary = boundary

        # The boundary of a 0-cube is the (empty) zero map.
        self[0].boundary = scipy.sparse.csr_matrix((1, self[0].cube_array.shape[0]), dtype='int8')
{ "repo_name": "ryanbressler/pydec", "path": "pydec/dec/regular_cube_complex.py", "copies": "6", "size": "2018", "license": "bsd-3-clause", "hash": 5863661431437727000, "line_mean": 29.5757575758, "line_max": 96, "alpha_frac": 0.5887016848, "autogenerated": false, "ratio": 3.6035714285714286, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.7192273113371429, "avg_score": null, "num_lines": null }
__all__ = ['regular_cube_mesh']

from numpy import asarray, hstack, vstack, arange, zeros


class regular_cube_mesh:
    """
    A regular grid of hypercubes.

    Examples:
        # create a 2x2 cube mesh
        bitmap = ones((2,2),dtype='bool')
        c_mesh = regular_cube_mesh(bitmap)

        # creates a 3x3 cube mesh with a center hole
        bitmap = ones((3,3),dtype='bool')
        bitmap[1,1] = False
        c_mesh = regular_cube_mesh(bitmap)

        # creates a 10x10x10 cube mesh with a center hole
        bitmap = ones((10,10,10),dtype='bool')
        bitmap[5,5,5] = False
        c_mesh = regular_cube_mesh(bitmap)
    """
    def __init__(self, bitmap):
        # Occupancy grid: True cells are the top-dimensional cubes.
        self.bitmap = asarray(bitmap, dtype='bool')

    def cube_array(self):
        """
        Return a cube array that represents this mesh's bitmap.

        Each row is [i0, ..., i(d-1), 0, 1, ..., d-1]: the grid indices
        of an occupied cell followed by the cube's spanning axes.
        """
        # Grid indices of the occupied cells, one row per cube.
        cubes = vstack(self.bitmap.nonzero()).transpose().astype('int32')
        # NOTE: numpy.rank() was deprecated/removed; ndarray.ndim is the
        # equivalent for arrays.
        d = self.bitmap.ndim
        # Every top-dimensional cube spans all d axes: append [0..d-1].
        axes = zeros((cubes.shape[0], d), dtype=cubes.dtype) + arange(d, dtype=cubes.dtype)
        return hstack((cubes, axes))

    def dimension(self):
        """Return the dimension of the mesh (rank of the bitmap)."""
        return self.bitmap.ndim
{ "repo_name": "ryanbressler/pydec", "path": "pydec/mesh/regular_cube.py", "copies": "6", "size": "1141", "license": "bsd-3-clause", "hash": 8556890794243786000, "line_mean": 27.525, "line_max": 137, "alpha_frac": 0.6029798422, "autogenerated": false, "ratio": 3.5, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.7102979842199999, "avg_score": null, "num_lines": null }
__all__ = ['relative_parity', 'permutation_parity']

def relative_parity(A, B):
    """Relative parity between two lists

    Parameters
    ----------
    A,B : lists of elements
        Lists A and B must contain permutations of the same elements.

    Returns
    -------
    parity : integer
        The parity is 0 if A differs from B by an even number of
        transpositions and 1 otherwise.

    Examples
    --------
    >>> relative_parity( [0,1], [0,1] )
    0
    >>> relative_parity( [0,1], [1,0] )
    1
    >>> relative_parity( [0,1,2], [0,1,2] )
    0
    >>> relative_parity( [0,1,2], [0,2,1] )
    1
    >>> relative_parity( ['A','B','C'], ['A','B','C'] )
    0
    >>> relative_parity( ['A','B','C'], ['A','C','B'] )
    1

    """
    if len(A) != len(B):
        raise ValueError("B is not a permutation of A")

    # represent each element in B with its index in A and run permutation_parity()
    A_indices = dict(zip(A, range(len(A))))

    if len(A_indices) != len(A):
        raise ValueError("A contains duplicate values")

    try:
        perm = [A_indices[x] for x in B]
    except KeyError:
        raise ValueError("B is not a permutation of A")

    return permutation_parity(perm, check_input=False)


def permutation_parity(perm, check_input=True):
    """Parity of a permutation of the integers

    Parameters
    ----------
    perm : list of integers
        List containing a permutation of the integers 0...N

    Optional Parameters
    -------------------
    check_input : boolean
        If True, check whether the input is a valid permutation.

    Returns
    -------
    parity : integer
        The parity is 0 if perm differs from range(len(perm)) by an
        even number of transpositions and 1 otherwise.

    Examples
    --------
    >>> permutation_parity( [0,1,2] )
    0
    >>> permutation_parity( [0,2,1] )
    1
    >>> permutation_parity( [1,0,2] )
    1
    >>> permutation_parity( [1,2,0] )
    0
    >>> permutation_parity( [2,0,1] )
    0
    >>> permutation_parity( [0,1,3,2] )
    1

    """
    n = len(perm)

    if check_input:
        # NOTE: compare against list(range(n)) so the check also works on
        # Python 3, where range() is not a list.
        if sorted(perm) != list(range(n)):
            raise ValueError("Invalid input")

    # Decompose into disjoint cycles.  We only need to
    # count the number of cycles to determine the parity
    num_cycles = 0
    seen = set()
    for i in range(n):          # was xrange(); range() is equivalent here
        if i in seen:
            continue
        num_cycles += 1
        j = i
        while True:
            assert j not in seen
            seen.add(j)
            j = perm[j]
            if j == i:
                break

    return (n - num_cycles) % 2
{ "repo_name": "alejospina/pydec", "path": "pydec/math/parity.py", "copies": "6", "size": "2696", "license": "bsd-3-clause", "hash": 68631156158771000, "line_mean": 23.2882882883, "line_max": 82, "alpha_frac": 0.5278189911, "autogenerated": false, "ratio": 3.6830601092896176, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.03386389284003791, "num_lines": 111 }
__all__ = ['remove_file', 'remove_files', 'zip_files']

import os
import glob
import zipfile

try:
    import zlib
except ImportError:
    # Without zlib, zipfile cannot deflate; fall back to stored (no compression).
    DEFAULT_ZIP_MODE = zipfile.ZIP_STORED
else:
    DEFAULT_ZIP_MODE = zipfile.ZIP_DEFLATED


def merge_files(dst, src_glob, nbytes=1 << 30):
    """Merge src files to dst. Only works on Linux (uses os.sendfile).

    Each source file is removed after it has been appended to dst.

    :type dst: str
    :param dst: Destination path

    :type src_glob: str
    :param src_glob: Source glob pattern.

    :type nbytes: int
    :param nbytes: Number of bytes sent per iteration.
    """
    paths = sorted(glob.iglob(src_glob))
    if paths:
        OVERWRITE_FLAGS = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        dest_fd = os.open(dst, flags=OVERWRITE_FLAGS, mode=0o666)
        try:
            for path in paths:
                src_fd = os.open(path, os.O_RDONLY)
                try:
                    # offset=None uses (and advances) the fd's own offset.
                    while os.sendfile(dest_fd, src_fd, offset=None, count=nbytes) != 0:
                        pass
                finally:
                    # Close each descriptor even on error (was leaked before).
                    os.close(src_fd)
                os.remove(path)
        finally:
            os.close(dest_fd)


def remove_file(path):
    """Delete *path*, silently ignoring a missing file."""
    try:
        os.remove(path)
    except FileNotFoundError:
        pass


def remove_files(glob_pattern='', paths=()):
    """Delete the files matching *glob_pattern* (or the explicit *paths*)."""
    if glob_pattern:
        paths = glob.iglob(glob_pattern)
    for path in paths:
        remove_file(path)


def zip_files(dest, glob_pattern='', paths=(), mode='w', compression=DEFAULT_ZIP_MODE):
    """Zip the files matching *glob_pattern* (or *paths*) into *dest*.

    Each entry is stored under its basename only.
    """
    if glob_pattern:
        paths = glob.iglob(glob_pattern)

    with zipfile.ZipFile(dest, mode=mode, compression=compression) as zipper:
        for path in paths:
            filename = os.path.split(path)[1]
            zipper.write(path, arcname=filename)
{ "repo_name": "hans-t/autos", "path": "autos/utils/file.py", "copies": "1", "size": "1522", "license": "mit", "hash": 7451298041347843000, "line_mean": 23.9508196721, "line_max": 87, "alpha_frac": 0.6195795007, "autogenerated": false, "ratio": 3.412556053811659, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9518225966831579, "avg_score": 0.0027819175360158962, "num_lines": 61 }
__all__ = ['Renderbuffer']


class Renderbuffer:
    '''
    An OpenGL renderbuffer object: an image container designed to be
    attached to a :py:class:`Framebuffer`.

    Renderbuffers are optimized for use as render targets — unlike
    :py:class:`Texture` objects they may not be sampled from, so they are
    the right choice whenever the produced image is never resampled.
    If resampling is needed, use a Texture instead.  Multisampling is
    supported natively.

    Instances cannot be created directly; they require a context.
    Use :py:meth:`Context.renderbuffer` or
    :py:meth:`Context.depth_renderbuffer` instead.
    '''

    __slots__ = ['mglo', '_size', '_components', '_samples', '_depth', '_dtype', '_glo', 'ctx', 'extra']

    def __init__(self):
        self.mglo = None  #: Internal representation for debug purposes only.
        self._size = (None, None)
        self._components = None
        self._samples = None
        self._depth = None
        self._dtype = None
        self._glo = None
        self.ctx = None  #: The context this object belongs to
        self.extra = None  #: Any - Attribute for storing user defined objects
        # Direct construction is forbidden; only the context factories may
        # build instances (they bypass __init__).
        raise TypeError()

    def __repr__(self):
        return '<Renderbuffer: %d>' % self.glo

    def __eq__(self, other):
        if type(self) is not type(other):
            return False
        return self.mglo is other.mglo

    @property
    def size(self) -> tuple:
        '''
        tuple: The size of the renderbuffer.
        '''
        return self._size

    @property
    def width(self) -> int:
        '''
        int: The width of the renderbuffer.
        '''
        return self._size[0]

    @property
    def height(self) -> int:
        '''
        int: The height of the renderbuffer.
        '''
        return self._size[1]

    @property
    def components(self) -> int:
        '''
        int: The components of the renderbuffer.
        '''
        return self._components

    @property
    def samples(self) -> int:
        '''
        int: The samples of the renderbuffer.
        '''
        return self._samples

    @property
    def depth(self) -> bool:
        '''
        bool: Is the renderbuffer a depth renderbuffer?
        '''
        return self._depth

    @property
    def dtype(self) -> str:
        '''
        str: Data type.
        '''
        return self._dtype

    @property
    def glo(self) -> int:
        '''
        int: The internal OpenGL object.

        This values is provided for debug purposes only.
        '''
        return self._glo

    def release(self) -> None:
        '''
        Release the ModernGL object.
        '''
        self.mglo.release()
{ "repo_name": "cprogrammer1994/ModernGL", "path": "moderngl/renderbuffer.py", "copies": "1", "size": "2797", "license": "mit", "hash": 1671528627484556000, "line_mean": 24.8981481481, "line_max": 104, "alpha_frac": 0.5563103325, "autogenerated": false, "ratio": 4.43968253968254, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.549599287218254, "avg_score": null, "num_lines": null }
__all__ = [ "render" ]

import vtk
import numpy as np
import sys
from scipy.stats.mstats import mquantiles
import scipy.interpolate
from vtk.util.vtkConstants import *
import matplotlib.pyplot as plt


def numpy2VTK(img, spacing=(1.0, 1.0, 1.0)):
    """Wrap a 3D numpy array (cast to uint8) in a vtkImageImport source.

    Parameters
    ----------
    img : numpy array, 3D (z, y, x ordering assumed by the extent setup below)
    spacing : sequence of 3 floats, voxel spacing

    Returns
    -------
    importer : vtk.vtkImageImport
    """
    # evolved from code from Stou S., on http://www.siafoo.net/snippet/314
    importer = vtk.vtkImageImport()

    img_data = img.astype('uint8')
    # BUG FIX: ndarray.tostring() is deprecated/removed in modern numpy;
    # tobytes() returns the identical byte string.
    img_string = img_data.tobytes()  # type short
    dim = img.shape

    importer.CopyImportVoidPointer(img_string, len(img_string))
    importer.SetDataScalarType(VTK_UNSIGNED_CHAR)
    importer.SetNumberOfScalarComponents(1)

    # numpy is (z, y, x) while VTK extents are (x, y, z), hence dim reversal
    extent = importer.GetDataExtent()
    importer.SetDataExtent(extent[0], extent[0] + dim[2] - 1,
                           extent[2], extent[2] + dim[1] - 1,
                           extent[4], extent[4] + dim[0] - 1)
    importer.SetWholeExtent(extent[0], extent[0] + dim[2] - 1,
                            extent[2], extent[2] + dim[1] - 1,
                            extent[4], extent[4] + dim[0] - 1)

    importer.SetDataSpacing(spacing[0], spacing[1], spacing[2])
    importer.SetDataOrigin(0, 0, 0)

    return importer


def volumeRender(img, tf=None, spacing=(1.0, 1.0, 1.0)):
    """Build a GPU ray-cast volume actor for *img*.

    Parameters
    ----------
    img : numpy array
    tf : list of [voxel_value, r, g, b, opacity] rows, or None for a
        default min/max ramp
    spacing : sequence of 3 floats

    Returns
    -------
    [vol] : single-element list holding the vtkVolume actor
    """
    importer = numpy2VTK(img, spacing)

    # BUG FIX: the original used a mutable default (tf=[]) and appended the
    # default ramp into it, so the first image's min/max leaked into every
    # later call that relied on the default.  Build a fresh list instead.
    if tf is None or len(tf) == 0:
        tf = [[img.min(), 0, 0, 0, 0],
              [img.max(), 1, 1, 1, 1]]

    # Transfer Functions
    opacity_tf = vtk.vtkPiecewiseFunction()
    color_tf = vtk.vtkColorTransferFunction()
    for p in tf:
        color_tf.AddRGBPoint(p[0], p[1], p[2], p[3])
        opacity_tf.AddPoint(p[0], p[4])

    volMapper = vtk.vtkGPUVolumeRayCastMapper()
    volMapper.SetInputConnection(importer.GetOutputPort())

    # The property describes how the data will look
    volProperty = vtk.vtkVolumeProperty()
    volProperty.SetColor(color_tf)
    volProperty.SetScalarOpacity(opacity_tf)
    volProperty.ShadeOn()
    volProperty.SetInterpolationTypeToLinear()

    vol = vtk.vtkVolume()
    vol.SetMapper(volMapper)
    vol.SetProperty(volProperty)

    return [vol]


def vtk_basic(actors):
    """
    Create a window, renderer, interactor, add the actors and start the thing

    Parameters
    ----------
    actors :  list of vtkActors

    Returns
    -------
    nothing
    """
    # create a rendering window and renderer
    ren = vtk.vtkRenderer()
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(ren)
    renWin.SetSize(600, 600)
    # ren.SetBackground( 1, 1, 1)

    # create a renderwindowinteractor
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)

    for a in actors:
        # assign actor to the renderer
        ren.AddActor(a)

    # render
    renWin.Render()

    # enable user interface interactor (blocks until the window is closed)
    iren.Initialize()
    iren.Start()


def render(img, tf=None, colors=None, opacity=None):
    """Interactively volume-render *img*.

    Parameters
    ----------
    img : image object providing rescale()/get_data() and a header with
        'pixelSize' (presumably an irtk image — TODO confirm)
    tf : transfert function of the form [[voxel_value, r, g, b, opacity]]
    colors : matplotlib colormap name used to build tf when given
    opacity : [[voxel_value, opacity]] control points, interpolated
        linearly over 0..255 (only used together with *colors*)
    """
    data = img.rescale().get_data(dtype='uint8')

    if colors is not None:
        tf = plt.cm.get_cmap(colors, 256)(range(256)) * 255
        tf = np.hstack((np.arange(256).reshape(256, 1), tf[:, :3]))
        if opacity is not None:
            opacity = np.array(opacity)
            x = opacity[:, 0]
            y = opacity[:, 1]
            f = scipy.interpolate.interp1d(x, y, kind='linear')
            tf = np.hstack((tf, f(range(256)).reshape((256, 1))))
        else:
            # NOTE(review): this appends the opacity ramp twice, producing a
            # 6-column tf while volumeRender reads only columns 0-4 — looks
            # unintentional but is harmless; confirm before changing.
            tf = np.hstack((tf,
                            np.linspace(0, 1, len(tf)).reshape(len(tf), 1),
                            np.linspace(0, 1, len(tf)).reshape(len(tf), 1)))

    if tf is None:
        # default: hide the background (below median), ramp up to the 98th
        # percentile
        q = mquantiles(data.flatten(), [0.5, 0.98])
        q[0] = max(q[0], 1)
        q[1] = max(q[1], 1)
        tf = [[0, 0, 0, 0, 0],
              [q[0], 0, 0, 0, 0],
              [q[1], 1, 1, 1, 0.5],
              [data.max(), 1, 1, 1, 1]]

    actor_list = volumeRender(data, tf=tf, spacing=img.header['pixelSize'][:3])

    vtk_basic(actor_list)
{ "repo_name": "ghisvail/irtk-legacy", "path": "wrapping/cython/irtk/ext/Show3D.py", "copies": "5", "size": "3940", "license": "bsd-3-clause", "hash": -7232199457749203000, "line_mean": 28.6240601504, "line_max": 79, "alpha_frac": 0.5898477157, "autogenerated": false, "ratio": 3.2669983416252073, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.031692877693558226, "num_lines": 133 }
__all__ = ["render_surface_function"]

import os
import warnings

import skimage.measure
import webcolors as wc

from ..registration import resample_image

try:
    import chart_studio.plotly as py
    from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
    from plotly.graph_objs import *
    from plotly import figure_factory as FF
except ImportError:
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit during import.
    warnings.warn(
        "Cant import Plotly. Install it `pip install chart_studio` if you want to use ants.render_surface_function"
    )


def render_surface_function(
    surfimg,
    funcimg=None,
    alphasurf=0.2,
    alphafunc=1.0,
    isosurf=0.5,
    isofunc=0.5,
    smoothsurf=None,
    smoothfunc=None,
    cmapsurf="grey",
    cmapfunc="red",
    filename=None,
    notebook=False,
    auto_open=False,
):
    """
    Render an image as a base surface and an optional collection of other
    image.

    ANTsR function: `renderSurfaceFunction`
    NOTE: The ANTsPy version of this function is actually completely
    different than the ANTsR version, although they should produce similar
    results.

    Arguments
    ---------
    surfimg : ANTsImage
        Input image to use as rendering substrate.

    funcimg : ANTsImage
        Input list of images to use as functional overlays.

    alphasurf : scalar
        alpha for the surface contour

    alphafunc : scalar
        alpha value for functional blobs

    isosurf : scalar
        intensity level that defines lower threshold for surface image

    isofunc : scalar
        intensity level that defines lower threshold for functional image

    smoothsurf : scalar (optional)
        smoothing for the surface image
        NOTE(review): currently unused — TODO wire up or remove

    smoothfunc : scalar (optional)
        smoothing for the functional image
        NOTE(review): currently unused — TODO wire up or remove

    cmapsurf : string
        color map for surface image

    cmapfunc : string
        color map for functional image

    filename : string
        where to save rendering. if None, will plot interactively

    notebook : boolean
        whether you're in a jupyter notebook.

    auto_open : boolean
        whether plotly should open the resulting HTML in a browser.

    Returns
    -------
    N/A

    Example
    -------
    >>> import ants
    >>> mni = ants.image_read(ants.get_ants_data('mni'))
    >>> mnia = ants.image_read(ants.get_ants_data('mnia'))
    >>> ants.render_surface_function(mni, mnia, alphasurf=0.1, filename='/users/ncullen/desktop/surffnc.png')
    """
    # map friendly color names to plotly colormap identifiers
    cmap_dict = {
        "grey": "Greys",
        "gray": "Greys",
        "red": "Reds",
        "green": "Greens",
        "jet": "Jet",
    }

    if surfimg.dimension != 3:
        raise ValueError("surfimg must be 3D")

    if notebook:
        init_notebook_mode(connected=True)

    # trisurf traces collected for the final plot() call
    fig_data_list = []

    # downsample to keep the triangulation tractable
    surfimg = resample_image(surfimg, (3, 3, 3))
    surfimg_arr = surfimg.numpy()

    # NOTE(review): marching_cubes_lewiner was removed in scikit-image >= 0.19
    # (use skimage.measure.marching_cubes there) — confirm the pinned version.
    surfverts, surffaces, _, _ = skimage.measure.marching_cubes_lewiner(
        surfimg_arr, isosurf, spacing=(1, 1, 1)
    )
    surffig = FF.create_trisurf(
        x=surfverts[:, 0],
        y=surfverts[:, 1],
        z=surfverts[:, 2],
        colormap=cmap_dict.get(cmapsurf, cmapsurf),
        plot_edges=False,
        simplices=surffaces,
    )
    surffig["data"][0].update(opacity=alphasurf)
    fig_data_list.append(surffig.data[0])

    if funcimg is not None:
        # normalize scalar arguments to per-overlay lists
        if not isinstance(funcimg, (tuple, list)):
            funcimg = [funcimg]
        if not isinstance(alphafunc, (tuple, list)):
            alphafunc = [alphafunc] * len(funcimg)
        if not isinstance(isofunc, (tuple, list)):
            isofunc = [isofunc] * len(funcimg)
        if not isinstance(cmapfunc, (tuple, list)):
            cmapfunc = [cmapfunc] * len(funcimg)

        # plotly wants an 'rgb(r, g, b)' color pair per overlay
        for i in range(len(cmapfunc)):
            cmapfunc[i] = "rgb%s" % str(wc.name_to_rgb(cmapfunc[i]))
            cmapfunc[i] = [cmapfunc[i]] * 2

        for func_idx, fimg in enumerate(funcimg):
            if fimg.dimension != 3:
                raise ValueError("all funcimgs must be 3D")

            fimg = resample_image(fimg, (3, 3, 3))
            funcimg_arr = fimg.numpy()

            funcverts, funcfaces, _, _ = skimage.measure.marching_cubes_lewiner(
                funcimg_arr, isofunc[func_idx], spacing=(1, 1, 1)
            )
            funcfig = FF.create_trisurf(
                x=funcverts[:, 0],
                y=funcverts[:, 1],
                z=funcverts[:, 2],
                plot_edges=False,
                simplices=funcfaces,
                colormap=cmapfunc[func_idx],
            )
            funcfig["data"][0].update(opacity=alphafunc[func_idx])
            fig_data_list.append(funcfig.data[0])

    if filename is not None:
        # plotly writes an HTML page and a static image next to it
        save_file = "png"
        image_filename = filename
        filename = image_filename.split(".")[0] + ".html"
    else:
        image_filename = "ants_plot"
        filename = "ants_plot.html"
        save_file = None

    try:
        plot(
            fig_data_list,
            image=save_file,
            filename=filename,
            image_filename=image_filename,
            auto_open=auto_open,
        )
    except PermissionError:
        print(
            "PermissionError caught - are you running jupyter console? Try launching it with sudo privledges (e.g. `sudo jupyter-console`)"
        )
{ "repo_name": "ANTsX/ANTsPy", "path": "ants/viz/render_surface_function.py", "copies": "1", "size": "5503", "license": "apache-2.0", "hash": -1962030499097000400, "line_mean": 28.5860215054, "line_max": 139, "alpha_frac": 0.5971288388, "autogenerated": false, "ratio": 3.654050464807437, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9745605751532627, "avg_score": 0.001114710414962056, "num_lines": 186 }
__all__ = ["RequestHandler"]


class RequestHandler(object):
    """Abstract request handler.

    Subclasses must provide two methods:

    * ``handle(request)`` -- receives a request object (as produced by
      ``YourTCPConnectionSubclass.get_request``) and returns a two-tuple of
      ``(response_iterable, metadata)``.
    * ``handle_error(type, exception, traceback)`` -- receives the standard
      three exception arguments and returns the same kind of two-tuple.

    The metadata object may be anything; it is simply forwarded as the
    ``metadata`` parameter of ``YourTCPConectionSubclass.log_access()``.

    When the response iterable yields more than one string, the connection
    pauses between chunks so other coroutines can run.  Lazy iterables
    (generators, etc.) are perfectly fine and avoid holding a long response
    in memory while blocking the whole server.  Each yielded string goes out
    in its own ``socket.sendall()`` call, so chunk large payloads yourself
    (e.g. a file object's lines may be far shorter than a good chunk size).
    """

    def __init__(self, client_address, server_address, connection):
        self.connection = connection
        self.server = connection.server
        self.client_address = client_address
        self.server_address = server_address

    def handle(self, request):
        # must be supplied by a concrete subclass
        raise NotImplementedError()

    def handle_error(self, klass, exc, tb):
        # must be supplied by a concrete subclass
        raise NotImplementedError()
{ "repo_name": "teepark/feather", "path": "feather/requests.py", "copies": "1", "size": "1632", "license": "bsd-3-clause", "hash": 956622090750295900, "line_mean": 39.8, "line_max": 79, "alpha_frac": 0.7218137255, "autogenerated": false, "ratio": 4.771929824561403, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5993743550061403, "avg_score": null, "num_lines": null }
__all__ = ('Request', 'StreamResponse', 'Response')

import asyncio
import binascii
import cgi
import collections
import http.cookies
import io
import json
import warnings

from urllib.parse import urlsplit, parse_qsl, unquote
from types import MappingProxyType

from . import hdrs
from .helpers import reify
from .multidict import (CIMultiDictProxy,
                        CIMultiDict,
                        MultiDictProxy,
                        MultiDict)
from aio2py.required.aiohttp.protocol import Response as ResponseImpl, HttpVersion10
from .streams import EOF_MARKER

# Unique marker distinguishing "never parsed" from "parsed a None header".
sentinel = object()


class HeadersMixin:
    """Mixin providing lazily-parsed Content-Type / charset / Content-Length
    accessors over a ``self.headers`` mapping supplied by the host class."""

    _content_type = None
    _content_dict = None
    _stored_content_type = sentinel

    def _parse_content_type(self, raw):
        # Cache the raw header so we only re-parse when it changes.
        self._stored_content_type = raw
        if raw is None:
            # default value according to RFC 2616
            self._content_type = 'application/octet-stream'
            self._content_dict = {}
        else:
            self._content_type, self._content_dict = cgi.parse_header(raw)

    @property
    def content_type(self, _CONTENT_TYPE=hdrs.CONTENT_TYPE):
        """The value of content part for Content-Type HTTP header."""
        raw = self.headers.get(_CONTENT_TYPE)
        if self._stored_content_type != raw:
            self._parse_content_type(raw)
        return self._content_type

    @property
    def charset(self, _CONTENT_TYPE=hdrs.CONTENT_TYPE):
        """The value of charset part for Content-Type HTTP header."""
        raw = self.headers.get(_CONTENT_TYPE)
        if self._stored_content_type != raw:
            self._parse_content_type(raw)
        return self._content_dict.get('charset')

    @property
    def content_length(self, _CONTENT_LENGTH=hdrs.CONTENT_LENGTH):
        """The value of Content-Length HTTP header."""
        l = self.headers.get(_CONTENT_LENGTH)
        if l is None:
            return None
        else:
            return int(l)


# Container for an uploaded file parsed out of a multipart POST body.
FileField = collections.namedtuple('Field', 'name filename file content_type')


############################################################
# HTTP Request
############################################################


class Request(dict, HeadersMixin):
    """Server-side HTTP request wrapper built from a parsed protocol message,
    its payload stream and the underlying transport/reader/writer pair."""

    # Methods whose bodies post() will attempt to parse as form data.
    POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT,
                    hdrs.METH_TRACE, hdrs.METH_DELETE}

    def __init__(self, app, message, payload, transport, reader, writer, *,
                 _HOST=hdrs.HOST, secure_proxy_ssl_header=None):
        self._app = app
        self._version = message.version
        self._transport = transport
        self._reader = reader
        self._writer = writer
        self._method = message.method
        self._host = message.headers.get(_HOST)
        self._path_qs = message.path
        res = urlsplit(message.path)
        self._path = unquote(res.path)
        self._query_string = res.query
        self._post = None
        self._post_files_cache = None
        self._headers = CIMultiDictProxy(message.headers)

        # HTTP/1.0 and earlier never keep the connection alive.
        if self._version < HttpVersion10:
            self._keep_alive = False
        else:
            self._keep_alive = not message.should_close

        # matchdict, route_name, handler
        # or information about traversal lookup
        self._match_info = None  # initialized after route resolving
        self._payload = payload
        self._cookies = None
        self._read_bytes = None
        self._has_body = not payload.at_eof()

        self._secure_proxy_ssl_header = secure_proxy_ssl_header

    @property
    def scheme(self):
        """A string representing the scheme of the request.

        'http' or 'https'.
        """
        if self._transport.get_extra_info('sslcontext'):
            return 'https'
        # A trusted reverse proxy may signal TLS via a configured header.
        secure_proxy_ssl_header = self._secure_proxy_ssl_header
        if secure_proxy_ssl_header is not None:
            header, value = secure_proxy_ssl_header
            if self._headers.get(header) == value:
                return 'https'
        return 'http'

    @property
    def method(self):
        """Read only property for getting HTTP method.

        The value is upper-cased str like 'GET', 'POST', 'PUT' etc.
        """
        return self._method

    @property
    def version(self):
        """Read only property for getting HTTP version of request.

        Returns aiohttp.protocol.HttpVersion instance.
        """
        return self._version

    @property
    def host(self):
        """Read only property for getting *HOST* header of request.

        Returns str or None if HTTP request has no HOST header.
        """
        return self._host

    @property
    def path_qs(self):
        """The URL including PATH_INFO and the query string.

        E.g, /app/blog?id=10
        """
        return self._path_qs

    @property
    def path(self):
        """The URL including *PATH INFO* without the host or scheme.

        E.g., ``/app/blog``
        """
        return self._path

    @property
    def query_string(self):
        """The query string in the URL.

        E.g., id=10
        """
        return self._query_string

    @reify
    def GET(self):
        """A multidict with all the variables in the query string.

        Lazy property.
        """
        return MultiDictProxy(MultiDict(parse_qsl(self._query_string)))

    @reify
    def POST(self):
        """A multidict with all the variables in the POST parameters.

        post() methods has to be called before using this attribute.
        """
        if self._post is None:
            raise RuntimeError("POST is not available before post()")
        return self._post

    @property
    def headers(self):
        """A case-insensitive multidict proxy with all headers."""
        return self._headers

    @property
    def keep_alive(self):
        """Is keepalive enabled by client?"""
        return self._keep_alive

    @property
    def match_info(self):
        """Result of route resolving."""
        return self._match_info

    @property
    def app(self):
        """Application instance."""
        return self._app

    @property
    def transport(self):
        """Transport used for request processing."""
        return self._transport

    @property
    def cookies(self):
        """Return request cookies.

        A read-only dictionary-like object.
        """
        if self._cookies is None:
            raw = self.headers.get(hdrs.COOKIE, '')
            parsed = http.cookies.SimpleCookie(raw)
            self._cookies = MappingProxyType(
                {key: val.value for key, val in parsed.items()})
        return self._cookies

    @property
    def payload(self):
        """Return raw payload stream."""
        warnings.warn('use Request.content instead', DeprecationWarning)
        return self._payload

    @property
    def content(self):
        """Return raw payload stream."""
        return self._payload

    @property
    def has_body(self):
        """Return True if request has HTTP BODY, False otherwise."""
        return self._has_body

    @asyncio.coroutine
    def release(self):
        """Release request.

        Eat unread part of HTTP BODY if present.
        """
        chunk = yield from self._payload.readany()
        while chunk is not EOF_MARKER or chunk:
            chunk = yield from self._payload.readany()

    @asyncio.coroutine
    def read(self):
        """Read request body if present.

        Returns bytes object with full request content.
        """
        if self._read_bytes is None:
            body = bytearray()
            while True:
                chunk = yield from self._payload.readany()
                body.extend(chunk)
                if chunk is EOF_MARKER:
                    break
            # Cache so repeated read()/text()/post() calls reuse the body.
            self._read_bytes = bytes(body)
        return self._read_bytes

    @asyncio.coroutine
    def text(self):
        """Return BODY as text using encoding from .charset."""
        bytes_body = yield from self.read()
        encoding = self.charset or 'utf-8'
        return bytes_body.decode(encoding)

    @asyncio.coroutine
    def json(self, *, loader=json.loads):
        """Return BODY as JSON."""
        body = yield from self.text()
        return loader(body)

    @asyncio.coroutine
    def post(self):
        """Return POST parameters."""
        if self._post is not None:
            return self._post
        if self.method not in self.POST_METHODS:
            self._post = MultiDictProxy(MultiDict())
            return self._post

        content_type = self.content_type
        if (content_type not in ('',
                                 'application/x-www-form-urlencoded',
                                 'multipart/form-data')):
            # Unsupported body encoding: expose an empty mapping.
            self._post = MultiDictProxy(MultiDict())
            return self._post

        body = yield from self.read()
        content_charset = self.charset or 'utf-8'

        # Synthesize a CGI environment so cgi.FieldStorage can parse the body.
        environ = {'REQUEST_METHOD': self.method,
                   'CONTENT_LENGTH': str(len(body)),
                   'QUERY_STRING': '',
                   'CONTENT_TYPE': self.headers.get(hdrs.CONTENT_TYPE)}

        fs = cgi.FieldStorage(fp=io.BytesIO(body),
                              environ=environ,
                              keep_blank_values=True,
                              encoding=content_charset)

        supported_transfer_encoding = {
            'base64': binascii.a2b_base64,
            'quoted-printable': binascii.a2b_qp
        }

        out = MultiDict()

        for field in fs.list or ():
            transfer_encoding = field.headers.get(
                hdrs.CONTENT_TRANSFER_ENCODING, None)
            if field.filename:
                ff = FileField(field.name,
                               field.filename,
                               field.file,  # N.B. file closed error
                               field.type)
                # Keep the FieldStorage alive so its temp file isn't closed.
                if self._post_files_cache is None:
                    self._post_files_cache = {}
                self._post_files_cache[field.name] = field
                out.add(field.name, ff)
            else:
                value = field.value
                if transfer_encoding in supported_transfer_encoding:
                    # binascii accepts bytes
                    value = value.encode('utf-8')
                    value = supported_transfer_encoding[
                        transfer_encoding](value)
                out.add(field.name, value)

        self._post = MultiDictProxy(out)
        return self._post

    def __repr__(self):
        return "<{} {} {} >".format(self.__class__.__name__,
                                    self.method, self.path)


############################################################
# HTTP Response classes
############################################################


class StreamResponse(HeadersMixin):
    """Streaming HTTP response: headers/cookies are accumulated until
    start(), after which the body is pushed with write()/write_eof()."""

    def __init__(self, *, status=200, reason=None, headers=None):
        self._body = None
        self._keep_alive = None
        self._chunked = False
        self._chunk_size = None
        self._compression = False
        self._compression_force = False
        self._headers = CIMultiDict()
        self._cookies = http.cookies.SimpleCookie()
        self.set_status(status, reason)

        self._req = None
        self._resp_impl = None
        self._eof_sent = False

        if headers is not None:
            self._headers.extend(headers)

    def _copy_cookies(self):
        # Serialize accumulated cookies into Set-Cookie headers;
        # [1:] strips the leading space cookie.output() emits.
        for cookie in self._cookies.values():
            value = cookie.output(header='')[1:]
            self.headers.add(hdrs.SET_COOKIE, value)

    @property
    def started(self):
        return self._resp_impl is not None

    @property
    def status(self):
        return self._status

    @property
    def chunked(self):
        return self._chunked

    @property
    def compression(self):
        return self._compression

    @property
    def reason(self):
        return self._reason

    def set_status(self, status, reason=None):
        self._status = int(status)
        if reason is None:
            reason = ResponseImpl.calc_reason(status)
        self._reason = reason

    @property
    def keep_alive(self):
        return self._keep_alive

    def force_close(self):
        self._keep_alive = False

    def enable_chunked_encoding(self, chunk_size=None):
        """Enables automatic chunked transfer encoding."""
        self._chunked = True
        self._chunk_size = chunk_size

    def enable_compression(self, force=False):
        """Enables response compression with `deflate` encoding."""
        self._compression = True
        self._compression_force = force

    @property
    def headers(self):
        return self._headers

    @property
    def cookies(self):
        return self._cookies

    def set_cookie(self, name, value, *, expires=None,
                   domain=None, max_age=None, path='/',
                   secure=None, httponly=None, version=None):
        """Set or update response cookie.

        Sets new cookie or updates existent with new value.
        Also updates only those params which are not None.
        """
        old = self._cookies.get(name)
        if old is not None and old.coded_value == '':
            # deleted cookie
            self._cookies.pop(name, None)

        self._cookies[name] = value
        c = self._cookies[name]
        if expires is not None:
            c['expires'] = expires
        if domain is not None:
            c['domain'] = domain
        if max_age is not None:
            c['max-age'] = max_age
        if path is not None:
            c['path'] = path
        if secure is not None:
            c['secure'] = secure
        if httponly is not None:
            c['httponly'] = httponly
        if version is not None:
            c['version'] = version

    def del_cookie(self, name, *, domain=None, path='/'):
        """Delete cookie.

        Creates new empty expired cookie.
        """
        # TODO: do we need domain/path here?
        self._cookies.pop(name, None)
        self.set_cookie(name, '', max_age=0, domain=domain, path=path)

    @property
    def content_length(self):
        # Just a placeholder for adding setter
        return super().content_length

    @content_length.setter
    def content_length(self, value):
        if value is not None:
            value = int(value)
            # TODO: raise error if chunked enabled
            self.headers[hdrs.CONTENT_LENGTH] = str(value)
        elif hdrs.CONTENT_LENGTH in self.headers:
            del self.headers[hdrs.CONTENT_LENGTH]

    @property
    def content_type(self):
        # Just a placeholder for adding setter
        return super().content_type

    @content_type.setter
    def content_type(self, value):
        self.content_type  # read header values if needed
        self._content_type = str(value)
        self._generate_content_type_header()

    @property
    def charset(self):
        # Just a placeholder for adding setter
        return super().charset

    @charset.setter
    def charset(self, value):
        ctype = self.content_type  # read header values if needed
        if ctype == 'application/octet-stream':
            raise RuntimeError("Setting charset for application/octet-stream "
                               "doesn't make sense, setup content_type first")
        if value is None:
            self._content_dict.pop('charset', None)
        else:
            self._content_dict['charset'] = str(value).lower()
        self._generate_content_type_header()

    def _generate_content_type_header(self, CONTENT_TYPE=hdrs.CONTENT_TYPE):
        # Rebuild the Content-Type header from the cached type + params.
        params = '; '.join("%s=%s" % i for i in self._content_dict.items())
        if params:
            ctype = self._content_type + '; ' + params
        else:
            ctype = self._content_type
        self.headers[CONTENT_TYPE] = ctype

    def _start_pre_check(self, request):
        # A response may only ever be started once, and for one request.
        if self._resp_impl is not None:
            if self._req is not request:
                raise RuntimeError(
                    'Response has been started with different request.')
            else:
                return self._resp_impl
        else:
            return None

    def start(self, request):
        resp_impl = self._start_pre_check(request)
        if resp_impl is not None:
            return resp_impl

        self._req = request
        keep_alive = self._keep_alive
        if keep_alive is None:
            # Inherit the client's keep-alive preference by default.
            keep_alive = request.keep_alive
        self._keep_alive = keep_alive

        resp_impl = self._resp_impl = ResponseImpl(
            request._writer,
            self._status,
            request.version,
            not keep_alive,
            self._reason)

        self._copy_cookies()

        if self._compression:
            if (self._compression_force or
                    'deflate' in request.headers.get(
                        hdrs.ACCEPT_ENCODING, '')):
                resp_impl.add_compression_filter()

        if self._chunked:
            resp_impl.enable_chunked_encoding()
            if self._chunk_size:
                resp_impl.add_chunking_filter(self._chunk_size)

        headers = self.headers.items()
        for key, val in headers:
            resp_impl.add_header(key, val)

        resp_impl.send_headers()
        return resp_impl

    def write(self, data):
        assert isinstance(data, (bytes, bytearray, memoryview)), \
            'data argument must be byte-ish (%r)' % type(data)

        if self._eof_sent:
            raise RuntimeError("Cannot call write() after write_eof()")
        if self._resp_impl is None:
            raise RuntimeError("Cannot call write() before start()")

        if data:
            return self._resp_impl.write(data)
        else:
            return ()

    @asyncio.coroutine
    def drain(self):
        # Flow control: wait for the transport buffer to flush.
        if self._resp_impl is None:
            raise RuntimeError("Response has not been started")
        yield from self._resp_impl.transport.drain()

    @asyncio.coroutine
    def write_eof(self):
        if self._eof_sent:
            return
        if self._resp_impl is None:
            raise RuntimeError("Response has not been started")

        yield from self._resp_impl.write_eof()
        self._eof_sent = True

    def __repr__(self):
        if self.started:
            info = "{} {} ".format(self._req.method, self._req.path)
        else:
            info = "not started"
        return "<{} {} {}>".format(self.__class__.__name__,
                                   self.reason, info)


class Response(StreamResponse):
    """Fixed-body HTTP response; the whole body is known up front and sent
    by write_eof()."""

    def __init__(self, *, body=None, status=200,
                 reason=None, text=None, headers=None, content_type=None):
        super().__init__(status=status, reason=reason, headers=headers)

        if body is not None and text is not None:
            raise ValueError("body and text are not allowed together.")

        if text is not None:
            if hdrs.CONTENT_TYPE not in self.headers:
                # fast path for filling headers
                if not isinstance(text, str):
                    raise TypeError('text argument must be str (%r)' %
                                    type(text))
                if content_type is None:
                    content_type = 'text/plain'
                self.headers[hdrs.CONTENT_TYPE] = (
                    content_type + '; charset=utf-8')
                self._content_type = content_type
                self._content_dict = {'charset': 'utf-8'}
                self.body = text.encode('utf-8')
            else:
                self.text = text
        else:
            if content_type:
                self.content_type = content_type
            if body is not None:
                self.body = body
            else:
                self.body = None

    @property
    def body(self):
        return self._body

    @body.setter
    def body(self, body):
        if body is not None and not isinstance(body, bytes):
            raise TypeError('body argument must be bytes (%r)' % type(body))
        self._body = body
        # Keep Content-Length in sync with the stored body.
        if body is not None:
            self.content_length = len(body)
        else:
            self.content_length = 0

    @property
    def text(self):
        return self._body.decode(self.charset or 'utf-8')

    @text.setter
    def text(self, text):
        if text is not None and not isinstance(text, str):
            raise TypeError('text argument must be str (%r)' % type(text))

        if self.content_type == 'application/octet-stream':
            self.content_type = 'text/plain'
        if self.charset is None:
            self.charset = 'utf-8'

        self.body = text.encode(self.charset)

    @asyncio.coroutine
    def write_eof(self):
        body = self._body
        if body is not None:
            self.write(body)
        yield from super().write_eof()
{ "repo_name": "lfblogs/aio2py", "path": "aio2py/required/aiohttp/web_reqrep.py", "copies": "1", "size": "20802", "license": "apache-2.0", "hash": -6710569348961647000, "line_mean": 29.7267355982, "line_max": 84, "alpha_frac": 0.5543697721, "autogenerated": false, "ratio": 4.427841634738186, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5482211406838187, "avg_score": null, "num_lines": null }
__all__ = ['require']

import subprocess, os, codecs, glob
from .evaljs import translate_js
import six

# One-time node/npm bootstrap guard (see _init()).
DID_INIT = False
DIRNAME = os.path.dirname(os.path.abspath(__file__))
# Translated modules are cached here as importable .py files.
PY_NODE_MODULES_PATH = os.path.join(DIRNAME, 'py_node_modules')


def _init():
    """Check that node is installed and install the babel/browserify
    toolchain into this package's directory (runs at most once).

    NOTE(review): error handling relies on `assert`, which is stripped
    under `python -O`; the shell commands embed DIRNAME via repr().
    """
    global DID_INIT
    if DID_INIT:
        return
    assert subprocess.call(
        'node -v', shell=True, cwd=DIRNAME
    ) == 0, 'You must have node installed! run: brew install node'
    assert subprocess.call(
        'cd %s;npm install babel-core babel-cli babel-preset-es2015 babel-polyfill babelify browserify'
        % repr(DIRNAME),
        shell=True,
        cwd=DIRNAME) == 0, 'Could not link required node_modules'
    DID_INIT = True


# JS helper injected before the module source: stashes the module's exports
# on Object.prototype._fake_exports so they survive the bundling step.
ADD_TO_GLOBALS_FUNC = '''
;function addToGlobals(name, obj) {
    if (!Object.prototype.hasOwnProperty('_fake_exports')) {
        Object.prototype._fake_exports = {};
    }
    Object.prototype._fake_exports[name] = obj;
};
'''
# subprocess.call("""node -e 'require("browserify")'""", shell=True)
# JS helper appended after bundling: retrieves the stashed exports by name.
GET_FROM_GLOBALS_FUNC = '''
;function getFromGlobals(name) {
    if (!Object.prototype.hasOwnProperty('_fake_exports')) {
        throw Error("Could not find any value named "+name);
    }
    if (Object.prototype._fake_exports.hasOwnProperty(name)) {
        return Object.prototype._fake_exports[name];
    } else {
        throw Error("Could not find any value named "+name);
    }
};
'''


def require(module_name, include_polyfill=False, update=False):
    """Import an npm module into Python.

    Installs the module with npm, bundles it with browserify, transpiles it
    to ES5 with babel, translates the bundle to Python via translate_js(),
    caches the result under py_node_modules/, then executes it and returns
    the module's exports converted with .to_py().

    NOTE(review): executes shell commands built from module_name and
    exec()s generated code — only call with trusted module names.
    """
    assert isinstance(module_name, str), 'module_name must be a string!'
    py_name = module_name.replace('-', '_')
    module_filename = '%s.py' % py_name
    # for scoped packages ('@scope/pkg') keep only the final path component
    var_name = py_name.rpartition('/')[-1]
    if not os.path.exists(os.path.join(PY_NODE_MODULES_PATH,
                                       module_filename)) or update:
        _init()
        in_file_name = 'tmp0in439341018923js2py.js'
        out_file_name = 'tmp0out439341018923js2py.js'
        code = ADD_TO_GLOBALS_FUNC
        if include_polyfill:
            code += "\n;require('babel-polyfill');\n"
        code += """
        var module_temp_love_python = require(%s);
        addToGlobals(%s, module_temp_love_python);
        """ % (repr(module_name), repr(module_name))
        with open(os.path.join(DIRNAME, in_file_name), 'wb') as f:
            f.write(code.encode('utf-8') if six.PY3 else code)

        pkg_name = module_name.partition('/')[0]
        # make sure the module is installed
        assert subprocess.call(
            'cd %s;npm install %s' % (repr(DIRNAME), pkg_name),
            shell=True,
            cwd=DIRNAME
        ) == 0, 'Could not install the required module: ' + pkg_name

        # convert the module
        assert subprocess.call(
            '''node -e "(require('browserify')('./%s').bundle(function (err,data) {fs.writeFile('%s', require('babel-core').transform(data, {'presets': require('babel-preset-es2015')}).code, ()=>{});}))"'''
            % (in_file_name, out_file_name),
            shell=True,
            cwd=DIRNAME,
        ) == 0, 'Error when converting module to the js bundle'

        os.remove(os.path.join(DIRNAME, in_file_name))
        with codecs.open(os.path.join(DIRNAME, out_file_name), "r",
                         "utf-8") as f:
            js_code = f.read()
        os.remove(os.path.join(DIRNAME, out_file_name))

        js_code += GET_FROM_GLOBALS_FUNC
        js_code += ';var %s = getFromGlobals(%s);%s' % (
            var_name, repr(module_name), var_name)
        print('Please wait, translating...')
        py_code = translate_js(js_code)
        dirname = os.path.dirname(
            os.path.join(PY_NODE_MODULES_PATH, module_filename))
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        with open(os.path.join(PY_NODE_MODULES_PATH, module_filename),
                  'wb') as f:
            f.write(py_code.encode('utf-8') if six.PY3 else py_code)
    else:
        # cached translation exists: just load it
        with codecs.open(
                os.path.join(PY_NODE_MODULES_PATH, module_filename), "r",
                "utf-8") as f:
            py_code = f.read()
    context = {}
    # run the translated module; translate_js emits a 'var' scope mapping
    exec (py_code, context)
    return context['var'][var_name].to_py()
{ "repo_name": "pannal/Subliminal.bundle", "path": "Contents/Libraries/Shared/js2py/node_import.py", "copies": "4", "size": "4163", "license": "mit", "hash": -9025132002234256000, "line_mean": 35.8407079646, "line_max": 206, "alpha_frac": 0.5851549363, "autogenerated": false, "ratio": 3.3900651465798046, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5975220082879805, "avg_score": null, "num_lines": null }
__all__ = ["require"]

import os
import sys
import subprocess
import time
import json
import traceback

###

# Major.minor as a float, e.g. 3.9 -> 3.9; used only for the py2/py3 switch.
version_info = sys.version_info[:2]
PYTHON_VERSION = version_info[0] + float("." + str(version_info[1]))

# TODO: make this nice
if PYTHON_VERSION >= 3:
    from urllib.request import urlopen
    decode_if_py3 = lambda x: x.decode("utf-8")
    encode_if_py3 = lambda x: bytes(x, "utf-8")
    string_type = str
else:
    from urllib2 import urlopen
    decode_if_py3 = lambda x: x
    encode_if_py3 = lambda x: x
    string_type = unicode

# Log-level constants used as keys into the Logging format tables.
DEBUG, INFO, WARNING, ERROR, GOOD_NEWS = range(5)
PLATFORM = "windows" if sys.platform == "win32" else "unix"


def indent(src, l=4):
    """Indent every line of *src* by *l* spaces (trailing whitespace stripped)."""
    return "\n".join(["{}{}".format(l*" ", s.rstrip()) for s in src.split("\n")])


class Logging():
    """Tiny console logger with ANSI colours on unix and plain text on windows."""

    def __init__(self, user="REX"):
        self.user = user
        # Optional callables invoked for every message in addition to stdout.
        self.handlers = []
        # ANSI-coloured formats (unix terminals).
        self.formats = {
            INFO: "INFO {0:<15} {1}",
            DEBUG: "\033[34mDEBUG {0:<15} {1}\033[0m",
            WARNING: "\033[33mWARNING\033[0m {0:<15} {1}",
            ERROR: "\033[31mERROR\033[0m {0:<15} {1}",
            GOOD_NEWS: "\033[32mGOOD NEWS\033[0m {0:<15} {1}"
        }
        # Colour-free formats for windows consoles.
        self.formats_win = {
            DEBUG: "DEBUG {0:<10} {1}",
            INFO: "INFO {0:<10} {1}",
            WARNING: "WARNING {0:<10} {1}",
            ERROR: "ERROR {0:<10} {1}",
            GOOD_NEWS: "GOOD NEWS {0:<10} {1}"
        }

    def add_handler(self, handler):
        """Register *handler* to receive every logged message."""
        self.handlers.append(handler)

    def _send(self, msgtype, *args, **kwargs):
        """Format and emit a message of *msgtype*; args are stringified and joined."""
        message = " ".join([str(arg) for arg in args])
        # NOTE(review): `user` is computed from kwargs but handlers below are
        # called with self.user instead — looks unintentional; confirm.
        user = kwargs.get("user", self.user)
        if kwargs.get("handlers", True):
            for handler in self.handlers:
                handler(user=self.user, message_type=msgtype, message=message)
        if PLATFORM == "unix":
            try:
                print (self.formats[msgtype].format(user, message))
            except:
                # Fall back to raw bytes when formatting/encoding fails.
                print (message.encode("utf-8"))
        else:
            try:
                print (self.formats_win[msgtype].format(user, message))
            except:
                print (message.encode("utf-8"))

    def debug(self, *args, **kwargs):
        self._send(DEBUG, *args, **kwargs)

    def info(self, *args, **kwargs):
        self._send(INFO, *args, **kwargs)

    def warning(self, *args, **kwargs):
        self._send(WARNING, *args, **kwargs)

    def error(self, *args, **kwargs):
        self._send(ERROR, *args, **kwargs)

    def goodnews(self, *args, **kwargs):
        self._send(GOOD_NEWS, *args, **kwargs)


# Module-level logger instance shared by the functions below.
logging = Logging()


def log_traceback(message="Exception!", **kwargs):
    """Log *message* followed by the current exception traceback; return the text."""
    tb = traceback.format_exc()
    msg = "{}\n\n{}".format(message, indent(tb))
    logging.error(msg, **kwargs)
    return msg


def critical_error(msg, **kwargs):
    """Log *msg* as an error and terminate the process with exit code 1."""
    logging.error(msg, **kwargs)
    logging.debug("Critical error. Terminating program.")
    sys.exit(1)


class Repository(object):
    """A vendored git repository described by one entry of the rex.json manifest."""

    def __init__(self, parent, url, **kwargs):
        self.parent = parent
        self.url = url
        # Free-form per-repo settings from the manifest (e.g. python-path).
        self.settings = kwargs
        self.base_name = os.path.basename(url)
        # Checkout location under the parent's vendor directory.
        self.path = os.path.join(self.parent.vendor_dir, self.base_name)

    def get(self, key, default=None):
        return self.settings.get(key, default)

    def __getitem__(self, key):
        return self.settings[key]

    def __repr__(self):
        return "vendor module '{}'".format(self.base_name)


class Rex(object):
    """Vendoring driver: reads rex.json, clones/updates repos into ./vendor.

    Constructing an instance immediately runs self_update() and main() —
    i.e. it has network and filesystem side effects.
    """

    def __init__(self):
        self.app_dir = os.path.abspath(os.getcwd())
        self.vendor_dir = os.path.join(self.app_dir, "vendor")
        self.manifest_path = os.path.join(self.app_dir, "rex.json")
        self.self_update()
        self.main()

    @property
    def force_update(self):
        """True when the process was started with --rex-update."""
        return "--rex-update" in sys.argv

    def chdir(self, path):
        os.chdir(path)

    @property
    def repos(self):
        """Lazily parsed list of Repository objects from the manifest (cached)."""
        if not hasattr(self, "_repos"):
            if not os.path.exists(self.manifest_path):
                self._repos = []
            else:
                try:
                    self.manifest = json.load(open(self.manifest_path))
                    if not self.manifest:
                        # NOTE(review): returns without caching, so an empty
                        # manifest is re-parsed on every access.
                        return []
                    self._repos = []
                    for repo_url in self.manifest.keys():
                        repo_settings = self.manifest[repo_url]
                        repo = Repository(self, repo_url, **repo_settings)
                        self._repos.append(repo)
                except Exception:
                    log_traceback()
                    critical_error("Unable to load rex manifest. Exiting")
                    self._repos = []
        return self._repos

    def self_update(self):
        """Replace rex.py on disk with the latest published version.

        Only runs with --rex-update, and never on machines marked with a
        .rex_devel file.
        """
        if not self.force_update:
            return
        if os.path.exists(".rex_devel"):
            logging.debug("This is a development machine. Skipping rex auto update.")
            return
        response = urlopen("https://imm.cz/rex.py")
        new_rex = decode_if_py3(response.read())
        old_rex = open("rex.py").read()
        if new_rex != old_rex:
            logging.info("Updating REX core")
            with open("rex.py", "w") as f:
                f.write(new_rex)
        else:
            logging.info("REX is up to date")

    def main(self):
        """Update every manifest repo, then run its post-install hook."""
        for repo in self.repos:
            try:
                # post_install only runs when update() succeeded.
                self.update(repo) and self.post_install(repo)
            except Exception:
                log_traceback()
        self.chdir(self.app_dir)
        if self.force_update:
            logging.goodnews("Vendor modules updated")
            sys.exit(0)

    def update(self, repo):
        """Clone *repo* if missing; `git pull` it when --rex-update is given.

        Returns True on success; terminates the process via critical_error()
        when git fails.
        """
        if not os.path.exists(self.vendor_dir):
            os.makedirs(self.vendor_dir)
        if os.path.exists(repo.path):
            if self.force_update:
                logging.info("Updating {}".format(repo))
                self.chdir(repo.path)
                cmd = ["git", "pull"]
            else:
                # Already checked out and no update requested: nothing to do.
                return True
        else:
            logging.info("Downloading {}".format(repo))
            self.chdir(self.vendor_dir)
            cmd = ["git", "clone", repo.url]
        p = subprocess.Popen(cmd)
        # Busy-wait for git to finish.
        while p.poll() == None:
            time.sleep(.1)
        if p.returncode:
            critical_error("Unable to update {}".format(repo))
        return True

    def post_install(self, repo):
        """Prepend the repo path to sys.path when the manifest asks for it."""
        if (repo.get("python-path") or repo.get("python_path")) and not repo.path in sys.path:
            sys.path.insert(0, repo.path)


# Importing this module runs the whole vendoring pass (see Rex.__init__).
rex = Rex()


def require(url, **kwargs):
    """Ensure the repo at *url* is vendored and on sys.path; kwargs become settings."""
    repo = Repository(rex, url, **kwargs)
    return rex.update(repo) and rex.post_install(repo)
{ "repo_name": "martastain/liver", "path": "rex.py", "copies": "1", "size": "6749", "license": "mit", "hash": 6419277168069939000, "line_mean": 29.1294642857, "line_max": 94, "alpha_frac": 0.5291154245, "autogenerated": false, "ratio": 3.6560130010834238, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.46851284255834236, "avg_score": null, "num_lines": null }
__all__ = ['require']

import subprocess, os, codecs, glob
from .evaljs import translate_js, DEFAULT_HEADER
from .translators.friendly_nodes import is_valid_py_name
import six
import tempfile
import hashlib
import random

# One-shot guard: the node/npm toolchain is checked and provisioned at most
# once per process.
DID_INIT = False
# Fresh scratch directory per process for npm installs and JS bundles.
DIRNAME = tempfile.mkdtemp()
# Persistent cache directory with one translated .py file per npm module.
PY_NODE_MODULES_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'py_node_modules')


def _init():
    """Verify that node is installed and install the babel/browserify toolchain.

    Runs at most once per process (guarded by ``DID_INIT``).  Raises
    ``AssertionError`` when node is missing or ``npm install`` fails.
    """
    global DID_INIT
    if DID_INIT:
        return
    assert subprocess.call(
        'node -v', shell=True, cwd=DIRNAME
    ) == 0, 'You must have node installed! run: brew install node'
    assert subprocess.call(
        'cd %s;npm install babel-core babel-cli babel-preset-es2015 babel-polyfill babelify browserify browserify-shim' % repr(
            DIRNAME),
        shell=True,
        cwd=DIRNAME) == 0, 'Could not link required node_modules'
    DID_INIT = True


# JS prologue injected before the bundled module: stashes the module's export
# object on a well-known Object.prototype property so it survives bundling.
ADD_TO_GLOBALS_FUNC = '''
;function addToGlobals(name, obj) {
    if (!Object.prototype.hasOwnProperty('_fake_exports')) {
        Object.prototype._fake_exports = {};
    }
    Object.prototype._fake_exports[name] = obj;
};
'''
# subprocess.call("""node -e 'require("browserify")'""", shell=True)
# JS epilogue appended after the bundle: retrieves the export object stored by
# addToGlobals, or throws when the name is unknown.
GET_FROM_GLOBALS_FUNC = '''
;function getFromGlobals(name) {
    if (!Object.prototype.hasOwnProperty('_fake_exports')) {
        throw Error("Could not find any value named "+name);
    }
    if (Object.prototype._fake_exports.hasOwnProperty(name)) {
        return Object.prototype._fake_exports[name];
    } else {
        throw Error("Could not find any value named "+name);
    }
};
'''


def _get_module_py_name(module_name):
    """Map an npm module name to a Python-friendly name ('-' -> '_')."""
    return module_name.replace('-', '_')


def _get_module_var_name(module_name):
    """Return the Python variable name for *module_name*.

    Takes the last path component (scoped packages keep only the package
    part) and validates it is a legal Python identifier.

    :raises ValueError: when the derived name is not a valid identifier.
    """
    cand = _get_module_py_name(module_name).rpartition('/')[-1]
    if not is_valid_py_name(cand):
        raise ValueError(
            "Invalid Python module name %s (generated from %s). Unsupported/invalid npm module specification?" % (
                repr(cand), repr(module_name)))
    return cand


def _get_and_translate_npm_module(module_name, include_polyfill=False, update=False, maybe_version_str=""):
    """Install, bundle, transpile and translate *module_name*; return Python source.

    The result is cached under ``PY_NODE_MODULES_PATH``; pass ``update=True``
    to force regeneration.  ``maybe_version_str`` pins the npm version
    (``pkg@version``).

    :raises AssertionError: when node/npm steps fail.
    :raises RuntimeError: when browserify produced an implausibly small bundle.
    """
    assert isinstance(module_name, str), 'module_name must be a string!'
    py_name = _get_module_py_name(module_name)
    module_filename = '%s.py' % py_name
    var_name = _get_module_var_name(module_name)
    if not os.path.exists(os.path.join(PY_NODE_MODULES_PATH, module_filename)) or update:
        _init()
        # Hash + random suffix make the temp names collision-resistant across
        # concurrent translations.
        module_hash = hashlib.sha1(module_name.encode("utf-8")).hexdigest()[:15]
        version = random.randrange(10000000000000)
        in_file_name = 'in_%s_%d.js' % (module_hash, version)
        out_file_name = 'out_%s_%d.js' % (module_hash, version)
        code = ADD_TO_GLOBALS_FUNC
        if include_polyfill:
            code += "\n;require('babel-polyfill');\n"
        code += """
        var module_temp_love_python = require(%s);
        addToGlobals(%s, module_temp_love_python);
        """ % (repr(module_name), repr(module_name))
        with open(os.path.join(DIRNAME, in_file_name), 'wb') as f:
            f.write(code.encode('utf-8') if six.PY3 else code)
        # npm installs by package name only (strip any sub-path), optionally
        # pinned to a version.
        pkg_name = module_name.partition('/')[0]
        if maybe_version_str:
            pkg_name += '@' + maybe_version_str
        # make sure the module is installed
        assert subprocess.call(
            'cd %s;npm install %s' % (repr(DIRNAME), pkg_name),
            shell=True,
            cwd=DIRNAME
        ) == 0, 'Could not install the required module: ' + pkg_name
        # convert the module: browserify bundles it, babel transpiles to ES5.
        assert subprocess.call(
            '''node -e "(require('browserify')('./%s').bundle(function (err,data) {if (err) {console.log(err);throw new Error(err);};fs.writeFile('%s', require('babel-core').transform(data, {'presets': require('babel-preset-es2015')}).code, ()=>{});}))"''' % (
                in_file_name, out_file_name),
            shell=True,
            cwd=DIRNAME,
        ) == 0, 'Error when converting module to the js bundle'
        os.remove(os.path.join(DIRNAME, in_file_name))
        with codecs.open(os.path.join(DIRNAME, out_file_name), "r", "utf-8") as f:
            js_code = f.read()
        print("Bundled JS library dumped at: %s" % os.path.join(DIRNAME, out_file_name))
        # Sanity check: browserify failures can leave a near-empty file.
        if len(js_code) < 50:
            raise RuntimeError("Candidate JS bundle too short - likely browserify issue.")
        js_code += GET_FROM_GLOBALS_FUNC
        # Bind the module export to a JS variable so the translated Python
        # exposes it under context['var'][var_name].
        js_code += ';var %s = getFromGlobals(%s);%s' % (
            var_name, repr(module_name), var_name)
        print('Please wait, translating...')
        py_code = translate_js(js_code)
        dirname = os.path.dirname(
            os.path.join(PY_NODE_MODULES_PATH, module_filename))
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        with open(os.path.join(PY_NODE_MODULES_PATH, module_filename), 'wb') as f:
            f.write(py_code.encode('utf-8') if six.PY3 else py_code)
    else:
        # Cached translation found: just load it.
        with codecs.open(
                os.path.join(PY_NODE_MODULES_PATH, module_filename),
                "r", "utf-8") as f:
            py_code = f.read()
    return py_code


def require(module_name, include_polyfill=True, update=False, context=None):
    """ Installs the provided npm module, exports a js bundle via browserify, converts to ECMA 5.1 via babel and finally translates the generated JS bundle to Python via Js2Py. Returns a pure python object that behaves like the installed module. Nice!

    :param module_name: Name of the npm module to require. For example 'esprima'. Supports specific versions via @ specification. Eg: 'crypto-js@3.3'.
    :param include_polyfill: Whether the babel-polyfill should be included as part of the translation. May be needed for some modules that use unsupported features of JS6 such as Map or typed arrays.
    :param update: Whether to force update the translation. Otherwise uses a cached version if exists.
    :param context: Optional context in which the translated module should be executed in. If provided, the header (js2py imports) will be skipped as it is assumed that the context already has all the necessary imports.
    :return: The JsObjectWrapper containing the translated module object. Can be used like a standard python object.
    """
    # Split an optional '@version' suffix; padding with '@@@' guarantees the
    # split always yields at least two elements.
    module_name, maybe_version = (module_name+"@@@").split('@')[:2]
    py_code = _get_and_translate_npm_module(module_name, include_polyfill=include_polyfill, update=update,
                                            maybe_version_str=maybe_version)
    # this is a bit hacky but we need to strip the default header from the generated code...
    if context is not None:
        if not py_code.startswith(DEFAULT_HEADER):
            # new header version? retranslate...
            assert not update, "Unexpected header."
            py_code = _get_and_translate_npm_module(module_name, include_polyfill=include_polyfill, update=True)
            assert py_code.startswith(DEFAULT_HEADER), "Unexpected header."
        py_code = py_code[len(DEFAULT_HEADER):]
    context = {} if context is None else context
    exec(py_code, context)
    return context['var'][_get_module_var_name(module_name)].to_py()
{ "repo_name": "PiotrDabkowski/Js2Py", "path": "js2py/node_import.py", "copies": "1", "size": "7224", "license": "mit", "hash": -2466098650793958000, "line_mean": 42.2574850299, "line_max": 256, "alpha_frac": 0.6290143965, "autogenerated": false, "ratio": 3.6083916083916083, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9730797593266383, "avg_score": 0.0013216823250452835, "num_lines": 167 }
__all__ = ['resample']

import itertools
import logging

try:
    from tqdm import tqdm
except ImportError:
    def tqdm(x):
        """Dummy replacement for progress bar."""
        # BUG FIX: the stub must return the wrapped iterable; the original
        # evaluated `x` and implicitly returned None, so iterating
        # `tqdm(range(samples))` raised TypeError whenever tqdm was absent.
        return x

from miles import (Database, Distribution, Matrices, TransitionKernel, bold, save_distributions)  # noqa: E501


def resample(simulation, samples, database_dir, stationary_distributions,
             transition_matrix, lag_time_matrix, stationary_flux,
             local_mfpts, stationary_probability):
    """Resample the transition kernel of *simulation* and save the results.

    Builds a transition kernel from the simulation's database, draws
    *samples* new transitions per milestone from the kernel-weighted mixture
    of first-hitting-point distributions, and writes the resampled
    distributions, matrices and database to the given output locations.

    :param simulation: simulation object providing ``milestones``,
        ``reactant_distribution`` and ``database``.
    :param samples: number of transitions to draw per initial milestone.
    :param database_dir: directory where the resampled database is saved.
    :param stationary_distributions: output location for the resampled
        stationary distributions.
    :param transition_matrix: output location for the transition matrix.
    :param lag_time_matrix: output location for the lag-time matrix.
    :param stationary_flux: output location for the stationary flux vector.
    :param local_mfpts: output location for the local mean first passage times.
    :param stationary_probability: output location for the stationary
        probability vector.
    """
    logging.info('Running resampling...')

    milestones = simulation.milestones
    reactant_distribution = simulation.reactant_distribution
    database = simulation.database

    # Kernel estimated from the existing database; K holds the transition
    # probabilities between milestones.
    kernel = TransitionKernel(database)
    kernel.compute_distributions()
    K = kernel.matrices.K

    new_database = Database(database.anchors)
    new_matrices = Matrices(simulation.milestones)
    new_kernel = TransitionKernel(new_matrices)

    # Collect (initial-index, final-index, probability, distribution) tuples
    # for every observed transition pair.
    tuples = []
    for final in kernel.keys():
        for initial in kernel[final].keys():
            i, j = initial.index, final.index
            dist = kernel[final][initial]
            tuples.append((i, j, K[i, j], dist))

    # Group by initial milestone so each group yields the K-weighted mixture
    # of outgoing distributions (groupby requires the preceding sort).
    sorted_tuples = sorted(tuples, key=lambda x: x[0])
    grouped_tuples = itertools.groupby(sorted_tuples, lambda x: x[0])

    for initial, grouped in grouped_tuples:
        distribution = Distribution()
        for t in list(grouped):
            distribution += t[2] * t[3]

        logging.info('Sampling from Milestone {}.'.format(initial))

        # Resample kernel obtained from database and copy the results
        # into new_kernel.
        for _ in tqdm(range(samples)):
            transition = distribution.sample()
            new_kernel.update(transition)
            new_database.update(transition)

            # Transitions reaching a product milestone are re-injected at the
            # reactant distribution to keep the process recurrent.
            if transition.final_milestone in milestones.products:
                new_transition = reactant_distribution.sample()
                new_transition.lag_time = 0.0
                new_transition.initial_milestone = transition.final_milestone
                new_database.update(new_transition)
                new_kernel.update(new_transition)

    new_distributions = new_kernel.compute_distributions()
    save_distributions(new_distributions, stationary_distributions)
    new_kernel.matrices.save(transition_matrix, lag_time_matrix,
                             stationary_flux, local_mfpts,
                             stationary_probability)

    mfpt = new_kernel.matrices.mfpt
    logging.info(bold('Mean first passage time: {} units of '
                      'time.'.format(mfpt)))

    new_database.save(database_dir)
    logging.info('Done.')
{ "repo_name": "clsb/miles", "path": "miles/resample.py", "copies": "1", "size": "2650", "license": "mit", "hash": 7721327671290658000, "line_mean": 33.4155844156, "line_max": 110, "alpha_frac": 0.64, "autogenerated": false, "ratio": 4.260450160771704, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5400450160771704, "avg_score": null, "num_lines": null }
"""All response related utility functions.""" import logging try: from http import client as httplib except ImportError: # pragma: no cover import httplib import json import uuid import six import halogen from flask import request, abort, current_app, make_response try: from collections import OrderedDict except ImportError: # pragma: no cover from ordereddict import OrderedDict MIMETYPE_SERIALIZER = OrderedDict([ ( 'application/hal+json', lambda context: json.dumps(context, indent=4, sort_keys=True) ), ]) def api_response(content, status_code=None, headers=None): """API response handler. :note: The contents are encoded according to the request's accepted mimetype. :param content: Response body. :param status_code: HTTP status code. :param headers: Headers. :return: A tuple with `contents`, `status_code`, `headers`. """ mimetype = get_preferred_accepted_mimetype() all_headers = set_basic_headers(request, mimetype=mimetype) if headers: all_headers.update(headers) serializer = MIMETYPE_SERIALIZER[mimetype] body = serializer(content) if content is not None else '' return body, status_code, all_headers def get_preferred_accepted_mimetype(): """Return the request's accepted mimetype. If the request accepts more than one, it returns the one that appears first on MIMETYPE_SERIALIZER. """ for mimetype, serializer in MIMETYPE_SERIALIZER.items(): if mimetype in request.accept_mimetypes: return mimetype abort( httplib.NOT_ACCEPTABLE, u'The only accepted mimetypes are {0}'.format(', '.join(MIMETYPE_SERIALIZER.keys())) ) def get_current_profile_link(*args, **kwargs): """Get the profile link from the current endpoint.""" try: schema = current_app.view_functions[request.endpoint].schema profile = next((attr for attr in schema.__attrs__ if attr.name == 'profile')) return profile.serialize(None)['href'] except (KeyError, AttributeError, StopIteration): return None def get_error_message(exception): """Get the exception message. :param exception: Exception object. 
:return: Exception message or description. :raises AttributeError: when the message is not available. """ message = getattr(exception, 'message', None) if not message: message = exception.description if not isinstance(message, six.string_types): message = u'{0}: {1}'.format(exception.__class__.__name__, message) return message class VNDError(halogen.Schema): """An exception that can be serialized to the hal+json format.""" message = halogen.Attr(required=False, attr=get_error_message) logref = halogen.Attr(attr=lambda error: uuid.uuid4().hex) help = halogen.Link(required=False, attr=get_current_profile_link) # Extended fields type = halogen.Attr(required=False, attr='error_type', default='UNKNOWN_ERROR') """Error type identifier.""" details = halogen.Attr(required=False, attr='data') """Optional additional error details.""" def vnd_error_response(exception, status_code=None, headers=None): """API error response handler. If there is an uncaught internal server error it is logged and sent to sentry. It is not shown to the users and is not passed along, in essence: it disappears hereafter. :param exception: Exception raised in the request/response cycle. :param status_code: HTTP status code. :param headers: Extra response headers. :return: vnd.error response with the error details. 
""" all_headers = set_basic_headers(request, mimetype='application/hal+json') if headers: all_headers.update(headers) if not hasattr(exception, 'message'): try: exception.message = exception.args[0] except IndexError: exception.message = '' body = VNDError.serialize(exception) if status_code == httplib.INTERNAL_SERVER_ERROR or current_app.config['SENTRY_CAPTURE_USER_ERRORS']: sentry = getattr(current_app, 'sentry', None) if sentry: sentry.captureException() if status_code == httplib.INTERNAL_SERVER_ERROR: log_exception = True exc_info = True level = logging.ERROR else: log_exception = current_app.config['LOG_USER_ERRORS'] exc_info = current_app.config['LOG_USER_ERROR_EXC_INFO'] level = current_app.config['LOG_USER_ERROR_LEVEL'] if log_exception: current_app.logger.log(level, u"{type} {logref} {body}".format(body=body, **body), exc_info=exc_info) response = make_response(json.dumps(body), status_code, all_headers) return response def set_basic_headers(request, mimetype=None): """Set the basic headers for API response. :param methods: a wildcard or a list of allowed HTTP methods. :param mimetype: a string representing the mimetype to use to render text. :return: a dictionary with basic headers. """ headers = { 'Access-Control-Allow-Headers': ['Content-Type', 'Authorization', 'Accept', 'X-Requested-With'], 'Access-Control-Expose-Headers': ['Link'], } if mimetype: headers['Content-Type'] = mimetype url_adapter = current_app.url_map.bind( server_name=current_app.config['SERVER_NAME'] or request.environ['REMOTE_ADDR']) methods = url_adapter.allowed_methods(request.path) headers['Access-Control-Allow-Methods'] = methods headers['Allow'] = methods # check the referer because of PREFLIGHT OPTIONS or other AJAX requests referer = request.headers.get('Referer') if referer: origin = request.headers.get('Origin') headers['Access-Control-Allow-Credentials'] = 'true' else: origin = '*' headers['Access-Control-Allow-Origin'] = origin return headers
{ "repo_name": "paylogic/atilla", "path": "atilla/response.py", "copies": "1", "size": "5917", "license": "mit", "hash": -961126353736459800, "line_mean": 31.6906077348, "line_max": 109, "alpha_frac": 0.6782153118, "autogenerated": false, "ratio": 4.033401499659169, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5211616811459169, "avg_score": null, "num_lines": null }
__all__ = ['restoredata', 'storedata']

import pickle
import warnings

from IPython.core.getipython import get_ipython

# File used when the caller does not supply an explicit filename.
_DEFAULT_STATE_FILE = 'credolib_state.pickle'

# Workspace variables that make up the persisted credolib state.
_STATE_VARIABLES = (
    '_headers', '_loaders', '_data1d', '_data2d', '_data1dunited',
    'allsamplenames', '_headers_sample', 'badfsns', '_rowavg', 'saveto_dir',
    'badfsns_datcmp', 'auximages_dir', 'subtractedsamplenames', 'outputpath',
    'saveto_dir_rel', 'auximages_dir_rel', 'crd_prefix',
)


def storedata(filename=None):
    """Pickle the tracked credolib workspace variables to *filename*.

    Variables missing from the interactive namespace are skipped with a
    warning.  Defaults to ``'credolib_state.pickle'`` when no filename is
    given.
    """
    target = _DEFAULT_STATE_FILE if filename is None else filename
    workspace = get_ipython().user_ns
    state = {}
    for name in _STATE_VARIABLES:
        if name in workspace:
            state[name] = workspace[name]
        else:
            warnings.warn('Skipping storage of unavailable variable "%s"' % name)
    with open(target, 'wb') as outfile:
        pickle.dump(state, outfile)


def restoredata(filename=None):
    """Load workspace variables previously saved by :func:`storedata`.

    Every entry of the pickled dictionary is written back into the IPython
    interactive namespace.  Defaults to ``'credolib_state.pickle'`` when no
    filename is given.
    """
    source = _DEFAULT_STATE_FILE if filename is None else filename
    workspace = get_ipython().user_ns
    with open(source, 'rb') as infile:
        saved = pickle.load(infile)
    workspace.update(saved)
{ "repo_name": "awacha/credolib", "path": "credolib/persistence.py", "copies": "1", "size": "1246", "license": "bsd-3-clause", "hash": -3314040563476166000, "line_mean": 34.6, "line_max": 111, "alpha_frac": 0.5754414125, "autogenerated": false, "ratio": 3.63265306122449, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.470809447372449, "avg_score": null, "num_lines": null }
__all__ = ['retricon']

import codecs
import hashlib
import math
import struct

from PIL import Image, ImageDraw
from six import string_types


def brightness(r, g, b):
    """Perceived brightness of an RGB colour (channels in 0-255)."""
    return math.sqrt(.241 * r * r + .691 * g * g + .068 * b * b)


def key_brightness(a):
    """Sort key: brightness of an (r, g, b, ...) sequence."""
    return brightness(a[0], a[1], a[2])


def fixed_length_hash(buf, length):
    """Derive exactly *length* bytes (max 64) from *buf* via folded SHA-512.

    The hex digest is zero-padded to a multiple of the target width and the
    chunks are XOR-folded together.

    :raises Exception: when more than 64 bytes are requested.
    """
    if length > 64:
        msg = 'sha512 can only generate 64B of data: {}B requested'
        raise Exception(msg.format(length))
    hex_length = length * 2
    val = hashlib.sha512(buf).hexdigest()
    if len(val) % hex_length != 0:
        val += "0" * (hex_length - len(val) % hex_length)
    ii = hex_length
    ret = val[0:ii]
    # XOR-fold successive hex chunks into one chunk of the requested width.
    while ii < len(val):
        ret = format(int(ret, 16) ^ int(val[ii:ii + hex_length], 16), 'x')
        ii += hex_length
    if len(ret) < hex_length:
        ret = "0" * (hex_length - len(ret)) + ret
    return codecs.decode(ret, 'hex')


def id_hash(name, length, min_fill, max_fill, use_colors):
    """Hash *name* into *length* pixel bits (plus two colours when requested).

    A trailing salt byte is varied over 0..255 until the fraction of set
    pixels falls strictly between *min_fill* and *max_fill*.

    :return: dict with 'colors' ([darker, brighter] RGB triples or
        [None, None]) and 'pixels' (list of 0/1 of length *length*).
    :raises Exception: when no salt byte yields an acceptable fill ratio.
    """
    buf = name + " "
    buf_bytes = str.encode(buf)
    needed_bytes = int(math.ceil(length / 8.0))
    if use_colors:
        needed_bytes += 6  # two extra RGB triples
    for ii in range(0x100):
        # Replace the salt byte and re-hash.
        buf_bytes = buf_bytes[:-1] + struct.pack('B', ii)
        fp = fixed_length_hash(buf_bytes, needed_bytes)
        # Unpack to a list of ints (py2/py3 compatible); renamed the loop
        # variable so it no longer shadows the outer `ii`.
        fp = [struct.unpack('B', fp[k:k + 1])[0] for k in range(len(fp))]
        pixels = []
        set_pixels = 0
        if use_colors:
            # First six bytes become two colours, sorted dark-to-bright.
            colors = [fp[:3], fp[3:6]]
            colors = sorted(colors, key=key_brightness)
            fp = fp[6:]
        else:
            colors = [None, None]
        # Emit bits MSB-first until `length` pixels are collected.
        for byte, offset in ((_byte, _offset)
                             for _byte in fp
                             for _offset in range(7, -1, -1)):
            pixel_val = (byte >> offset) & 1
            pixels.append(pixel_val)
            if pixel_val == 1:
                set_pixels += 1
            if len(pixels) == length:
                break
        if min_fill * length < set_pixels < max_fill * length:
            return {
                'colors': colors,
                'pixels': pixels
            }
    raise Exception("String `{}` unhashable in single-byte search"
                    " space.".format(name))


def fill_pixels(raw, dimension):
    """Lay out the pixel bits row-major into a dimension x dimension grid."""
    pic = [None] * dimension
    for row in range(dimension):
        pic[row] = [None] * dimension
        for col in range(dimension):
            ii = row * dimension + col
            pic[row][col] = raw['pixels'][ii]
    return pic


def fill_pixels_vert_sym(raw, dimension):
    """Grid layout mirrored around the vertical (left/right) axis."""
    mid = int(math.ceil(dimension / 2.0))
    odd = dimension % 2 != 0
    pic = [None] * dimension
    for row in range(dimension):
        pic[row] = [None] * dimension
        for col in range(dimension):
            if col < mid:
                ii = row * mid + col
            else:
                # Mirror columns right of the middle back onto the left half.
                dist_middle = mid - col
                if odd:
                    dist_middle -= 1
                dist_middle = abs(dist_middle)
                ii = row * mid + mid - 1 - dist_middle
            pic[row][col] = raw['pixels'][ii]
    return pic


def fill_pixels_cent_sym(raw, dimension):
    """Grid layout mirrored around both axes (four-fold symmetry)."""
    mid = int(math.ceil(dimension / 2.0))
    odd = dimension % 2 != 0
    pic = [None] * dimension
    for row in range(dimension):
        pic[row] = [None] * dimension
        for col in range(dimension):
            if col >= mid:
                dist_middle = mid - col
                if odd:
                    dist_middle -= 1
                dist_middle = abs(dist_middle)
            if row < mid:
                if col < mid:
                    ii = (row * mid) + col
                else:
                    ii = (row * mid) + mid - 1 - dist_middle
            else:
                # Mirror rows below the middle back onto the top half.
                if col < mid:
                    ii = (dimension - 1 - row) * mid + col
                else:
                    ii = (dimension - 1 - row) * mid + mid - 1 - dist_middle
            pic[row][col] = raw['pixels'][ii]
    return pic


def fill_pixels_hori_sym(raw, dimension):
    """Grid layout mirrored around the horizontal (top/bottom) axis."""
    mid = int(math.ceil(dimension / 2.0))
    pic = [None] * dimension
    for row in range(dimension):
        pic[row] = [None] * dimension
        for col in range(dimension):
            if row < mid:
                ii = (row * dimension) + col
            else:
                ii = (dimension - 1 - row) * dimension + col
            pic[row][col] = raw['pixels'][ii]
    return pic


def retricon(name, tiles=5, tile_size=1, tile_color=0, bg_color=None,
             tile_padding=0, image_padding=0, min_fill=0.3, max_fill=0.90,
             vertical_sym=True, horizontal_sym=False, style=None, width=500):
    """Render a deterministic identicon for *name* as a PIL RGBA image.

    Colours may be given as an index into the two hash-derived colours (int),
    a hex string ('RRGGBB'), or an RGB(A) sequence.  *style* presets override
    the layout parameters.

    :param name: string to hash into the icon.
    :param style: one of 'github', 'gravatar', 'mono', 'mosaic', 'mini',
        'window', or None for manual parameters.
    :param width: output image width/height in pixels.
    :return: PIL.Image.Image of size (width, width).
    :raises ValueError: on an unknown *style*.
    """
    if style == 'github':
        tile_size = 70
        bg_color = "F0F0F0"
        tile_padding = -1
        image_padding = 35
        tiles = 5
        vertical_sym = True
        horizontal_sym = False
    elif style == 'gravatar':
        bg_color = 1
        tiles = 8
        vertical_sym = True
        horizontal_sym = False
    elif style == 'mono':
        bg_color = 'F0F0F0'
        tile_color = '000000'
        tiles = 6
        tile_size = 12
        tile_padding = -1
        image_padding = 6
        vertical_sym = True
        horizontal_sym = False
    elif style == 'mosaic':
        image_padding = 2
        tile_padding = 1
        tile_size = 16
        bg_color = 'F0F0F0'
        vertical_sym = True
        horizontal_sym = False
    elif style == 'mini':
        tile_size = 10
        tile_padding = 1
        tiles = 3
        bg_color = 0
        tile_color = 1
        vertical_sym = False
        horizontal_sym = False
    elif style == 'window':
        tile_color = [255, 255, 255, 255]
        bg_color = 0
        image_padding = 2
        tile_padding = 1
        tile_size = 16
        vertical_sym = True
        horizontal_sym = False
    elif style is not None:
        raise ValueError('Wrong parameter style')

    # Normalize colour specifications: hex strings become RGB lists.
    if bg_color is None:
        bg_color = [0, 0, 0, 0]
    if isinstance(bg_color, string_types):
        bg_color = [
            struct.unpack('B', codecs.decode(bg_color[0:2], 'hex'))[0],
            struct.unpack('B', codecs.decode(bg_color[2:4], 'hex'))[0],
            struct.unpack('B', codecs.decode(bg_color[4:6], 'hex'))[0]
        ]
    if tile_color is None:
        tile_color = [0, 0, 0, 0]
    if isinstance(tile_color, string_types):
        tile_color = [
            struct.unpack('B', codecs.decode(tile_color[0:2], 'hex'))[0],
            struct.unpack('B', codecs.decode(tile_color[2:4], 'hex'))[0],
            struct.unpack('B', codecs.decode(tile_color[4:6], 'hex'))[0]
        ]

    # An int colour is an index into the hash-derived palette, so the hash
    # must also produce colours.
    use_color = isinstance(bg_color, int) or isinstance(tile_color, int)

    tile_width = tile_size + tile_padding * 2
    canvas_size = tile_width * tiles + image_padding * 2
    # Draw at an integer multiple of the canvas, then downscale to `width`.
    draw_scale = max((width // canvas_size), 1)
    dimension = tiles
    mid = int(math.ceil(dimension / 2.0))

    # Symmetric layouts need fewer hash bits since mirrored cells repeat.
    if vertical_sym and horizontal_sym:
        raw = id_hash(name, mid * mid, min_fill, max_fill, use_color)
        pic = fill_pixels_cent_sym(raw, dimension)
    elif vertical_sym or horizontal_sym:
        raw = id_hash(name, mid * dimension, min_fill, max_fill, use_color)
        if vertical_sym:
            pic = fill_pixels_vert_sym(raw, dimension)
        else:
            pic = fill_pixels_hori_sym(raw, dimension)
    else:
        raw = id_hash(name, dimension * dimension, min_fill, max_fill,
                      use_color)
        pic = fill_pixels(raw, dimension)

    # Resolve palette-index colours now that the hash colours exist.
    if isinstance(bg_color, int):
        bg_color = raw['colors'][bg_color]
    if isinstance(tile_color, int):
        tile_color = raw['colors'][tile_color]

    im = Image.new('RGBA', (canvas_size * draw_scale,
                            canvas_size * draw_scale))
    draw = ImageDraw.Draw(im)
    draw.rectangle((0, 0, canvas_size * draw_scale, canvas_size * draw_scale),
                   fill=tuple(bg_color))

    for x in range(dimension):
        for y in range(dimension):
            if pic[y][x] == 1:
                x0 = (x * tile_width) + tile_padding + image_padding
                y0 = (y * tile_width) + tile_padding + image_padding
                draw.rectangle(
                    (x0 * draw_scale,
                     y0 * draw_scale,
                     (x0 + tile_size) * draw_scale - 1,
                     (y0 + tile_size) * draw_scale - 1),
                    fill=tuple(tile_color)
                )
    del draw

    # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # same filter and has been available since Pillow 2.7.
    out_img = im.resize((width, width), Image.LANCZOS)
    del im
    return out_img


def test():
    """Smoke test: render every style preset and write the PNGs to disk."""
    val = "test-1000"
    im = retricon(val)
    im.save('default.png', 'PNG')
    im = retricon(val, style='github')
    im.save('github.png', 'PNG')
    im = retricon(val, style='gravatar')
    im.save('gravatar.png', 'PNG')
    im = retricon(val, style='mono')
    im.save('mono.png', 'PNG')
    im = retricon(val, style='mini')
    im.save('mini.png', 'PNG')
    im = retricon(val, style='mosaic')
    im.save('mosaic.png', 'PNG')
    im = retricon(val, style='window')
    im.save('window.png', 'PNG')
    im = retricon(val, vertical_sym=True, horizontal_sym=False, tiles=14,
                  bg_color=0, tile_color=1, tile_padding=1, tile_size=10)
    im.save('vertical_sym.png', 'PNG')
    im = retricon(val, vertical_sym=False, horizontal_sym=True, tiles=14,
                  bg_color=0, tile_color=1, tile_padding=1, tile_size=10)
    im.save('horizontal_sym.png', 'PNG')
    im = retricon(val, vertical_sym=False, horizontal_sym=False, tiles=10,
                  bg_color=0, tile_color=1, tile_padding=1, tile_size=10)
    im.save('noSym.png', 'PNG')
    im = retricon(val, vertical_sym=True, horizontal_sym=True, tiles=44,
                  bg_color='00ff00', tile_color='ff0000', tile_padding=1,
                  tile_size=10, max_fill=.5)
    im.save('center_sym.png', 'PNG')
    im = retricon(val, bg_color=[255, 255, 0, 50], tile_color=None)
    im.save('test_trans.png', 'PNG')


if __name__ == "__main__":
    test()
{ "repo_name": "rphlo/py-retricon", "path": "retricon/retricon.py", "copies": "1", "size": "10011", "license": "mit", "hash": -5737924980381664000, "line_mean": 33.1672354949, "line_max": 78, "alpha_frac": 0.5213265408, "autogenerated": false, "ratio": 3.4966818023052744, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4518008343105274, "avg_score": null, "num_lines": null }
__all__ = ["RiakClient", "ThreadedRiakClient"] import json import Queue import threading from riakcached import exceptions from riakcached.pools import Urllib3Pool class RiakClient(object): """A Memcache like client to the Riak HTTP Interface """ __slots__ = [ "_serializers", "_deserializers", "base_url", "bucket", "pool", ] def __init__(self, bucket, pool=None): """Constructor for a new :class:`riakcached.clients.RiakClient` Pool - if no pool is provided then a default :class:`riakcached.pools.Urllib3Pool` is used :param bucket: The name of the Riak bucket to use :type bucket: str :param pool: The :class:`riakcached.pools.Pool` to use for requests :type pool: :class:`riakcached.pools.Pool` """ if pool is None: self.pool = Urllib3Pool() else: self.pool = pool self.bucket = bucket self.base_url = self.pool.url.rstrip("/") self._serializers = { "application/json": json.dumps, } self._deserializers = { "application/json": json.loads, } def add_serializer(self, content_type, serializer): """Add a content-type serializer to the client The `serializer` function should have the following definition:: def serializer(data): return do_something(data) and should return a `str` Example:: def base64_serializer(data): return base64.b64encode(data) client.add_serializer("application/base64", base64_serializer) :param content_type: the content-type to associate `serializer` with :type content_type: str :param serializer: the serializer function to use with `content_type` :type serializer: function """ content_type = content_type.lower() self._serializers[content_type] = serializer def add_deserializer(self, content_type, deserializer): """Add a content-type deserializer to the client The `deserializer` function should have the following definition:: def deserializer(data): return undo_something(data) Example:: def base64_deserializer(data): return base64.b64decode(data) client.add_deserializer("application/base64", base64_deserializer) :param content_type: the content-type to 
associate `deserializer` with :type content_type: str :param deserializer: the deserializer function to use with `content_type` :type deserializer: function """ content_type = content_type.lower() self._deserializers[content_type] = deserializer def serialize(self, data, content_type): """Serialize the provided `data` to `content_type` This method will lookup the registered serializer for the provided Content-Type (defaults to str(data)) and passes `data` through the serializer. :param data: the data to serialize :type data: object :param content_type: the desired Content-Type for the provided `data` :type content_type: str :returns: str - the serialized data """ serializer = self._serializers.get(content_type, str) return serializer(data) def deserialize(self, data, content_type): """Deserialize the provided `data` from `content_type` This method will lookup the registered deserializer for the provided Content-Type (defaults to str(data)) and passes `data` through the deserializer. :param data: the data to deserialize :type data: str :param content_type: the Content-Type to deserialize `data` from :type content_type: str :returns: object - whatever the deserializer returns """ deserializer = self._deserializers.get(content_type, str) return deserializer(data) def get(self, key, counter=False): """Get the value of the key from the client's `bucket` :param key: the key to get from the bucket :type key: str :param counter: whether or not the `key` is a counter :type counter: bool :returns: object - the deserialized value of `key` :returns: None - if the call was not successful or the key was not found :raises: :class:`riakcached.exceptions.RiakcachedBadRequest` :raises: :class:`riakcached.exceptions.RiakcachedServiceUnavailable` """ url = "%s/buckets/%s/keys/%s" % (self.base_url, self.bucket, key) if counter: url = "%s/buckets/%s/counters/%s" % (self.base_url, self.bucket, key) status, data, headers = self.pool.request(method="GET", url=url) if status == 400: raise 
exceptions.RiakcachedBadRequest(data) elif status == 503: raise exceptions.RiakcachedServiceUnavailable(data) if status not in (200, 300, 304): return None return self.deserialize(data, headers.get("content-type", "text/plain")) def get_many(self, keys): """Get the value of multiple keys at once from the client's `bucket` :param keys: the list of keys to get :type keys: list :returns: dict - the keys are the keys provided and the values are the results from calls to :func:`get`, except keys whose values are `None` are not included in the result :raises: :class:`riakcached.exceptions.RiakcachedBadRequest` :raises: :class:`riakcached.exceptions.RiakcachedServiceUnavailable` """ results = dict((key, self.get(key)) for key in keys) return dict((key, value) for key, value in results.iteritems() if value is not None) def set(self, key, value, content_type="text/plain"): """Set the value of a key for the client's `bucket` :param key: the key to set the value for :type key: str :param value: the value to set, this will get serialized for the `content_type` :type value: object :param content_type: the Content-Type for `value` :type content_type: str :returns: bool - True if the call is successful, False otherwise :raises: :class:`riakcached.exceptions.RiakcachedBadRequest` :raises: :class:`riakcached.exceptions.RiakcachedPreconditionFailed` """ value = self.serialize(value, content_type) status, data, _ = self.pool.request( method="POST", url="%s/buckets/%s/keys/%s" % (self.base_url, self.bucket, key), body=value, headers={ "Content-Type": content_type, }, ) if status == 400: raise exceptions.RiakcachedBadRequest(data) elif status == 412: raise exceptions.RiakcachedPreconditionFailed(data) return status in (200, 201, 204, 300) def set_many(self, values, content_type="text/plain"): """Set the value of multiple keys at once for the client's `bucket` :param values: the key -> value pairings for the keys to set :type values: dict :param content_type: the Content-Type for all of 
the values provided :type content_type: str :returns: dict - the keys are the keys provided and the values are True or False from the calls to :func:`set` :raises: :class:`riakcached.exceptions.RiakcachedBadRequest` :raises: :class:`riakcached.exceptions.RiakcachedPreconditionFailed` """ return dict( (key, self.set(key, value, content_type)) for key, value in values.iteritems() ) def delete(self, key): """Delete the provided key from the client's `bucket` :param key: the key to delete :type key: str :returns: bool - True if the key was removed, False otherwise :raises: :class:`riakcached.exceptions.RiakcachedBadRequest` """ status, data, _ = self.pool.request( method="DELETE", url="%s/buckets/%s/keys/%s" % (self.base_url, self.bucket, key), ) if status == 400: raise exceptions.RiakcachedBadRequest(data) return status in (204, 404) def delete_many(self, keys): """Delete multiple keys at once from the client's `bucket` :param keys: list of `str` keys to delete :type keys: list :returns: dict - the keys are the keys provided and the values are True or False from the calls to :func:`delete` :raises: :class:`riakcached.exceptions.RiakcachedBadRequest` """ return dict((key, self.delete(key)) for key in keys) def stats(self): """Get the server stats :returns: dict - the stats from the server :returns: None - when the call is not successful """ status, data, _ = self.pool.request( method="GET", url="%s/stats" % self.base_url, ) if status == 200: return self.deserialize(data, "application/json") return None def props(self): """Get the properties for the client's `bucket` :returns: dict - the `bucket`'s set properties :returns: None - when the call is not successful """ status, data, _ = self.pool.request( method="GET", url="%s/buckets/%s/props" % (self.base_url, self.bucket), ) if status == 200: return json.loads(data) return None def set_props(self, props): """Set the properties for the client's `bucket` :param props: the properties to set :type props: dict :returns: bool 
- True if it is successful otherwise False """ status, _, _ = self.pool.request( method="PUT", url="%s/buckets/%s/props" % (self.base_url, self.bucket), body=self.serialize(props, "application/json"), headers={ "Content-Type": "application/json", } ) return status == 200 def keys(self): """Get a list of all keys :returns: list - list of keys on the server :returns: None - when the call is not successful """ status, data, _ = self.pool.request( method="GET", url="%s/buckets/%s/keys?keys=true" % (self.base_url, self.bucket), ) if status == 200: return self.deserialize(data, "application/json") return None def ping(self): """Ping the server to ensure it is up :returns: bool - True if it is successful, False otherwise """ status, _, _ = self.pool.request( method="GET", url="%s/ping" % self.base_url, ) return status == 200 def incr(self, key, value=1): """Increment the counter with the provided key :param key: the counter to increment :type key: str :param value: how much to increment by :type value: int :returns: bool - True/False whether or not it was successful :raises: :class:`riakcached.exceptions.RiakcachedConflict` :raises: :class:`riakcached.exceptions.RiakcachedBadRequest` """ status, data, _ = self.pool.request( method="POST", url="%s/buckets/%s/counters/%s" % (self.base_url, self.bucket, key), body=str(value), ) if status == 409: raise exceptions.RiakcachedConflict(data) elif status == 400: raise exceptions.RiakcachedBadRequest(data) return status in (200, 201, 204, 300) class ThreadedRiakClient(RiakClient): """A threaded version of :class:`riakcached.clients.RiakClient` The threaded version uses threads to try to parallelize the {set,get,delete}_many method calls """ def _many(self, target, args_list): workers = [] worker_results = Queue.Queue() for args in args_list: args.append(worker_results) worker = threading.Thread(target=target, args=args) worker.daemon = True worker.start() workers.append(worker) for worker in workers: worker.join() results = {} 
while not worker_results.empty(): key, value = worker_results.get() results[key] = value return results def delete_many(self, keys): """Delete multiple keys at once from the client's `bucket` :param keys: list of `str` keys to delete :type keys: list :returns: dict - the keys are the keys provided and the values are True or False from the calls to :func:`delete` :raises: :class:`riakcached.exceptions.RiakcachedBadRequest` """ def worker(key, results): results.put((key, self.delete(key))) args = [[key] for key in keys] return self._many(worker, args) def set_many(self, values): """Set the value of multiple keys at once for the client's `bucket` :param values: the key -> value pairings for the keys to set :type values: dict :param content_type: the Content-Type for all of the values provided :type content_type: str :returns: dict - the keys are the keys provided and the values are True or False from the calls to :func:`set` :raises: :class:`riakcached.exceptions.RiakcachedBadRequest` :raises: :class:`riakcached.exceptions.RiakcachedPreconditionFailed` """ def worker(key, value, results): results.put((key, self.set(key, value))) args = [list(data) for data in values.items()] return self._many(worker, args) def get_many(self, keys): """Get the value of multiple keys at once from the client's `bucket` :param keys: the list of keys to get :type keys: list :returns: dict - the keys are the keys provided and the values are the results from calls to :func:`get`, except keys whose values are `None` are not included in the result :raises: :class:`riakcached.exceptions.RiakcachedBadRequest` :raises: :class:`riakcached.exceptions.RiakcachedServiceUnavailable` """ def worker(key, results): results.put((key, self.get(key))) args = [[key] for key in keys] results = self._many(worker, args) results = dict((key, value) for key, value in results.iteritems() if value is not None) return results or None
{ "repo_name": "brettlangdon/riakcached", "path": "riakcached/clients.py", "copies": "1", "size": "14758", "license": "mit", "hash": 4379898732396169000, "line_mean": 35.6203473945, "line_max": 98, "alpha_frac": 0.6029272259, "autogenerated": false, "ratio": 4.232291367938056, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5335218593838056, "avg_score": null, "num_lines": null }
# All Rights Reserved 2018 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_utils import uuidutils from networking_l2gw.l2gatewayclient.osc import l2gw as osc_l2gw from networking_l2gw.l2gatewayclient.osc import l2gw_connection as \ osc_l2gw_conn class FakeL2GW(object): @staticmethod def create_l2gw(num_dev=1, num_if=1, attrs=None): """Create one fake L2 Gateway.""" attrs = attrs or {} interfaces = [{'name': 'interface' + uuidutils.generate_uuid(dashed=False)} for iface in range(num_if)] devices = [{'device_name': 'device' + uuidutils.generate_uuid(dashed=False), 'interfaces': interfaces} for dev in range(num_dev)] l2gw_attrs = { 'id': uuidutils.generate_uuid(), 'name': 'test-l2gw' + uuidutils.generate_uuid(dashed=False), 'tenant_id': uuidutils.generate_uuid(), 'devices': devices } l2gw_attrs.update(attrs) return copy.deepcopy(l2gw_attrs) @staticmethod def create_l2gws(attrs=None, count=1): """Create multiple fake L2 Gateways.""" l2gws = [] for i in range(0, count): if attrs is None: attrs = {'id': 'fake_id%d' % i} elif getattr(attrs, 'id', None) is None: attrs['id'] = 'fake_id%d' % i l2gws.append(FakeL2GW.create_l2gw(attrs=attrs)) return {osc_l2gw.L2_GATEWAYS: l2gws} class FakeL2GWConnection(object): @staticmethod def create_l2gw_connection(attrs=None): """Create a fake l2gw connection.""" attrs = attrs or {} l2gw_connection_attrs = { 'network_id': uuidutils.generate_uuid(), 'l2_gateway_id': uuidutils.generate_uuid(), 'segmentation_id': '42', 'tenant_id': 
uuidutils.generate_uuid(), 'id': uuidutils.generate_uuid() } l2gw_connection_attrs.update(attrs) return copy.deepcopy(l2gw_connection_attrs) @staticmethod def create_l2gw_connections(attrs=None, count=1): l2gw_connections = [] for i in range(0, count): if attrs is None: attrs = {'id': 'fake_id%d' % i} elif getattr(attrs, 'id', None) is None: attrs['id'] = 'fake_id%d' % i l2gw_connections.append(FakeL2GWConnection.create_l2gw_connection( attrs=attrs)) return {osc_l2gw_conn.L2_GATEWAY_CONNECTIONS: l2gw_connections}
{ "repo_name": "openstack/networking-l2gw", "path": "networking_l2gw/tests/unit/l2gatewayclient/osc/fakes.py", "copies": "1", "size": "3137", "license": "apache-2.0", "hash": 8493240963814560000, "line_mean": 33.4725274725, "line_max": 78, "alpha_frac": 0.5941982786, "autogenerated": false, "ratio": 3.660443407234539, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.47546416858345386, "avg_score": null, "num_lines": null }
"""All rights reserved. ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland, VPSI, 2017 jahia2wp: an amazing tool ! Usage: jahia2wp.py download <site> [--debug | --quiet] [--username=<USERNAME> --host=<HOST> --zip-path=<ZIP_PATH> --force] jahia2wp.py download-many <csv_file> [--debug | --quiet] [--output-dir=<OUTPUT_DIR>] jahia2wp.py unzip <site> [--debug | --quiet] [--username=<USERNAME> --host=<HOST> --zip-path=<ZIP_PATH> --force] [--output-dir=<OUTPUT_DIR>] jahia2wp.py parse <site> [--debug | --quiet] [--output-dir=<OUTPUT_DIR>] [--use-cache] jahia2wp.py export <site> <wp_site_url> <unit_name_or_id> [--debug | --quiet] [--to-wordpress] [--clean-wordpress] [--to-dictionary] [--admin-password=<PASSWORD>] [--output-dir=<OUTPUT_DIR>] [--installs-locked=<BOOLEAN> --updates-automatic=<BOOLEAN>] [--openshift-env=<OPENSHIFT_ENV> --theme=<THEME>] [--use-cache] [--features-flags] [--keep-extracted-files] [--category=<SITE_CATEGORY>] jahia2wp.py clean <wp_env> <wp_url> [--debug | --quiet] [--stop-on-errors] [--no-backup] jahia2wp.py clean-many <csv_file> [--debug | --quiet] jahia2wp.py check <wp_env> <wp_url> [--debug | --quiet] jahia2wp.py generate <wp_env> <wp_url> [--debug | --quiet] [--wp-title=<WP_TITLE> --wp-tagline=<WP_TAGLINE> --admin-password=<PASSWORD>] [--theme=<THEME> --theme-faculty=<THEME-FACULTY>] [--installs-locked=<BOOLEAN> --automatic-updates=<BOOLEAN>] [--extra-config=<YAML_FILE>] [--category=<SITE_CATEGORY>] [--nosymlink | --wp-version=<version>] jahia2wp.py backup <wp_env> <wp_url> [--full] [--debug | --quiet] jahia2wp.py version <wp_env> <wp_url> [--debug | --quiet] jahia2wp.py admins <wp_env> <wp_url> [--debug | --quiet] jahia2wp.py generate-many <csv_file> [--debug | --quiet] [--nosymlink] jahia2wp.py export-many <csv_file> [--debug | --quiet] [--output-dir=<OUTPUT_DIR> --admin-password=<PASSWORD>] [--use-cache] [--keep-extracted-files] jahia2wp.py backup-many <csv_file> [--debug | --quiet] jahia2wp.py backup-inventory <path> [--debug | --quiet] 
jahia2wp.py rotate-backup <csv_file> [--dry-run] [--debug | --quiet] jahia2wp.py rotate-backup-inventory <path> [--dry-run] [--debug | --quiet] jahia2wp.py veritas <csv_file> [--debug | --quiet] jahia2wp.py fan-global-sitemap <csv_file> <wp_path> [--debug | --quiet] jahia2wp.py inventory <path> [--skip-users] [--debug | --quiet] jahia2wp.py shortcode-list <path> [--out-csv=<out_csv>] [--debug | --quiet] jahia2wp.py shortcode-details <path> <shortcode> [--debug | --quiet] [--out-csv=<out_csv>] jahia2wp.py shortcode-fix <wp_env> <wp_url> [<shortcode_name>] [--debug | --quiet] jahia2wp.py shortcode-fix-many <csv_file> [<shortcode_name>] [--debug | --quiet] jahia2wp.py block-fix <wp_env> <wp_url> [<block_name>] [--debug | --quiet] [--simulation] jahia2wp.py block-fix-inventory <path> [<block_name>] [--debug | --quiet] [--simulation [--log-time-csv]] jahia2wp.py shortcode-to-block <wp_env> <wp_url> [<shortcode_name>] [--debug | --quiet] [--simulation] [--posts] jahia2wp.py shortcode-to-block-many <csv_file> [<shortcode_name>] [--debug | --quiet] [--simulation [--log-time-csv]] [--posts] jahia2wp.py shortcode-to-block-inventory <path> [<shortcode_name>] [--debug | --quiet] [--simulation [--log-time-csv]] [--posts] jahia2wp.py extract-plugin-config <wp_env> <wp_url> <output_file> [--debug | --quiet] jahia2wp.py list-plugins <wp_env> <wp_url> [--debug | --quiet] [--config [--plugin=<PLUGIN_NAME>]] [--extra-config=<YAML_FILE>] jahia2wp.py update-plugins <wp_env> <wp_url> [--debug | --quiet] [--force-plugin] [--force-options] [--plugin=<PLUGIN_NAME>|--strict-list] [--extra-config=<YAML_FILE>] [--nosymlink] jahia2wp.py update-plugins-many <csv_file> [--debug | --quiet] [--force-plugin] [--force-options] [--plugin=<PLUGIN_NAME>|--strict-list] [--nosymlink] jahia2wp.py update-plugins-inventory <path> [--debug | --quiet] [--force-plugin] [--force-options] [--plugin=<PLUGIN_NAME>|--strict-list] [--extra-config=<YAML_FILE>] [--nosymlink] jahia2wp.py global-report <csv_file> 
[--output-dir=<OUTPUT_DIR>] [--use-cache] [--debug | --quiet] --root_wp_dest=</srv/../epfl> [--greedy] [--htaccess] [--context=<intra|inter|full>] [--dry_run] Options: -h --help Show this screen. -v --version Show version. --debug Set log level to DEBUG [default: INFO] --quiet Set log level to WARNING [default: INFO] """ import csv import time import getpass import json import logging import pickle import subprocess from collections import OrderedDict from datetime import datetime, date from pprint import pprint import os import shutil import sys import yaml from docopt import docopt from docopt_dispatch import dispatch from epflldap.ldap_search import get_unit_id, get_unit_name from ldap3.core.exceptions import LDAPSocketOpenError from rotate_backups import RotateBackups import settings from crawler import JahiaCrawler from exporter.dict_exporter import DictExporter from exporter.wp_exporter import WPExporter from parser.jahia_site import Site from settings import VERSION, FULL_BACKUP_RETENTION_THEME, INCREMENTAL_BACKUP_RETENTION_THEME, \ DEFAULT_THEME_NAME, BANNER_THEME_NAME, DEFAULT_CONFIG_INSTALLS_LOCKED, DEFAULT_CONFIG_UPDATES_AUTOMATIC, \ DEFAULT_WP_SITE_CATEGORY, DEFAULT_WP_VERSION from tracer.tracer import Tracer from unzipper.unzip import unzip_one from utils import Utils from veritas.casters import cast_boolean from veritas.veritas import VeritasValidor from wordpress import WPSite, WPConfig, WPGenerator, WPBackup, WPPluginConfigExtractor from fan.fan_global_sitemap import FanGlobalSitemap from migration2018.shortcodes import Shortcodes from migration2018.gutenbergblocks import GutenbergBlocks from migration2018.gutenbergfixes import GutenbergFixes def _check_site(wp_env, wp_url, **kwargs): """ Helper function to validate wp site given arguments """ wp_site = WPSite(wp_env, wp_url, wp_site_title=kwargs.get('wp_title')) wp_config = WPConfig(wp_site) if not wp_config.is_installed: raise SystemExit("No files found for {}".format(wp_site.url)) if not 
wp_config.is_config_valid: raise SystemExit("Configuration not valid for {}".format(wp_site.url)) return wp_config def _check_csv(csv_file): """ Check validity of CSV file containing sites information Arguments keywords csv_file -- Path to CSV file Return Instance of VeritasValidator """ validator = VeritasValidor(csv_file) # If errors found during validation if not validator.validate(): for error in validator.errors: logging.error(error.message) raise SystemExit("Invalid CSV file!") return validator def _get_default_language(languages): """ Return the default language If the site is in multiple languages, English is the default language """ if "en" in languages: return "en" else: return languages[0] def _set_default_language_in_first_position(default_language, languages): """ Set the default language in first position. It is important for the Polylang plugin that the default language is in first position. :param default_language: the default language :param languages: the list of languages """ if len(languages) > 1: languages.remove(default_language) languages.insert(0, default_language) return languages def _fix_menu_location(wp_generator, languages, default_language): """ Fix menu location for Polylang. After import, menus aren't displayed correctly so we need to add polylang config to fix this. :param wp_generator: WPGenerator instance used to create website. 
""" # Recovering installed theme theme = wp_generator.run_wp_cli("theme list --status=active --field=name --format=csv") if not theme: raise Exception("Cannot retrieve current active theme") nav_menus = {theme: {}} # Getting menu locations locations = wp_generator.run_wp_cli("menu location list --format=json") if not locations: raise Exception("Cannot retrieve menu location list") # Getting menu list menu_list = wp_generator.run_wp_cli("menu list --fields=slug,locations,term_id --format=json") if not menu_list: raise Exception("Cannot get menu list") # Looping through menu locations for location in json.loads(locations): # To store menu's IDs for all language and current location menu_lang_to_id = {} base_menu_slug = None # We have location, we have to found base slug of the menus which are at this location for menu in json.loads(menu_list): if location['location'] in menu['locations']: base_menu_slug = menu['slug'] break # If location doesn't contain any menu, we skip it if base_menu_slug is None: continue # We now have location (loc) and menu base slug (slug) # Looping through languages for language in languages: # Defining current slug name depending on language if language == default_language: menu_slug = base_menu_slug else: menu_slug = "{}-{}".format(base_menu_slug, language) # Value if not found menu_lang_to_id[language] = 0 # Looking for menu ID for given slug for menu in json.loads(menu_list): if menu_slug == menu['slug']: menu_lang_to_id[language] = menu['term_id'] break # We now have information for all menus in all languages for this location so we add infos nav_menus[theme][location['location']] = menu_lang_to_id # We update polylang config if not wp_generator.run_wp_cli("pll option update nav_menus '{}'".format(json.dumps(nav_menus))): raise Exception("Cannot update polylang option") def _add_extra_config(extra_config_file, current_config): """ Adds extra configuration information to current config Arguments keywords: extra_config_file -- YAML file in 
which is extra config current_config -- dict with current configuration Return: current_config dict merge with YAML file content""" if not os.path.exists(extra_config_file): raise Exception("Extra config file not found: {}".format(extra_config_file)) extra_config = yaml.load(open(extra_config_file, 'r')) return {**current_config, **extra_config} def _generate_csv_line(wp_generator): """ Generate a CSV line to add to source of truth. The line contains information about exported WP site. :param wp_generator: Object used to create WP website :return: """ # CSV columns in correct order for source of truth line generation csv_columns = OrderedDict() # Recovering values from WPGenerator or hardcode some csv_columns['wp_site_url'] = wp_generator._site_params['wp_site_url'] # from csv csv_columns['wp_tagline'] = wp_generator._site_params['wp_tagline'][wp_generator.default_lang()] # from parser csv_columns['wp_site_title'] = wp_generator._site_params['wp_site_title'] # from parser csv_columns['site_type'] = 'wordpress' csv_columns['openshift_env'] = 'subdomains' csv_columns['category'] = 'GeneralPublic' # from csv csv_columns['theme'] = wp_generator._site_params['theme'] # from csv csv_columns['theme_faculty'] = wp_generator._site_params['theme_faculty'] # from parser csv_columns['status'] = 'yes' csv_columns['installs_locked'] = wp_generator._site_params['installs_locked'] # from csv (bool) csv_columns['updates_automatic'] = wp_generator._site_params['updates_automatic'] # from csv (bool) csv_columns['langs'] = wp_generator._site_params['langs'] # from parser csv_columns['unit_name'] = wp_generator._site_params['unit_name'] # from csv csv_columns['unit_id'] = wp_generator._site_params['unit_id'] # from csv csv_columns['comment'] = 'Migrated from Jahia to WP {}'.format(date.today()) # Formatting values depending on their type/content for col in csv_columns: # Bool are translated to 'yes' or 'no' if isinstance(csv_columns[col], bool): csv_columns[col] = 'yes' if 
csv_columns[col] else 'no' # None become empty string elif csv_columns[col] is None: csv_columns[col] = '' logging.info("Here is the line with up-to-date information to add in source of truth:\n") logging.info('"%s"', '","'.join(csv_columns.values())) def _init_shortcode_to_csv_time_log(): """ Returns log filename to use when transforming shortcode to blocks """ now = datetime.now() filename = os.path.join(settings.MIGRATION_LOG_PATH, "time_{}.csv".format(now.strftime("%Y-%m-%d_%H-%M-%S"))) # We just create file with header columns with open(filename, 'w') as l: l.write("URL;Nb page;Nb pages updated;Nb shortcodes;Duration [s]\n") return filename @dispatch.on('download') def download(site, username=None, host=None, zip_path=None, force=False, **kwargs): # prompt for password if username is provided password = None if username is not None: password = getpass.getpass(prompt="Jahia password for user '{}': ".format(username)) crawler = JahiaCrawler(site, username=username, password=password, host=host, zip_path=zip_path, force=force) return crawler.download_site() @dispatch.on('download-many') def download_many(csv_file, output_dir=None, **kwargs): TRACER_FILE_NAME = "tracer_empty_jahia_zip.csv" if output_dir is None: output_dir = settings.JAHIA_ZIP_PATH tracer_path = os.path.join(output_dir, TRACER_FILE_NAME) rows = Utils.csv_filepath_to_dict(csv_file) # download jahia zip file for each row print("\nJahia zip files will now be downloaded...") for index, row in enumerate(rows): print("\nIndex #{}:\n---".format(index)) try: download(site=row['Jahia_zip']) except Exception: with open(tracer_path, 'a', newline='\n') as tracer: tracer.write( "{}, {}\n".format( '{0:%Y-%m-%d %H:%M:%S}'.format(datetime.now()), row['Jahia_zip'] ) ) tracer.flush() logging.info("All jahia zip files downloaded !") @dispatch.on('unzip') def unzip(site, username=None, host=None, zip_path=None, force=False, output_dir=None, **kwargs): # get zip file zip_file = download(site, username, host, zip_path, 
force) if output_dir is None: output_dir = settings.JAHIA_DATA_PATH try: return unzip_one(output_dir, site, zip_file) except Exception as err: logging.error("%s - unzip - Could not unzip file - Exception: %s", site, err) raise err @dispatch.on('parse') def parse(site, output_dir=None, use_cache=False, **kwargs): """ Parse the give site. """ try: # without changing this parameter the following sites crash # when they are dumped on disk with pickle: # biorob, disopt, euler, last, master-architecture # they are probably corrupted, so this is simply a hack # to make it work sys.setrecursionlimit(2000) # create subdir in output_dir site_dir = unzip(site, output_dir=output_dir) # where to cache our parsing pickle_file_path = os.path.join(site_dir, 'parsed_{}.pkl'.format(site)) # when using-cache: check if already parsed pickle_site = False if use_cache: if os.path.exists(pickle_file_path): with open(pickle_file_path, 'rb') as pickle_content: pickle_site = pickle.load(pickle_content) logging.info("Using the cached pickle file at %s" % pickle_file_path) logging.info("Parsing Jahia xml files from %s...", site_dir) if pickle_site: site = pickle_site else: logging.info("Cache not used, parsing the Site") site = Site(site_dir, site, fix_problematic_chars=True) print(site.report) # always save the parsed data on disk, so we can use the # cache later if we want with open(pickle_file_path, 'wb') as output: logging.info("Parsed site saved into %s" % pickle_file_path) pickle.dump(site, output, pickle.HIGHEST_PROTOCOL) # log success logging.info("Site %s successfully parsed" % site) Tracer.write_row(site=site.name, step="parse", status="OK") return site except Exception as err: logging.error("%s - parse - Exception: %s", site, err) raise err @dispatch.on('export') def export(site, wp_site_url, unit_name_or_id, to_wordpress=False, clean_wordpress=False, to_dictionary=False, admin_password=None, output_dir=None, theme=None, installs_locked=False, updates_automatic=False, 
openshift_env=None, use_cache=None, keep_extracted_files=False, features_flags=False, category=None, **kwargs): """ Export the jahia content into a WordPress site. :param site: the name of the WordPress site :param wp_site_url: URL of WordPress site :param unit_name_or_id: unit name or unit ID of the WordPress site :param to_wordpress: to migrate data :param clean_wordpress: to clean data :param admin_password: an admin password :param output_dir: directory where the jahia zip file will be unzipped :param theme: WordPress theme used for the WordPress site :param installs_locked: boolean :param updates_automatic: boolean :param openshift_env: openshift_env environment (prod, int, gcharmier ...) :param keep_extracted_files: command to keep files extracted from jahia zip :param features_flags: Tell to clean page content or not :param category: Site category which defines plugin list to install and configure """ # Download, Unzip the jahia zip and parse the xml data site = parse(site=site, use_cache=use_cache, output_dir=output_dir) # Define the default language default_language = _get_default_language(site.languages) # For polylang plugin, we need position default lang in first position languages = _set_default_language_in_first_position(default_language, site.languages) if not site.acronym[default_language]: logging.warning("No wp site title in %s", default_language) wp_site_title = None else: wp_site_title = site.acronym[default_language] # theme if not site.theme[default_language] or site.theme[default_language] == "epfl": theme_faculty = "" else: theme_faculty = site.theme[default_language] if not theme: # Setting correct theme depending on parsing result theme = BANNER_THEME_NAME if default_language in site.banner else DEFAULT_THEME_NAME # If nothing specified, we use default if category is None: category = DEFAULT_WP_SITE_CATEGORY # tagline if not site.title[default_language]: logging.warning("No wp tagline in %s", default_language) wp_tagline = None else: 
wp_tagline = site.title if unit_name_or_id.isdigit(): unit_id = unit_name_or_id # fetch unit name from ldap try: logging.info("Fetching LDAP for unit '%s' name...", unit_id) unit_name = get_unit_name(unit_id) logging.info("LDAP name found = %s...", unit_name) except LDAPSocketOpenError: logging.error("LDAP is not responding, aborting here...") raise else: # We get unit name unit_name = unit_name_or_id # fetch unit id from ldap try: logging.info("Fetching LDAP for unit '%s' ID...", unit_name) unit_id = get_unit_id(unit_name) logging.info("LDAP ID found = %s...", unit_id) except LDAPSocketOpenError: logging.error("LDAP is not responding, aborting here...") raise info = { # information from parser 'langs': ",".join(languages), 'wp_site_title': wp_site_title, 'wp_tagline': wp_tagline, 'theme_faculty': theme_faculty, 'unit_name': unit_name, # information from source of truth 'openshift_env': openshift_env, 'wp_site_url': wp_site_url, 'theme': theme, 'updates_automatic': updates_automatic, 'installs_locked': installs_locked, 'category': category, # determined information 'unit_id': unit_id, 'from_export': True } # skip options, used only during development # # skip_base: if True don't install WordPress, use the existing site # skip_media: if True don't import the media # skip_pages: if True don't import the pages skip_base = False skip_media = False skip_pages = False # List of plugins to let in 'deactivated' state during import. To earn more time, they are not activated during # WordPress empty site generation. Because activating them takes time and we have to take the same amount of time # to deactivate them before running Jahia site import. # Deactivating plugins can improve import time by ~80% # WARNING: be careful with the list order. Plugins will be reactivated after import by using list order. So if # there are dependencies between plugins, arrange them in the right way. 
deactivated_plugins = ['mainwp-child', 'EPFL-Content-Filter', 'feedzy-rss-feeds', 'remote-content-shortcode', 'shortcode-ui', 'shortcode-ui-richtext', # This one needs to come after the previous one 'simple-sitemap', 'svg-support', 'tinymce-advanced', 'varnish-http-purge', 'epfl', 'epfl-404', 'epfl-infoscience', 'wp-media-folder', 'pdfjs-viewer-shortcode', ] # Generate a WordPress site wp_generator = WPGenerator(info, admin_password) # base installation if skip_base: logging.info("Deactivating %s plugins...", len(deactivated_plugins)) for plugin_name in deactivated_plugins: # We do a 'try' to handle missing plugins (if exists) try: wp_generator.run_wp_cli("plugin deactivate {}".format(plugin_name)) except: logging.info("Plugin %s doesn't seem's to be installed", plugin_name) try: # even if we skip the base installation we need to reactivate # the basic auth plugin for the rest API wp_generator.run_wp_cli("plugin activate Basic-Auth") except: # if activation fails it means the plugin is not installed wp_generator.install_basic_auth_plugin() else: # If returns false, it means there was an error if not wp_generator.generate(deactivated_plugins): # We just display line to add to CSV _generate_csv_line(wp_generator) return wp_generator.install_basic_auth_plugin() # dual auth if settings.ACTIVE_DUAL_AUTH: wp_generator.active_dual_auth() # exporter wp_exporter = WPExporter( site, wp_generator, default_language, output_dir=output_dir ) # clean if clean_wordpress: logging.info("Cleaning WordPress for %s...", site.name) wp_exporter.delete_all_content() logging.info("Data of WordPress site %s successfully deleted", site.name) # to WordPress if to_wordpress: logging.info("Exporting %s to WordPress...", site.name) try: if wp_generator.get_number_of_pages() == 0: wp_exporter.import_data_to_wordpress(skip_pages=skip_pages, skip_media=skip_media, features_flags=features_flags) wp_exporter.write_redirections() _fix_menu_location(wp_generator, languages, default_language) 
logging.info("Reactivating %s plugins...", len(deactivated_plugins)) # Reactivating plugins for plugin_name in deactivated_plugins: # We do a 'try' to handle missing plugins (if exists) try: wp_generator.run_wp_cli("plugin activate {}".format(plugin_name)) except: logging.info("Plugin %s doesn't seem's to be installed", plugin_name) logging.info("Site %s successfully exported to WordPress", site.name) else: logging.info("Site %s already exported to WordPress", site.name) except (Exception, subprocess.CalledProcessError) as e: logging.error(str(e)) Tracer.write_row(site=site.name, step=e, status="KO") if not settings.DEBUG: wp_generator.clean() raise e Tracer.write_row(site=site.name, step="export", status="OK") wp_generator.uninstall_basic_auth_plugin() wp_generator.enable_updates_automatic_if_allowed() # to dictionary if to_dictionary: data = DictExporter.generate_data(site) pprint(data, width=settings.LINE_LENGTH_ON_PPRINT) _generate_csv_line(wp_generator) if not keep_extracted_files: # Delete extracted zip files # We take dirname because site.base_path is the path to the subfolder in the zip. # Example : path_to_extract/dcsl/dcsl # And we want to delete path_to_extract/dcsl base_zip_path = os.path.dirname(os.path.abspath(site.base_path)) logging.debug("Removing zip extracted folder '%s'", base_zip_path) if os.path.exists(base_zip_path): shutil.rmtree(base_zip_path) return wp_exporter @dispatch.on('fan-global-sitemap') def fan_global_sitemap(csv_file, wp_path, **kwargs): """ Create a global sitemap at the given wp_path. Prerequisites: - You have installed WordPress at the given wp_path WITHOUT polylang (comment the polylang plugin in config-lot1.yml). 
       Note: it's not enough to disable polylang after installation

    After having launched this script:
    - Install polylang
    - Go to "Languages" and :
      - Add the English language
      - click on the link "You can set them all to the default language"
    - In "Appearance" > "Menus" set the "Main" menu to "Primary menu English"
      and "footer_nav" to "Footer menu English"
    """
    generator = FanGlobalSitemap(csv_file, wp_path)
    generator.create_website()


@dispatch.on('export-many')
def export_many(csv_file, output_dir=None, admin_password=None, use_cache=None,
                keep_extracted_files=False, **kwargs):
    """Run export() for every row of the given CSV file.

    Each row describes one Jahia zip to migrate to a WordPress site.
    Errors on one site are logged and traced but do not stop the loop.
    """
    rows = Utils.csv_filepath_to_dict(csv_file)

    # create a new WP site for each row
    print("\n{} websites will now be generated...".format(len(rows)))
    for index, row in enumerate(rows):
        print("\nIndex #{}:\n---".format(index))
        # CSV file is utf-8 so we encode correctly the string to avoid errors during logging.debug display
        row_bytes = repr(row).encode('utf-8')
        logging.debug("%s - row %s: %s", row["wp_site_url"], index, row_bytes)

        # 'features_flags' is an optional CSV column; only the literal 'yes' enables it
        features_flags = False if 'features_flags' not in row else row['features_flags'] == 'yes'

        try:
            export(
                site=row['Jahia_zip'],
                wp_site_url=row['wp_site_url'],
                # We use unit_id as first option if exists even if we have unit_name because it doesn't change
                unit_name_or_id=row['unit_id'] if 'unit_id' in row else row['unit_name'],
                to_wordpress=True,
                clean_wordpress=False,
                output_dir=output_dir,
                theme=row['theme'],
                installs_locked=row['installs_locked'],
                updates_automatic=row['updates_automatic'],
                wp_env=row['openshift_env'],
                admin_password=admin_password,
                use_cache=use_cache,
                keep_extracted_files=keep_extracted_files,
                features_flags=features_flags,
                category=row['category']
            )
        except (Exception, subprocess.CalledProcessError) as e:
            # best-effort: log + trace the failure and continue with the next site
            logging.error(str(e))
            Tracer.write_row(site=row['Jahia_zip'], step=e, status="KO")


@dispatch.on('check')
def check(wp_env, wp_url, **kwargs):
    """Check that the site at wp_url is installed and usable; SystemExit otherwise."""
    wp_config = _check_site(wp_env, wp_url, **kwargs)
    # run a few more tests
    if not wp_config.is_install_valid:
        raise SystemExit("Could not login or use site at {}".format(wp_config.wp_site.url))
    # success case
    print("WordPress site valid and accessible at {}".format(wp_config.wp_site.url))


@dispatch.on('clean')
def clean(wp_env, wp_url, stop_on_errors=False, no_backup=False, **kwargs):
    """Remove a WordPress site, taking a full backup first unless no_backup is set."""
    # when forced, do not check the status of the config -> just remove everything possible
    if stop_on_errors:
        _check_site(wp_env, wp_url, **kwargs)
    # config found: proceed with cleaning
    # FIXME: We need a clean that does not require unit_name
    wp_generator = WPGenerator({'openshift_env': wp_env, 'wp_site_url': wp_url})
    # backup before the clean, in case we need to get it back
    if not no_backup:
        backup(wp_env, wp_url, full=True)
    if wp_generator.clean():
        print("Successfully cleaned WordPress site {}".format(wp_generator.wp_site.url))


@dispatch.on('clean-many')
def clean_many(csv_file, **kwargs):
    """Run clean() (with default options) for every row of the given CSV file."""
    rows = Utils.csv_filepath_to_dict(csv_file)

    # clean WP site for each row
    print("\n{} websites will now be cleaned...".format(len(rows)))
    for index, row in enumerate(rows):
        print("\nIndex #{}:\n---".format(index))
        # CSV file is utf-8 so we encode correctly the string to avoid errors during logging.debug display
        row_bytes = repr(row).encode('utf-8')
        logging.debug("%s - row %s: %s", row["wp_site_url"], index, row_bytes)
        clean(row['openshift_env'], row['wp_site_url'])


@dispatch.on('generate')
def generate(wp_env, wp_url,
             wp_title=None, wp_tagline=None, admin_password=None,
             theme=None, theme_faculty=None, category=None,
             installs_locked=None, updates_automatic=None,
             extra_config=None, nosymlink=None, wp_version=None,
             **kwargs):
    """Generate a single empty WordPress site from command-line parameters.

    This command may need more params if reference to them are done in YAML file. In this case, you'll see an
    error explaining which params are needed and how they can be added to command line
    """
    # if nothing is specified we want a locked install
    if installs_locked is None:
        installs_locked = DEFAULT_CONFIG_INSTALLS_LOCKED
    else:
        installs_locked = cast_boolean(installs_locked)

    # if nothing is specified we want automatic updates
    if updates_automatic is None:
        updates_automatic = DEFAULT_CONFIG_UPDATES_AUTOMATIC
    else:
        updates_automatic = cast_boolean(updates_automatic)

    # If nothing specified, we use default
    if category is None:
        category = DEFAULT_WP_SITE_CATEGORY

    # FIXME: When we will use 'unit_id' from CSV file, add parameter here OR dynamically get it from AD
    all_params = {'openshift_env': wp_env,
                  'wp_site_url': wp_url,
                  'theme': theme or DEFAULT_THEME_NAME,
                  'installs_locked': installs_locked,
                  'updates_automatic': updates_automatic,
                  'category': category,
                  'wp_version': wp_version}

    # Adding parameters if given
    if theme_faculty is not None:
        all_params['theme_faculty'] = theme_faculty

    if wp_title is not None:
        all_params['wp_site_title'] = wp_title

    if wp_tagline is not None:
        all_params['wp_tagline'] = wp_tagline

    # if we have extra configuration to load,
    if extra_config is not None:
        all_params = _add_extra_config(extra_config, all_params)

    wp_generator = WPGenerator(all_params, admin_password=admin_password)
    if not wp_generator.generate(no_symlink=nosymlink):
        raise Exception("Generation failed. More info above")

    print("Successfully created new WordPress site at {}".format(wp_generator.wp_site.url))


@dispatch.on('backup')
def backup(wp_env, wp_url, full=False, **kwargs):
    """Back up one site; 'full' selects a full (vs incremental) backup. SystemExit on failure."""
    wp_backup = WPBackup(wp_env, wp_url, full)
    if not wp_backup.backup():
        raise SystemExit("Backup failed. More info above")

    print("Successful {} backup for {}".format(
        wp_backup.backup_pattern, wp_backup.wp_site.url))


@dispatch.on('version')
def version(wp_env, wp_url, **kwargs):
    """Print the WordPress version of the given site."""
    wp_config = _check_site(wp_env, wp_url, **kwargs)
    # success case
    print(wp_config.wp_version)


@dispatch.on('admins')
def admins(wp_env, wp_url, **kwargs):
    """Print the admins of the given site, one per line."""
    wp_config = _check_site(wp_env, wp_url, **kwargs)
    # success case
    for admin in wp_config.admins:
        print(admin)


@dispatch.on('generate-many')
def generate_many(csv_file, nosymlink=None, **kwargs):
    """Generate one WordPress site per (validated) row of the given CSV file."""
    # CSV file validation
    validator = _check_csv(csv_file)

    # create a new WP site for each row
    print("\n{} websites will now be generated...".format(len(validator.rows)))
    for index, row in enumerate(validator.rows):
        print("\nIndex #{}:\n---".format(index))
        # CSV file is utf-8 so we encode correctly the string to avoid errors during logging.debug display
        row_bytes = repr(row).encode('utf-8')
        logging.debug("%s - row %s: %s", row["wp_site_url"], index, row_bytes)
        WPGenerator(row).generate(no_symlink=nosymlink)


@dispatch.on('backup-many')
def backup_many(csv_file, **kwargs):
    """Back up every site listed in the given CSV file; failures are logged, not fatal."""
    # CSV file validation
    validator = _check_csv(csv_file)

    # create a new WP site backup for each row
    print("\n{} websites will now be backuped...".format(len(validator.rows)))
    for index, row in enumerate(validator.rows):
        logging.debug("%s - row %s: %s", row["wp_site_url"], index, row)
        try:
            WPBackup(
                row["openshift_env"],
                row["wp_site_url"]
            ).backup()
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit; consider 'except Exception'
        except:
            logging.error("Site %s - Error %s", row["wp_site_url"], sys.exc_info())


@dispatch.on('backup-inventory')
def backup_inventory(path, **kwargs):
    """Back up every correctly-installed site found by inventorying 'path'."""
    logging.info("Backup from inventory...")
    for site_details in WPConfig.inventory(path, skip_users=True):
        if site_details.valid == settings.WP_SITE_INSTALL_OK:
            logging.info("Running backup for %s", site_details.url)
            try:
                WPBackup(
                    WPSite.openshift_env_from_path(site_details.path),
                    site_details.url
                ).backup()
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit; consider 'except Exception'
            except:
                logging.error("Site %s - Error %s", site_details.url, sys.exc_info())
    logging.info("All backups done for path: %s", path)


@dispatch.on('rotate-backup-inventory')
def rotate_backup_inventory(path, dry_run=False, **kwargs):
    """Rotate (prune) full then incremental backups for every valid site under 'path'."""
    for site_details in WPConfig.inventory(path):
        if site_details.valid == settings.WP_SITE_INSTALL_OK:
            try:
                # NOTE(review): rebinds the 'path' parameter to the site's backup path;
                # the outer inventory loop is unaffected since inventory() was already called
                path = WPBackup(
                    WPSite.openshift_env_from_path(site_details.path),
                    site_details.url
                ).path
                # rotate full backups first
                for pattern in [["*full.sql"], ["*full.tar", "*full.tar.gz"]]:
                    RotateBackups(
                        FULL_BACKUP_RETENTION_THEME,
                        dry_run=dry_run,
                        include_list=pattern
                    ).rotate_backups(path)
                # rotate incremental backups
                for pattern in [["*.list"], ["*inc.sql"], ["*inc.tar", "*inc.tar.gz"]]:
                    RotateBackups(
                        INCREMENTAL_BACKUP_RETENTION_THEME,
                        dry_run=dry_run,
                        include_list=pattern
                    ).rotate_backups(path)
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit; consider 'except Exception'
            except:
                logging.error("Site %s - Error %s", site_details.url, sys.exc_info())


@dispatch.on('rotate-backup')
def rotate_backup(csv_file, dry_run=False, **kwargs):
    """Rotate (prune) full then incremental backups for every site in the CSV file."""
    # CSV file validation
    validator = _check_csv(csv_file)
    for index, row in enumerate(validator.rows):
        try:
            path = WPBackup(row["openshift_env"], row["wp_site_url"]).path
            # rotate full backups first
            for pattern in [["*full.sql"], ["*full.tar", "*full.tar.gz"]]:
                RotateBackups(
                    FULL_BACKUP_RETENTION_THEME,
                    dry_run=dry_run,
                    include_list=pattern
                ).rotate_backups(path)
            # rotate incremental backups
            for pattern in [["*.list"], ["*inc.sql"], ["*inc.tar", "*inc.tar.gz"]]:
                RotateBackups(
                    INCREMENTAL_BACKUP_RETENTION_THEME,
                    dry_run=dry_run,
                    include_list=pattern
                ).rotate_backups(path)
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit; consider 'except Exception'
        except:
            logging.error("Site %s - Error %s", row["wp_site_url"], sys.exc_info())


@dispatch.on('shortcode-details')
def shortcode_details(path, shortcode, out_csv=None, **kwargs):
    """
    Go through websites present in 'path' and list all usages of a given shortcode

    :param path: Path where to look for WP installs
    :param shortcode: Shortcode to look for. It can be shortcodes separated with a comma
    :param out_csv: CSV file to save result
    :param kwargs:
    :return:
    """
    logging.info("Listing used shortcodes in path %s...", path)

    shortcodes = Shortcodes()
    details = shortcodes.get_details(path, shortcode.split(","))

    # If CSV output is requested
    if out_csv:
        with open(out_csv, 'w') as out:
            # Adding one line for each couple "shortcode", "website"
            for site_path, shortcode_call_list in details.items():
                for shortcode_infos in shortcode_call_list:
                    out.write('{};{};{}\n'.format(site_path,
                                                  shortcode_infos['post_url'],
                                                  shortcode_infos['shortcode_call']))
        logging.info("Output can be found in %s", out_csv)
    else:
        print(details)

    logging.info("Shortcodes details done!")


@dispatch.on('shortcode-list')
def shortcode_list(path, out_csv=None, **kwargs):
    """
    Go through websites present in 'path' and list all used shortcodes

    :param path: Path where to look for WP installs
    :param out_csv: CSV file to save result
    :param kwargs:
    :return:
    """
    logging.info("Listing used shortcodes in path %s...", path)

    shortcodes = Shortcodes()
    shortcodes.locate_existing(path)

    print("# shortcodes found: {}".format(len(shortcodes.list.keys())))

    # If CSV output is requested
    if out_csv:
        with open(out_csv, 'w') as out:
            # Adding one line for each couple "shortcode", "website"
            for shortcode, site_path_list in shortcodes.list.items():
                for site_path in site_path_list:
                    out.write("{};{}\n".format(shortcode, site_path))
        logging.info("Output can be found in %s", out_csv)
    else:
        print(shortcodes.list)

    logging.info("Shortcodes list done!")


@dispatch.on('shortcode-fix')
def shortcode_fix(wp_env, wp_url, shortcode_name=None, **kwargs):
    """Fix the given shortcode (or all, if None) on a single site and log the report."""
    shortcodes = Shortcodes()
    report = shortcodes.fix_site(wp_env, wp_url, shortcode_name)
    logging.info("Fix report:\n%s", str(report))


@dispatch.on('shortcode-fix-many')
def shortcode_fix_many(csv_file, shortcode_name=None, **kwargs):
    """Run shortcode_fix() for every row of the given CSV file."""
    rows = Utils.csv_filepath_to_dict(csv_file)
    print("\nShortcode will now be fixed on websites...")
    for index, row in enumerate(rows):
        print("\nIndex #{}:\n---".format(index))
        shortcode_fix(row['openshift_env'], row['wp_site_url'], shortcode_name)
    logging.info("All shortcodes for all sites fixed !")


@dispatch.on('block-fix')
def block_fix(wp_env, wp_url, block_name=None, simulation=False, csv_time_log=None, **kwargs):
    """Fix Gutenberg blocks on one site; optionally append timing stats to csv_time_log."""
    logging.info("Fixing blocks for %s", wp_url)
    if simulation:
        logging.info("== SIMULATION EXECUTION ==")

    # We have to log duration in CSV file
    if csv_time_log:
        time_log_file = open(csv_time_log, mode='a')
        start_time = time.time()

    blocks = GutenbergFixes()
    report = blocks.fix_site(wp_env, wp_url, shortcode_name=block_name, simulation=simulation)

    if simulation:
        logging.info("This was a simulation, nothing was changed in database")

    if csv_time_log:
        # columns: url;nb_pages;nb_pages_updated;nb_shortcodes;elapsed_seconds
        time_log_file.write("{};{};{};{};{}\n".format(wp_url,
                                                      report['_nb_pages'],
                                                      report['_nb_pages_updated'],
                                                      report['_nb_shortcodes'],
                                                      time.time()-start_time))
        time_log_file.close()

    logging.info("Fix report:\n%s", str(report))


@dispatch.on('block-fix-inventory')
def block_fix_inventory(path, block_name=None, simulation=False, log_time_csv=False, **kwargs):
    """Run block_fix() for every correctly-installed site found under 'path'."""
    logging.info("Block fix from inventory...")
    nb_sites = 0
    csv_time_log = _init_shortcode_to_csv_time_log() if log_time_csv else None
    if log_time_csv:
        logging.info("Logging time in CSV file: %s", csv_time_log)
    for site_details in WPConfig.inventory(path, skip_users=True):
        if site_details.valid == settings.WP_SITE_INSTALL_OK:
            try:
                block_fix(WPSite.openshift_env_from_path(site_details.path),
                          site_details.url,
                          block_name=block_name,
                          simulation=simulation,
                          csv_time_log=csv_time_log)
                nb_sites += 1
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit; consider 'except Exception'
            except:
                logging.error("Site %s - Error %s", site_details.url, sys.exc_info())
    logging.info("%s sites processed for path: %s", nb_sites, path)


@dispatch.on('shortcode-to-block')
def shortcode_to_block(wp_env, wp_url, shortcode_name=None, simulation=False, csv_time_log=None, posts=False,
                       **kwargs):
    """Migrate shortcodes to Gutenberg blocks on one site (pages by default, posts if 'posts')."""
    logging.info("Migrating shortcodes to blocks for %s", wp_url)
    if simulation:
        logging.info("== SIMULATION EXECUTION ==")

    # We have to log duration in CSV file
    if csv_time_log:
        time_log_file = open(csv_time_log, mode='a')
        start_time = time.time()

    page_or_post = 'post' if posts else 'page'

    blocks = GutenbergBlocks()
    report = blocks.fix_site(wp_env, wp_url, shortcode_name=shortcode_name, simulation=simulation,
                             elem_type=page_or_post)

    if simulation:
        logging.info("This was a simulation, nothing was changed in database")

    if csv_time_log:
        # columns: url;nb_pages;nb_pages_updated;nb_shortcodes;elapsed_seconds
        time_log_file.write("{};{};{};{};{}\n".format(wp_url,
                                                      report['_nb_pages'],
                                                      report['_nb_pages_updated'],
                                                      report['_nb_shortcodes'],
                                                      time.time()-start_time))
        time_log_file.close()

    logging.info("Fix report:\n%s", str(report))


@dispatch.on('shortcode-to-block-many')
def shortcode_to_block_many(csv_file, shortcode_name=None, simulation=False, log_time_csv=False, posts=False,
                            **kwargs):
    """Run shortcode_to_block() for every row of the given CSV file."""
    rows = Utils.csv_filepath_to_dict(csv_file)
    csv_time_log = _init_shortcode_to_csv_time_log() if log_time_csv else None
    if log_time_csv:
        logging.info("Logging time in CSV file: %s", csv_time_log)
    print("\nShortcode will now be fixed on websites...")
    for index, row in enumerate(rows):
        print("\nIndex #{}:\n---".format(index))
        shortcode_to_block(row['openshift_env'],
                           row['wp_site_url'],
                           shortcode_name=shortcode_name,
                           simulation=simulation,
                           csv_time_log=csv_time_log,
                           posts=posts)
    logging.info("All shortcodes for all sites fixed !")


@dispatch.on('shortcode-to-block-inventory')
def shortcode_to_block_inventory(path, shortcode_name=None, simulation=False, log_time_csv=False, posts=False,
                                 **kwargs):
    """Run shortcode_to_block() for every correctly-installed site found under 'path'."""
    logging.info("Shortcodes to block from inventory...")
    nb_sites = 0
    csv_time_log = _init_shortcode_to_csv_time_log() if log_time_csv else None
    if log_time_csv:
        logging.info("Logging time in CSV file: %s", csv_time_log)
    for site_details in WPConfig.inventory(path, skip_users=True):
        if site_details.valid == settings.WP_SITE_INSTALL_OK:
            try:
                shortcode_to_block(WPSite.openshift_env_from_path(site_details.path),
                                   site_details.url,
                                   shortcode_name=shortcode_name,
                                   simulation=simulation,
                                   csv_time_log=csv_time_log,
                                   posts=posts)
                nb_sites += 1
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit; consider 'except Exception'
            except:
                logging.error("Site %s - Error %s", site_details.url, sys.exc_info())
    logging.info("%s sites processed for path: %s", nb_sites, path)


@dispatch.on('inventory')
def inventory(path, skip_users=False, **kwargs):
    """Print a semicolon-separated inventory of the WordPress installs under 'path'."""
    logging.info("Building inventory...")
    print(";".join(['path', 'valid', 'url', 'version', 'db_name', 'db_user', 'admins']))
    for site_details in WPConfig.inventory(path, skip_users):
        print(";".join([
            site_details.path,
            site_details.valid,
            site_details.url,
            site_details.version,
            site_details.db_name,
            site_details.db_user,
            site_details.admins
        ]))
    logging.info("Inventory made for %s", path)


@dispatch.on('veritas')
def veritas(csv_file, **kwargs):
    """Validate the given CSV file with VeritasValidor, printing errors if any."""
    validator = VeritasValidor(csv_file)

    if not validator.validate():
        validator.print_errors()
    else:
        print("CSV file validated!")


@dispatch.on('extract-plugin-config')
def extract_plugin_config(wp_env, wp_url, output_file, **kwargs):
    """Extract a site's plugin configuration into output_file."""
    wp_site = WPSite(wp_env, wp_url)
    ext = WPPluginConfigExtractor(wp_site)
    ext.extract_config(output_file)


@dispatch.on('list-plugins')
def list_plugins(wp_env, wp_url, config=False, plugin=None, extra_config=None, **kwargs):
    """List the plugins (optionally with config, optionally a single one) of a site.

    This command may need more params if reference to them are done in YAML file. In this case, you'll see an
    error explaining which params are needed and how they can be added to command line
    """
    # FIXME: When we will use 'unit_id' from CSV file, add parameter here OR dynamically get it from AD
    all_params = {'openshift_env': wp_env,
                  'wp_site_url': wp_url}

    # if we have extra configuration to load,
    if extra_config is not None:
        all_params = _add_extra_config(extra_config, all_params)

    print(WPGenerator(all_params).list_plugins(config, plugin))


@dispatch.on('update-plugins')
def update_plugins(wp_env, wp_url, plugin=None, force_plugin=False, force_options=False,
                   strict_list=False, extra_config=None, nosymlink=False, **kwargs):
    """Update the plugin list (or a single plugin) of one site."""
    _check_site(wp_env, wp_url, **kwargs)

    all_params = {'openshift_env': wp_env,
                  'wp_site_url': wp_url}

    # if we have extra configuration to load,
    if extra_config is not None:
        all_params = _add_extra_config(extra_config, all_params)

    wp_generator = WPGenerator(all_params)
    wp_generator.update_plugins(only_one=plugin, force_plugin=force_plugin, force_options=force_options,
                                strict_plugin_list=strict_list, no_symlink=nosymlink)

    print("Successfully updated WordPress plugin list at {}".format(wp_generator.wp_site.url))


@dispatch.on('update-plugins-many')
def update_plugins_many(csv_file, plugin=None, force_plugin=False, force_options=False, strict_list=False,
                        nosymlink=False, **kwargs):
    """Update plugins for every (validated) row of the given CSV file."""
    # CSV file validation
    validator = _check_csv(csv_file)

    # Update WP site plugins for each row
    print("\n{} websites will now be updated...".format(len(validator.rows)))
    for index, row in enumerate(validator.rows):
        print("\nIndex #{}:\n---".format(index))
        logging.debug("%s - row %s: %s", row["wp_site_url"], index, row)
        WPGenerator(row).update_plugins(only_one=plugin, force_plugin=force_plugin, force_options=force_options,
                                        strict_plugin_list=strict_list, no_symlink=nosymlink)


@dispatch.on('update-plugins-inventory')
def update_plugins_inventory(path, plugin=None, force_plugin=False, force_options=False, strict_list=False,
                             extra_config=None, nosymlink=False, **kwargs):
    """Update plugins for every correctly-installed site found under 'path'."""
    logging.info("Update plugins from inventory...")
    for site_details in WPConfig.inventory(path):
        if site_details.valid == settings.WP_SITE_INSTALL_OK:
            logging.info("Updating plugins for %s", site_details.url)
            all_params = {'openshift_env': WPSite.openshift_env_from_path(site_details.path),
                          'wp_site_url': site_details.url}

            # if we have extra configuration to load,
            if extra_config is not None:
                all_params = _add_extra_config(extra_config, all_params)

            WPGenerator(all_params).update_plugins(only_one=plugin, force_plugin=force_plugin,
                                                   force_options=force_options, strict_plugin_list=strict_list,
                                                   no_symlink=nosymlink)
    logging.info("All plugins updates done for path: %s", path)


@dispatch.on('global-report')
def global_report(csv_file, output_dir=None, use_cache=False, **kwargs):
    """Generate a global report with stats like the number of pages, files and boxes"""
    path = os.path.join(output_dir, "global-report.csv")

    logging.info("Generating global report at %s" % path)

    rows = Utils.csv_filepath_to_dict(csv_file)

    # the reports
    reports = []

    # all the box types
    box_types = set()

    for index, row in enumerate(rows):
        try:
            # parse the Site
            site = parse(site=row['Jahia_zip'], use_cache=use_cache)

            # add the report info
            reports.append(site.get_report_info())

            # add the site's box types
            for key in site.num_boxes.keys():
                if key:
                    box_types.add(key)
        except Exception as e:
            logging.error("Site %s - Error %s", row['Jahia_zip'], e)

    # the base field names for the csv
    fieldnames = ["name", "pages", "files"]

    # add all the box types
    fieldnames.extend(sorted(box_types))

    # complete the reports with 0 for box types unknown to a Site
    for box_type in box_types:
        for report in reports:
            if box_type not in report:
                report[box_type] = 0

            # some reports have an empty string
            if "" in report:
                del report[""]

    # write the csv file
    with open(path, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

        # header
        writer.writeheader()

        for report in reports:
            try:
                writer.writerow(report)
            except Exception as e:
                logging.error("Site %s - Error %s", report['name'], e)


if __name__ == '__main__':
    # docopt return a dictionary with all arguments
    # __doc__ contains package docstring
    args = docopt(__doc__, version=VERSION)

    # set logging config before anything else
    Utils.set_logging_config(args)

    logging.debug(args)

    dispatch(__doc__)
{ "repo_name": "epfl-idevelop/jahia2wp", "path": "src/jahia2wp.py", "copies": "1", "size": "54219", "license": "mit", "hash": 86451569473090190, "line_mean": 38.1472924188, "line_max": 122, "alpha_frac": 0.5952156993, "autogenerated": false, "ratio": 3.835255004597864, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9917288541398239, "avg_score": 0.0026364324999248556, "num_lines": 1385 }
""" All rights reserved. ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland, VPSI, 2017 """ from django.core.validators import URLValidator, ValidationError from utils import Utils from .validators import validate_string, validate_yes_or_no, validate_integer, \ validate_openshift_env, validate_site_type, validate_theme, validate_theme_faculty, validate_languages, \ mock_validate_unit BASE_COLUMNS = [ ("wp_site_url", URLValidator(schemes=['https']), True), ("wp_site_title", validate_string, False), ("wp_tagline", validate_string, False), ("site_type", validate_site_type, False), ("openshift_env", validate_openshift_env, False), # category => no validation ("theme", validate_theme, False), ("theme_faculty", validate_theme_faculty, False), # status => no validation ("installs_locked", validate_yes_or_no, False), ("updates_automatic", validate_yes_or_no, False), ("langs", validate_languages, False), ("unit_id", validate_integer, False), # comment => no validation ] if Utils.get_optional_env('TRAVIS', False): JAHIA2WP_COLUMNS = BASE_COLUMNS + [ ("unit_name", mock_validate_unit, False), ] else: JAHIA2WP_COLUMNS = BASE_COLUMNS MOCK_JAHIA2WP_COLUMNS = BASE_COLUMNS + [ ("unit_name", mock_validate_unit, False), ] class VeritasValidor: """ Validates a CSV file containing a list of WordPress metadata You can use https://regex101.com/ to validate your regex """ # the csv delimiter DELIMITER = "," @classmethod def filter_valid_rows(cls, csv_file, columns=JAHIA2WP_COLUMNS): """Shortcut method to call get_valid_rows, print errors, and only return valid elements""" # use Veritas to get valid rows validator = cls(csv_file, columns) rows = validator.get_valid_rows() # print errors if validator.errors: print("The following lines have errors and have been filtered out:\n") validator.print_errors() # return valid rows only return rows def __init__(self, csv_path, columns=JAHIA2WP_COLUMNS): """ csv_path: path on file system pointing the CSV file to validate columns: description of the 
validations to make on columns, array of tuple [(column_name, validator, is_unique), (), ()] """ self.csv_path = csv_path # the rows self.rows = [] # the VeritasErrors self.errors = [] # the VeritasColumns self.columns = [] # define the columns for name, validator, is_unique in columns: self.columns.append(VeritasColumn(name, validator, is_unique)) # load the rows self.rows = Utils.csv_filepath_to_dict(file_path=self.csv_path, delimiter=self.DELIMITER) def validate(self): """Validate the columns Return True -> no errors False -> errors """ # check the regexp for column in self.columns: self._check_validators(column) # check the uniqueness for column in self.columns: if column.is_unique: self._check_unique(column) # sort the errors by the line number self.errors.sort(key=lambda x: x.line) return not self.errors def print_errors(self): """Prints the errors""" for error in self.errors: print(error.message) def get_valid_rows(self): """Return the content of the CSV file, less the lines which have an error""" # initialize errors and run validation self.errors = [] self.validate() # local function to filter out lines with errors lines_with_errors = set([error.line for error in self.errors]) def _is_valid(item): index, row = item return index not in lines_with_errors # return valid rows return tuple(filter(_is_valid, enumerate(self.rows))) def _check_validators(self, column, message=None): """Check all the given column values with the given regex""" column_name = column.name message = message or "invalid {}".format(column_name) for index, row in enumerate(self.rows, start=1): text = row[column_name] try: column.validator(text) except ValidationError: error = "{} : {}".format(message, text) self._add_error([index], column_name, error) def _check_unique(self, column, message=None): """Check that all the values of the given column are unique""" unique = {} column_name = column.name message = message or "{} is not unique".format(column_name) for index, row in 
enumerate(self.rows, start=1): text = row[column_name] lines = unique.setdefault(text, []) lines.append(index) if len(lines) > 1: error = "{} : {}".format(message, text) self._add_error(lines, column_name, error) def _add_error(self, lines, column_name, message): """Add the given error to the list of errors""" for line in lines: error = VeritasError(line=line, column_name=column_name, message=message) self.errors.append(error) class VeritasColumn: """A VeritasColumn represents a column in the CSV file""" def __init__(self, column_name, validator, is_unique): self.name = column_name # the validator used to validate the values in the column self.validator = validator # should all the values be unique in the column? self.is_unique = is_unique class VeritasError: """An error in the CVS file""" def __init__(self, line, column_name, message): """Constructor""" self.line = line self.column_name = column_name self.message = "Error line {} for column {} : {}".format(line, column_name, message)
{ "repo_name": "epfl-idevelop/jahia2wp", "path": "src/veritas/veritas.py", "copies": "1", "size": "6035", "license": "mit", "hash": 1192338015618148400, "line_mean": 30.7631578947, "line_max": 109, "alpha_frac": 0.6096106048, "autogenerated": false, "ratio": 4.036789297658863, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0005787144331693648, "num_lines": 190 }
""" All rights reserved. ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland, VPSI, 2017 Validators are extended from Django ones : https://docs.djangoproject.com/fr/1.11/ref/validators - RegexValidator - EmailValidator - URLValidator - DecimalValidator - validate_email - validate_slug - validate_unicode_slug - MaxValueValidator - MinValueValidator - MaxLengthValidator - MinLengthValidator They are functions (or callable objects) that raise a ValidationError on failure """ import os from django.conf import settings as dj_settings from django.core.exceptions import ValidationError from django.core.validators import RegexValidator from epflldap.ldap_search import get_unit_id from settings import SUPPORTED_LANGUAGES dj_settings.configure(USE_I18N=False) class ChoiceValidator(RegexValidator): def __init__(self, choices, **kwargs): regex = "^({})$".format("|".join(choices)) super(ChoiceValidator, self).__init__(regex=regex, **kwargs) class MultipleChoicesValidator(RegexValidator): def __init__(self, choices, separator=',', **kwargs): base_regex = "({})".format("|".join(choices)) regex = "^{0}(,{0})*$".format(base_regex) super(MultipleChoicesValidator, self).__init__(regex=regex, **kwargs) def validate_integer(text): if not type(text) is int: RegexValidator(regex="^[0-9]+$")(text) def validate_string(text): RegexValidator(regex="^.+$")(text) def validate_yes_or_no(text): if not type(text) is bool: ChoiceValidator(choices=['yes', 'no'])(text.lower()) def validate_gaspar_username(name): RegexValidator(regex="^[_\-\.a-zA-Z0-9]+$")(name) def validate_db_name(name): RegexValidator(regex="^[a-z0-9]{8,16}$")(name) def validate_openshift_env(text): if not os.path.isdir('/srv/{}'.format(text)): raise ValidationError("Openshift environment not valid: {}".format(text)) def validate_site_type(text): RegexValidator(regex="^wordpress$")(text) def validate_theme(text): RegexValidator(regex="^[a-zA-Z0-9_-]+$")(text) def validate_theme_faculty(text): 
RegexValidator(regex="^(|cdh|cdm|enac|ic|sb|sti|sv|assoc)$")(text.lower()) def validate_languages(text): MultipleChoicesValidator(SUPPORTED_LANGUAGES.keys())(text) def validate_unit(unit_name): # FIXME: epfl-ldap should a LDAP Exception try: if unit_name: get_unit_id(unit_name) except Exception: raise ValidationError("The unit name {} doesn't exist".format(unit_name)) def mock_validate_unit(unit_name): pass
{ "repo_name": "epfl-idevelop/jahia2wp", "path": "src/veritas/validators.py", "copies": "1", "size": "2622", "license": "mit", "hash": 8659910185403022000, "line_mean": 25.4848484848, "line_max": 90, "alpha_frac": 0.6800152555, "autogenerated": false, "ratio": 3.5053475935828877, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9681111283536885, "avg_score": 0.0008503131092004039, "num_lines": 99 }
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect import json import math import random import re import socket import string import time import warnings from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import timeutils from oslo_utils import units import requests from requests.packages.urllib3 import exceptions import six from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LW from cinder.image import image_utils from cinder import interface from cinder.objects import fields from cinder import utils from cinder.volume.drivers.san import san from cinder.volume import qos_specs from cinder.volume.targets import iscsi as iscsi_driver from cinder.volume import volume_types LOG = logging.getLogger(__name__) sf_opts = [ cfg.BoolOpt('sf_emulate_512', default=True, help='Set 512 byte emulation on volume creation; '), cfg.BoolOpt('sf_allow_tenant_qos', default=False, help='Allow tenants to specify QOS on create'), cfg.StrOpt('sf_account_prefix', help='Create SolidFire accounts with this prefix. Any string ' 'can be used here, but the string \"hostname\" is special ' 'and will create a prefix using the cinder node hostname ' '(previous default behavior). The default is NO prefix.'), cfg.StrOpt('sf_volume_prefix', default='UUID-', help='Create SolidFire volumes with this prefix. Volume names ' 'are of the form <sf_volume_prefix><cinder-volume-id>. 
' 'The default is to use a prefix of \'UUID-\'.'), cfg.StrOpt('sf_template_account_name', default='openstack-vtemplate', help='Account name on the SolidFire Cluster to use as owner of ' 'template/cache volumes (created if does not exist).'), cfg.BoolOpt('sf_allow_template_caching', default=True, help='Create an internal cache of copy of images when ' 'a bootable volume is created to eliminate fetch from ' 'glance and qemu-conversion on subsequent calls.'), cfg.StrOpt('sf_svip', help='Overrides default cluster SVIP with the one specified. ' 'This is required or deployments that have implemented ' 'the use of VLANs for iSCSI networks in their cloud.'), cfg.BoolOpt('sf_enable_volume_mapping', default=True, help='Create an internal mapping of volume IDs and account. ' 'Optimizes lookups and performance at the expense of ' 'memory, very large deployments may want to consider ' 'setting to False.'), cfg.PortOpt('sf_api_port', default=443, help='SolidFire API port. Useful if the device api is behind ' 'a proxy on a different port.'), cfg.BoolOpt('sf_enable_vag', default=False, help='Utilize volume access groups on a per-tenant basis.')] CONF = cfg.CONF CONF.register_opts(sf_opts) # SolidFire API Error Constants xExceededLimit = 'xExceededLimit' xAlreadyInVolumeAccessGroup = 'xAlreadyInVolumeAccessGroup' xVolumeAccessGroupIDDoesNotExist = 'xVolumeAccessGroupIDDoesNotExist' xNotInVolumeAccessGroup = 'xNotInVolumeAccessGroup' def retry(exc_tuple, tries=5, delay=1, backoff=2): def retry_dec(f): @six.wraps(f) def func_retry(*args, **kwargs): _tries, _delay = tries, delay while _tries > 1: try: return f(*args, **kwargs) except exc_tuple: time.sleep(_delay) _tries -= 1 _delay *= backoff LOG.debug('Retrying %(args)s, %(tries)s attempts ' 'remaining...', {'args': args, 'tries': _tries}) # NOTE(jdg): Don't log the params passed here # some cmds like createAccount will have sensitive # info in the params, grab only the second tuple # which should be the Method msg = (_('Retry count 
exceeded for command: %s') % (args[1],)) LOG.error(msg) raise exception.SolidFireAPIException(message=msg) return func_retry return retry_dec @interface.volumedriver class SolidFireDriver(san.SanISCSIDriver): """OpenStack driver to enable SolidFire cluster. Version history: 1.0 - Initial driver 1.1 - Refactor, clone support, qos by type and minor bug fixes 1.2 - Add xfr and retype support 1.2.1 - Add export/import support 1.2.2 - Catch VolumeNotFound on accept xfr 2.0.0 - Move from httplib to requests 2.0.1 - Implement SolidFire Snapshots 2.0.2 - Implement secondary account 2.0.3 - Implement cluster pairing 2.0.4 - Implement volume replication 2.0.5 - Try and deal with the stupid retry/clear issues from objects and tflow 2.0.6 - Add a lock decorator around the clone_image method 2.0.7 - Add scaled IOPS """ VERSION = '2.0.7' # ThirdPartySystems wiki page CI_WIKI_NAME = "SolidFire_CI" driver_prefix = 'solidfire' sf_qos_dict = {'slow': {'minIOPS': 100, 'maxIOPS': 200, 'burstIOPS': 200}, 'medium': {'minIOPS': 200, 'maxIOPS': 400, 'burstIOPS': 400}, 'fast': {'minIOPS': 500, 'maxIOPS': 1000, 'burstIOPS': 1000}, 'performant': {'minIOPS': 2000, 'maxIOPS': 4000, 'burstIOPS': 4000}, 'off': None} sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS'] sf_scale_qos_keys = ['scaledIOPS', 'scaleMin', 'scaleMax', 'scaleBurst'] sf_iops_lim_min = {'minIOPS': 100, 'maxIOPS': 100, 'burstIOPS': 100} sf_iops_lim_max = {'minIOPS': 15000, 'maxIOPS': 200000, 'burstIOPS': 200000} cluster_stats = {} retry_exc_tuple = (exception.SolidFireRetryableException, requests.exceptions.ConnectionError) retryable_errors = ['xDBVersionMismatch', 'xMaxSnapshotsPerVolumeExceeded', 'xMaxClonesPerVolumeExceeded', 'xMaxSnapshotsPerNodeExceeded', 'xMaxClonesPerNodeExceeded', 'xSliceNotRegistered', 'xNotReadyForIO'] def __init__(self, *args, **kwargs): super(SolidFireDriver, self).__init__(*args, **kwargs) self.failed_over_id = kwargs.get('active_backend_id', None) self.active_cluster_info = {} 
self.configuration.append_config_values(sf_opts) self.template_account_id = None self.max_volumes_per_account = 1990 self.volume_map = {} self.cluster_pairs = [] self.replication_enabled = False self.failed_over = False self.target_driver = SolidFireISCSI(solidfire_driver=self, configuration=self.configuration) if self.failed_over_id: remote_info = self._get_remote_info_by_id(self.failed_over_id) if remote_info: self._set_active_cluster_info(remote_info['endpoint']) else: LOG.error(_LE('Failed to initialize SolidFire driver to ' 'a remote cluster specified at id: %s'), self.failed_over_id) else: self._set_active_cluster_info() try: self._update_cluster_status() except exception.SolidFireAPIException: pass if self.configuration.sf_allow_template_caching: account = self.configuration.sf_template_account_name self.template_account_id = self._create_template_account(account) if not self.failed_over_id: self._set_cluster_pairs() def locked_image_id_operation(f, external=False): def lvo_inner1(inst, *args, **kwargs): lock_tag = inst.driver_prefix call_args = inspect.getcallargs(f, inst, *args, **kwargs) if call_args.get('image_meta'): image_id = call_args['image_meta']['id'] else: err_msg = _('The decorated method must accept image_meta.') raise exception.VolumeBackendAPIException(data=err_msg) @utils.synchronized('%s-%s' % (lock_tag, image_id), external=external) def lvo_inner2(): return f(inst, *args, **kwargs) return lvo_inner2() return lvo_inner1 def locked_source_id_operation(f, external=False): def lvo_inner1(inst, *args, **kwargs): lock_tag = inst.driver_prefix call_args = inspect.getcallargs(f, inst, *args, **kwargs) src_arg = call_args.get('source', None) if src_arg and src_arg.get('id', None): source_id = call_args['source']['id'] else: err_msg = _('The decorated method must accept src_uuid.') raise exception.VolumeBackendAPIException(message=err_msg) @utils.synchronized('%s-%s' % (lock_tag, source_id), external=external) def lvo_inner2(): return f(inst, *args, 
**kwargs) return lvo_inner2() return lvo_inner1 def __getattr__(self, attr): if hasattr(self.target_driver, attr): return getattr(self.target_driver, attr) else: msg = _('Attribute: %s not found.') % attr raise NotImplementedError(msg) def _get_remote_info_by_id(self, backend_id): remote_info = None for rd in self.configuration.get('replication_device', []): if rd.get('backend_id', None) == backend_id: remote_endpoint = self._build_endpoint_info(**rd) remote_info = self._get_remote_cluster_info(remote_endpoint) remote_info['endpoint'] = remote_endpoint if not remote_info['endpoint']['svip']: remote_info['endpoint']['svip'] = ( remote_info['svip'] + ':3260') return remote_info def _create_remote_pairing(self, remote_device): try: pairing_info = self._issue_api_request('StartClusterPairing', {}, version='8.0')['result'] pair_id = self._issue_api_request( 'CompleteClusterPairing', {'clusterPairingKey': pairing_info['clusterPairingKey']}, version='8.0', endpoint=remote_device['endpoint'])['result']['clusterPairID'] except exception.SolidFireAPIException as ex: if 'xPairingAlreadExists' in ex.msg: LOG.debug('Pairing already exists during init.') else: with excutils.save_and_reraise_exception(): LOG.error(_LE('Cluster pairing failed: %s'), ex.msg) LOG.debug(('Initialized Cluster pair with ID: %s'), pair_id) remote_device['clusterPairID'] = pair_id return pair_id def _get_remote_cluster_info(self, remote_endpoint): return self._issue_api_request( 'GetClusterInfo', {}, endpoint=remote_endpoint)['result']['clusterInfo'] def _set_cluster_pairs(self): if not self.configuration.get('replication_device', None): self.replication = False return existing_pairs = self._issue_api_request( 'ListClusterPairs', {}, version='8.0')['result']['clusterPairs'] remote_pair = {} for rd in self.configuration.get('replication_device', []): remote_endpoint = self._build_endpoint_info(**rd) remote_info = self._get_remote_cluster_info(remote_endpoint) remote_info['endpoint'] = remote_endpoint if 
not remote_info['endpoint']['svip']: remote_info['endpoint']['svip'] = remote_info['svip'] + ':3260' for ep in existing_pairs: if rd['backend_id'] == ep['mvip']: remote_pair = ep LOG.debug("Found remote pair: %s", remote_pair) remote_info['clusterPairID'] = ep['clusterPairID'] break if not remote_pair: # NOTE(jdg): create_remote_pairing sets the # clusterPairID in remote_info for us self._create_remote_pairing(remote_info) self.cluster_pairs.append(remote_info) LOG.debug("Setting replication_enabled to True.") self.replication_enabled = True def _set_active_cluster_info(self, endpoint=None): if not endpoint: self.active_cluster_info['endpoint'] = self._build_endpoint_info() else: self.active_cluster_info['endpoint'] = endpoint for k, v in self._issue_api_request( 'GetClusterInfo', {})['result']['clusterInfo'].items(): self.active_cluster_info[k] = v # Add a couple extra things that are handy for us self.active_cluster_info['clusterAPIVersion'] = ( self._issue_api_request('GetClusterVersionInfo', {})['result']['clusterAPIVersion']) if self.configuration.get('sf_svip', None): self.active_cluster_info['svip'] = ( self.configuration.get('sf_svip')) def _create_provider_id_string(self, resource_id, account_or_vol_id): # NOTE(jdg): We use the same format, but in the case # of snapshots, we don't have an account id, we instead # swap that with the parent volume id return "%s %s %s" % (resource_id, account_or_vol_id, self.active_cluster_info['uuid']) def _init_snapshot_mappings(self, srefs): updates = [] sf_snaps = self._issue_api_request( 'ListSnapshots', {}, version='6.0')['result']['snapshots'] for s in srefs: seek_name = '%s%s' % (self.configuration.sf_volume_prefix, s['id']) sfsnap = next( (ss for ss in sf_snaps if ss['name'] == seek_name), None) if sfsnap: id_string = self._create_provider_id_string( sfsnap['snapshotID'], sfsnap['volumeID']) if s.get('provider_id') != id_string: updates.append( {'id': s['id'], 'provider_id': id_string}) return updates def 
_init_volume_mappings(self, vrefs): updates = [] sf_vols = self._issue_api_request('ListActiveVolumes', {})['result']['volumes'] self.volume_map = {} for v in vrefs: seek_name = '%s%s' % (self.configuration.sf_volume_prefix, v['id']) sfvol = next( (sv for sv in sf_vols if sv['name'] == seek_name), None) if sfvol: if v.get('provider_id', 'nil') != sfvol['volumeID']: updates.append( {'id': v['id'], 'provider_id': self._create_provider_id_string( sfvol['volumeID'], sfvol['accountID'])}) return updates def update_provider_info(self, vrefs, snaprefs): volume_updates = self._init_volume_mappings(vrefs) snapshot_updates = self._init_snapshot_mappings(snaprefs) return (volume_updates, snapshot_updates) def _create_template_account(self, account_name): # We raise an API exception if the account doesn't exist # We need to take account_prefix settings into consideration # This just uses the same method to do template account create # as we use for any other OpenStack account account_name = self._get_sf_account_name(account_name) try: id = self._issue_api_request( 'GetAccountByName', {'username': account_name})['result']['account']['accountID'] except exception.SolidFireAPIException: chap_secret = self._generate_random_string(12) params = {'username': account_name, 'initiatorSecret': chap_secret, 'targetSecret': chap_secret, 'attributes': {}} id = self._issue_api_request('AddAccount', params)['result']['accountID'] return id def _build_endpoint_info(self, **kwargs): endpoint = {} endpoint['mvip'] = ( kwargs.get('mvip', self.configuration.san_ip)) endpoint['login'] = ( kwargs.get('login', self.configuration.san_login)) endpoint['passwd'] = ( kwargs.get('passwd', self.configuration.san_password)) endpoint['port'] = ( kwargs.get('port', self.configuration.sf_api_port)) endpoint['url'] = 'https://%s:%s' % (endpoint['mvip'], endpoint['port']) endpoint['svip'] = kwargs.get('svip', self.configuration.sf_svip) if not endpoint.get('mvip', None) and kwargs.get('backend_id', None): 
endpoint['mvip'] = kwargs.get('backend_id') return endpoint @retry(retry_exc_tuple, tries=6) def _issue_api_request(self, method, params, version='1.0', endpoint=None): if params is None: params = {} if endpoint is None: endpoint = self.active_cluster_info['endpoint'] payload = {'method': method, 'params': params} url = '%s/json-rpc/%s/' % (endpoint['url'], version) with warnings.catch_warnings(): warnings.simplefilter("ignore", exceptions.InsecureRequestWarning) req = requests.post(url, data=json.dumps(payload), auth=(endpoint['login'], endpoint['passwd']), verify=False, timeout=30) response = req.json() req.close() if (('error' in response) and (response['error']['name'] in self.retryable_errors)): msg = ('Retryable error (%s) encountered during ' 'SolidFire API call.' % response['error']['name']) LOG.debug(msg) raise exception.SolidFireRetryableException(message=msg) if 'error' in response: msg = _('API response: %s') % response raise exception.SolidFireAPIException(msg) return response def _get_volumes_by_sfaccount(self, account_id, endpoint=None): """Get all volumes on cluster for specified account.""" params = {'accountID': account_id} return self._issue_api_request( 'ListVolumesForAccount', params, endpoint=endpoint)['result']['volumes'] def _get_sfaccount_by_name(self, sf_account_name, endpoint=None): """Get SolidFire account object by name.""" sfaccount = None params = {'username': sf_account_name} try: data = self._issue_api_request('GetAccountByName', params, endpoint=endpoint) if 'result' in data and 'account' in data['result']: LOG.debug('Found solidfire account: %s', sf_account_name) sfaccount = data['result']['account'] except exception.SolidFireAPIException as ex: if 'xUnknownAccount' in ex.msg: return sfaccount else: raise return sfaccount def _get_sf_account_name(self, project_id): """Build the SolidFire account name to use.""" prefix = self.configuration.sf_account_prefix or '' if prefix == 'hostname': prefix = socket.gethostname() return 
'%s%s%s' % (prefix, '-' if prefix else '', project_id) def _get_sfaccount(self, project_id): sf_account_name = self._get_sf_account_name(project_id) sfaccount = self._get_sfaccount_by_name(sf_account_name) if sfaccount is None: raise exception.SolidFireAccountNotFound( account_name=sf_account_name) return sfaccount def _create_sfaccount(self, project_id): """Create account on SolidFire device if it doesn't already exist. We're first going to check if the account already exists, if it does just return it. If not, then create it. """ sf_account_name = self._get_sf_account_name(project_id) sfaccount = self._get_sfaccount_by_name(sf_account_name) if sfaccount is None: LOG.debug('solidfire account: %s does not exist, create it...', sf_account_name) chap_secret = self._generate_random_string(12) params = {'username': sf_account_name, 'initiatorSecret': chap_secret, 'targetSecret': chap_secret, 'attributes': {}} self._issue_api_request('AddAccount', params) sfaccount = self._get_sfaccount_by_name(sf_account_name) return sfaccount def _generate_random_string(self, length): """Generates random_string to use for CHAP password.""" char_set = string.ascii_uppercase + string.digits return ''.join(random.sample(char_set, length)) def _get_model_info(self, sfaccount, sf_volume_id, endpoint=None): """Gets the connection info for specified account and volume.""" if endpoint: iscsi_portal = endpoint['svip'] else: iscsi_portal = self.active_cluster_info['svip'] if ':' not in iscsi_portal: iscsi_portal += ':3260' chap_secret = sfaccount['targetSecret'] found_volume = False iteration_count = 0 while not found_volume and iteration_count < 600: volume_list = self._get_volumes_by_sfaccount( sfaccount['accountID'], endpoint=endpoint) iqn = None for v in volume_list: if v['volumeID'] == sf_volume_id: iqn = v['iqn'] found_volume = True break if not found_volume: time.sleep(2) iteration_count += 1 if not found_volume: LOG.error(_LE('Failed to retrieve volume SolidFire-' 'ID: %s in 
get_by_account!'), sf_volume_id) raise exception.VolumeNotFound(volume_id=sf_volume_id) model_update = {} # NOTE(john-griffith): SF volumes are always at lun 0 model_update['provider_location'] = ('%s %s %s' % (iscsi_portal, iqn, 0)) model_update['provider_auth'] = ('CHAP %s %s' % (sfaccount['username'], chap_secret)) if not self.configuration.sf_emulate_512: model_update['provider_geometry'] = ('%s %s' % (4096, 4096)) model_update['provider_id'] = ( self._create_provider_id_string(sf_volume_id, sfaccount['accountID'])) return model_update def _snapshot_discovery(self, src_uuid, params, vref): # NOTE(jdg): First check the SF snapshots # if we don't find a snap by the given name, just move on to check # volumes. This may be a running system that was updated from # before we did snapshots, so need to check both is_clone = False sf_vol = None snap_name = '%s%s' % (self.configuration.sf_volume_prefix, src_uuid) snaps = self._get_sf_snapshots() snap = next((s for s in snaps if s["name"] == snap_name), None) if snap: params['snapshotID'] = int(snap['snapshotID']) params['volumeID'] = int(snap['volumeID']) params['newSize'] = int(vref['size'] * units.Gi) else: sf_vol = self._get_sf_volume(src_uuid) if sf_vol is None: raise exception.VolumeNotFound(volume_id=src_uuid) params['volumeID'] = int(sf_vol['volumeID']) params['newSize'] = int(vref['size'] * units.Gi) is_clone = True return params, is_clone, sf_vol def _do_clone_volume(self, src_uuid, vref, sf_src_snap=None): """Create a clone of an existing volume or snapshot.""" attributes = {} sf_account = self._get_create_account(vref['project_id']) params = {'name': '%(prefix)s%(id)s' % {'prefix': self.configuration.sf_volume_prefix, 'id': vref['id']}, 'newAccountID': sf_account['accountID']} is_clone = False sf_vol = None if sf_src_snap: # In some scenarios we are passed the snapshot information that we # are supposed to clone. 
params['snapshotID'] = sf_src_snap['snapshotID'] params['volumeID'] = sf_src_snap['volumeID'] params['newSize'] = int(vref['size'] * units.Gi) else: params, is_clone, sf_vol = self._snapshot_discovery(src_uuid, params, vref) data = self._issue_api_request('CloneVolume', params, version='6.0') if (('result' not in data) or ('volumeID' not in data['result'])): msg = _("API response: %s") % data raise exception.SolidFireAPIException(msg) sf_volume_id = data['result']['volumeID'] # NOTE(jdg): all attributes are copied via clone, need to do an update # to set any that were provided qos = self._retrieve_qos_setting(vref) params = {'volumeID': sf_volume_id} if qos: params['qos'] = qos create_time = vref['created_at'].isoformat() attributes = {'uuid': vref['id'], 'is_clone': 'True', 'src_uuid': src_uuid, 'created_at': create_time} params['attributes'] = attributes data = self._issue_api_request('ModifyVolume', params) model_update = self._get_model_info(sf_account, sf_volume_id) if model_update is None: mesg = _('Failed to get model update from clone') raise exception.SolidFireAPIException(mesg) # Increment the usage count, just for data collection # We're only doing this for clones, not create_from snaps if is_clone: data = self._update_attributes(sf_vol) return (data, sf_account, model_update) def _update_attributes(self, sf_vol): cloned_count = sf_vol['attributes'].get('cloned_count', 0) cloned_count += 1 attributes = sf_vol['attributes'] attributes['cloned_count'] = cloned_count params = {'volumeID': int(sf_vol['volumeID'])} params['attributes'] = attributes return self._issue_api_request('ModifyVolume', params) def _do_volume_create(self, sf_account, params, endpoint=None): params['accountID'] = sf_account['accountID'] sf_volid = self._issue_api_request( 'CreateVolume', params, endpoint=endpoint)['result']['volumeID'] return self._get_model_info(sf_account, sf_volid, endpoint=endpoint) def _do_snapshot_create(self, params): model_update = {} snapshot_id = 
self._issue_api_request( 'CreateSnapshot', params, version='6.0')['result']['snapshotID'] snaps = self._get_sf_snapshots() snap = ( next((s for s in snaps if int(s["snapshotID"]) == int(snapshot_id)), None)) model_update['provider_id'] = ( self._create_provider_id_string(snap['snapshotID'], snap['volumeID'])) return model_update def _set_qos_presets(self, volume): qos = {} valid_presets = self.sf_qos_dict.keys() # First look to see if they included a preset presets = [i.value for i in volume.get('volume_metadata') if i.key == 'sf-qos' and i.value in valid_presets] if len(presets) > 0: if len(presets) > 1: LOG.warning(_LW('More than one valid preset was ' 'detected, using %s'), presets[0]) qos = self.sf_qos_dict[presets[0]] else: # look for explicit settings for i in volume.get('volume_metadata'): if i.key in self.sf_qos_keys: qos[i.key] = int(i.value) return qos def _set_qos_by_volume_type(self, ctxt, type_id, vol_size): qos = {} scale_qos = {} volume_type = volume_types.get_volume_type(ctxt, type_id) qos_specs_id = volume_type.get('qos_specs_id') specs = volume_type.get('extra_specs') # NOTE(jdg): We prefer the qos_specs association # and over-ride any existing # extra-specs settings if present if qos_specs_id is not None: kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] else: kvs = specs for key, value in kvs.items(): if ':' in key: fields = key.split(':') key = fields[1] if key in self.sf_qos_keys: qos[key] = int(value) if key in self.sf_scale_qos_keys: scale_qos[key] = value # look for the 'scaledIOPS' key and scale QoS if set if 'scaledIOPS' in scale_qos: scale_qos.pop('scaledIOPS') for key, value in scale_qos.items(): if key == 'scaleMin': qos['minIOPS'] = (qos['minIOPS'] + (int(value) * (vol_size - 1))) elif key == 'scaleMax': qos['maxIOPS'] = (qos['maxIOPS'] + (int(value) * (vol_size - 1))) elif key == 'scaleBurst': qos['burstIOPS'] = (qos['burstIOPS'] + (int(value) * (vol_size - 1))) # Cap the IOPS values at their limits capped = False for key, 
value in qos.items(): if value > self.sf_iops_lim_max[key]: qos[key] = self.sf_iops_lim_max[key] capped = True if value < self.sf_iops_lim_min[key]: qos[key] = self.sf_iops_lim_min[key] capped = True if capped: LOG.debug("A SolidFire QoS value was capped at the defined limits") # Check that minIOPS <= maxIOPS <= burstIOPS if (qos.get('minIOPS', 0) > qos.get('maxIOPS', 0) or qos.get('maxIOPS', 0) > qos.get('burstIOPS', 0)): msg = (_("Scaled QoS error. Must be minIOPS <= maxIOPS <= " "burstIOPS. Currently: Min: %(min)s, Max: " "%(max)s, Burst: %(burst)s.") % {"min": qos['minIOPS'], "max": qos['maxIOPS'], "burst": qos['burstIOPS']}) raise exception.InvalidQoSSpecs(reason=msg) return qos def _get_sf_volume(self, uuid, params=None): if params: vols = self._issue_api_request( 'ListVolumesForAccount', params)['result']['volumes'] else: vols = self._issue_api_request( 'ListActiveVolumes', params)['result']['volumes'] found_count = 0 sf_volref = None for v in vols: # NOTE(jdg): In the case of "name" we can't # update that on manage/import, so we use # the uuid attribute meta = v.get('attributes') alt_id = '' if meta: alt_id = meta.get('uuid', '') if uuid in v['name'] or uuid in alt_id: found_count += 1 sf_volref = v LOG.debug("Mapped SolidFire volumeID %(volume_id)s " "to cinder ID %(uuid)s.", {'volume_id': v['volumeID'], 'uuid': uuid}) if found_count == 0: # NOTE(jdg): Previously we would raise here, but there are cases # where this might be a cleanup for a failed delete. 
            # (continuation of _get_sf_volume from the previous chunk)
            # Until we get better states we'll just log an error
            LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid)

        if found_count > 1:
            LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s."),
                      {'count': found_count,
                       'uuid': uuid})
            raise exception.DuplicateSfVolumeNames(vol_name=uuid)

        return sf_volref

    def _get_sf_snapshots(self, sf_volid=None):
        """List snapshots on the cluster.

        If sf_volid is given, only snapshots of that SolidFire volume are
        returned; otherwise all snapshots are listed.
        """
        params = {}
        if sf_volid:
            params = {'volumeID': sf_volid}
        return self._issue_api_request(
            'ListSnapshots', params, version='6.0')['result']['snapshots']

    def _create_image_volume(self, context,
                             image_meta, image_service,
                             image_id):
        """Create a template (cache) volume on the cluster from a Glance image.

        Fetches the image locally, validates it with qemu-img, creates a
        SolidFire volume under the template account, attaches it, converts the
        image onto it in raw format, then detaches and returns the SF volume.
        """
        with image_utils.TemporaryImages.fetch(image_service,
                                               context,
                                               image_id) as tmp_image:
            data = image_utils.qemu_img_info(tmp_image)
            fmt = data.file_format
            if fmt is None:
                raise exception.ImageUnacceptable(
                    reason=_("'qemu-img info' parsing failed."),
                    image_id=image_id)

            # Refuse images with backing files; they cannot be converted
            # standalone.
            backing_file = data.backing_file
            if backing_file is not None:
                raise exception.ImageUnacceptable(
                    image_id=image_id,
                    reason=_("fmt=%(fmt)s backed by:%(backing_file)s")
                    % {'fmt': fmt,
                       'backing_file': backing_file, })

            # Round the virtual size up to whole GiB for the SF volume size.
            virtual_size = int(math.ceil(float(data.virtual_size) / units.Gi))
            attributes = {}
            attributes['image_info'] = {}
            # Stash Glance metadata on the SF volume so cache freshness can be
            # checked later (see _verify_image_volume).
            attributes['image_info']['image_updated_at'] = (
                image_meta['updated_at'].isoformat())
            attributes['image_info']['image_name'] = (
                image_meta['name'])
            attributes['image_info']['image_created_at'] = (
                image_meta['created_at'].isoformat())
            attributes['image_info']['image_id'] = image_meta['id']
            params = {'name': 'OpenStackIMG-%s' % image_id,
                      'accountID': self.template_account_id,
                      'sliceCount': 1,
                      'totalSize': int(virtual_size * units.Gi),
                      'enable512e': self.configuration.sf_emulate_512,
                      'attributes': attributes,
                      'qos': {}}
            sf_account = self._issue_api_request(
                'GetAccountByID',
                {'accountID': self.template_account_id})['result']['account']
            template_vol = self._do_volume_create(sf_account, params)

            # Minimal volume-like dict so we can reuse the driver's normal
            # attach path for the template volume.
            tvol = {}
            tvol['id'] = image_id
            tvol['provider_location'] = template_vol['provider_location']
            tvol['provider_auth'] = template_vol['provider_auth']

            try:
                connector = {'multipath': False}
                conn = self.initialize_connection(tvol, connector)
                attach_info = super(SolidFireDriver, self)._connect_device(
                    conn)
                # Placeholder; only passed through to _detach_volume.
                properties = 'na'
                image_utils.convert_image(tmp_image,
                                          attach_info['device']['path'],
                                          'raw',
                                          run_as_root=True)
                data = image_utils.qemu_img_info(attach_info['device']['path'],
                                                 run_as_root=True)
                if data.file_format != 'raw':
                    raise exception.ImageUnacceptable(
                        image_id=image_id,
                        reason=_("Converted to %(vol_format)s, but format is "
                                 "now %(file_format)s") %
                        {'vol_format': 'raw',
                         'file_format': data.file_format})
            except Exception as exc:
                vol = self._get_sf_volume(image_id)
                LOG.error(_LE('Failed image conversion during '
                              'cache creation: %s'), exc)
                LOG.debug('Removing SolidFire Cache Volume (SF ID): %s',
                          vol['volumeID'])
                # NOTE(review): if initialize_connection/_connect_device
                # raised, attach_info and properties are unbound here and this
                # cleanup path itself raises NameError — TODO confirm/guard.
                self._detach_volume(context, attach_info, tvol, properties)
                self._issue_api_request('DeleteVolume', params)
                self._issue_api_request('PurgeDeletedVolume', params)
                return

            self._detach_volume(context, attach_info, tvol, properties)
            sf_vol = self._get_sf_volume(image_id, params)
            LOG.debug('Successfully created SolidFire Image Template '
                      'for image-id: %s', image_id)
            return sf_vol

    def _verify_image_volume(self, context, image_meta, image_service):
        # This method just verifies that IF we have a cache volume that
        # it's still up to date and current WRT the image in Glance
        # ie an image-update hasn't occurred since we grabbed it
        # If it's out of date, just delete it and we'll create a new one
        # Any other case we don't care and just return without doing anything
        params = {'accountID': self.template_account_id}
        sf_vol = self._get_sf_volume(image_meta['id'], params)
        if not sf_vol:
            self._create_image_volume(context,
                                      image_meta,
                                      image_service,
                                      image_meta['id'])
            return

        # Compare the Glance updated_at stamp we recorded at cache-create time
        # against the current image metadata; rebuild the cache on mismatch.
        if sf_vol['attributes']['image_info']['image_updated_at'] != (
                image_meta['updated_at'].isoformat()):
            params = {'accountID': self.template_account_id}
            params['volumeID'] = sf_vol['volumeID']
self._issue_api_request('DeleteVolume', params) self._create_image_volume(context, image_meta, image_service, image_meta['id']) def _get_sfaccounts_for_tenant(self, cinder_project_id): accounts = self._issue_api_request( 'ListAccounts', {})['result']['accounts'] # Note(jdg): On SF we map account-name to OpenStack's tenant ID # we use tenantID in here to get secondaries that might exist # Also: we expect this to be sorted, so we get the primary first # in the list return sorted([acc for acc in accounts if cinder_project_id in acc['username']]) def _get_all_active_volumes(self, cinder_uuid=None): params = {} volumes = self._issue_api_request('ListActiveVolumes', params)['result']['volumes'] if cinder_uuid: vols = ([v for v in volumes if cinder_uuid in v.name]) else: vols = [v for v in volumes] return vols def _get_all_deleted_volumes(self, cinder_uuid=None): params = {} vols = self._issue_api_request('ListDeletedVolumes', params)['result']['volumes'] if cinder_uuid: deleted_vols = ([v for v in vols if cinder_uuid in v['name']]) else: deleted_vols = [v for v in vols] return deleted_vols def _get_account_create_availability(self, accounts): # we'll check both the primary and the secondary # if it exists and return whichever one has count # available. for acc in accounts: if self._get_volumes_for_account( acc['accountID']) > self.max_volumes_per_account: return acc if len(accounts) == 1: sfaccount = self._create_sfaccount(accounts[0]['name'] + '_') return sfaccount return None def _get_create_account(self, proj_id): # Retrieve SolidFire accountID to be used for creating volumes. 
sf_accounts = self._get_sfaccounts_for_tenant(proj_id) if not sf_accounts: sf_account = self._create_sfaccount(proj_id) else: # Check availability for creates sf_account = self._get_account_create_availability(sf_accounts) if not sf_account: msg = _('Volumes/account exceeded on both primary and ' 'secondary SolidFire accounts.') raise exception.SolidFireDriverException(msg) return sf_account def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None): # ListVolumesForAccount gives both Active and Deleted # we require the solidfire accountID, uuid of volume # is optional params = {'accountID': sf_account_id} vols = self._issue_api_request('ListVolumesForAccount', params)['result']['volumes'] if cinder_uuid: vlist = [v for v in vols if cinder_uuid in v['name']] else: vlist = [v for v in vols] vlist = sorted(vlist, key=lambda k: k['volumeID']) return vlist def _create_vag(self, iqn, vol_id=None): """Create a volume access group(vag). Returns the vag_id. """ vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn) params = {'name': vag_name, 'initiators': [iqn], 'volumes': [vol_id], 'attributes': {'openstack': True}} try: result = self._issue_api_request('CreateVolumeAccessGroup', params, version='7.0') return result['result']['volumeAccessGroupID'] except exception.SolidFireAPIException as error: if xExceededLimit in error.msg: if iqn in error.msg: # Initiator double registered. return self._safe_create_vag(iqn, vol_id) else: # VAG limit reached. Purge and start over. self._purge_vags() return self._safe_create_vag(iqn, vol_id) else: raise def _safe_create_vag(self, iqn, vol_id=None): # Potential race condition with simultaneous volume attaches to the # same host. To help avoid this, VAG creation makes a best attempt at # finding and using an existing VAG. 
        # (continuation of _safe_create_vag)
        vags = self._get_vags_by_name(iqn)
        if vags:
            # Filter through the vags and find the one with matching initiator
            vag = next((v for v in vags if iqn in v['initiators']), None)
            if vag:
                return vag['volumeAccessGroupID']
            else:
                # No matches, use the first result, add initiator IQN.
                vag_id = vags[0]['volumeAccessGroupID']
                return self._add_initiator_to_vag(iqn, vag_id)
        # No existing VAG at all for this IQN; fall back to creating one.
        return self._create_vag(iqn, vol_id)

    def _base_get_vags(self):
        """Return the raw list of all volume access groups on the cluster."""
        params = {}
        vags = self._issue_api_request(
            'ListVolumeAccessGroups',
            params,
            version='7.0')['result']['volumeAccessGroups']
        return vags

    def _get_vags_by_name(self, iqn):
        """Retrieve SolidFire volume access group objects by name.

        Returns an array of vags with a matching name value.
        Returns an empty array if there are no matches.
        """
        vags = self._base_get_vags()
        # VAG names are the IQN with non-alphanumerics collapsed to '-'
        # (same normalization as _create_vag).
        vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn)
        matching_vags = [vag for vag in vags if vag['name'] == vag_name]
        return matching_vags

    def _add_initiator_to_vag(self, iqn, vag_id):
        # Added a vag_id return as there is a chance that we might have to
        # create a new VAG if our target VAG is deleted underneath us.
        params = {"initiators": [iqn],
                  "volumeAccessGroupID": vag_id}
        try:
            self._issue_api_request('AddInitiatorsToVolumeAccessGroup',
                                    params,
                                    version='7.0')
            return vag_id
        except exception.SolidFireAPIException as error:
            # Error classification is done by substring-matching the module
            # level xFault constants against the API error message.
            if xAlreadyInVolumeAccessGroup in error.msg:
                return vag_id
            elif xVolumeAccessGroupIDDoesNotExist in error.msg:
                # No locking means sometimes a VAG can be removed by a parallel
                # volume detach against the same host.
                return self._safe_create_vag(iqn)
            else:
                raise

    def _add_volume_to_vag(self, vol_id, iqn, vag_id):
        # Added a vag_id return to be consistent with add_initiator_to_vag. It
        # isn't necessary but may be helpful in the future.
        params = {"volumeAccessGroupID": vag_id,
                  "volumes": [vol_id]}
        try:
            self._issue_api_request('AddVolumesToVolumeAccessGroup',
                                    params,
                                    version='7.0')
            return vag_id
        except exception.SolidFireAPIException as error:
            if xAlreadyInVolumeAccessGroup in error.msg:
                return vag_id
            elif xVolumeAccessGroupIDDoesNotExist in error.msg:
                # VAG vanished under us (parallel detach); rebuild it.
                return self._safe_create_vag(iqn, vol_id)
            else:
                raise

    def _remove_volume_from_vag(self, vol_id, vag_id):
        """Best-effort removal of a volume from a VAG; already-gone is OK."""
        params = {"volumeAccessGroupID": vag_id,
                  "volumes": [vol_id]}
        try:
            self._issue_api_request('RemoveVolumesFromVolumeAccessGroup',
                                    params,
                                    version='7.0')
        except exception.SolidFireAPIException as error:
            # Both "not in group" and "group gone" mean the desired end state
            # already holds, so swallow them deliberately.
            if xNotInVolumeAccessGroup in error.msg:
                pass
            elif xVolumeAccessGroupIDDoesNotExist in error.msg:
                pass
            else:
                raise

    def _remove_volume_from_vags(self, vol_id):
        # Due to all sorts of uncertainty around multiattach, on volume
        # deletion we make a best attempt at removing the vol_id from VAGs.
        vags = self._base_get_vags()
        targets = [v for v in vags if vol_id in v['volumes']]
        for vag in targets:
            self._remove_volume_from_vag(vol_id, vag['volumeAccessGroupID'])

    def _remove_vag(self, vag_id):
        """Delete a VAG; ignore the case where it no longer exists."""
        params = {"volumeAccessGroupID": vag_id}
        try:
            self._issue_api_request('DeleteVolumeAccessGroup',
                                    params,
                                    version='7.0')
        except exception.SolidFireAPIException as error:
            if xVolumeAccessGroupIDDoesNotExist not in error.msg:
                raise

    def _purge_vags(self, limit=10):
        # Purge up to limit number of VAGs that have no active volumes,
        # initiators, and an OpenStack attribute. Purge oldest VAGs first.
        # (continuation of _purge_vags)
        vags = self._base_get_vags()
        # Only empty, OpenStack-created VAGs are candidates for purging.
        targets = [v for v in vags if v['volumes'] == [] and
                   v['initiators'] == [] and
                   v['deletedVolumes'] == [] and
                   v['attributes'].get('openstack')]
        # Lowest volumeAccessGroupID first — i.e. oldest first.
        sorted_targets = sorted(targets,
                                key=lambda k: k['volumeAccessGroupID'])
        for vag in sorted_targets[:limit]:
            self._remove_vag(vag['volumeAccessGroupID'])

    @locked_image_id_operation
    def clone_image(self, context,
                    volume, image_location,
                    image_meta, image_service):
        """Clone an existing image volume.

        Returns (model_update, cloned) per the Cinder clone_image contract;
        (None, False) means "no clone performed, fall back to normal path".
        """
        public = False
        # Check out pre-requisites:
        # Is template caching enabled?
        if not self.configuration.sf_allow_template_caching:
            return None, False

        # NOTE(jdg): Glance V2 moved from is_public to visibility
        # so we check both, as we don't necessarily know or want
        # to care which we're using.  Will need to look at
        # future handling of things like shared and community
        # but for now, it's owner or public and that's it
        visibility = image_meta.get('visibility', None)
        if visibility and visibility == 'public':
            public = True
        elif image_meta.get('is_public', False):
            public = True
        else:
            if image_meta['owner'] == volume['project_id']:
                public = True
        if not public:
            LOG.warning(_LW("Requested image is not "
                            "accessible by current Tenant."))
            return None, False

        # Make sure the cached template volume exists and is current.
        try:
            self._verify_image_volume(context, image_meta, image_service)
        except exception.SolidFireAPIException:
            return None, False

        # Ok, should be good to go now, try it again
        (data, sfaccount, model) = self._do_clone_volume(image_meta['id'],
                                                         volume)
        return model, True

    def _retrieve_qos_setting(self, volume):
        """Resolve the QoS dict for a volume.

        Tenant-supplied QoS presets (if allowed) are applied first, but a
        volume-type QoS spec takes precedence when a type id is set.
        """
        qos = {}
        if (self.configuration.sf_allow_tenant_qos and
                volume.get('volume_metadata') is not None):
            qos = self._set_qos_presets(volume)

        ctxt = context.get_admin_context()
        type_id = volume.get('volume_type_id', None)
        if type_id is not None:
            qos = self._set_qos_by_volume_type(ctxt, type_id,
                                               volume.get('size'))
        return qos

    def create_volume(self, volume):
        """Create volume on SolidFire device.

        The account is where CHAP settings are derived from, volume
        is created and exported.  Note that the new volume is immediately
        ready for use.

        One caveat here is that an existing user account must be specified
        in the API call to create a new volume.  We use a set algorithm to
        determine account info based on passed in cinder volume object.  First
        we check to see if the account already exists (and use it), or if it
        does not already exist, we'll go ahead and create it.

        """
        slice_count = 1
        attributes = {}

        sf_account = self._get_create_account(volume['project_id'])
        qos = self._retrieve_qos_setting(volume)

        create_time = volume['created_at'].isoformat()
        attributes = {'uuid': volume['id'],
                      'is_clone': 'False',
                      'created_at': create_time}

        vname = '%s%s' % (self.configuration.sf_volume_prefix, volume['id'])
        params = {'name': vname,
                  'accountID': sf_account['accountID'],
                  'sliceCount': slice_count,
                  'totalSize': int(volume['size'] * units.Gi),
                  'enable512e': self.configuration.sf_emulate_512,
                  'attributes': attributes,
                  'qos': qos}

        # NOTE(jdg): Check if we're a migration tgt, if so
        # use the old volume-id here for the SF Name
        migration_status = volume.get('migration_status', None)
        if migration_status and 'target' in migration_status:
            k, v = migration_status.split(':')
            vname = '%s%s' % (self.configuration.sf_volume_prefix, v)
            params['name'] = vname
            params['attributes']['migration_uuid'] = volume['id']
            params['attributes']['uuid'] = v

        model_update = self._do_volume_create(sf_account, params)
        try:
            rep_settings = self._retrieve_replication_settings(volume)
            if self.replication_enabled and rep_settings:
                # provider_id is "<sfid> <other fields...>"; first token is
                # the SolidFire volume ID.
                volume['volumeID'] = (
                    int(model_update['provider_id'].split()[0]))
                self._replicate_volume(volume, params,
                                       sf_account, rep_settings)
        except exception.SolidFireAPIException:
            # NOTE(jdg): Something went wrong after the source create, due to
            # the way TFLOW works and it's insistence on retrying the same
            # command over and over coupled with the fact that the introduction
            # of objects now sets host to None on failures we'll end up with an
            # orphaned volume on the backend for every one of these segments
            # that fail, for n-retries.  Sad Sad Panda!!  We'll just do it
            # ourselves until we can get a general fix in Cinder further up the
            # line
            with excutils.save_and_reraise_exception():
                sf_volid = int(model_update['provider_id'].split()[0])
                self._issue_api_request('DeleteVolume',
                                        {'volumeID': sf_volid})
        return model_update

    def _retrieve_replication_settings(self, volume):
        """Return replication options from the volume's type, or {}."""
        rep_data = {}
        ctxt = context.get_admin_context()
        type_id = volume.get('volume_type_id', None)
        if type_id is not None:
            rep_data = self._set_rep_by_volume_type(ctxt, type_id)
        return rep_data

    def _set_rep_by_volume_type(self, ctxt, type_id):
        """Map a volume type's extra specs to replication options."""
        rep_opts = {}
        type_ref = volume_types.get_volume_type(ctxt, type_id)
        specs = type_ref.get('extra_specs')

        if specs.get('replication', 'disabled').lower() == 'enabled':
            rep_opts['targets'] = specs.get(
                'solidfire:replication_targets', self.cluster_pairs[0])
        return rep_opts

    def _replicate_volume(self, volume, src_params,
                          parent_sfaccount, rep_info):
        """Create and pair a replica of `volume` on the first paired cluster.

        Ensures the tenant account exists remotely, creates the remote volume
        with the same create params, sets it to replicationTarget access,
        then completes the StartVolumePairing/CompleteVolumePairing handshake.
        """
        params = {}

        # TODO(jdg): Right now we just go to first pair,
        # need to add parsing of rep_info eventually
        # in other words "rep_info" is not used yet!
        tgt_endpoint = self.cluster_pairs[0]['endpoint']
        LOG.debug("Replicating volume on remote cluster: %s", tgt_endpoint)
        params['attributes'] = src_params['attributes']
        params['username'] = self._get_sf_account_name(volume['project_id'])
        try:
            params['initiatorSecret'] = parent_sfaccount['initiatorSecret']
            params['targetSecret'] = parent_sfaccount['targetSecret']
            self._issue_api_request(
                'AddAccount',
                params,
                endpoint=tgt_endpoint)['result']['accountID']
        except exception.SolidFireAPIException as ex:
            # Account already existing on the target is fine.
            if 'xDuplicateUsername' not in ex.msg:
                raise

        remote_account = (
            self._get_sfaccount_by_name(params['username'],
                                        endpoint=tgt_endpoint))

        # Create the volume on the remote cluster w/same params as original
        params = src_params
        params['accountID'] = remote_account['accountID']
        LOG.debug("Create remote volume on: %(endpoint)s with account: "
                  "%(account)s",
                  {'endpoint': tgt_endpoint['url'],
                   'account': remote_account})
        model_update = self._do_volume_create(
            remote_account, params, endpoint=tgt_endpoint)

        tgt_sfid = int(model_update['provider_id'].split()[0])
        params = {'volumeID': tgt_sfid,
                  'access': 'replicationTarget'}
        self._issue_api_request('ModifyVolume',
                                params,
                                '8.0',
                                endpoint=tgt_endpoint)

        # Enable volume pairing
        LOG.debug("Start volume pairing on volume ID: %s",
                  volume['volumeID'])
        params = {'volumeID': volume['volumeID']}
        rep_key = self._issue_api_request(
            'StartVolumePairing',
            params,
            '8.0')['result']['volumePairingKey']
        params = {'volumeID': tgt_sfid,
                  'volumePairingKey': rep_key}
        LOG.debug("Issue CompleteVolumePairing request on remote: "
                  "%(endpoint)s, %(parameters)s",
                  {'endpoint': tgt_endpoint['url'],
                   'parameters': params})
        self._issue_api_request('CompleteVolumePairing',
                                params,
                                '8.0',
                                endpoint=tgt_endpoint)
        LOG.debug("Completed volume pairing.")
        return model_update

    @locked_source_id_operation
    def create_cloned_volume(self, volume, source):
        """Create a clone of an existing volume."""
        (_data, _sfaccount, model) = self._do_clone_volume(
            source['id'], volume)
        return model
    def delete_volume(self, volume):
        """Delete SolidFire Volume from device.

        SolidFire allows multiple volumes with same name,
        volumeID is what's guaranteed unique.
        """
        sf_vol = None
        accounts = self._get_sfaccounts_for_tenant(volume['project_id'])
        # NOTE(review): _get_sfaccounts_for_tenant appears to return a list
        # (possibly empty), so this `is None` guard may never fire — confirm.
        if accounts is None:
            LOG.error(_LE("Account for Volume ID %s was not found on "
                          "the SolidFire Cluster while attempting "
                          "delete_volume operation!"), volume['id'])
            LOG.error(_LE("This usually means the volume was never "
                          "successfully created."))
            return

        # Search primary then secondary account for the volume.
        for acc in accounts:
            vols = self._get_volumes_for_account(acc['accountID'],
                                                 volume['name_id'])
            if vols:
                sf_vol = vols[0]
                break

        if sf_vol is not None:
            # Clean up any replication pairs first so remote replicas don't
            # get orphaned.
            for vp in sf_vol.get('volumePairs', []):
                LOG.debug("Deleting paired volume on remote cluster...")
                pair_id = vp['clusterPairID']
                for cluster in self.cluster_pairs:
                    if cluster['clusterPairID'] == pair_id:
                        params = {'volumeID': vp['remoteVolumeID']}
                        LOG.debug("Issue Delete request on cluster: "
                                  "%(remote)s with params: %(parameters)s",
                                  {'remote': cluster['endpoint']['url'],
                                   'parameters': params})
                        self._issue_api_request('DeleteVolume', params,
                                                endpoint=cluster['endpoint'])
            if sf_vol['status'] == 'active':
                params = {'volumeID': sf_vol['volumeID']}
                self._issue_api_request('DeleteVolume', params)
            if volume.get('multiattach'):
                # Best-effort removal from any VAGs referencing this volume.
                self._remove_volume_from_vags(sf_vol['volumeID'])
        else:
            LOG.error(_LE("Volume ID %s was not found on "
                          "the SolidFire Cluster while attempting "
                          "delete_volume operation!"), volume['id'])

    def delete_snapshot(self, snapshot):
        """Delete the specified snapshot from the SolidFire cluster."""
        sf_snap_name = '%s%s' % (self.configuration.sf_volume_prefix,
                                 snapshot['id'])
        accounts = self._get_sfaccounts_for_tenant(snapshot['project_id'])
        snap = None
        for acct in accounts:
            params = {'accountID': acct['accountID']}
            sf_vol = self._get_sf_volume(snapshot['volume_id'], params)
            if sf_vol:
                sf_snaps = self._get_sf_snapshots(sf_vol['volumeID'])
                snap = next((s for s in sf_snaps
                             if s["name"] == sf_snap_name), None)
                if snap:
                    params = {'snapshotID': snap['snapshotID']}
                    self._issue_api_request('DeleteSnapshot',
                                            params,
                                            version='6.0')
                    return
        # Make sure it's not "old style" using clones as snaps
        LOG.debug("Snapshot not found, checking old style clones.")
        self.delete_volume(snapshot)

    def create_snapshot(self, snapshot):
        """Create a SolidFire snapshot of the snapshot's source volume."""
        sfaccount = self._get_sfaccount(snapshot['project_id'])
        if sfaccount is None:
            # NOTE(review): only logged, not raised; execution falls through
            # and the params access below would fail — confirm intent.
            LOG.error(_LE("Account for Volume ID %s was not found on "
                          "the SolidFire Cluster while attempting "
                          "create_snapshot operation!"),
                      snapshot['volume_id'])
        params = {'accountID': sfaccount['accountID']}
        sf_vol = self._get_sf_volume(snapshot['volume_id'], params)

        if sf_vol is None:
            raise exception.VolumeNotFound(volume_id=snapshot['volume_id'])
        params = {'volumeID': sf_vol['volumeID'],
                  'name': '%s%s' % (self.configuration.sf_volume_prefix,
                                    snapshot['id'])}
        return self._do_snapshot_create(params)

    @locked_source_id_operation
    def create_volume_from_snapshot(self, volume, source):
        """Create a volume from the specified snapshot."""
        if source.get('cgsnapshot_id'):
            # We're creating a volume from a snapshot that resulted from a
            # consistency group snapshot. Because of the way that SolidFire
            # creates cgsnaps, we have to search for the correct snapshot.
            cgsnapshot_id = source.get('cgsnapshot_id')
            snapshot_id = source.get('volume_id')
            sf_name = self.configuration.sf_volume_prefix + cgsnapshot_id
            sf_group_snap = self._get_group_snapshot_by_name(sf_name)
            return self._create_clone_from_sf_snapshot(snapshot_id,
                                                       cgsnapshot_id,
                                                       sf_group_snap,
                                                       volume)

        (_data, _sfaccount, model) = self._do_clone_volume(
            source['id'], volume)
        return model

    # Consistency group helpers
    def _create_group_snapshot(self, name, sf_volumes):
        # Group snapshot is our version of a consistency group snapshot.
        vol_ids = [vol['volumeID'] for vol in sf_volumes]
        params = {'name': name,
                  'volumes': vol_ids}
        snapshot_id = self._issue_api_request('CreateGroupSnapshot',
                                              params,
                                              version='7.0')
        return snapshot_id['result']

    def _group_snapshot_creator(self, gsnap_name, src_vol_ids):
        # Common helper that takes in an array of OpenStack Volume UUIDs and
        # creates a SolidFire group snapshot with them.
        vol_names = [self.configuration.sf_volume_prefix + vol_id
                     for vol_id in src_vol_ids]
        active_sf_vols = self._get_all_active_volumes()
        target_vols = [vol for vol in active_sf_vols
                       if vol['name'] in vol_names]
        # Every requested Cinder volume must resolve to exactly one SF volume.
        if len(src_vol_ids) != len(target_vols):
            msg = (_("Retrieved a different amount of SolidFire volumes for "
                     "the provided Cinder volumes. Retrieved: %(ret)s "
                     "Desired: %(des)s") % {"ret": len(target_vols),
                                            "des": len(src_vol_ids)})
            raise exception.SolidFireDriverException(msg)

        result = self._create_group_snapshot(gsnap_name, target_vols)
        return result

    def _create_temp_group_snapshot(self, source_cg, source_vols):
        # Take a temporary snapshot to create the volumes for a new
        # consistency group.
        gsnap_name = ("%(prefix)s%(id)s-tmp" %
                      {"prefix": self.configuration.sf_volume_prefix,
                       "id": source_cg['id']})
        vol_ids = [vol['id'] for vol in source_vols]
        self._group_snapshot_creator(gsnap_name, vol_ids)
        return gsnap_name

    def _list_group_snapshots(self):
        """Return all group snapshots on the cluster."""
        result = self._issue_api_request('ListGroupSnapshots',
                                         {},
                                         version='7.0')
        return result['result']['groupSnapshots']

    def _get_group_snapshot_by_name(self, name):
        """Return the group snapshot with the given name, or None."""
        target_snaps = self._list_group_snapshots()
        target = next((snap for snap in target_snaps
                       if snap['name'] == name), None)
        return target

    def _delete_group_snapshot(self, gsnapid):
        params = {'groupSnapshotID': gsnapid}
        self._issue_api_request('DeleteGroupSnapshot',
                                params,
                                version='7.0')

    def _delete_cgsnapshot_by_name(self, snap_name):
        # Common function used to find and delete a snapshot.
        # (continuation of create_consistencygroup: no backend object is
        # created, just report the group as available)
        return {'status': fields.ConsistencyGroupStatus.AVAILABLE}

    def create_consistencygroup_from_src(self, ctxt, group, volumes,
                                         cgsnapshot, snapshots,
                                         source_cg, source_vols):
        """Create a CG from either a cgsnapshot or another CG's volumes."""
        if cgsnapshot and snapshots:
            sf_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
            sf_group_snap = self._get_group_snapshot_by_name(sf_name)

            # Go about creating volumes from provided snaps.
            vol_models = []
            for vol, snap in zip(volumes, snapshots):
                vol_models.append(self._create_clone_from_sf_snapshot(
                    snap['volume_id'],
                    snap['id'],
                    sf_group_snap,
                    vol))
            return ({'status': fields.ConsistencyGroupStatus.AVAILABLE},
                    vol_models)

        elif source_cg and source_vols:
            # Create temporary group snapshot.
            gsnap_name = self._create_temp_group_snapshot(source_cg,
                                                          source_vols)
            try:
                sf_group_snap = self._get_group_snapshot_by_name(gsnap_name)
                # For each temporary snapshot clone the volume.
                vol_models = []
                for vol in volumes:
                    vol_models.append(self._create_clone_from_sf_snapshot(
                        vol['source_volid'],
                        vol['source_volid'],
                        sf_group_snap,
                        vol))
            finally:
                # Always drop the temporary group snapshot, even on failure.
                self._delete_cgsnapshot_by_name(gsnap_name)
            return {'status': 'available'}, vol_models
        # NOTE(review): neither source provided falls through returning None
        # — confirm callers never hit this path.

    def create_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
        """Snapshot all member volumes as one SolidFire group snapshot."""
        vol_ids = [snapshot['volume_id'] for snapshot in snapshots]
        vol_names = [self.configuration.sf_volume_prefix + vol_id
                     for vol_id in vol_ids]
        active_sf_vols = self._get_all_active_volumes()
        target_vols = [vol for vol in active_sf_vols
                       if vol['name'] in vol_names]
        if len(snapshots) != len(target_vols):
            msg = (_("Retrieved a different amount of SolidFire volumes for "
                     "the provided Cinder snapshots. Retrieved: %(ret)s "
                     "Desired: %(des)s") % {"ret": len(target_vols),
                                            "des": len(snapshots)})
            raise exception.SolidFireDriverException(msg)

        snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
        self._create_group_snapshot(snap_name, target_vols)
        return None, None

    def update_consistencygroup(self, context, group,
                                add_volumes=None, remove_volumes=None):
        # Similar to create_consistencygroup, SolidFire's lack of a consistency
        # group object means there is nothing to update on the cluster.
        return None, None, None

    def delete_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
        snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
        self._delete_cgsnapshot_by_name(snap_name)
        return None, None

    def delete_consistencygroup(self, ctxt, group, volumes):
        # No CG object on the backend; deleting the members is the whole job.
        for vol in volumes:
            self.delete_volume(vol)
        return None, None

    def get_volume_stats(self, refresh=False):
        """Get volume status.

        If 'refresh' is True, run update first.
        The name is a bit misleading as
        the majority of the data here is cluster
        data
        """
        if refresh:
            try:
                self._update_cluster_status()
            except exception.SolidFireAPIException:
                # Best-effort: serve the last cached stats on API failure.
                pass

        return self.cluster_stats

    def extend_volume(self, volume, new_size):
        """Extend an existing volume."""
        sfaccount = self._get_sfaccount(volume['project_id'])
        params = {'accountID': sfaccount['accountID']}

        sf_vol = self._get_sf_volume(volume['id'], params)

        if sf_vol is None:
            LOG.error(_LE("Volume ID %s was not found on "
                          "the SolidFire Cluster while attempting "
                          "extend_volume operation!"), volume['id'])
            raise exception.VolumeNotFound(volume_id=volume['id'])

        params = {
            'volumeID': sf_vol['volumeID'],
            'totalSize': int(new_size * units.Gi)
        }
        self._issue_api_request('ModifyVolume',
                                params, version='5.0')

    def _update_cluster_status(self):
        """Retrieve status info for the Cluster."""
        params = {}
        data = {}
        backend_name = self.configuration.safe_get('volume_backend_name')
        data["volume_backend_name"] = backend_name or self.__class__.__name__
        data["vendor_name"] = 'SolidFire Inc'
        data["driver_version"] = self.VERSION
        data["storage_protocol"] = 'iSCSI'
        data['consistencygroup_support'] = True
        data['replication_enabled'] = self.replication_enabled
        if self.replication_enabled:
            data['replication'] = 'enabled'
        data['active_cluster_mvip'] = self.active_cluster_info['mvip']
        data['reserved_percentage'] = self.configuration.reserved_percentage
        data['QoS_support'] = True

        try:
            results = self._issue_api_request('GetClusterCapacity', params)
        except exception.SolidFireAPIException:
            # Publish zeroed capacity rather than stale numbers when the
            # cluster is unreachable.
            data['total_capacity_gb'] = 0
            data['free_capacity_gb'] = 0
            self.cluster_stats = data
            return

        results = results['result']['clusterCapacity']
        free_capacity = (
            results['maxProvisionedSpace'] -
            results['usedSpace'])

        data['total_capacity_gb'] = (
            float(results['maxProvisionedSpace'] / units.Gi))

        data['free_capacity_gb'] = float(free_capacity / units.Gi)
        data['compression_percent'] = (
            results['compressionPercent'])
        # NOTE: key is misspelled ('deduplicaton_percent') but left as-is —
        # consumers of these stats may already depend on it.
        data['deduplicaton_percent'] = (
            results['deDuplicationPercent'])
        data['thin_provision_percent'] = (
            results['thinProvisioningPercent'])
        self.cluster_stats = data

    def initialize_connection(self, volume, connector):
        """Initialize the connection and return connection info.

        Optionally checks and utilizes volume access groups.
        """
        properties = self._sf_initialize_connection(volume, connector)
        properties['data']['discard'] = True
        return properties

    def attach_volume(self, context, volume,
                      instance_uuid, host_name,
                      mountpoint):
        """Record attach metadata on the SF volume's attributes."""
        sfaccount = self._get_sfaccount(volume['project_id'])
        params = {'accountID': sfaccount['accountID']}

        sf_vol = self._get_sf_volume(volume['id'], params)
        if sf_vol is None:
            LOG.error(_LE("Volume ID %s was not found on "
                          "the SolidFire Cluster while attempting "
                          "attach_volume operation!"), volume['id'])
            raise exception.VolumeNotFound(volume_id=volume['id'])

        attributes = sf_vol['attributes']
        attributes['attach_time'] = volume.get('attach_time', None)
        attributes['attached_to'] = instance_uuid
        params = {
            'volumeID': sf_vol['volumeID'],
            'attributes': attributes
        }

        self._issue_api_request('ModifyVolume', params)

    def terminate_connection(self, volume, properties, force):
        return self._sf_terminate_connection(volume,
                                             properties,
                                             force)

    def detach_volume(self, context, volume, attachment=None):
        """Clear attach metadata on the SF volume's attributes."""
        sfaccount = self._get_sfaccount(volume['project_id'])
        params = {'accountID': sfaccount['accountID']}

        sf_vol = self._get_sf_volume(volume['id'], params)
        if sf_vol is None:
            LOG.error(_LE("Volume ID %s was not found on "
                          "the SolidFire Cluster while attempting "
                          "detach_volume operation!"), volume['id'])
            raise exception.VolumeNotFound(volume_id=volume['id'])

        attributes = sf_vol['attributes']
        attributes['attach_time'] = None
        attributes['attached_to'] = None
        params = {
            'volumeID': sf_vol['volumeID'],
            'attributes': attributes
        }

        self._issue_api_request('ModifyVolume', params)

    def accept_transfer(self, context, volume,
                        new_user, new_project):
        """Move a volume to the (possibly new) account of the new project."""
        sfaccount = self._get_sfaccount(volume['project_id'])
        params = {'accountID': sfaccount['accountID']}
        sf_vol = self._get_sf_volume(volume['id'], params)
        if sf_vol is None:
            LOG.error(_LE("Volume ID %s was not found on "
                          "the SolidFire Cluster while attempting "
                          "accept_transfer operation!"), volume['id'])
            raise exception.VolumeNotFound(volume_id=volume['id'])
        # (continuation of accept_transfer)
        if new_project != volume['project_id']:
            # do a create_sfaccount here as this tenant
            # may not exist on the cluster yet
            sfaccount = self._create_sfaccount(new_project)

        params = {
            'volumeID': sf_vol['volumeID'],
            'accountID': sfaccount['accountID']
        }
        self._issue_api_request('ModifyVolume',
                                params, version='5.0')

        volume['project_id'] = new_project
        volume['user_id'] = new_user
        return self.target_driver.ensure_export(context, volume, None)

    def retype(self, ctxt, volume, new_type, diff, host):
        """Convert the volume to be of the new type.

        Returns a boolean indicating whether the retype occurred.

        :param ctxt: Context
        :param volume: A dictionary describing the volume to migrate
        :param new_type: A dictionary describing the volume type to convert to
        :param diff: A dictionary with the difference between the two types
        :param host: A dictionary describing the host to migrate to, where
                     host['host'] is its name, and host['capabilities'] is a
                     dictionary of its reported capabilities (Not Used).
        """
        qos = {}
        attributes = {}

        sfaccount = self._get_sfaccount(volume['project_id'])
        params = {'accountID': sfaccount['accountID']}
        sf_vol = self._get_sf_volume(volume['id'], params)

        if sf_vol is None:
            raise exception.VolumeNotFound(volume_id=volume['id'])

        attributes = sf_vol['attributes']
        attributes['retyped_at'] = timeutils.utcnow().isoformat()
        params = {'volumeID': sf_vol['volumeID']}
        qos = self._set_qos_by_volume_type(ctxt, new_type['id'],
                                           volume.get('size'))

        if qos:
            params['qos'] = qos

        self._issue_api_request('ModifyVolume', params)
        return True

    def manage_existing(self, volume, external_ref):
        """Manages an existing SolidFire Volume (import to Cinder).

        Renames the Volume to match the expected name for the volume.
        Also need to consider things like QoS, Emulation, account/tenant.
        """
        sfid = external_ref.get('source-id', None)
        sfname = external_ref.get('name', None)
        if sfid is None:
            raise exception.SolidFireAPIException(_("Manage existing volume "
                                                    "requires 'source-id'."))

        # First get the volume on the SF cluster (MUST be active)
        params = {'startVolumeID': sfid,
                  'limit': 1}
        vols = self._issue_api_request(
            'ListActiveVolumes', params)['result']['volumes']

        sf_ref = vols[0]
        sfaccount = self._create_sfaccount(volume['project_id'])

        attributes = {}
        qos = self._retrieve_qos_setting(volume)

        import_time = volume['created_at'].isoformat()
        attributes = {'uuid': volume['id'],
                      'is_clone': 'False',
                      'os_imported_at': import_time,
                      'old_name': sfname}

        params = {'name': volume['name'],
                  'volumeID': sf_ref['volumeID'],
                  'accountID': sfaccount['accountID'],
                  'enable512e': self.configuration.sf_emulate_512,
                  'attributes': attributes,
                  'qos': qos}
        self._issue_api_request('ModifyVolume',
                                params, version='5.0')

        return self._get_model_info(sfaccount, sf_ref['volumeID'])

    def manage_existing_get_size(self, volume, external_ref):
        """Return size of an existing LV for manage_existing.

        existing_ref is a dictionary of the form:
        {'name': <name of existing volume on SF Cluster>}
        """
        sfid = external_ref.get('source-id', None)
        if sfid is None:
            raise exception.SolidFireAPIException(_("Manage existing get size "
                                                    "requires 'id'."))

        params = {'startVolumeID': int(sfid),
                  'limit': 1}
        vols = self._issue_api_request(
            'ListActiveVolumes', params)['result']['volumes']
        # Report size rounded up to whole GiB, as Cinder expects.
        return int(math.ceil(float(vols[0]['totalSize']) / units.Gi))

    def unmanage(self, volume):
        """Mark SolidFire Volume as unmanaged (export from Cinder)."""
        sfaccount = self._get_sfaccount(volume['project_id'])
        if sfaccount is None:
            LOG.error(_LE("Account for Volume ID %s was not found on "
                          "the SolidFire Cluster while attempting "
                          "unmanage operation!"), volume['id'])
            raise exception.SolidFireAPIException(_("Failed to find account "
                                                    "for volume."))

        params = {'accountID': sfaccount['accountID']}
        sf_vol = self._get_sf_volume(volume['id'], params)
        if sf_vol is None:
            raise exception.VolumeNotFound(volume_id=volume['id'])

        export_time = timeutils.utcnow().isoformat()
        attributes = sf_vol['attributes']
        attributes['os_exported_at'] = export_time
        params = {'volumeID': int(sf_vol['volumeID']),
                  'attributes': attributes}

        self._issue_api_request('ModifyVolume',
                                params, version='5.0')

    def _failover_volume(self, remote_vol, remote):
        """Modify remote volume to R/W mode."""
        # Break the pairing first, then promote the replica to readWrite.
        self._issue_api_request(
            'RemoveVolumePair',
            {'volumeID': remote_vol['volumeID']},
            endpoint=remote['endpoint'], version='7.0')

        params = {'volumeID': remote_vol['volumeID'],
                  'access': 'readWrite'}
        self._issue_api_request('ModifyVolume', params,
                                endpoint=remote['endpoint'])

    def failover_host(self, context, volumes, secondary_id=None):
        """Failover to replication target."""
        volume_updates = []
        remote = None

        # Pick the requested replication pair by MVIP, or default to the
        # first configured pair.
        if secondary_id:
            for rc in self.cluster_pairs:
                if rc['mvip'] == secondary_id:
                    remote = rc
                    break
            if not remote:
                LOG.error(_LE("SolidFire driver received failover_host "
                              "but was unable to find specified replication "
                              "pair with id: %s."), secondary_id)
                raise exception.InvalidReplicationTarget
        else:
            remote = self.cluster_pairs[0]

        if not remote or not self.replication_enabled:
            LOG.error(_LE("SolidFire driver received failover_host "
                          "request, however replication is NOT "
                          "enabled, or there are no available "
                          "targets to fail-over to."))
            raise exception.UnableToFailOver(reason=_("Failover requested "
                                                      "on non replicated "
                                                      "backend."))

        remote_vols = self._map_sf_volumes(volumes,
                                           endpoint=remote['endpoint'])
        primary_vols = self._map_sf_volumes(volumes)
        for v in volumes:
            remote_vlist = [sfv for sfv in remote_vols
                            if sfv['cinder_id'] == v['id']]

            if len(remote_vlist) > 0:
                remote_vol = remote_vlist[0]
                # Promote the replica, then break the pairing on the primary
                # side if it still exists.
                self._failover_volume(remote_vol, remote)
                primary_vol = [sfv for sfv in primary_vols if
                               sfv['cinder_id'] == v['id']][0]
                if len(primary_vol['volumePairs']) > 0:
                    self._issue_api_request(
                        'RemoveVolumePair',
                        {'volumeID': primary_vol['volumeID']},
                        version='7.0')
                iqn = remote_vol['iqn']
                volume_updates.append(
                    {'volume_id': v['id'],
                     'updates': {
                         'provider_location': ('%s %s %s' %
                                               (remote['endpoint']['svip'],
                                                iqn,
                                                0)),
                         'replication_status': 'failed-over'}})
            else:
                # No replica found for this volume on the target cluster.
                volume_updates.append({'volume_id': v['id'],
                                       'updates': {'status': 'error', }})

        # FIXME(jdg): This introduces a problem for us, up until now our driver
        # has been pretty much stateless and has allowed customers to run
        # active/active HA c-vol services with SolidFire.
The introduction of # the active_cluster and failed_over attributes is going to break that # but for now that's going to be the trade off of using replciation self.active_cluster_info = remote self.failed_over = True return remote['mvip'], volume_updates def freeze_backend(self, context): """Freeze backend notification.""" pass def thaw_backend(self, context): """Thaw backend notification.""" pass class SolidFireISCSI(iscsi_driver.SanISCSITarget): def __init__(self, *args, **kwargs): super(SolidFireISCSI, self).__init__(*args, **kwargs) self.sf_driver = kwargs.get('solidfire_driver') def __getattr__(self, attr): if hasattr(self.sf_driver, attr): return getattr(self.sf_driver, attr) else: msg = _('Attribute: %s not found.') % attr raise NotImplementedError(msg) def _do_iscsi_export(self, volume): sfaccount = self._get_sfaccount(volume['project_id']) model_update = {} model_update['provider_auth'] = ('CHAP %s %s' % (sfaccount['username'], sfaccount['targetSecret'])) return model_update def create_export(self, context, volume, volume_path): return self._do_iscsi_export(volume) def ensure_export(self, context, volume, volume_path): try: return self._do_iscsi_export(volume) except exception.SolidFireAPIException: return None # Following are abc's that we make sure are caught and # paid attention to. In our case we don't use them # so just stub them out here. def remove_export(self, context, volume): pass def terminate_connection(self, volume, connector, **kwargs): pass def _sf_initialize_connection(self, volume, connector): """Initialize the connection and return connection info. Optionally checks and utilizes volume access groups. """ if self.configuration.sf_enable_vag: iqn = connector['initiator'] provider_id = volume['provider_id'] vol_id = int(provider_id.split()[0]) # safe_create_vag may opt to reuse vs create a vag, so we need to # add our vol_id. 
vag_id = self._safe_create_vag(iqn, vol_id) self._add_volume_to_vag(vol_id, iqn, vag_id) # Continue along with default behavior return super(SolidFireISCSI, self).initialize_connection(volume, connector) def _sf_terminate_connection(self, volume, properties, force): """Terminate the volume connection. Optionally remove volume from volume access group. If the VAG is empty then the VAG is also removed. """ if self.configuration.sf_enable_vag: iqn = properties['initiator'] vag = self._get_vags_by_name(iqn) provider_id = volume['provider_id'] vol_id = int(provider_id.split()[0]) if vag and not volume['multiattach']: # Multiattach causes problems with removing volumes from VAGs. # Compromise solution for now is to remove multiattach volumes # from VAGs during volume deletion. vag = vag[0] vag_id = vag['volumeAccessGroupID'] if [vol_id] == vag['volumes']: self._remove_vag(vag_id) elif vol_id in vag['volumes']: self._remove_volume_from_vag(vol_id, vag_id) return super(SolidFireISCSI, self).terminate_connection(volume, properties, force=force)
{ "repo_name": "ge0rgi/cinder", "path": "cinder/volume/drivers/solidfire.py", "copies": "1", "size": "91315", "license": "apache-2.0", "hash": -3983878327612395000, "line_mean": 41.9111842105, "line_max": 79, "alpha_frac": 0.5356951213, "autogenerated": false, "ratio": 4.35808714742519, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5393782268725189, "avg_score": null, "num_lines": null }
import json import math import random import re import socket import string import time import warnings from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from oslo_utils import units import requests from requests.packages.urllib3 import exceptions import six from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LW from cinder.image import image_utils from cinder.objects import fields from cinder.volume.drivers.san import san from cinder.volume import qos_specs from cinder.volume.targets import iscsi as iscsi_driver from cinder.volume import volume_types LOG = logging.getLogger(__name__) sf_opts = [ cfg.BoolOpt('sf_emulate_512', default=True, help='Set 512 byte emulation on volume creation; '), cfg.BoolOpt('sf_allow_tenant_qos', default=False, help='Allow tenants to specify QOS on create'), cfg.StrOpt('sf_account_prefix', help='Create SolidFire accounts with this prefix. Any string ' 'can be used here, but the string \"hostname\" is special ' 'and will create a prefix using the cinder node hostname ' '(previous default behavior). The default is NO prefix.'), cfg.StrOpt('sf_volume_prefix', default='UUID-', help='Create SolidFire volumes with this prefix. Volume names ' 'are of the form <sf_volume_prefix><cinder-volume-id>. ' 'The default is to use a prefix of \'UUID-\'.'), cfg.StrOpt('sf_template_account_name', default='openstack-vtemplate', help='Account name on the SolidFire Cluster to use as owner of ' 'template/cache volumes (created if does not exist).'), cfg.BoolOpt('sf_allow_template_caching', default=True, help='Create an internal cache of copy of images when ' 'a bootable volume is created to eliminate fetch from ' 'glance and qemu-conversion on subsequent calls.'), cfg.StrOpt('sf_svip', help='Overrides default cluster SVIP with the one specified. 
' 'This is required or deployments that have implemented ' 'the use of VLANs for iSCSI networks in their cloud.'), cfg.BoolOpt('sf_enable_volume_mapping', default=True, help='Create an internal mapping of volume IDs and account. ' 'Optimizes lookups and performance at the expense of ' 'memory, very large deployments may want to consider ' 'setting to False.'), cfg.PortOpt('sf_api_port', default=443, help='SolidFire API port. Useful if the device api is behind ' 'a proxy on a different port.'), cfg.BoolOpt('sf_enable_vag', default=False, help='Utilize volume access groups on a per-tenant basis.')] CONF = cfg.CONF CONF.register_opts(sf_opts) # SolidFire API Error Constants xExceededLimit = 'xExceededLimit' xAlreadyInVolumeAccessGroup = 'xAlreadyInVolumeAccessGroup' xVolumeAccessGroupIDDoesNotExist = 'xVolumeAccessGroupIDDoesNotExist' xNotInVolumeAccessGroup = 'xNotInVolumeAccessGroup' def retry(exc_tuple, tries=5, delay=1, backoff=2): def retry_dec(f): @six.wraps(f) def func_retry(*args, **kwargs): _tries, _delay = tries, delay while _tries > 1: try: return f(*args, **kwargs) except exc_tuple: time.sleep(_delay) _tries -= 1 _delay *= backoff LOG.debug('Retrying %(args)s, %(tries)s attempts ' 'remaining...', {'args': args, 'tries': _tries}) # NOTE(jdg): Don't log the params passed here # some cmds like createAccount will have sensitive # info in the params, grab only the second tuple # which should be the Method msg = (_('Retry count exceeded for command: %s') % (args[1],)) LOG.error(msg) raise exception.SolidFireAPIException(message=msg) return func_retry return retry_dec class SolidFireDriver(san.SanISCSIDriver): """OpenStack driver to enable SolidFire cluster. 
Version history: 1.0 - Initial driver 1.1 - Refactor, clone support, qos by type and minor bug fixes 1.2 - Add xfr and retype support 1.2.1 - Add export/import support 1.2.2 - Catch VolumeNotFound on accept xfr 2.0.0 - Move from httplib to requests 2.0.1 - Implement SolidFire Snapshots 2.0.2 - Implement secondary account """ VERSION = '2.0.2' sf_qos_dict = {'slow': {'minIOPS': 100, 'maxIOPS': 200, 'burstIOPS': 200}, 'medium': {'minIOPS': 200, 'maxIOPS': 400, 'burstIOPS': 400}, 'fast': {'minIOPS': 500, 'maxIOPS': 1000, 'burstIOPS': 1000}, 'performant': {'minIOPS': 2000, 'maxIOPS': 4000, 'burstIOPS': 4000}, 'off': None} sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS'] cluster_stats = {} retry_exc_tuple = (exception.SolidFireRetryableException, requests.exceptions.ConnectionError) retryable_errors = ['xDBVersionMismatch', 'xMaxSnapshotsPerVolumeExceeded', 'xMaxClonesPerVolumeExceeded', 'xMaxSnapshotsPerNodeExceeded', 'xMaxClonesPerNodeExceeded', 'xNotReadyForIO'] def __init__(self, *args, **kwargs): super(SolidFireDriver, self).__init__(*args, **kwargs) self.cluster_uuid = None self.configuration.append_config_values(sf_opts) self._endpoint = self._build_endpoint_info() self.template_account_id = None self.max_volumes_per_account = 1990 self.volume_map = {} try: self._update_cluster_status() except exception.SolidFireAPIException: pass if self.configuration.sf_allow_template_caching: account = self.configuration.sf_template_account_name self.template_account_id = self._create_template_account(account) self.target_driver = SolidFireISCSI(solidfire_driver=self, configuration=self.configuration) self._set_cluster_uuid() def __getattr__(self, attr): return getattr(self.target_driver, attr) def _set_cluster_uuid(self): self.cluster_uuid = ( self._get_cluster_info()['clusterInfo']['uuid']) def _parse_provider_id_string(self, id_string): return tuple(id_string.split()) def _create_provider_id_string(self, resource_id, account_or_vol_id, cluster_uuid=None): # NOTE(jdg): We 
use the same format, but in the case # of snapshots, we don't have an account id, we instead # swap that with the parent volume id cluster_id = self.cluster_uuid # We allow specifying a remote cluster if cluster_uuid: cluster_id = cluster_uuid return "%s %s %s" % (resource_id, account_or_vol_id, cluster_id) def _init_snapshot_mappings(self, srefs): updates = [] sf_snaps = self._issue_api_request( 'ListSnapshots', {}, version='6.0')['result']['snapshots'] for s in srefs: seek_name = '%s%s' % (self.configuration.sf_volume_prefix, s['id']) sfsnap = next( (ss for ss in sf_snaps if ss['name'] == seek_name), None) if sfsnap: id_string = self._create_provider_id_string( sfsnap['snapshotID'], sfsnap['volumeID']) if s.get('provider_id') != id_string: updates.append( {'id': s['id'], 'provider_id': id_string}) return updates def _init_volume_mappings(self, vrefs): updates = [] sf_vols = self._issue_api_request('ListActiveVolumes', {})['result']['volumes'] self.volume_map = {} for v in vrefs: seek_name = '%s%s' % (self.configuration.sf_volume_prefix, v['id']) sfvol = next( (sv for sv in sf_vols if sv['name'] == seek_name), None) if sfvol: if v.get('provider_id', 'nil') != sfvol['volumeID']: v['provider_id'] == sfvol['volumeID'] updates.append( {'id': v['id'], 'provider_id': self._create_provider_id_string( sfvol['volumeID'], sfvol['accountID'])}) return updates def update_provider_info(self, vrefs, snaprefs): volume_updates = self._init_volume_mappings(vrefs) snapshot_updates = self._init_snapshot_mappings(snaprefs) return (volume_updates, snapshot_updates) def _create_template_account(self, account_name): # We raise an API exception if the account doesn't exist # We need to take account_prefix settings into consideration # This just uses the same method to do template account create # as we use for any other OpenStack account account_name = self._get_sf_account_name(account_name) try: id = self._issue_api_request( 'GetAccountByName', {'username': 
account_name})['result']['account']['accountID'] except exception.SolidFireAPIException: chap_secret = self._generate_random_string(12) params = {'username': account_name, 'initiatorSecret': chap_secret, 'targetSecret': chap_secret, 'attributes': {}} id = self._issue_api_request('AddAccount', params)['result']['accountID'] return id def _build_endpoint_info(self, **kwargs): endpoint = {} endpoint['mvip'] = ( kwargs.get('mvip', self.configuration.san_ip)) endpoint['login'] = ( kwargs.get('login', self.configuration.san_login)) endpoint['passwd'] = ( kwargs.get('passwd', self.configuration.san_password)) endpoint['port'] = ( kwargs.get('port', self.configuration.sf_api_port)) endpoint['url'] = 'https://%s:%s' % (endpoint['mvip'], endpoint['port']) # TODO(jdg): consider a call to GetAPI and setting version return endpoint @retry(retry_exc_tuple, tries=6) def _issue_api_request(self, method, params, version='1.0', endpoint=None): if params is None: params = {} if endpoint is None: endpoint = self._endpoint payload = {'method': method, 'params': params} url = '%s/json-rpc/%s/' % (endpoint['url'], version) with warnings.catch_warnings(): warnings.simplefilter("ignore", exceptions.InsecureRequestWarning) req = requests.post(url, data=json.dumps(payload), auth=(endpoint['login'], endpoint['passwd']), verify=False, timeout=30) response = req.json() req.close() if (('error' in response) and (response['error']['name'] in self.retryable_errors)): msg = ('Retryable error (%s) encountered during ' 'SolidFire API call.' 
% response['error']['name']) LOG.debug(msg) raise exception.SolidFireRetryableException(message=msg) if 'error' in response: msg = _('API response: %s') % response raise exception.SolidFireAPIException(msg) return response def _get_volumes_by_sfaccount(self, account_id): """Get all volumes on cluster for specified account.""" params = {'accountID': account_id} return self._issue_api_request( 'ListVolumesForAccount', params)['result']['volumes'] def _get_sfaccount_by_name(self, sf_account_name): """Get SolidFire account object by name.""" sfaccount = None params = {'username': sf_account_name} try: data = self._issue_api_request('GetAccountByName', params) if 'result' in data and 'account' in data['result']: LOG.debug('Found solidfire account: %s', sf_account_name) sfaccount = data['result']['account'] except exception.SolidFireAPIException as ex: if 'xUnknownAccount' in ex.msg: return sfaccount else: raise return sfaccount def _get_sf_account_name(self, project_id): """Build the SolidFire account name to use.""" prefix = self.configuration.sf_account_prefix or '' if prefix == 'hostname': prefix = socket.gethostname() return '%s%s%s' % (prefix, '-' if prefix else '', project_id) def _get_sfaccount(self, project_id): sf_account_name = self._get_sf_account_name(project_id) sfaccount = self._get_sfaccount_by_name(sf_account_name) if sfaccount is None: raise exception.SolidFireAccountNotFound( account_name=sf_account_name) return sfaccount def _create_sfaccount(self, project_id): """Create account on SolidFire device if it doesn't already exist. We're first going to check if the account already exists, if it does just return it. If not, then create it. 
""" sf_account_name = self._get_sf_account_name(project_id) sfaccount = self._get_sfaccount_by_name(sf_account_name) if sfaccount is None: LOG.debug('solidfire account: %s does not exist, create it...', sf_account_name) chap_secret = self._generate_random_string(12) params = {'username': sf_account_name, 'initiatorSecret': chap_secret, 'targetSecret': chap_secret, 'attributes': {}} self._issue_api_request('AddAccount', params) sfaccount = self._get_sfaccount_by_name(sf_account_name) return sfaccount def _get_cluster_info(self): """Query the SolidFire cluster for some property info.""" params = {} return self._issue_api_request('GetClusterInfo', params)['result'] def _generate_random_string(self, length): """Generates random_string to use for CHAP password.""" char_set = string.ascii_uppercase + string.digits return ''.join(random.sample(char_set, length)) def _get_model_info(self, sfaccount, sf_volume_id): """Gets the connection info for specified account and volume.""" cluster_info = self._get_cluster_info() if self.configuration.sf_svip is None: iscsi_portal = cluster_info['clusterInfo']['svip'] + ':3260' else: iscsi_portal = self.configuration.sf_svip chap_secret = sfaccount['targetSecret'] found_volume = False iteration_count = 0 while not found_volume and iteration_count < 600: volume_list = self._get_volumes_by_sfaccount( sfaccount['accountID']) iqn = None for v in volume_list: if v['volumeID'] == sf_volume_id: iqn = v['iqn'] found_volume = True break if not found_volume: time.sleep(2) iteration_count += 1 if not found_volume: LOG.error(_LE('Failed to retrieve volume SolidFire-' 'ID: %s in get_by_account!'), sf_volume_id) raise exception.VolumeNotFound(volume_id=sf_volume_id) model_update = {} # NOTE(john-griffith): SF volumes are always at lun 0 model_update['provider_location'] = ('%s %s %s' % (iscsi_portal, iqn, 0)) model_update['provider_auth'] = ('CHAP %s %s' % (sfaccount['username'], chap_secret)) if not self.configuration.sf_emulate_512: 
model_update['provider_geometry'] = ('%s %s' % (4096, 4096)) model_update['provider_id'] = ( self._create_provider_id_string(sf_volume_id, sfaccount['accountID'], self.cluster_uuid)) return model_update def _snapshot_discovery(self, src_uuid, params, vref): # NOTE(jdg): First check the SF snapshots # if we don't find a snap by the given name, just move on to check # volumes. This may be a running system that was updated from # before we did snapshots, so need to check both is_clone = False sf_vol = None snap_name = '%s%s' % (self.configuration.sf_volume_prefix, src_uuid) snaps = self._get_sf_snapshots() snap = next((s for s in snaps if s["name"] == snap_name), None) if snap: params['snapshotID'] = int(snap['snapshotID']) params['volumeID'] = int(snap['volumeID']) params['newSize'] = int(vref['size'] * units.Gi) else: sf_vol = self._get_sf_volume(src_uuid) if sf_vol is None: raise exception.VolumeNotFound(volume_id=src_uuid) params['volumeID'] = int(sf_vol['volumeID']) params['newSize'] = int(vref['size'] * units.Gi) is_clone = True return params, is_clone, sf_vol def _do_clone_volume(self, src_uuid, vref, sf_src_snap=None): """Create a clone of an existing volume or snapshot.""" attributes = {} sf_account = self._get_create_account(vref['project_id']) params = {'name': '%(prefix)s%(id)s' % {'prefix': self.configuration.sf_volume_prefix, 'id': vref['id']}, 'newAccountID': sf_account['accountID']} is_clone = False sf_vol = None if sf_src_snap: # In some scenarios we are passed the snapshot information that we # are supposed to clone. 
params['snapshotID'] = sf_src_snap['snapshotID'] params['volumeID'] = sf_src_snap['volumeID'] params['newSize'] = int(vref['size'] * units.Gi) else: params, is_clone, sf_vol = self._snapshot_discovery(src_uuid, params, vref) data = self._issue_api_request('CloneVolume', params, version='6.0') if (('result' not in data) or ('volumeID' not in data['result'])): msg = _("API response: %s") % data raise exception.SolidFireAPIException(msg) sf_volume_id = data['result']['volumeID'] qos = self._retrieve_qos_setting(vref) # NOTE(jdg): all attributes are copied via clone, need to do an update # to set any that were provided qos_params = {'volumeID': sf_volume_id} create_time = vref['created_at'].isoformat() attributes = {'uuid': vref['id'], 'is_clone': 'True', 'src_uuid': src_uuid, 'created_at': create_time} if qos: qos_params['qos'] = qos for k, v in qos.items(): attributes[k] = str(v) qos_params['attributes'] = attributes data = self._issue_api_request('ModifyVolume', qos_params) model_update = self._get_model_info(sf_account, sf_volume_id) if model_update is None: mesg = _('Failed to get model update from clone') raise exception.SolidFireAPIException(mesg) # Increment the usage count, just for data collection # We're only doing this for clones, not create_from snaps if is_clone: data = self._update_attributes(sf_vol) return (data, sf_account, model_update) def _update_attributes(self, sf_vol): cloned_count = sf_vol['attributes'].get('cloned_count', 0) cloned_count += 1 attributes = sf_vol['attributes'] attributes['cloned_count'] = cloned_count params = {'volumeID': int(sf_vol['volumeID'])} params['attributes'] = attributes return self._issue_api_request('ModifyVolume', params) def _do_volume_create(self, sf_account, params): sf_volid = self._issue_api_request( 'CreateVolume', params)['result']['volumeID'] return self._get_model_info(sf_account, sf_volid) def _do_snapshot_create(self, params): model_update = {} snapshot_id = self._issue_api_request( 'CreateSnapshot', 
params, version='6.0')['result']['snapshotID'] snaps = self._get_sf_snapshots() snap = ( next((s for s in snaps if int(s["snapshotID"]) == int(snapshot_id)), None)) model_update['provider_id'] = ( self._create_provider_id_string(snap['snapshotID'], snap['volumeID'], self.cluster_uuid)) return model_update def _set_qos_presets(self, volume): qos = {} valid_presets = self.sf_qos_dict.keys() # First look to see if they included a preset presets = [i.value for i in volume.get('volume_metadata') if i.key == 'sf-qos' and i.value in valid_presets] if len(presets) > 0: if len(presets) > 1: LOG.warning(_LW('More than one valid preset was ' 'detected, using %s'), presets[0]) qos = self.sf_qos_dict[presets[0]] else: # look for explicit settings for i in volume.get('volume_metadata'): if i.key in self.sf_qos_keys: qos[i.key] = int(i.value) return qos def _set_qos_by_volume_type(self, ctxt, type_id): qos = {} volume_type = volume_types.get_volume_type(ctxt, type_id) qos_specs_id = volume_type.get('qos_specs_id') specs = volume_type.get('extra_specs') # NOTE(jdg): We prefer the qos_specs association # and over-ride any existing # extra-specs settings if present if qos_specs_id is not None: kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] else: kvs = specs for key, value in kvs.items(): if ':' in key: fields = key.split(':') key = fields[1] if key in self.sf_qos_keys: qos[key] = int(value) return qos def _get_sf_volume(self, uuid, params=None): if params: vols = self._issue_api_request( 'ListVolumesForAccount', params)['result']['volumes'] else: vols = self._issue_api_request( 'ListActiveVolumes', params)['result']['volumes'] found_count = 0 sf_volref = None for v in vols: # NOTE(jdg): In the case of "name" we can't # update that on manage/import, so we use # the uuid attribute meta = v.get('attributes') alt_id = '' if meta: alt_id = meta.get('uuid', '') if uuid in v['name'] or uuid in alt_id: found_count += 1 sf_volref = v LOG.debug("Mapped SolidFire volumeID 
%(volume_id)s " "to cinder ID %(uuid)s.", {'volume_id': v['volumeID'], 'uuid': uuid}) if found_count == 0: # NOTE(jdg): Previously we would raise here, but there are cases # where this might be a cleanup for a failed delete. # Until we get better states we'll just log an error LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid) if found_count > 1: LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s."), {'count': found_count, 'uuid': uuid}) raise exception.DuplicateSfVolumeNames(vol_name=uuid) return sf_volref def _get_sf_snapshots(self, sf_volid=None): params = {} if sf_volid: params = {'volumeID': sf_volid} return self._issue_api_request( 'ListSnapshots', params, version='6.0')['result']['snapshots'] def _create_image_volume(self, context, image_meta, image_service, image_id): with image_utils.TemporaryImages.fetch(image_service, context, image_id) as tmp_image: data = image_utils.qemu_img_info(tmp_image) fmt = data.file_format if fmt is None: raise exception.ImageUnacceptable( reason=_("'qemu-img info' parsing failed."), image_id=image_id) backing_file = data.backing_file if backing_file is not None: raise exception.ImageUnacceptable( image_id=image_id, reason=_("fmt=%(fmt)s backed by:%(backing_file)s") % {'fmt': fmt, 'backing_file': backing_file, }) virtual_size = int(math.ceil(float(data.virtual_size) / units.Gi)) attributes = {} attributes['image_info'] = {} attributes['image_info']['image_updated_at'] = ( image_meta['updated_at'].isoformat()) attributes['image_info']['image_name'] = ( image_meta['name']) attributes['image_info']['image_created_at'] = ( image_meta['created_at'].isoformat()) attributes['image_info']['image_id'] = image_meta['id'] params = {'name': 'OpenStackIMG-%s' % image_id, 'accountID': self.template_account_id, 'sliceCount': 1, 'totalSize': int(virtual_size * units.Gi), 'enable512e': self.configuration.sf_emulate_512, 'attributes': attributes, 'qos': {}} sf_account = self._issue_api_request( 'GetAccountByID', {'accountID': 
self.template_account_id})['result']['account'] template_vol = self._do_volume_create(sf_account, params) tvol = {} tvol['id'] = image_id tvol['provider_location'] = template_vol['provider_location'] tvol['provider_auth'] = template_vol['provider_auth'] connector = {'multipath': False} conn = self.initialize_connection(tvol, connector) attach_info = super(SolidFireDriver, self)._connect_device(conn) properties = 'na' try: image_utils.convert_image(tmp_image, attach_info['device']['path'], 'raw', run_as_root=True) data = image_utils.qemu_img_info(attach_info['device']['path'], run_as_root=True) if data.file_format != 'raw': raise exception.ImageUnacceptable( image_id=image_id, reason=_("Converted to %(vol_format)s, but format is " "now %(file_format)s") % {'vol_format': 'raw', 'file_format': data. file_format}) except Exception as exc: vol = self._get_sf_volume(image_id) LOG.error(_LE('Failed image conversion during ' 'cache creation: %s'), exc) LOG.debug('Removing SolidFire Cache Volume (SF ID): %s', vol['volumeID']) self._detach_volume(context, attach_info, tvol, properties) self._issue_api_request('DeleteVolume', params) return self._detach_volume(context, attach_info, tvol, properties) sf_vol = self._get_sf_volume(image_id, params) LOG.debug('Successfully created SolidFire Image Template ' 'for image-id: %s', image_id) return sf_vol def _verify_image_volume(self, context, image_meta, image_service): # This method just verifies that IF we have a cache volume that # it's still up to date and current WRT the image in Glance # ie an image-update hasn't occurred since we grabbed it # If it's out of date, just delete it and we'll create a new one # Any other case we don't care and just return without doing anything params = {'accountID': self.template_account_id} sf_vol = self._get_sf_volume(image_meta['id'], params) if sf_vol is None: return # Check updated_at field, delete copy and update if needed if sf_vol['attributes']['image_info']['image_updated_at'] == ( 
image_meta['updated_at'].isoformat()): return else: # Bummer, it's been updated, delete it params = {'accountID': self.template_account_id} params['volumeID'] = sf_vol['volumeID'] self._issue_api_request('DeleteVolume', params) if not self._create_image_volume(context, image_meta, image_service, image_meta['id']): msg = _("Failed to create SolidFire Image-Volume") raise exception.SolidFireAPIException(msg) def _get_sfaccounts_for_tenant(self, cinder_project_id): accounts = self._issue_api_request( 'ListAccounts', {})['result']['accounts'] # Note(jdg): On SF we map account-name to OpenStack's tenant ID # we use tenantID in here to get secondaries that might exist # Also: we expect this to be sorted, so we get the primary first # in the list return sorted([acc for acc in accounts if cinder_project_id in acc['username']]) def _get_all_active_volumes(self, cinder_uuid=None): params = {} volumes = self._issue_api_request('ListActiveVolumes', params)['result']['volumes'] if cinder_uuid: vols = ([v for v in volumes if cinder_uuid in v.name]) else: vols = [v for v in volumes] return vols def _get_all_deleted_volumes(self, cinder_uuid=None): params = {} vols = self._issue_api_request('ListDeletedVolumes', params)['result']['volumes'] if cinder_uuid: deleted_vols = ([v for v in vols if cinder_uuid in v['name']]) else: deleted_vols = [v for v in vols] return deleted_vols def _get_account_create_availability(self, accounts): # we'll check both the primary and the secondary # if it exists and return whichever one has count # available. for acc in accounts: if self._get_volumes_for_account( acc['accountID']) > self.max_volumes_per_account: return acc if len(accounts) == 1: sfaccount = self._create_sfaccount(accounts[0]['name'] + '_') return sfaccount return None def _get_create_account(self, proj_id): # Retrieve SolidFire accountID to be used for creating volumes. 
sf_accounts = self._get_sfaccounts_for_tenant(proj_id) if not sf_accounts: sf_account = self._create_sfaccount(proj_id) else: # Check availability for creates sf_account = self._get_account_create_availability(sf_accounts) if not sf_account: # TODO(jdg): We're not doing tertiaries, so fail. msg = _('Volumes/account exceeded on both primary and ' 'secondary SolidFire accounts.') raise exception.SolidFireDriverException(msg) return sf_account def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None): # ListVolumesForAccount gives both Active and Deleted # we require the solidfire accountID, uuid of volume # is optional params = {'accountID': sf_account_id} vols = self._issue_api_request('ListVolumesForAccount', params)['result']['volumes'] if cinder_uuid: vlist = [v for v in vols if cinder_uuid in v['name']] else: vlist = [v for v in vols] vlist = sorted(vlist, key=lambda k: k['volumeID']) return vlist def _create_vag(self, iqn, vol_id=None): """Create a volume access group(vag). Returns the vag_id. """ vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn) params = {'name': vag_name, 'initiators': [iqn], 'volumes': [vol_id], 'attributes': {'openstack': True}} try: result = self._issue_api_request('CreateVolumeAccessGroup', params, version='7.0') return result['result']['volumeAccessGroupID'] except exception.SolidFireAPIException as error: if xExceededLimit in error.msg: if iqn in error.msg: # Initiator double registered. return self._safe_create_vag(iqn, vol_id) else: # VAG limit reached. Purge and start over. self._purge_vags() return self._safe_create_vag(iqn, vol_id) else: raise def _safe_create_vag(self, iqn, vol_id=None): # Potential race condition with simultaneous volume attaches to the # same host. To help avoid this, VAG creation makes a best attempt at # finding and using an existing VAG. 
vags = self._get_vags_by_name(iqn) if vags: # Filter through the vags and find the one with matching initiator vag = next((v for v in vags if iqn in v['initiators']), None) if vag: return vag['volumeAccessGroupID'] else: # No matches, use the first result, add initiator IQN. vag_id = vags[0]['volumeAccessGroupID'] return self._add_initiator_to_vag(iqn, vag_id) return self._create_vag(iqn, vol_id) def _base_get_vags(self): params = {} vags = self._issue_api_request( 'ListVolumeAccessGroups', params, version='7.0')['result']['volumeAccessGroups'] return vags def _get_vags_by_name(self, iqn): """Retrieve SolidFire volume access group objects by name. Returns an array of vags with a matching name value. Returns an empty array if there are no matches. """ vags = self._base_get_vags() vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn) matching_vags = [vag for vag in vags if vag['name'] == vag_name] return matching_vags def _add_initiator_to_vag(self, iqn, vag_id): # Added a vag_id return as there is a chance that we might have to # create a new VAG if our target VAG is deleted underneath us. params = {"initiators": [iqn], "volumeAccessGroupID": vag_id} try: self._issue_api_request('AddInitiatorsToVolumeAccessGroup', params, version='7.0') return vag_id except exception.SolidFireAPIException as error: if xAlreadyInVolumeAccessGroup in error.msg: return vag_id elif xVolumeAccessGroupIDDoesNotExist in error.msg: # No locking means sometimes a VAG can be removed by a parallel # volume detach against the same host. return self._safe_create_vag(iqn) else: raise def _add_volume_to_vag(self, vol_id, iqn, vag_id): # Added a vag_id return to be consistent with add_initiator_to_vag. It # isn't necessary but may be helpful in the future. 
params = {"volumeAccessGroupID": vag_id, "volumes": [vol_id]} try: self._issue_api_request('AddVolumesToVolumeAccessGroup', params, version='7.0') return vag_id except exception.SolidFireAPIException as error: if xAlreadyInVolumeAccessGroup in error.msg: return vag_id elif xVolumeAccessGroupIDDoesNotExist in error.msg: return self._safe_create_vag(iqn, vol_id) else: raise def _remove_volume_from_vag(self, vol_id, vag_id): params = {"volumeAccessGroupID": vag_id, "volumes": [vol_id]} try: self._issue_api_request('RemoveVolumesFromVolumeAccessGroup', params, version='7.0') except exception.SolidFireAPIException as error: if xNotInVolumeAccessGroup in error.msg: pass elif xVolumeAccessGroupIDDoesNotExist in error.msg: pass else: raise def _remove_volume_from_vags(self, vol_id): # Due to all sorts of uncertainty around multiattach, on volume # deletion we make a best attempt at removing the vol_id from VAGs. vags = self._base_get_vags() targets = [v for v in vags if vol_id in v['volumes']] for vag in targets: self._remove_volume_from_vag(vol_id, vag['volumeAccessGroupID']) def _remove_vag(self, vag_id): params = {"volumeAccessGroupID": vag_id} try: self._issue_api_request('DeleteVolumeAccessGroup', params, version='7.0') except exception.SolidFireAPIException as error: if xVolumeAccessGroupIDDoesNotExist not in error.msg: raise def _purge_vags(self, limit=10): # Purge up to limit number of VAGs that have no active volumes, # initiators, and an OpenStack attribute. Purge oldest VAGs first. vags = self._base_get_vags() targets = [v for v in vags if v['volumes'] == [] and v['initiators'] == [] and v['deletedVolumes'] == [] and v['attributes'].get('openstack')] sorted_targets = sorted(targets, key=lambda k: k['volumeAccessGroupID']) for vag in sorted_targets[:limit]: self._remove_vag(vag['volumeAccessGroupID']) def clone_image(self, context, volume, image_location, image_meta, image_service): public = False # Check out pre-requisites: # Is template caching enabled? 
if not self.configuration.sf_allow_template_caching: return None, False # NOTE(jdg): Glance V2 moved from is_public to visibility # so we check both, as we don't necessarily know or want # to care which we're using. Will need to look at # future handling of things like shared and community # but for now, it's owner or public and that's it visibility = image_meta.get('visibility', None) if visibility and visibility == 'public': public = True elif image_meta.get('is_public', False): public = True else: if image_meta['owner'] == volume['project_id']: public = True if not public: LOG.warning(_LW("Requested image is not " "accessible by current Tenant.")) return None, False try: self._verify_image_volume(context, image_meta, image_service) except exception.SolidFireAPIException: return None, False try: (data, sfaccount, model) = self._do_clone_volume(image_meta['id'], volume) except exception.VolumeNotFound: if self._create_image_volume(context, image_meta, image_service, image_meta['id']) is None: # We failed, dump out return None, False # Ok, should be good to go now, try it again (data, sfaccount, model) = self._do_clone_volume(image_meta['id'], volume) return model, True def _retrieve_qos_setting(self, volume): qos = {} if (self.configuration.sf_allow_tenant_qos and volume.get('volume_metadata')is not None): qos = self._set_qos_presets(volume) ctxt = context.get_admin_context() type_id = volume.get('volume_type_id', None) if type_id is not None: qos = self._set_qos_by_volume_type(ctxt, type_id) return qos def create_volume(self, volume): """Create volume on SolidFire device. The account is where CHAP settings are derived from, volume is created and exported. Note that the new volume is immediately ready for use. One caveat here is that an existing user account must be specified in the API call to create a new volume. We use a set algorithm to determine account info based on passed in cinder volume object. 
First we check to see if the account already exists (and use it), or if it does not already exist, we'll go ahead and create it. """ slice_count = 1 attributes = {} sf_account = self._get_create_account(volume['project_id']) qos = self._retrieve_qos_setting(volume) create_time = volume['created_at'].isoformat() attributes = {'uuid': volume['id'], 'is_clone': 'False', 'created_at': create_time} if qos: for k, v in qos.items(): attributes[k] = str(v) vname = '%s%s' % (self.configuration.sf_volume_prefix, volume['id']) params = {'name': vname, 'accountID': sf_account['accountID'], 'sliceCount': slice_count, 'totalSize': int(volume['size'] * units.Gi), 'enable512e': self.configuration.sf_emulate_512, 'attributes': attributes, 'qos': qos} # NOTE(jdg): Check if we're a migration tgt, if so # use the old volume-id here for the SF Name migration_status = volume.get('migration_status', None) if migration_status and 'target' in migration_status: k, v = migration_status.split(':') vname = '%s%s' % (self.configuration.sf_volume_prefix, v) params['name'] = vname params['attributes']['migration_uuid'] = volume['id'] params['attributes']['uuid'] = v return self._do_volume_create(sf_account, params) def create_cloned_volume(self, volume, src_vref): """Create a clone of an existing volume.""" (_data, _sfaccount, model) = self._do_clone_volume( src_vref['id'], volume) return model def delete_volume(self, volume): """Delete SolidFire Volume from device. SolidFire allows multiple volumes with same name, volumeID is what's guaranteed unique. 
""" sf_vol = None accounts = self._get_sfaccounts_for_tenant(volume['project_id']) if accounts is None: LOG.error(_LE("Account for Volume ID %s was not found on " "the SolidFire Cluster while attempting " "delete_volume operation!"), volume['id']) LOG.error(_LE("This usually means the volume was never " "successfully created.")) return for acc in accounts: vols = self._get_volumes_for_account(acc['accountID'], volume['id']) if vols: sf_vol = vols[0] break if sf_vol is not None: params = {'volumeID': sf_vol['volumeID']} self._issue_api_request('DeleteVolume', params) if volume.get('multiattach'): self._remove_volume_from_vags(sf_vol['volumeID']) else: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "delete_volume operation!"), volume['id']) def delete_snapshot(self, snapshot): """Delete the specified snapshot from the SolidFire cluster.""" sf_snap_name = '%s%s' % (self.configuration.sf_volume_prefix, snapshot['id']) accounts = self._get_sfaccounts_for_tenant(snapshot['project_id']) snap = None for acct in accounts: params = {'accountID': acct['accountID']} sf_vol = self._get_sf_volume(snapshot['volume_id'], params) if sf_vol: sf_snaps = self._get_sf_snapshots(sf_vol['volumeID']) snap = next((s for s in sf_snaps if s["name"] == sf_snap_name), None) if snap: params = {'snapshotID': snap['snapshotID']} self._issue_api_request('DeleteSnapshot', params, version='6.0') return # Make sure it's not "old style" using clones as snaps LOG.debug("Snapshot not found, checking old style clones.") self.delete_volume(snapshot) def create_snapshot(self, snapshot): sfaccount = self._get_sfaccount(snapshot['project_id']) if sfaccount is None: LOG.error(_LE("Account for Volume ID %s was not found on " "the SolidFire Cluster while attempting " "create_snapshot operation!"), snapshot['volume_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(snapshot['volume_id'], params) if sf_vol is None: raise 
exception.VolumeNotFound(volume_id=snapshot['volume_id']) params = {'volumeID': sf_vol['volumeID'], 'name': '%s%s' % (self.configuration.sf_volume_prefix, snapshot['id'])} return self._do_snapshot_create(params) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from the specified snapshot.""" (_data, _sfaccount, model) = self._do_clone_volume( snapshot['id'], volume) return model # Consistency group helpers def _create_group_snapshot(self, name, sf_volumes): # Group snapshot is our version of a consistency group snapshot. vol_ids = [vol['volumeID'] for vol in sf_volumes] params = {'name': name, 'volumes': vol_ids} snapshot_id = self._issue_api_request('CreateGroupSnapshot', params, version='7.0') return snapshot_id['result'] def _group_snapshot_creator(self, gsnap_name, src_vol_ids): # Common helper that takes in an array of OpenStack Volume UUIDs and # creates a SolidFire group snapshot with them. vol_names = [self.configuration.sf_volume_prefix + vol_id for vol_id in src_vol_ids] active_sf_vols = self._get_all_active_volumes() target_vols = [vol for vol in active_sf_vols if vol['name'] in vol_names] if len(src_vol_ids) != len(target_vols): msg = (_("Retrieved a different amount of SolidFire volumes for " "the provided Cinder volumes. Retrieved: %(ret)s " "Desired: %(des)s") % {"ret": len(target_vols), "des": len(src_vol_ids)}) raise exception.SolidFireDriverException(msg) result = self._create_group_snapshot(gsnap_name, target_vols) return result def _create_temp_group_snapshot(self, source_cg, source_vols): # Take a temporary snapshot to create the volumes for a new # consistency group. 
gsnap_name = ("%(prefix)s%(id)s-tmp" % {"prefix": self.configuration.sf_volume_prefix, "id": source_cg['id']}) vol_ids = [vol['id'] for vol in source_vols] self._group_snapshot_creator(gsnap_name, vol_ids) return gsnap_name def _list_group_snapshots(self): result = self._issue_api_request('ListGroupSnapshots', {}, version='7.0') return result['result']['groupSnapshots'] def _get_group_snapshot_by_name(self, name): target_snaps = self._list_group_snapshots() target = next((snap for snap in target_snaps if snap['name'] == name), None) return target def _delete_group_snapshot(self, gsnapid): params = {'groupSnapshotID': gsnapid} self._issue_api_request('DeleteGroupSnapshot', params, version='7.0') def _delete_cgsnapshot_by_name(self, snap_name): # Common function used to find and delete a snapshot. target = self._get_group_snapshot_by_name(snap_name) if not target: msg = _("Failed to find group snapshot named: %s") % snap_name raise exception.SolidFireDriverException(msg) self._delete_group_snapshot(target['groupSnapshotID']) def _find_linked_snapshot(self, target_uuid, group_snap): # Because group snapshots name each individual snapshot the group # snapshot name, we have to trawl through the SolidFire snapshots to # find the SolidFire snapshot from the group that is linked with the # SolidFire volumeID that is linked to the Cinder snapshot source # volume. source_vol = self._get_sf_volume(target_uuid) target_snap = next((sn for sn in group_snap['members'] if sn['volumeID'] == source_vol['volumeID']), None) return target_snap def _create_clone_from_sf_snapshot(self, target_uuid, src_uuid, sf_group_snap, vol): # Find the correct SolidFire backing snapshot. 
sf_src_snap = self._find_linked_snapshot(target_uuid, sf_group_snap) _data, _sfaccount, model = self._do_clone_volume(src_uuid, vol, sf_src_snap) model['id'] = vol['id'] model['status'] = 'available' return model # Required consistency group functions def create_consistencygroup(self, ctxt, group): # SolidFire does not have a viable means for storing consistency group # volume associations. So, we're just going to play along with the # consistency group song and dance. There will be a lot of no-ops # because of this. return {'status': fields.ConsistencyGroupStatus.AVAILABLE} def create_consistencygroup_from_src(self, ctxt, group, volumes, cgsnapshot, snapshots, source_cg, source_vols): if cgsnapshot and snapshots: sf_name = self.configuration.sf_volume_prefix + cgsnapshot['id'] sf_group_snap = self._get_group_snapshot_by_name(sf_name) # Go about creating volumes from provided snaps. vol_models = [] for vol, snap in zip(volumes, snapshots): vol_models.append(self._create_clone_from_sf_snapshot( snap['volume_id'], snap['id'], sf_group_snap, vol)) return ({'status': fields.ConsistencyGroupStatus.AVAILABLE}, vol_models) elif source_cg and source_vols: # Create temporary group snapshot. gsnap_name = self._create_temp_group_snapshot(source_cg, source_vols) try: sf_group_snap = self._get_group_snapshot_by_name(gsnap_name) # For each temporary snapshot clone the volume. 
vol_models = [] for vol in volumes: vol_models.append(self._create_clone_from_sf_snapshot( vol['source_volid'], vol['source_volid'], sf_group_snap, vol)) finally: self._delete_cgsnapshot_by_name(gsnap_name) return {'status': 'available'}, vol_models def create_cgsnapshot(self, ctxt, cgsnapshot, snapshots): vol_ids = [snapshot['volume_id'] for snapshot in snapshots] vol_names = [self.configuration.sf_volume_prefix + vol_id for vol_id in vol_ids] active_sf_vols = self._get_all_active_volumes() target_vols = [vol for vol in active_sf_vols if vol['name'] in vol_names] if len(snapshots) != len(target_vols): msg = (_("Retrieved a different amount of SolidFire volumes for " "the provided Cinder snapshots. Retrieved: %(ret)s " "Desired: %(des)s") % {"ret": len(target_vols), "des": len(snapshots)}) raise exception.SolidFireDriverException(msg) snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id'] self._create_group_snapshot(snap_name, target_vols) return None, None def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): # Similar to create_consistencygroup, SolidFire's lack of a consistency # group object means there is nothing to update on the cluster. return None, None, None def delete_cgsnapshot(self, ctxt, cgsnapshot, snapshots): snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id'] self._delete_cgsnapshot_by_name(snap_name) return None, None def delete_consistencygroup(self, ctxt, group, volumes): for vol in volumes: self.delete_volume(vol) return None, None def get_volume_stats(self, refresh=False): """Get volume status. If 'refresh' is True, run update first. 
The name is a bit misleading as the majority of the data here is cluster data """ if refresh: try: self._update_cluster_status() except exception.SolidFireAPIException: pass return self.cluster_stats def extend_volume(self, volume, new_size): """Extend an existing volume.""" sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "extend_volume operation!"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) params = { 'volumeID': sf_vol['volumeID'], 'totalSize': int(new_size * units.Gi) } self._issue_api_request('ModifyVolume', params, version='5.0') def _update_cluster_status(self): """Retrieve status info for the Cluster.""" params = {} # NOTE(jdg): The SF api provides an UNBELIEVABLE amount # of stats data, this is just one of the calls results = self._issue_api_request('GetClusterCapacity', params) results = results['result']['clusterCapacity'] free_capacity = ( results['maxProvisionedSpace'] - results['usedSpace']) data = {} backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or self.__class__.__name__ data["vendor_name"] = 'SolidFire Inc' data["driver_version"] = self.VERSION data["storage_protocol"] = 'iSCSI' data['consistencygroup_support'] = True data['total_capacity_gb'] = ( float(results['maxProvisionedSpace'] / units.Gi)) data['free_capacity_gb'] = float(free_capacity / units.Gi) data['reserved_percentage'] = self.configuration.reserved_percentage data['QoS_support'] = True data['compression_percent'] = ( results['compressionPercent']) data['deduplicaton_percent'] = ( results['deDuplicationPercent']) data['thin_provision_percent'] = ( results['thinProvisioningPercent']) self.cluster_stats = data def initialize_connection(self, volume, connector, initiator_data=None): 
"""Initialize the connection and return connection info. Optionally checks and utilizes volume access groups. """ properties = self._sf_initialize_connection(volume, connector, initiator_data) properties['data']['discard'] = True return properties def attach_volume(self, context, volume, instance_uuid, host_name, mountpoint): sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "attach_volume operation!"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) attributes = sf_vol['attributes'] attributes['attach_time'] = volume.get('attach_time', None) attributes['attached_to'] = instance_uuid params = { 'volumeID': sf_vol['volumeID'], 'attributes': attributes } self._issue_api_request('ModifyVolume', params) def terminate_connection(self, volume, properties, force): return self._sf_terminate_connection(volume, properties, force) def detach_volume(self, context, volume, attachment=None): sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "detach_volume operation!"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) attributes = sf_vol['attributes'] attributes['attach_time'] = None attributes['attached_to'] = None params = { 'volumeID': sf_vol['volumeID'], 'attributes': attributes } self._issue_api_request('ModifyVolume', params) def accept_transfer(self, context, volume, new_user, new_project): sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire 
Cluster while attempting " "accept_transfer operation!"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) if new_project != volume['project_id']: # do a create_sfaccount here as this tenant # may not exist on the cluster yet sfaccount = self._create_sfaccount(new_project) params = { 'volumeID': sf_vol['volumeID'], 'accountID': sfaccount['accountID'] } self._issue_api_request('ModifyVolume', params, version='5.0') volume['project_id'] = new_project volume['user_id'] = new_user return self.target_driver.ensure_export(context, volume, None) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns a boolean indicating whether the retype occurred. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities (Not Used). """ qos = {} attributes = {} sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: raise exception.VolumeNotFound(volume_id=volume['id']) attributes = sf_vol['attributes'] attributes['retyped_at'] = timeutils.utcnow().isoformat() params = {'volumeID': sf_vol['volumeID']} qos = self._set_qos_by_volume_type(ctxt, new_type['id']) if qos: params['qos'] = qos for k, v in qos.items(): attributes[k] = str(v) params['attributes'] = attributes self._issue_api_request('ModifyVolume', params) return True def manage_existing(self, volume, external_ref): """Manages an existing SolidFire Volume (import to Cinder). Renames the Volume to match the expected name for the volume. Also need to consider things like QoS, Emulation, account/tenant. 
""" sfid = external_ref.get('source-id', None) sfname = external_ref.get('name', None) if sfid is None: raise exception.SolidFireAPIException(_("Manage existing volume " "requires 'source-id'.")) # First get the volume on the SF cluster (MUST be active) params = {'startVolumeID': sfid, 'limit': 1} vols = self._issue_api_request( 'ListActiveVolumes', params)['result']['volumes'] sf_ref = vols[0] sfaccount = self._create_sfaccount(volume['project_id']) attributes = {} qos = self._retrieve_qos_setting(volume) import_time = volume['created_at'].isoformat() attributes = {'uuid': volume['id'], 'is_clone': 'False', 'os_imported_at': import_time, 'old_name': sfname} if qos: for k, v in qos.items(): attributes[k] = str(v) params = {'name': volume['name'], 'volumeID': sf_ref['volumeID'], 'accountID': sfaccount['accountID'], 'enable512e': self.configuration.sf_emulate_512, 'attributes': attributes, 'qos': qos} self._issue_api_request('ModifyVolume', params, version='5.0') return self._get_model_info(sfaccount, sf_ref['volumeID']) def manage_existing_get_size(self, volume, external_ref): """Return size of an existing LV for manage_existing. 
existing_ref is a dictionary of the form: {'name': <name of existing volume on SF Cluster>} """ sfid = external_ref.get('source-id', None) if sfid is None: raise exception.SolidFireAPIException(_("Manage existing get size " "requires 'id'.")) params = {'startVolumeID': int(sfid), 'limit': 1} vols = self._issue_api_request( 'ListActiveVolumes', params)['result']['volumes'] return int(vols[0]['totalSize']) / int(units.Gi) def unmanage(self, volume): """Mark SolidFire Volume as unmanaged (export from Cinder).""" sfaccount = self._get_sfaccount(volume['project_id']) if sfaccount is None: LOG.error(_LE("Account for Volume ID %s was not found on " "the SolidFire Cluster while attempting " "unmanage operation!"), volume['id']) raise exception.SolidFireAPIException(_("Failed to find account " "for volume.")) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: raise exception.VolumeNotFound(volume_id=volume['id']) export_time = timeutils.utcnow().isoformat() attributes = sf_vol['attributes'] attributes['os_exported_at'] = export_time params = {'volumeID': int(sf_vol['volumeID']), 'attributes': attributes} self._issue_api_request('ModifyVolume', params, version='5.0') class SolidFireISCSI(iscsi_driver.SanISCSITarget): def __init__(self, *args, **kwargs): super(SolidFireISCSI, self).__init__(*args, **kwargs) self.sf_driver = kwargs.get('solidfire_driver') def __getattr__(self, attr): return getattr(self.sf_driver, attr) def _do_iscsi_export(self, volume): sfaccount = self._get_sfaccount(volume['project_id']) model_update = {} model_update['provider_auth'] = ('CHAP %s %s' % (sfaccount['username'], sfaccount['targetSecret'])) return model_update def create_export(self, context, volume, volume_path): return self._do_iscsi_export(volume) def ensure_export(self, context, volume, volume_path): try: return self._do_iscsi_export(volume) except exception.SolidFireAPIException: return None # Following are abc's that we 
make sure are caught and # paid attention to. In our case we don't use them # so just stub them out here. def remove_export(self, context, volume): pass def terminate_connection(self, volume, connector, **kwargs): pass def _sf_initialize_connection(self, volume, connector, initiator_data=None): """Initialize the connection and return connection info. Optionally checks and utilizes volume access groups. """ if self.configuration.sf_enable_vag: iqn = connector['initiator'] provider_id = volume['provider_id'] vol_id = int(self._parse_provider_id_string(provider_id)[0]) # safe_create_vag may opt to reuse vs create a vag, so we need to # add our vol_id. vag_id = self._safe_create_vag(iqn, vol_id) self._add_volume_to_vag(vol_id, iqn, vag_id) # Continue along with default behavior return super(SolidFireISCSI, self).initialize_connection(volume, connector) def _sf_terminate_connection(self, volume, properties, force): """Terminate the volume connection. Optionally remove volume from volume access group. If the VAG is empty then the VAG is also removed. """ if self.configuration.sf_enable_vag: iqn = properties['initiator'] vag = self._get_vags_by_name(iqn) provider_id = volume['provider_id'] vol_id = int(self._parse_provider_id_string(provider_id)[0]) if vag and not volume['multiattach']: # Multiattach causes problems with removing volumes from VAGs. # Compromise solution for now is to remove multiattach volumes # from VAGs during volume deletion. vag = vag[0] vag_id = vag['volumeAccessGroupID'] if [vol_id] == vag['volumes']: self._remove_vag(vag_id) elif vol_id in vag['volumes']: self._remove_volume_from_vag(vol_id, vag_id) return super(SolidFireISCSI, self).terminate_connection(volume, properties, force=force)
{ "repo_name": "dims/cinder", "path": "cinder/volume/drivers/solidfire.py", "copies": "1", "size": "72265", "license": "apache-2.0", "hash": -2706140054334626000, "line_mean": 41.0389761489, "line_max": 79, "alpha_frac": 0.539099149, "autogenerated": false, "ratio": 4.36014239169784, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.000009859890949606097, "num_lines": 1719 }
import json import math import random import socket import string import time import warnings from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import units import requests from requests.packages.urllib3 import exceptions import six from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.image import image_utils from cinder.volume.drivers.san import san from cinder.volume import qos_specs from cinder.volume.targets import iscsi as iscsi_driver from cinder.volume import volume_types LOG = logging.getLogger(__name__) sf_opts = [ cfg.BoolOpt('sf_emulate_512', default=True, help='Set 512 byte emulation on volume creation; '), cfg.BoolOpt('sf_allow_tenant_qos', default=False, help='Allow tenants to specify QOS on create'), cfg.StrOpt('sf_account_prefix', default=None, help='Create SolidFire accounts with this prefix. Any string ' 'can be used here, but the string \"hostname\" is special ' 'and will create a prefix using the cinder node hostname ' '(previous default behavior). The default is NO prefix.'), cfg.StrOpt('sf_template_account_name', default='openstack-vtemplate', help='Account name on the SolidFire Cluster to use as owner of ' 'template/cache volumes (created if does not exist).'), cfg.BoolOpt('sf_allow_template_caching', default=True, help='Create an internal cache of copy of images when ' 'a bootable volume is created to eliminate fetch from ' 'glance and qemu-conversion on subsequent calls.'), cfg.StrOpt('sf_svip', default=None, help='Overrides default cluster SVIP with the one specified. ' 'This is required or deployments that have implemented ' 'the use of VLANs for iSCSI networks in their cloud.'), cfg.BoolOpt('sf_enable_volume_mapping', default=True, help='Create an internal mapping of volume IDs and account. 
' 'Optimizes lookups and performance at the expense of ' 'memory, very large deployments may want to consider ' 'setting to False.'), cfg.IntOpt('sf_api_port', default=443, min=1, max=65535, help='SolidFire API port. Useful if the device api is behind ' 'a proxy on a different port.')] CONF = cfg.CONF CONF.register_opts(sf_opts) def retry(exc_tuple, tries=5, delay=1, backoff=2): def retry_dec(f): @six.wraps(f) def func_retry(*args, **kwargs): _tries, _delay = tries, delay while _tries > 1: try: return f(*args, **kwargs) except exc_tuple: time.sleep(_delay) _tries -= 1 _delay *= backoff LOG.debug('Retrying %(args)s, %(tries)s attempts ' 'remaining...', {'args': args, 'tries': _tries}) # NOTE(jdg): Don't log the params passed here # some cmds like createAccount will have sensitive # info in the params, grab only the second tuple # which should be the Method msg = (_('Retry count exceeded for command: %s') % (args[1],)) LOG.error(msg) raise exception.SolidFireAPIException(message=msg) return func_retry return retry_dec class SolidFireDriver(san.SanISCSIDriver): """OpenStack driver to enable SolidFire cluster. 
Version history: 1.0 - Initial driver 1.1 - Refactor, clone support, qos by type and minor bug fixes 1.2 - Add xfr and retype support 1.2.1 - Add export/import support 1.2.2 - Catch VolumeNotFound on accept xfr 2.0.0 - Move from httplib to requests 2.0.1 - Implement SolidFire Snapshots 2.0.2 - Implement secondary account """ VERSION = '2.0.2' sf_qos_dict = {'slow': {'minIOPS': 100, 'maxIOPS': 200, 'burstIOPS': 200}, 'medium': {'minIOPS': 200, 'maxIOPS': 400, 'burstIOPS': 400}, 'fast': {'minIOPS': 500, 'maxIOPS': 1000, 'burstIOPS': 1000}, 'performant': {'minIOPS': 2000, 'maxIOPS': 4000, 'burstIOPS': 4000}, 'off': None} sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS'] cluster_stats = {} retry_exc_tuple = (exception.SolidFireRetryableException, requests.exceptions.ConnectionError) retryable_errors = ['xDBVersionMismatch', 'xMaxSnapshotsPerVolumeExceeded', 'xMaxClonesPerVolumeExceeded', 'xMaxSnapshotsPerNodeExceeded', 'xMaxClonesPerNodeExceeded', 'xNotReadyForIO'] def __init__(self, *args, **kwargs): super(SolidFireDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(sf_opts) self._endpoint = self._build_endpoint_info() self.template_account_id = None self.max_volumes_per_account = 1990 self.volume_map = {} try: self._update_cluster_status() except exception.SolidFireAPIException: pass if self.configuration.sf_allow_template_caching: account = self.configuration.sf_template_account_name self.template_account_id = self._create_template_account(account) self.target_driver = ( importutils.import_object( 'cinder.volume.drivers.solidfire.SolidFireISCSI', solidfire_driver=self, configuration=self.configuration)) def _init_volume_mappings(self, vrefs): updates = [] sf_vols = self._issue_api_request('ListActiveVolumes', {})['result']['volumes'] self.volume_map = {} for v in vrefs: seek_name = 'UUID-%s' % v['id'] sfvol = next( (sv for sv in sf_vols if sv['name'] == seek_name), None) if sfvol: if self.configuration.sf_enable_volume_mapping: 
self.volume_map[v['id']] = ( {'sf_id': sfvol['volumeID'], 'sf_account': sfvol['accountID'], 'cinder_account': v['project_id']}) if v.get('provider_id', 'nil') != sfvol['volumeID']: v['provider_id'] == sfvol['volumeID'] updates.append({'id': v['id'], 'provider_id': sfvol['volumeID']}) return updates def update_provider_info(self, vrefs, snaprefs): volume_updates = self._init_volume_mappings(vrefs) snapshot_updates = None return (volume_updates, snapshot_updates) def _create_template_account(self, account_name): # We raise an API exception if the account doesn't exist # We need to take account_prefix settings into consideration # This just uses the same method to do template account create # as we use for any other OpenStack account account_name = self._get_sf_account_name(account_name) try: id = self._issue_api_request( 'GetAccountByName', {'username': account_name})['result']['account']['accountID'] except exception.SolidFireAPIException: chap_secret = self._generate_random_string(12) params = {'username': account_name, 'initiatorSecret': chap_secret, 'targetSecret': chap_secret, 'attributes': {}} id = self._issue_api_request('AddAccount', params)['result']['accountID'] return id def _build_endpoint_info(self, **kwargs): endpoint = {} endpoint['mvip'] = ( kwargs.get('mvip', self.configuration.san_ip)) endpoint['login'] = ( kwargs.get('login', self.configuration.san_login)) endpoint['passwd'] = ( kwargs.get('passwd', self.configuration.san_password)) endpoint['port'] = ( kwargs.get('port', self.configuration.sf_api_port)) endpoint['url'] = 'https://%s:%s' % (endpoint['mvip'], endpoint['port']) # TODO(jdg): consider a call to GetAPI and setting version return endpoint @retry(retry_exc_tuple, tries=6) def _issue_api_request(self, method, params, version='1.0', endpoint=None): if params is None: params = {} if endpoint is None: endpoint = self._endpoint payload = {'method': method, 'params': params} url = '%s/json-rpc/%s/' % (endpoint['url'], version) with 
warnings.catch_warnings(): warnings.simplefilter("ignore", exceptions.InsecureRequestWarning) req = requests.post(url, data=json.dumps(payload), auth=(endpoint['login'], endpoint['passwd']), verify=False, timeout=30) response = req.json() req.close() if (('error' in response) and (response['error']['name'] in self.retryable_errors)): msg = ('Retryable error (%s) encountered during ' 'SolidFire API call.' % response['error']['name']) LOG.debug(msg) raise exception.SolidFireRetryableException(message=msg) if 'error' in response: msg = _('API response: %s') % response raise exception.SolidFireAPIException(msg) return response def _get_volumes_by_sfaccount(self, account_id): """Get all volumes on cluster for specified account.""" params = {'accountID': account_id} return self._issue_api_request( 'ListVolumesForAccount', params)['result']['volumes'] def _get_sfaccount_by_name(self, sf_account_name): """Get SolidFire account object by name.""" sfaccount = None params = {'username': sf_account_name} try: data = self._issue_api_request('GetAccountByName', params) if 'result' in data and 'account' in data['result']: LOG.debug('Found solidfire account: %s', sf_account_name) sfaccount = data['result']['account'] except exception.SolidFireAPIException as ex: if 'xUnknownAccount' in ex.msg: return sfaccount else: raise return sfaccount def _get_sf_account_name(self, project_id): """Build the SolidFire account name to use.""" prefix = self.configuration.sf_account_prefix or '' if prefix == 'hostname': prefix = socket.gethostname() return '%s%s%s' % (prefix, '-' if prefix else '', project_id) def _get_sfaccount(self, project_id): sf_account_name = self._get_sf_account_name(project_id) sfaccount = self._get_sfaccount_by_name(sf_account_name) if sfaccount is None: raise exception.SolidFireAccountNotFound( account_name=sf_account_name) return sfaccount def _create_sfaccount(self, project_id): """Create account on SolidFire device if it doesn't already exist. 
We're first going to check if the account already exists, if it does just return it. If not, then create it. """ sf_account_name = self._get_sf_account_name(project_id) sfaccount = self._get_sfaccount_by_name(sf_account_name) if sfaccount is None: LOG.debug('solidfire account: %s does not exist, create it...', sf_account_name) chap_secret = self._generate_random_string(12) params = {'username': sf_account_name, 'initiatorSecret': chap_secret, 'targetSecret': chap_secret, 'attributes': {}} self._issue_api_request('AddAccount', params) sfaccount = self._get_sfaccount_by_name(sf_account_name) return sfaccount def _get_cluster_info(self): """Query the SolidFire cluster for some property info.""" params = {} return self._issue_api_request('GetClusterInfo', params)['result'] def _generate_random_string(self, length): """Generates random_string to use for CHAP password.""" char_set = string.ascii_uppercase + string.digits return ''.join(random.sample(char_set, length)) def _get_model_info(self, sfaccount, sf_volume_id): """Gets the connection info for specified account and volume.""" cluster_info = self._get_cluster_info() if self.configuration.sf_svip is None: iscsi_portal = cluster_info['clusterInfo']['svip'] + ':3260' else: iscsi_portal = self.configuration.sf_svip chap_secret = sfaccount['targetSecret'] found_volume = False iteration_count = 0 while not found_volume and iteration_count < 600: volume_list = self._get_volumes_by_sfaccount( sfaccount['accountID']) iqn = None for v in volume_list: if v['volumeID'] == sf_volume_id: iqn = v['iqn'] found_volume = True break if not found_volume: time.sleep(2) iteration_count += 1 if not found_volume: LOG.error(_LE('Failed to retrieve volume SolidFire-' 'ID: %s in get_by_account!'), sf_volume_id) raise exception.VolumeNotFound(volume_id=sf_volume_id) model_update = {} # NOTE(john-griffith): SF volumes are always at lun 0 model_update['provider_location'] = ('%s %s %s' % (iscsi_portal, iqn, 0)) model_update['provider_auth'] = 
('CHAP %s %s' % (sfaccount['username'], chap_secret)) if not self.configuration.sf_emulate_512: model_update['provider_geometry'] = ('%s %s' % (4096, 4096)) model_update['provider_id'] = ('%s' % sf_volume_id) return model_update def _do_clone_volume(self, src_uuid, src_project_id, vref): """Create a clone of an existing volume or snapshot.""" attributes = {} qos = {} sf_accounts = self._get_sfaccounts_for_tenant(vref['project_id']) if not sf_accounts: sf_account = self._create_sfaccount(vref['project_id']) else: # Check availability for creates sf_account = self._get_account_create_availability(sf_accounts) if not sf_account: # TODO(jdg): We're not doing tertiaries, so fail msg = _('volumes/account exceeded on both primary ' 'and secondary SolidFire accounts') raise exception.SolidFireDriverException(msg) params = {'name': 'UUID-%s' % vref['id'], 'newAccountID': sf_account['accountID']} # NOTE(jdg): First check the SF snapshots # if we don't find a snap by the given name, just move on to check # volumes. 
This may be a running system that was updated from # before we did snapshots, so need to check both is_clone = False snap_name = 'UUID-%s' % src_uuid snaps = self._get_sf_snapshots() snap = next((s for s in snaps if s["name"] == snap_name), None) if snap: params['snapshotID'] = int(snap['snapshotID']) params['volumeID'] = int(snap['volumeID']) params['newSize'] = int(vref['size'] * units.Gi) else: sf_vol = self._get_sf_volume( src_uuid, {'accountID': sf_account['accountID']}) if sf_vol is None: raise exception.VolumeNotFound(volume_id=src_uuid) params['volumeID'] = int(sf_vol['volumeID']) params['newSize'] = int(vref['size'] * units.Gi) is_clone = True data = self._issue_api_request('CloneVolume', params, version='6.0') if (('result' not in data) or ('volumeID' not in data['result'])): msg = _("API response: %s") % data raise exception.SolidFireAPIException(msg) sf_volume_id = data['result']['volumeID'] if (self.configuration.sf_allow_tenant_qos and vref.get('volume_metadata')is not None): qos = self._set_qos_presets(vref) ctxt = context.get_admin_context() type_id = vref.get('volume_type_id', None) if type_id is not None: qos = self._set_qos_by_volume_type(ctxt, type_id) # NOTE(jdg): all attributes are copied via clone, need to do an update # to set any that were provided params = {'volumeID': sf_volume_id} create_time = vref['created_at'].isoformat() attributes = {'uuid': vref['id'], 'is_clone': 'True', 'src_uuid': src_uuid, 'created_at': create_time} if qos: params['qos'] = qos for k, v in qos.items(): attributes[k] = str(v) params['attributes'] = attributes data = self._issue_api_request('ModifyVolume', params) model_update = self._get_model_info(sf_account, sf_volume_id) if model_update is None: mesg = _('Failed to get model update from clone') raise exception.SolidFireAPIException(mesg) # Increment the usage count, just for data collection # We're only doing this for clones, not create_from snaps if is_clone: data = self._update_attributes(sf_vol) return 
(data, sf_account, model_update) def _update_attributes(self, sf_vol): cloned_count = sf_vol['attributes'].get('cloned_count', 0) cloned_count += 1 attributes = sf_vol['attributes'] attributes['cloned_count'] = cloned_count params = {'volumeID': int(sf_vol['volumeID'])} params['attributes'] = attributes return self._issue_api_request('ModifyVolume', params) def _do_volume_create(self, sf_account, params): sf_volid = self._issue_api_request( 'CreateVolume', params)['result']['volumeID'] return self._get_model_info(sf_account, sf_volid) def _do_snapshot_create(self, params): return self._issue_api_request( 'CreateSnapshot', params, version='6.0')['result']['snapshotID'] def _set_qos_presets(self, volume): qos = {} valid_presets = self.sf_qos_dict.keys() # First look to see if they included a preset presets = [i.value for i in volume.get('volume_metadata') if i.key == 'sf-qos' and i.value in valid_presets] if len(presets) > 0: if len(presets) > 1: LOG.warning(_LW('More than one valid preset was ' 'detected, using %s'), presets[0]) qos = self.sf_qos_dict[presets[0]] else: # look for explicit settings for i in volume.get('volume_metadata'): if i.key in self.sf_qos_keys: qos[i.key] = int(i.value) return qos def _set_qos_by_volume_type(self, ctxt, type_id): qos = {} volume_type = volume_types.get_volume_type(ctxt, type_id) qos_specs_id = volume_type.get('qos_specs_id') specs = volume_type.get('extra_specs') # NOTE(jdg): We prefer the qos_specs association # and over-ride any existing # extra-specs settings if present if qos_specs_id is not None: kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] else: kvs = specs for key, value in kvs.items(): if ':' in key: fields = key.split(':') key = fields[1] if key in self.sf_qos_keys: qos[key] = int(value) return qos def _get_sf_volume(self, uuid, params): vols = self._issue_api_request( 'ListVolumesForAccount', params)['result']['volumes'] found_count = 0 sf_volref = None for v in vols: # NOTE(jdg): In the case of "name" 
we can't # update that on manage/import, so we use # the uuid attribute meta = v.get('attributes') alt_id = meta.get('uuid', 'empty') if uuid in v['name'] or uuid in alt_id: found_count += 1 sf_volref = v LOG.debug("Mapped SolidFire volumeID %(volume_id)s " "to cinder ID %(uuid)s.", {'volume_id': v['volumeID'], 'uuid': uuid}) if found_count == 0: # NOTE(jdg): Previously we would raise here, but there are cases # where this might be a cleanup for a failed delete. # Until we get better states we'll just log an error LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid) if found_count > 1: LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s."), {'count': found_count, 'uuid': uuid}) raise exception.DuplicateSfVolumeNames(vol_name=uuid) return sf_volref def _get_sf_snapshots(self, sf_volid=None): params = {} if sf_volid: params = {'volumeID': sf_volid} return self._issue_api_request( 'ListSnapshots', params, version='6.0')['result']['snapshots'] def _create_image_volume(self, context, image_meta, image_service, image_id): # NOTE(jdg): It's callers responsibility to ensure that # the optional properties.virtual_size is set on the image # before we get here virt_size = int(image_meta['properties'].get('virtual_size')) min_sz_in_bytes = ( math.ceil(virt_size / float(units.Gi)) * float(units.Gi)) min_sz_in_gig = math.ceil(min_sz_in_bytes / float(units.Gi)) attributes = {} attributes['image_info'] = {} attributes['image_info']['image_updated_at'] = ( image_meta['updated_at'].isoformat()) attributes['image_info']['image_name'] = ( image_meta['name']) attributes['image_info']['image_created_at'] = ( image_meta['created_at'].isoformat()) attributes['image_info']['image_id'] = image_meta['id'] params = {'name': 'OpenStackIMG-%s' % image_id, 'accountID': self.template_account_id, 'sliceCount': 1, 'totalSize': int(min_sz_in_bytes), 'enable512e': self.configuration.sf_emulate_512, 'attributes': attributes, 'qos': {}} sf_account = self._issue_api_request( 
'GetAccountByID', {'accountID': self.template_account_id})['result']['account'] template_vol = self._do_volume_create(sf_account, params) tvol = {} tvol['id'] = image_id tvol['provider_location'] = template_vol['provider_location'] tvol['provider_auth'] = template_vol['provider_auth'] connector = 'na' conn = self.initialize_connection(tvol, connector) attach_info = super(SolidFireDriver, self)._connect_device(conn) properties = 'na' try: image_utils.fetch_to_raw(context, image_service, image_id, attach_info['device']['path'], self.configuration.volume_dd_blocksize, size=min_sz_in_gig) except Exception as exc: params['volumeID'] = template_vol['volumeID'] LOG.error(_LE('Failed image conversion during cache creation: %s'), exc) LOG.debug('Removing SolidFire Cache Volume (SF ID): %s', template_vol['volumeID']) self._detach_volume(context, attach_info, tvol, properties) self._issue_api_request('DeleteVolume', params) return self._detach_volume(context, attach_info, tvol, properties) sf_vol = self._get_sf_volume(image_id, params) LOG.debug('Successfully created SolidFire Image Template ' 'for image-id: %s', image_id) return sf_vol def _verify_image_volume(self, context, image_meta, image_service): # This method just verifies that IF we have a cache volume that # it's still up to date and current WRT the image in Glance # ie an image-update hasn't occurred since we grabbed it # If it's out of date, just delete it and we'll create a new one # Any other case we don't care and just return without doing anything params = {'accountID': self.template_account_id} sf_vol = self._get_sf_volume(image_meta['id'], params) if sf_vol is None: return # Check updated_at field, delete copy and update if needed if sf_vol['attributes']['image_info']['image_updated_at'] == ( image_meta['updated_at'].isoformat()): return else: # Bummer, it's been updated, delete it params = {'accountID': self.template_account_id} params['volumeID'] = sf_vol['volumeID'] self._issue_api_request('DeleteVolume', 
params) if not self._create_image_volume(context, image_meta, image_service, image_meta['id']): msg = _("Failed to create SolidFire Image-Volume") raise exception.SolidFireAPIException(msg) def _get_sfaccounts_for_tenant(self, cinder_project_id): accounts = self._issue_api_request( 'ListAccounts', {})['result']['accounts'] # Note(jdg): On SF we map account-name to OpenStack's tenant ID # we use tenantID in here to get secondaries that might exist # Also: we expect this to be sorted, so we get the primary first # in the list return sorted([acc for acc in accounts if cinder_project_id in acc['username']]) def _get_all_active_volumes(self, cinder_uuid=None): params = {} volumes = self._issue_api_request('ListActiveVolumes', params)['result']['volumes'] if cinder_uuid: vols = ([v for v in volumes if cinder_uuid in v.name]) else: vols = [v for v in volumes] return vols def _get_all_deleted_volumes(self, cinder_uuid=None): params = {} vols = self._issue_api_request('ListDeletedVolumes', params)['result']['volumes'] if cinder_uuid: deleted_vols = ([v for v in vols if cinder_uuid in v['name']]) else: deleted_vols = [v for v in vols] return deleted_vols def _get_account_create_availability(self, accounts): # we'll check both the primary and the secondary # if it exists and return whichever one has count # available. 
for acc in accounts: if self._get_volumes_for_account( acc['accountID']) > self.max_volumes_per_account: return acc if len(accounts) == 1: sfaccount = self._create_sfaccount(accounts[0]['name'] + '_') return sfaccount return None def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None): # ListVolumesForAccount gives both Active and Deleted # we require the solidfire accountID, uuid of volume # is optional params = {'accountID': sf_account_id} vols = self._issue_api_request('ListVolumesForAccount', params)['result']['volumes'] if cinder_uuid: vlist = [v for v in vols if cinder_uuid in v['name']] else: vlist = [v for v in vols] vlist = sorted(vlist, key=lambda k: k['volumeID']) return vlist def clone_image(self, context, volume, image_location, image_meta, image_service): # Check out pre-requisites: # Is template caching enabled? if not self.configuration.sf_allow_template_caching: return None, False # Is the image owned by this tenant or public? if ((not image_meta.get('is_public', False)) and (image_meta['owner'] != volume['project_id'])): LOG.warning(_LW("Requested image is not " "accessible by current Tenant.")) return None, False # Is virtual_size property set on the image? 
if ((not image_meta.get('properties', None)) or (not image_meta['properties'].get('virtual_size', None))): LOG.info(_LI('Unable to create cache volume because image: %s ' 'does not include properties.virtual_size'), image_meta['id']) return None, False try: self._verify_image_volume(context, image_meta, image_service) except exception.SolidFireAPIException: return None, False account = self.configuration.sf_template_account_name try: (data, sfaccount, model) = self._do_clone_volume(image_meta['id'], account, volume) except exception.VolumeNotFound: if self._create_image_volume(context, image_meta, image_service, image_meta['id']) is None: # We failed, dump out return None, False # Ok, should be good to go now, try it again (data, sfaccount, model) = self._do_clone_volume(image_meta['id'], account, volume) return model, True def create_volume(self, volume): """Create volume on SolidFire device. The account is where CHAP settings are derived from, volume is created and exported. Note that the new volume is immediately ready for use. One caveat here is that an existing user account must be specified in the API call to create a new volume. We use a set algorithm to determine account info based on passed in cinder volume object. First we check to see if the account already exists (and use it), or if it does not already exist, we'll go ahead and create it. 
""" slice_count = 1 attributes = {} qos = {} if (self.configuration.sf_allow_tenant_qos and volume.get('volume_metadata')is not None): qos = self._set_qos_presets(volume) ctxt = context.get_admin_context() type_id = volume['volume_type_id'] if type_id is not None: qos = self._set_qos_by_volume_type(ctxt, type_id) create_time = volume['created_at'].isoformat() attributes = {'uuid': volume['id'], 'is_clone': 'False', 'created_at': create_time} if qos: for k, v in qos.items(): attributes[k] = str(v) sf_accounts = self._get_sfaccounts_for_tenant(volume['project_id']) if not sf_accounts: sf_account = self._create_sfaccount(volume['project_id']) else: sf_account = self._get_account_create_availability(sf_accounts) params = {'name': 'UUID-%s' % volume['id'], 'accountID': sf_account['accountID'], 'sliceCount': slice_count, 'totalSize': int(volume['size'] * units.Gi), 'enable512e': self.configuration.sf_emulate_512, 'attributes': attributes, 'qos': qos} # NOTE(jdg): Check if we're a migration tgt, if so # use the old volume-id here for the SF Name migration_status = volume.get('migration_status', None) if migration_status and 'target' in migration_status: k, v = migration_status.split(':') params['name'] = 'UUID-%s' % v params['attributes']['migration_uuid'] = volume['id'] params['attributes']['uuid'] = v return self._do_volume_create(sf_account, params) def create_cloned_volume(self, volume, src_vref): """Create a clone of an existing volume.""" (_data, _sfaccount, model) = self._do_clone_volume( src_vref['id'], src_vref['project_id'], volume) return model def delete_volume(self, volume): """Delete SolidFire Volume from device. SolidFire allows multiple volumes with same name, volumeID is what's guaranteed unique. 
""" accounts = self._get_sfaccounts_for_tenant(volume['project_id']) if accounts is None: LOG.error(_LE("Account for Volume ID %s was not found on " "the SolidFire Cluster while attempting " "delete_volume operation!"), volume['id']) LOG.error(_LE("This usually means the volume was never " "successfully created.")) return for acc in accounts: sf_vol = self._get_volumes_for_account(acc['accountID'], volume['id'])[0] if sf_vol: break if sf_vol is not None: params = {'volumeID': sf_vol['volumeID']} self._issue_api_request('DeleteVolume', params) else: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "delete_volume operation!"), volume['id']) def delete_snapshot(self, snapshot): """Delete the specified snapshot from the SolidFire cluster.""" sf_snap_name = 'UUID-%s' % snapshot['id'] accounts = self._get_sfaccounts_for_tenant(snapshot['project_id']) snap = None for a in accounts: params = {'accountID': a['accountID']} sf_vol = self._get_sf_volume(snapshot['volume_id'], params) sf_snaps = self._get_sf_snapshots(sf_vol['volumeID']) snap = next((s for s in sf_snaps if s["name"] == sf_snap_name), None) if snap: params = {'snapshotID': snap['snapshotID']} self._issue_api_request('DeleteSnapshot', params, version='6.0') return # Make sure it's not "old style" using clones as snaps LOG.debug("Snapshot not found, checking old style clones.") self.delete_volume(snapshot) def create_snapshot(self, snapshot): sfaccount = self._get_sfaccount(snapshot['project_id']) if sfaccount is None: LOG.error(_LE("Account for Volume ID %s was not found on " "the SolidFire Cluster while attempting " "create_snapshot operation!"), snapshot['volume_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(snapshot['volume_id'], params) if sf_vol is None: raise exception.VolumeNotFound(volume_id=snapshot['volume_id']) params = {'volumeID': sf_vol['volumeID'], 'name': 'UUID-%s' % snapshot['id']} self._do_snapshot_create(params) def 
create_volume_from_snapshot(self, volume, snapshot): """Create a volume from the specified snapshot.""" (_data, _sfaccount, model) = self._do_clone_volume( snapshot['id'], snapshot['project_id'], volume) return model def get_volume_stats(self, refresh=False): """Get volume status. If 'refresh' is True, run update first. The name is a bit misleading as the majority of the data here is cluster data """ if refresh: try: self._update_cluster_status() except exception.SolidFireAPIException: pass return self.cluster_stats def extend_volume(self, volume, new_size): """Extend an existing volume.""" sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "extend_volume operation!"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) params = { 'volumeID': sf_vol['volumeID'], 'totalSize': int(new_size * units.Gi) } self._issue_api_request('ModifyVolume', params, version='5.0') def _update_cluster_status(self): """Retrieve status info for the Cluster.""" params = {} # NOTE(jdg): The SF api provides an UNBELIEVABLE amount # of stats data, this is just one of the calls results = self._issue_api_request('GetClusterCapacity', params) results = results['result']['clusterCapacity'] free_capacity = ( results['maxProvisionedSpace'] - results['usedSpace']) data = {} backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or self.__class__.__name__ data["vendor_name"] = 'SolidFire Inc' data["driver_version"] = self.VERSION data["storage_protocol"] = 'iSCSI' data['total_capacity_gb'] = ( float(results['maxProvisionedSpace'] / units.Gi)) data['free_capacity_gb'] = float(free_capacity / units.Gi) data['reserved_percentage'] = self.configuration.reserved_percentage data['QoS_support'] = True data['compression_percent'] 
= ( results['compressionPercent']) data['deduplicaton_percent'] = ( results['deDuplicationPercent']) data['thin_provision_percent'] = ( results['thinProvisioningPercent']) self.cluster_stats = data def attach_volume(self, context, volume, instance_uuid, host_name, mountpoint): sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "attach_volume operation!"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) attributes = sf_vol['attributes'] attributes['attach_time'] = volume.get('attach_time', None) attributes['attached_to'] = instance_uuid params = { 'volumeID': sf_vol['volumeID'], 'attributes': attributes } self._issue_api_request('ModifyVolume', params) def detach_volume(self, context, volume, attachment=None): sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "detach_volume operation!"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) attributes = sf_vol['attributes'] attributes['attach_time'] = None attributes['attached_to'] = None params = { 'volumeID': sf_vol['volumeID'], 'attributes': attributes } self._issue_api_request('ModifyVolume', params) def accept_transfer(self, context, volume, new_user, new_project): sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "accept_transfer operation!"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) if new_project != volume['project_id']: # do a 
create_sfaccount here as this tenant # may not exist on the cluster yet sfaccount = self._create_sfaccount(new_project) params = { 'volumeID': sf_vol['volumeID'], 'accountID': sfaccount['accountID'] } self._issue_api_request('ModifyVolume', params, version='5.0') volume['project_id'] = new_project volume['user_id'] = new_user return self.target_driver.ensure_export(context, volume, None) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns a boolean indicating whether the retype occurred. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities (Not Used). """ qos = {} attributes = {} sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: raise exception.VolumeNotFound(volume_id=volume['id']) attributes = sf_vol['attributes'] attributes['retyped_at'] = timeutils.utcnow().isoformat() params = {'volumeID': sf_vol['volumeID']} qos = self._set_qos_by_volume_type(ctxt, new_type['id']) if qos: params['qos'] = qos for k, v in qos.items(): attributes[k] = str(v) params['attributes'] = attributes self._issue_api_request('ModifyVolume', params) return True def manage_existing(self, volume, external_ref): """Manages an existing SolidFire Volume (import to Cinder). Renames the Volume to match the expected name for the volume. Also need to consider things like QoS, Emulation, account/tenant. 
""" sfid = external_ref.get('source-id', None) sfname = external_ref.get('name', None) if sfid is None: raise exception.SolidFireAPIException(_("Manage existing volume " "requires 'source-id'.")) # First get the volume on the SF cluster (MUST be active) params = {'startVolumeID': sfid, 'limit': 1} vols = self._issue_api_request( 'ListActiveVolumes', params)['result']['volumes'] sf_ref = vols[0] sfaccount = self._create_sfaccount(volume['project_id']) attributes = {} qos = {} if (self.configuration.sf_allow_tenant_qos and volume.get('volume_metadata')is not None): qos = self._set_qos_presets(volume) ctxt = context.get_admin_context() type_id = volume.get('volume_type_id', None) if type_id is not None: qos = self._set_qos_by_volume_type(ctxt, type_id) import_time = volume['created_at'].isoformat() attributes = {'uuid': volume['id'], 'is_clone': 'False', 'os_imported_at': import_time, 'old_name': sfname} if qos: for k, v in qos.items(): attributes[k] = str(v) params = {'name': volume['name'], 'volumeID': sf_ref['volumeID'], 'accountID': sfaccount['accountID'], 'enable512e': self.configuration.sf_emulate_512, 'attributes': attributes, 'qos': qos} self._issue_api_request('ModifyVolume', params, version='5.0') return self._get_model_info(sfaccount, sf_ref['volumeID']) def manage_existing_get_size(self, volume, external_ref): """Return size of an existing LV for manage_existing. 
existing_ref is a dictionary of the form: {'name': <name of existing volume on SF Cluster>} """ sfid = external_ref.get('source-id', None) if sfid is None: raise exception.SolidFireAPIException(_("Manage existing get size " "requires 'id'.")) params = {'startVolumeID': int(sfid), 'limit': 1} vols = self._issue_api_request( 'ListActiveVolumes', params)['result']['volumes'] return int(vols[0]['totalSize']) / int(units.Gi) def unmanage(self, volume): """Mark SolidFire Volume as unmanaged (export from Cinder).""" sfaccount = self._get_sfaccount(volume['project_id']) if sfaccount is None: LOG.error(_LE("Account for Volume ID %s was not found on " "the SolidFire Cluster while attempting " "unmanage operation!"), volume['id']) raise exception.SolidFireAPIException(_("Failed to find account " "for volume.")) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: raise exception.VolumeNotFound(volume_id=volume['id']) export_time = timeutils.utcnow().isoformat() attributes = sf_vol['attributes'] attributes['os_exported_at'] = export_time params = {'volumeID': int(sf_vol['volumeID']), 'attributes': attributes} self._issue_api_request('ModifyVolume', params, version='5.0') # #### Interface methods for transport layer #### # # TODO(jdg): SolidFire can mix and do iSCSI and FC on the # same cluster, we'll modify these later to check based on # the volume info if we need an FC target driver or an # iSCSI target driver def ensure_export(self, context, volume): return self.target_driver.ensure_export(context, volume, None) def create_export(self, context, volume, connector): return self.target_driver.create_export( context, volume, None) def remove_export(self, context, volume): return self.target_driver.remove_export(context, volume) def initialize_connection(self, volume, connector): return self.target_driver.initialize_connection(volume, connector) def validate_connector(self, connector): return 
self.target_driver.validate_connector(connector) def terminate_connection(self, volume, connector, **kwargs): return self.target_driver.terminate_connection(volume, connector, **kwargs) class SolidFireISCSI(iscsi_driver.SanISCSITarget): def __init__(self, *args, **kwargs): super(SolidFireISCSI, self).__init__(*args, **kwargs) self.sf_driver = kwargs.get('solidfire_driver') def _do_iscsi_export(self, volume): sfaccount = self.sf_driver._get_sfaccount(volume['project_id']) model_update = {} model_update['provider_auth'] = ('CHAP %s %s' % (sfaccount['username'], sfaccount['targetSecret'])) return model_update def create_export(self, context, volume, volume_path): return self._do_iscsi_export(volume) def ensure_export(self, context, volume, volume_path): try: return self._do_iscsi_export(volume) except exception.SolidFireAPIException: return None # Following are abc's that we make sure are caught and # paid attention to. In our case we don't use them # so just stub them out here. def remove_export(self, context, volume): pass def terminate_connection(self, volume, connector, **kwargs): pass
{ "repo_name": "duhzecca/cinder", "path": "cinder/volume/drivers/solidfire.py", "copies": "1", "size": "51454", "license": "apache-2.0", "hash": 5054135275388927000, "line_mean": 39.4194815397, "line_max": 79, "alpha_frac": 0.547226649, "autogenerated": false, "ratio": 4.32277577081408, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.000053954080160419005, "num_lines": 1273 }
import json import math import random import socket import string import time import warnings from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from oslo_utils import units import requests from requests.packages.urllib3 import exceptions import six from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LW from cinder.image import image_utils from cinder.volume.drivers.san import san from cinder.volume import qos_specs from cinder.volume.targets import iscsi as iscsi_driver from cinder.volume import volume_types LOG = logging.getLogger(__name__) sf_opts = [ cfg.BoolOpt('sf_emulate_512', default=True, help='Set 512 byte emulation on volume creation; '), cfg.BoolOpt('sf_allow_tenant_qos', default=False, help='Allow tenants to specify QOS on create'), cfg.StrOpt('sf_account_prefix', help='Create SolidFire accounts with this prefix. Any string ' 'can be used here, but the string \"hostname\" is special ' 'and will create a prefix using the cinder node hostname ' '(previous default behavior). The default is NO prefix.'), cfg.StrOpt('sf_volume_prefix', default='UUID-', help='Create SolidFire volumes with this prefix. Volume names ' 'are of the form <sf_volume_prefix><cinder-volume-id>. ' 'The default is to use a prefix of \'UUID-\'.'), cfg.StrOpt('sf_template_account_name', default='openstack-vtemplate', help='Account name on the SolidFire Cluster to use as owner of ' 'template/cache volumes (created if does not exist).'), cfg.BoolOpt('sf_allow_template_caching', default=True, help='Create an internal cache of copy of images when ' 'a bootable volume is created to eliminate fetch from ' 'glance and qemu-conversion on subsequent calls.'), cfg.StrOpt('sf_svip', help='Overrides default cluster SVIP with the one specified. 
' 'This is required or deployments that have implemented ' 'the use of VLANs for iSCSI networks in their cloud.'), cfg.BoolOpt('sf_enable_volume_mapping', default=True, help='Create an internal mapping of volume IDs and account. ' 'Optimizes lookups and performance at the expense of ' 'memory, very large deployments may want to consider ' 'setting to False.'), cfg.IntOpt('sf_api_port', default=443, min=1, max=65535, help='SolidFire API port. Useful if the device api is behind ' 'a proxy on a different port.')] CONF = cfg.CONF CONF.register_opts(sf_opts) def retry(exc_tuple, tries=5, delay=1, backoff=2): def retry_dec(f): @six.wraps(f) def func_retry(*args, **kwargs): _tries, _delay = tries, delay while _tries > 1: try: return f(*args, **kwargs) except exc_tuple: time.sleep(_delay) _tries -= 1 _delay *= backoff LOG.debug('Retrying %(args)s, %(tries)s attempts ' 'remaining...', {'args': args, 'tries': _tries}) # NOTE(jdg): Don't log the params passed here # some cmds like createAccount will have sensitive # info in the params, grab only the second tuple # which should be the Method msg = (_('Retry count exceeded for command: %s') % (args[1],)) LOG.error(msg) raise exception.SolidFireAPIException(message=msg) return func_retry return retry_dec class SolidFireDriver(san.SanISCSIDriver): """OpenStack driver to enable SolidFire cluster. 
Version history: 1.0 - Initial driver 1.1 - Refactor, clone support, qos by type and minor bug fixes 1.2 - Add xfr and retype support 1.2.1 - Add export/import support 1.2.2 - Catch VolumeNotFound on accept xfr 2.0.0 - Move from httplib to requests 2.0.1 - Implement SolidFire Snapshots 2.0.2 - Implement secondary account """ VERSION = '2.0.2' sf_qos_dict = {'slow': {'minIOPS': 100, 'maxIOPS': 200, 'burstIOPS': 200}, 'medium': {'minIOPS': 200, 'maxIOPS': 400, 'burstIOPS': 400}, 'fast': {'minIOPS': 500, 'maxIOPS': 1000, 'burstIOPS': 1000}, 'performant': {'minIOPS': 2000, 'maxIOPS': 4000, 'burstIOPS': 4000}, 'off': None} sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS'] cluster_stats = {} retry_exc_tuple = (exception.SolidFireRetryableException, requests.exceptions.ConnectionError) retryable_errors = ['xDBVersionMismatch', 'xMaxSnapshotsPerVolumeExceeded', 'xMaxClonesPerVolumeExceeded', 'xMaxSnapshotsPerNodeExceeded', 'xMaxClonesPerNodeExceeded', 'xNotReadyForIO'] def __init__(self, *args, **kwargs): super(SolidFireDriver, self).__init__(*args, **kwargs) self.cluster_uuid = None self.configuration.append_config_values(sf_opts) self._endpoint = self._build_endpoint_info() self.template_account_id = None self.max_volumes_per_account = 1990 self.volume_map = {} try: self._update_cluster_status() except exception.SolidFireAPIException: pass if self.configuration.sf_allow_template_caching: account = self.configuration.sf_template_account_name self.template_account_id = self._create_template_account(account) self.target_driver = SolidFireISCSI(solidfire_driver=self, configuration=self.configuration) self._set_cluster_uuid() def __getattr__(self, attr): return getattr(self.target_driver, attr) def _set_cluster_uuid(self): self.cluster_uuid = ( self._get_cluster_info()['clusterInfo']['uuid']) def _parse_provider_id_string(self, id_string): return tuple(id_string.split()) def _create_provider_id_string(self, resource_id, account_or_vol_id, cluster_uuid=None): # NOTE(jdg): We 
use the same format, but in the case # of snapshots, we don't have an account id, we instead # swap that with the parent volume id cluster_id = self.cluster_uuid # We allow specifying a remote cluster if cluster_uuid: cluster_id = cluster_uuid return "%s %s %s" % (resource_id, account_or_vol_id, cluster_id) def _init_snapshot_mappings(self, srefs): updates = [] sf_snaps = self._issue_api_request( 'ListSnapshots', {}, version='6.0')['result']['snapshots'] for s in srefs: seek_name = '%s%s' % (self.configuration.sf_volume_prefix, s['id']) sfsnap = next( (ss for ss in sf_snaps if ss['name'] == seek_name), None) if sfsnap: id_string = self._create_provider_id_string( sfsnap['snapshotID'], sfsnap['volumeID']) if s.get('provider_id') != id_string: updates.append( {'id': s['id'], 'provider_id': id_string}) return updates def _init_volume_mappings(self, vrefs): updates = [] sf_vols = self._issue_api_request('ListActiveVolumes', {})['result']['volumes'] self.volume_map = {} for v in vrefs: seek_name = '%s%s' % (self.configuration.sf_volume_prefix, v['id']) sfvol = next( (sv for sv in sf_vols if sv['name'] == seek_name), None) if sfvol: if v.get('provider_id', 'nil') != sfvol['volumeID']: v['provider_id'] == sfvol['volumeID'] updates.append( {'id': v['id'], 'provider_id': self._create_provider_id_string( sfvol['volumeID'], sfvol['accountID'])}) return updates def update_provider_info(self, vrefs, snaprefs): volume_updates = self._init_volume_mappings(vrefs) snapshot_updates = self._init_snapshot_mappings(snaprefs) return (volume_updates, snapshot_updates) def _create_template_account(self, account_name): # We raise an API exception if the account doesn't exist # We need to take account_prefix settings into consideration # This just uses the same method to do template account create # as we use for any other OpenStack account account_name = self._get_sf_account_name(account_name) try: id = self._issue_api_request( 'GetAccountByName', {'username': 
account_name})['result']['account']['accountID'] except exception.SolidFireAPIException: chap_secret = self._generate_random_string(12) params = {'username': account_name, 'initiatorSecret': chap_secret, 'targetSecret': chap_secret, 'attributes': {}} id = self._issue_api_request('AddAccount', params)['result']['accountID'] return id def _build_endpoint_info(self, **kwargs): endpoint = {} endpoint['mvip'] = ( kwargs.get('mvip', self.configuration.san_ip)) endpoint['login'] = ( kwargs.get('login', self.configuration.san_login)) endpoint['passwd'] = ( kwargs.get('passwd', self.configuration.san_password)) endpoint['port'] = ( kwargs.get('port', self.configuration.sf_api_port)) endpoint['url'] = 'https://%s:%s' % (endpoint['mvip'], endpoint['port']) # TODO(jdg): consider a call to GetAPI and setting version return endpoint @retry(retry_exc_tuple, tries=6) def _issue_api_request(self, method, params, version='1.0', endpoint=None): if params is None: params = {} if endpoint is None: endpoint = self._endpoint payload = {'method': method, 'params': params} url = '%s/json-rpc/%s/' % (endpoint['url'], version) with warnings.catch_warnings(): warnings.simplefilter("ignore", exceptions.InsecureRequestWarning) req = requests.post(url, data=json.dumps(payload), auth=(endpoint['login'], endpoint['passwd']), verify=False, timeout=30) response = req.json() req.close() if (('error' in response) and (response['error']['name'] in self.retryable_errors)): msg = ('Retryable error (%s) encountered during ' 'SolidFire API call.' 
% response['error']['name']) LOG.debug(msg) raise exception.SolidFireRetryableException(message=msg) if 'error' in response: msg = _('API response: %s') % response raise exception.SolidFireAPIException(msg) return response def _get_volumes_by_sfaccount(self, account_id): """Get all volumes on cluster for specified account.""" params = {'accountID': account_id} return self._issue_api_request( 'ListVolumesForAccount', params)['result']['volumes'] def _get_sfaccount_by_name(self, sf_account_name): """Get SolidFire account object by name.""" sfaccount = None params = {'username': sf_account_name} try: data = self._issue_api_request('GetAccountByName', params) if 'result' in data and 'account' in data['result']: LOG.debug('Found solidfire account: %s', sf_account_name) sfaccount = data['result']['account'] except exception.SolidFireAPIException as ex: if 'xUnknownAccount' in ex.msg: return sfaccount else: raise return sfaccount def _get_sf_account_name(self, project_id): """Build the SolidFire account name to use.""" prefix = self.configuration.sf_account_prefix or '' if prefix == 'hostname': prefix = socket.gethostname() return '%s%s%s' % (prefix, '-' if prefix else '', project_id) def _get_sfaccount(self, project_id): sf_account_name = self._get_sf_account_name(project_id) sfaccount = self._get_sfaccount_by_name(sf_account_name) if sfaccount is None: raise exception.SolidFireAccountNotFound( account_name=sf_account_name) return sfaccount def _create_sfaccount(self, project_id): """Create account on SolidFire device if it doesn't already exist. We're first going to check if the account already exists, if it does just return it. If not, then create it. 
""" sf_account_name = self._get_sf_account_name(project_id) sfaccount = self._get_sfaccount_by_name(sf_account_name) if sfaccount is None: LOG.debug('solidfire account: %s does not exist, create it...', sf_account_name) chap_secret = self._generate_random_string(12) params = {'username': sf_account_name, 'initiatorSecret': chap_secret, 'targetSecret': chap_secret, 'attributes': {}} self._issue_api_request('AddAccount', params) sfaccount = self._get_sfaccount_by_name(sf_account_name) return sfaccount def _get_cluster_info(self): """Query the SolidFire cluster for some property info.""" params = {} return self._issue_api_request('GetClusterInfo', params)['result'] def _generate_random_string(self, length): """Generates random_string to use for CHAP password.""" char_set = string.ascii_uppercase + string.digits return ''.join(random.sample(char_set, length)) def _get_model_info(self, sfaccount, sf_volume_id): """Gets the connection info for specified account and volume.""" cluster_info = self._get_cluster_info() if self.configuration.sf_svip is None: iscsi_portal = cluster_info['clusterInfo']['svip'] + ':3260' else: iscsi_portal = self.configuration.sf_svip chap_secret = sfaccount['targetSecret'] found_volume = False iteration_count = 0 while not found_volume and iteration_count < 600: volume_list = self._get_volumes_by_sfaccount( sfaccount['accountID']) iqn = None for v in volume_list: if v['volumeID'] == sf_volume_id: iqn = v['iqn'] found_volume = True break if not found_volume: time.sleep(2) iteration_count += 1 if not found_volume: LOG.error(_LE('Failed to retrieve volume SolidFire-' 'ID: %s in get_by_account!'), sf_volume_id) raise exception.VolumeNotFound(volume_id=sf_volume_id) model_update = {} # NOTE(john-griffith): SF volumes are always at lun 0 model_update['provider_location'] = ('%s %s %s' % (iscsi_portal, iqn, 0)) model_update['provider_auth'] = ('CHAP %s %s' % (sfaccount['username'], chap_secret)) if not self.configuration.sf_emulate_512: 
model_update['provider_geometry'] = ('%s %s' % (4096, 4096)) model_update['provider_id'] = ( self._create_provider_id_string(sf_volume_id, sfaccount['accountID'], self.cluster_uuid)) return model_update def _do_clone_volume(self, src_uuid, src_project_id, vref): """Create a clone of an existing volume or snapshot.""" attributes = {} qos = {} sf_accounts = self._get_sfaccounts_for_tenant(vref['project_id']) if not sf_accounts: sf_account = self._create_sfaccount(vref['project_id']) else: # Check availability for creates sf_account = self._get_account_create_availability(sf_accounts) if not sf_account: # TODO(jdg): We're not doing tertiaries, so fail msg = _('volumes/account exceeded on both primary ' 'and secondary SolidFire accounts') raise exception.SolidFireDriverException(msg) params = {'name': '%s%s' % (self.configuration.sf_volume_prefix, vref['id']), 'newAccountID': sf_account['accountID']} # NOTE(jdg): First check the SF snapshots # if we don't find a snap by the given name, just move on to check # volumes. 
This may be a running system that was updated from # before we did snapshots, so need to check both is_clone = False snap_name = '%s%s' % (self.configuration.sf_volume_prefix, src_uuid) snaps = self._get_sf_snapshots() snap = next((s for s in snaps if s["name"] == snap_name), None) if snap: params['snapshotID'] = int(snap['snapshotID']) params['volumeID'] = int(snap['volumeID']) params['newSize'] = int(vref['size'] * units.Gi) else: sf_vol = self._get_sf_volume(src_uuid) if sf_vol is None: raise exception.VolumeNotFound(volume_id=src_uuid) params['volumeID'] = int(sf_vol['volumeID']) params['newSize'] = int(vref['size'] * units.Gi) is_clone = True data = self._issue_api_request('CloneVolume', params, version='6.0') if (('result' not in data) or ('volumeID' not in data['result'])): msg = _("API response: %s") % data raise exception.SolidFireAPIException(msg) sf_volume_id = data['result']['volumeID'] if (self.configuration.sf_allow_tenant_qos and vref.get('volume_metadata')is not None): qos = self._set_qos_presets(vref) ctxt = context.get_admin_context() type_id = vref.get('volume_type_id', None) if type_id is not None: qos = self._set_qos_by_volume_type(ctxt, type_id) # NOTE(jdg): all attributes are copied via clone, need to do an update # to set any that were provided params = {'volumeID': sf_volume_id} create_time = vref['created_at'].isoformat() attributes = {'uuid': vref['id'], 'is_clone': 'True', 'src_uuid': src_uuid, 'created_at': create_time} if qos: params['qos'] = qos for k, v in qos.items(): attributes[k] = str(v) params['attributes'] = attributes data = self._issue_api_request('ModifyVolume', params) model_update = self._get_model_info(sf_account, sf_volume_id) if model_update is None: mesg = _('Failed to get model update from clone') raise exception.SolidFireAPIException(mesg) # Increment the usage count, just for data collection # We're only doing this for clones, not create_from snaps if is_clone: data = self._update_attributes(sf_vol) return (data, 
sf_account, model_update) def _update_attributes(self, sf_vol): cloned_count = sf_vol['attributes'].get('cloned_count', 0) cloned_count += 1 attributes = sf_vol['attributes'] attributes['cloned_count'] = cloned_count params = {'volumeID': int(sf_vol['volumeID'])} params['attributes'] = attributes return self._issue_api_request('ModifyVolume', params) def _do_volume_create(self, sf_account, params): sf_volid = self._issue_api_request( 'CreateVolume', params)['result']['volumeID'] return self._get_model_info(sf_account, sf_volid) def _do_snapshot_create(self, params): model_update = {} snapshot_id = self._issue_api_request( 'CreateSnapshot', params, version='6.0')['result']['snapshotID'] snaps = self._get_sf_snapshots() snap = ( next((s for s in snaps if int(s["snapshotID"]) == int(snapshot_id)), None)) model_update['provider_id'] = ( self._create_provider_id_string(snap['snapshotID'], snap['volumeID'], self.cluster_uuid)) return model_update def _set_qos_presets(self, volume): qos = {} valid_presets = self.sf_qos_dict.keys() # First look to see if they included a preset presets = [i.value for i in volume.get('volume_metadata') if i.key == 'sf-qos' and i.value in valid_presets] if len(presets) > 0: if len(presets) > 1: LOG.warning(_LW('More than one valid preset was ' 'detected, using %s'), presets[0]) qos = self.sf_qos_dict[presets[0]] else: # look for explicit settings for i in volume.get('volume_metadata'): if i.key in self.sf_qos_keys: qos[i.key] = int(i.value) return qos def _set_qos_by_volume_type(self, ctxt, type_id): qos = {} volume_type = volume_types.get_volume_type(ctxt, type_id) qos_specs_id = volume_type.get('qos_specs_id') specs = volume_type.get('extra_specs') # NOTE(jdg): We prefer the qos_specs association # and over-ride any existing # extra-specs settings if present if qos_specs_id is not None: kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] else: kvs = specs for key, value in kvs.items(): if ':' in key: fields = key.split(':') key = 
fields[1] if key in self.sf_qos_keys: qos[key] = int(value) return qos def _get_sf_volume(self, uuid, params=None): if params: vols = self._issue_api_request( 'ListVolumesForAccount', params)['result']['volumes'] else: vols = self._issue_api_request( 'ListActiveVolumes', params)['result']['volumes'] found_count = 0 sf_volref = None for v in vols: # NOTE(jdg): In the case of "name" we can't # update that on manage/import, so we use # the uuid attribute meta = v.get('attributes') alt_id = '' if meta: alt_id = meta.get('uuid', '') if uuid in v['name'] or uuid in alt_id: found_count += 1 sf_volref = v LOG.debug("Mapped SolidFire volumeID %(volume_id)s " "to cinder ID %(uuid)s.", {'volume_id': v['volumeID'], 'uuid': uuid}) if found_count == 0: # NOTE(jdg): Previously we would raise here, but there are cases # where this might be a cleanup for a failed delete. # Until we get better states we'll just log an error LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid) if found_count > 1: LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s."), {'count': found_count, 'uuid': uuid}) raise exception.DuplicateSfVolumeNames(vol_name=uuid) return sf_volref def _get_sf_snapshots(self, sf_volid=None): params = {} if sf_volid: params = {'volumeID': sf_volid} return self._issue_api_request( 'ListSnapshots', params, version='6.0')['result']['snapshots'] def _create_image_volume(self, context, image_meta, image_service, image_id): with image_utils.TemporaryImages.fetch(image_service, context, image_id) as tmp_image: data = image_utils.qemu_img_info(tmp_image) fmt = data.file_format if fmt is None: raise exception.ImageUnacceptable( reason=_("'qemu-img info' parsing failed."), image_id=image_id) backing_file = data.backing_file if backing_file is not None: raise exception.ImageUnacceptable( image_id=image_id, reason=_("fmt=%(fmt)s backed by:%(backing_file)s") % {'fmt': fmt, 'backing_file': backing_file, }) virtual_size = int(math.ceil(float(data.virtual_size) / units.Gi)) 
attributes = {} attributes['image_info'] = {} attributes['image_info']['image_updated_at'] = ( image_meta['updated_at'].isoformat()) attributes['image_info']['image_name'] = ( image_meta['name']) attributes['image_info']['image_created_at'] = ( image_meta['created_at'].isoformat()) attributes['image_info']['image_id'] = image_meta['id'] params = {'name': 'OpenStackIMG-%s' % image_id, 'accountID': self.template_account_id, 'sliceCount': 1, 'totalSize': int(virtual_size * units.Gi), 'enable512e': self.configuration.sf_emulate_512, 'attributes': attributes, 'qos': {}} sf_account = self._issue_api_request( 'GetAccountByID', {'accountID': self.template_account_id})['result']['account'] template_vol = self._do_volume_create(sf_account, params) tvol = {} tvol['id'] = image_id tvol['provider_location'] = template_vol['provider_location'] tvol['provider_auth'] = template_vol['provider_auth'] connector = {'multipath': False} conn = self.initialize_connection(tvol, connector) attach_info = super(SolidFireDriver, self)._connect_device(conn) properties = 'na' try: image_utils.convert_image(tmp_image, attach_info['device']['path'], 'raw', run_as_root=True) data = image_utils.qemu_img_info(attach_info['device']['path'], run_as_root=True) if data.file_format != 'raw': raise exception.ImageUnacceptable( image_id=image_id, reason=_("Converted to %(vol_format)s, but format is " "now %(file_format)s") % {'vol_format': 'raw', 'file_format': data. 
file_format}) except Exception as exc: vol = self._get_sf_volume(image_id) LOG.error(_LE('Failed image conversion during ' 'cache creation: %s'), exc) LOG.debug('Removing SolidFire Cache Volume (SF ID): %s', vol['volumeID']) self._detach_volume(context, attach_info, tvol, properties) self._issue_api_request('DeleteVolume', params) return self._detach_volume(context, attach_info, tvol, properties) sf_vol = self._get_sf_volume(image_id, params) LOG.debug('Successfully created SolidFire Image Template ' 'for image-id: %s', image_id) return sf_vol def _verify_image_volume(self, context, image_meta, image_service): # This method just verifies that IF we have a cache volume that # it's still up to date and current WRT the image in Glance # ie an image-update hasn't occurred since we grabbed it # If it's out of date, just delete it and we'll create a new one # Any other case we don't care and just return without doing anything params = {'accountID': self.template_account_id} sf_vol = self._get_sf_volume(image_meta['id'], params) if sf_vol is None: return # Check updated_at field, delete copy and update if needed if sf_vol['attributes']['image_info']['image_updated_at'] == ( image_meta['updated_at'].isoformat()): return else: # Bummer, it's been updated, delete it params = {'accountID': self.template_account_id} params['volumeID'] = sf_vol['volumeID'] self._issue_api_request('DeleteVolume', params) if not self._create_image_volume(context, image_meta, image_service, image_meta['id']): msg = _("Failed to create SolidFire Image-Volume") raise exception.SolidFireAPIException(msg) def _get_sfaccounts_for_tenant(self, cinder_project_id): accounts = self._issue_api_request( 'ListAccounts', {})['result']['accounts'] # Note(jdg): On SF we map account-name to OpenStack's tenant ID # we use tenantID in here to get secondaries that might exist # Also: we expect this to be sorted, so we get the primary first # in the list return sorted([acc for acc in accounts if cinder_project_id in 
acc['username']]) def _get_all_active_volumes(self, cinder_uuid=None): params = {} volumes = self._issue_api_request('ListActiveVolumes', params)['result']['volumes'] if cinder_uuid: vols = ([v for v in volumes if cinder_uuid in v.name]) else: vols = [v for v in volumes] return vols def _get_all_deleted_volumes(self, cinder_uuid=None): params = {} vols = self._issue_api_request('ListDeletedVolumes', params)['result']['volumes'] if cinder_uuid: deleted_vols = ([v for v in vols if cinder_uuid in v['name']]) else: deleted_vols = [v for v in vols] return deleted_vols def _get_account_create_availability(self, accounts): # we'll check both the primary and the secondary # if it exists and return whichever one has count # available. for acc in accounts: if self._get_volumes_for_account( acc['accountID']) > self.max_volumes_per_account: return acc if len(accounts) == 1: sfaccount = self._create_sfaccount(accounts[0]['name'] + '_') return sfaccount return None def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None): # ListVolumesForAccount gives both Active and Deleted # we require the solidfire accountID, uuid of volume # is optional params = {'accountID': sf_account_id} vols = self._issue_api_request('ListVolumesForAccount', params)['result']['volumes'] if cinder_uuid: vlist = [v for v in vols if cinder_uuid in v['name']] else: vlist = [v for v in vols] vlist = sorted(vlist, key=lambda k: k['volumeID']) return vlist def clone_image(self, context, volume, image_location, image_meta, image_service): public = False # Check out pre-requisites: # Is template caching enabled? if not self.configuration.sf_allow_template_caching: return None, False # NOTE(jdg): Glance V2 moved from is_public to visibility # so we check both, as we don't necessarily know or want # to care which we're using. 
Will need to look at # future handling of things like shared and community # but for now, it's owner or public and that's it visibility = image_meta.get('visibility', None) if visibility and visibility == 'public': public = True elif image_meta.get('is_public', False): public = True else: if image_meta['owner'] == volume['project_id']: public = True if not public: LOG.warning(_LW("Requested image is not " "accessible by current Tenant.")) return None, False try: self._verify_image_volume(context, image_meta, image_service) except exception.SolidFireAPIException: return None, False account = self.configuration.sf_template_account_name try: (data, sfaccount, model) = self._do_clone_volume(image_meta['id'], account, volume) except exception.VolumeNotFound: if self._create_image_volume(context, image_meta, image_service, image_meta['id']) is None: # We failed, dump out return None, False # Ok, should be good to go now, try it again (data, sfaccount, model) = self._do_clone_volume(image_meta['id'], account, volume) return model, True def create_volume(self, volume): """Create volume on SolidFire device. The account is where CHAP settings are derived from, volume is created and exported. Note that the new volume is immediately ready for use. One caveat here is that an existing user account must be specified in the API call to create a new volume. We use a set algorithm to determine account info based on passed in cinder volume object. First we check to see if the account already exists (and use it), or if it does not already exist, we'll go ahead and create it. 
""" slice_count = 1 attributes = {} qos = {} if (self.configuration.sf_allow_tenant_qos and volume.get('volume_metadata')is not None): qos = self._set_qos_presets(volume) ctxt = context.get_admin_context() type_id = volume['volume_type_id'] if type_id is not None: qos = self._set_qos_by_volume_type(ctxt, type_id) create_time = volume['created_at'].isoformat() attributes = {'uuid': volume['id'], 'is_clone': 'False', 'created_at': create_time} if qos: for k, v in qos.items(): attributes[k] = str(v) sf_accounts = self._get_sfaccounts_for_tenant(volume['project_id']) if not sf_accounts: sf_account = self._create_sfaccount(volume['project_id']) else: sf_account = self._get_account_create_availability(sf_accounts) vname = '%s%s' % (self.configuration.sf_volume_prefix, volume['id']) params = {'name': vname, 'accountID': sf_account['accountID'], 'sliceCount': slice_count, 'totalSize': int(volume['size'] * units.Gi), 'enable512e': self.configuration.sf_emulate_512, 'attributes': attributes, 'qos': qos} # NOTE(jdg): Check if we're a migration tgt, if so # use the old volume-id here for the SF Name migration_status = volume.get('migration_status', None) if migration_status and 'target' in migration_status: k, v = migration_status.split(':') vname = '%s%s' % (self.configuration.sf_volume_prefix, v) params['name'] = vname params['attributes']['migration_uuid'] = volume['id'] params['attributes']['uuid'] = v return self._do_volume_create(sf_account, params) def create_cloned_volume(self, volume, src_vref): """Create a clone of an existing volume.""" (_data, _sfaccount, model) = self._do_clone_volume( src_vref['id'], src_vref['project_id'], volume) return model def delete_volume(self, volume): """Delete SolidFire Volume from device. SolidFire allows multiple volumes with same name, volumeID is what's guaranteed unique. 
""" sf_vol = None accounts = self._get_sfaccounts_for_tenant(volume['project_id']) if accounts is None: LOG.error(_LE("Account for Volume ID %s was not found on " "the SolidFire Cluster while attempting " "delete_volume operation!"), volume['id']) LOG.error(_LE("This usually means the volume was never " "successfully created.")) return for acc in accounts: vols = self._get_volumes_for_account(acc['accountID'], volume['id']) if vols: sf_vol = vols[0] break if sf_vol is not None: params = {'volumeID': sf_vol['volumeID']} self._issue_api_request('DeleteVolume', params) else: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "delete_volume operation!"), volume['id']) def delete_snapshot(self, snapshot): """Delete the specified snapshot from the SolidFire cluster.""" sf_snap_name = '%s%s' % (self.configuration.sf_volume_prefix, snapshot['id']) accounts = self._get_sfaccounts_for_tenant(snapshot['project_id']) snap = None for acct in accounts: params = {'accountID': acct['accountID']} sf_vol = self._get_sf_volume(snapshot['volume_id'], params) if sf_vol: sf_snaps = self._get_sf_snapshots(sf_vol['volumeID']) snap = next((s for s in sf_snaps if s["name"] == sf_snap_name), None) if snap: params = {'snapshotID': snap['snapshotID']} self._issue_api_request('DeleteSnapshot', params, version='6.0') return # Make sure it's not "old style" using clones as snaps LOG.debug("Snapshot not found, checking old style clones.") self.delete_volume(snapshot) def create_snapshot(self, snapshot): sfaccount = self._get_sfaccount(snapshot['project_id']) if sfaccount is None: LOG.error(_LE("Account for Volume ID %s was not found on " "the SolidFire Cluster while attempting " "create_snapshot operation!"), snapshot['volume_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(snapshot['volume_id'], params) if sf_vol is None: raise exception.VolumeNotFound(volume_id=snapshot['volume_id']) params = {'volumeID': sf_vol['volumeID'], 
'name': '%s%s' % (self.configuration.sf_volume_prefix, snapshot['id'])} return self._do_snapshot_create(params) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from the specified snapshot.""" (_data, _sfaccount, model) = self._do_clone_volume( snapshot['id'], snapshot['project_id'], volume) return model def get_volume_stats(self, refresh=False): """Get volume status. If 'refresh' is True, run update first. The name is a bit misleading as the majority of the data here is cluster data """ if refresh: try: self._update_cluster_status() except exception.SolidFireAPIException: pass return self.cluster_stats def extend_volume(self, volume, new_size): """Extend an existing volume.""" sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "extend_volume operation!"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) params = { 'volumeID': sf_vol['volumeID'], 'totalSize': int(new_size * units.Gi) } self._issue_api_request('ModifyVolume', params, version='5.0') def _update_cluster_status(self): """Retrieve status info for the Cluster.""" params = {} # NOTE(jdg): The SF api provides an UNBELIEVABLE amount # of stats data, this is just one of the calls results = self._issue_api_request('GetClusterCapacity', params) results = results['result']['clusterCapacity'] free_capacity = ( results['maxProvisionedSpace'] - results['usedSpace']) data = {} backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or self.__class__.__name__ data["vendor_name"] = 'SolidFire Inc' data["driver_version"] = self.VERSION data["storage_protocol"] = 'iSCSI' data['total_capacity_gb'] = ( float(results['maxProvisionedSpace'] / units.Gi)) data['free_capacity_gb'] = float(free_capacity / units.Gi) 
data['reserved_percentage'] = self.configuration.reserved_percentage data['QoS_support'] = True data['compression_percent'] = ( results['compressionPercent']) data['deduplicaton_percent'] = ( results['deDuplicationPercent']) data['thin_provision_percent'] = ( results['thinProvisioningPercent']) self.cluster_stats = data def attach_volume(self, context, volume, instance_uuid, host_name, mountpoint): sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "attach_volume operation!"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) attributes = sf_vol['attributes'] attributes['attach_time'] = volume.get('attach_time', None) attributes['attached_to'] = instance_uuid params = { 'volumeID': sf_vol['volumeID'], 'attributes': attributes } self._issue_api_request('ModifyVolume', params) def detach_volume(self, context, volume, attachment=None): sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "detach_volume operation!"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) attributes = sf_vol['attributes'] attributes['attach_time'] = None attributes['attached_to'] = None params = { 'volumeID': sf_vol['volumeID'], 'attributes': attributes } self._issue_api_request('ModifyVolume', params) def accept_transfer(self, context, volume, new_user, new_project): sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "accept_transfer 
operation!"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) if new_project != volume['project_id']: # do a create_sfaccount here as this tenant # may not exist on the cluster yet sfaccount = self._create_sfaccount(new_project) params = { 'volumeID': sf_vol['volumeID'], 'accountID': sfaccount['accountID'] } self._issue_api_request('ModifyVolume', params, version='5.0') volume['project_id'] = new_project volume['user_id'] = new_user return self.target_driver.ensure_export(context, volume, None) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns a boolean indicating whether the retype occurred. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities (Not Used). """ qos = {} attributes = {} sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: raise exception.VolumeNotFound(volume_id=volume['id']) attributes = sf_vol['attributes'] attributes['retyped_at'] = timeutils.utcnow().isoformat() params = {'volumeID': sf_vol['volumeID']} qos = self._set_qos_by_volume_type(ctxt, new_type['id']) if qos: params['qos'] = qos for k, v in qos.items(): attributes[k] = str(v) params['attributes'] = attributes self._issue_api_request('ModifyVolume', params) return True def manage_existing(self, volume, external_ref): """Manages an existing SolidFire Volume (import to Cinder). Renames the Volume to match the expected name for the volume. Also need to consider things like QoS, Emulation, account/tenant. 
""" sfid = external_ref.get('source-id', None) sfname = external_ref.get('name', None) if sfid is None: raise exception.SolidFireAPIException(_("Manage existing volume " "requires 'source-id'.")) # First get the volume on the SF cluster (MUST be active) params = {'startVolumeID': sfid, 'limit': 1} vols = self._issue_api_request( 'ListActiveVolumes', params)['result']['volumes'] sf_ref = vols[0] sfaccount = self._create_sfaccount(volume['project_id']) attributes = {} qos = {} if (self.configuration.sf_allow_tenant_qos and volume.get('volume_metadata')is not None): qos = self._set_qos_presets(volume) ctxt = context.get_admin_context() type_id = volume.get('volume_type_id', None) if type_id is not None: qos = self._set_qos_by_volume_type(ctxt, type_id) import_time = volume['created_at'].isoformat() attributes = {'uuid': volume['id'], 'is_clone': 'False', 'os_imported_at': import_time, 'old_name': sfname} if qos: for k, v in qos.items(): attributes[k] = str(v) params = {'name': volume['name'], 'volumeID': sf_ref['volumeID'], 'accountID': sfaccount['accountID'], 'enable512e': self.configuration.sf_emulate_512, 'attributes': attributes, 'qos': qos} self._issue_api_request('ModifyVolume', params, version='5.0') return self._get_model_info(sfaccount, sf_ref['volumeID']) def manage_existing_get_size(self, volume, external_ref): """Return size of an existing LV for manage_existing. 
existing_ref is a dictionary of the form: {'name': <name of existing volume on SF Cluster>} """ sfid = external_ref.get('source-id', None) if sfid is None: raise exception.SolidFireAPIException(_("Manage existing get size " "requires 'id'.")) params = {'startVolumeID': int(sfid), 'limit': 1} vols = self._issue_api_request( 'ListActiveVolumes', params)['result']['volumes'] return int(vols[0]['totalSize']) / int(units.Gi) def unmanage(self, volume): """Mark SolidFire Volume as unmanaged (export from Cinder).""" sfaccount = self._get_sfaccount(volume['project_id']) if sfaccount is None: LOG.error(_LE("Account for Volume ID %s was not found on " "the SolidFire Cluster while attempting " "unmanage operation!"), volume['id']) raise exception.SolidFireAPIException(_("Failed to find account " "for volume.")) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: raise exception.VolumeNotFound(volume_id=volume['id']) export_time = timeutils.utcnow().isoformat() attributes = sf_vol['attributes'] attributes['os_exported_at'] = export_time params = {'volumeID': int(sf_vol['volumeID']), 'attributes': attributes} self._issue_api_request('ModifyVolume', params, version='5.0') class SolidFireISCSI(iscsi_driver.SanISCSITarget): def __init__(self, *args, **kwargs): super(SolidFireISCSI, self).__init__(*args, **kwargs) self.sf_driver = kwargs.get('solidfire_driver') def __getattr__(self, attr): return getattr(self.sf_driver, attr) def _do_iscsi_export(self, volume): sfaccount = self._get_sfaccount(volume['project_id']) model_update = {} model_update['provider_auth'] = ('CHAP %s %s' % (sfaccount['username'], sfaccount['targetSecret'])) return model_update def create_export(self, context, volume, volume_path): return self._do_iscsi_export(volume) def ensure_export(self, context, volume, volume_path): try: return self._do_iscsi_export(volume) except exception.SolidFireAPIException: return None # Following are abc's that we 
make sure are caught and # paid attention to. In our case we don't use them # so just stub them out here. def remove_export(self, context, volume): pass def terminate_connection(self, volume, connector, **kwargs): pass
{ "repo_name": "tobegit3hub/cinder_docker", "path": "cinder/volume/drivers/solidfire.py", "copies": "1", "size": "54639", "license": "apache-2.0", "hash": 2467060496773283000, "line_mean": 39.9587706147, "line_max": 79, "alpha_frac": 0.5363385128, "autogenerated": false, "ratio": 4.359262805169938, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00003856234237921182, "num_lines": 1334 }
# All rights reserved.

"""Setuptools build script for the ``mapping`` package."""

from os.path import join

from setuptools import setup, find_packages

# Load ``__version__`` and ``__requires__`` out of the package's
# __init__.py without importing the package (importing could drag in
# heavy GUI dependencies at build time).
info = {}
init_py = join('mapping', '__init__.py')
# NOTE: the original used ``execfile``, which exists only on Python 2;
# compile+exec is the equivalent spelling that works on Python 2 and 3.
with open(init_py) as fh:
    exec(compile(fh.read(), init_py, 'exec'), info)

setup(
    name='mapping',
    version=info['__version__'],
    author='Enthought, Inc.',
    author_email='info@enthought.com',
    maintainer='ETS Developers',
    maintainer_email='enthought-dev@enthought.com',
    # url='https://github.com/enthought/enable-mapping',
    # download_url=('http://www.enthought.com/repo/ets/enable-mapping-%s.tar.gz'
    #               % info['__version__']),
    classifiers=[c.strip() for c in """\
        Development Status :: 5 - Production/Stable
        Intended Audience :: Developers
        Intended Audience :: Science/Research
        License :: OSI Approved :: BSD License
        Operating System :: MacOS
        Operating System :: Microsoft :: Windows
        Operating System :: OS Independent
        Operating System :: POSIX
        Operating System :: Unix
        Programming Language :: Python
        Topic :: Scientific/Engineering
        Topic :: Software Development
        Topic :: Software Development :: Libraries
        """.splitlines() if len(c.strip()) > 0],
    description='application tools',
    long_description=open('README.rst').read(),
    include_package_data=True,
    package_data={'mapping.enable': ['fonts/*']},
    install_requires=info['__requires__'],
    license='BSD',
    packages=find_packages(),
    platforms=["Windows", "Linux", "Mac OS-X", "Unix", "Solaris"],
    zip_safe=True,
)
{ "repo_name": "nmichaud/enable-mapping", "path": "setup.py", "copies": "1", "size": "1581", "license": "bsd-3-clause", "hash": 1586245213786174700, "line_mean": 33.3695652174, "line_max": 83, "alpha_frac": 0.6072106262, "autogenerated": false, "ratio": 4.043478260869565, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5150688887069566, "avg_score": null, "num_lines": null }
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Minimal cloud-init metadata server exposing AWS- and OpenStack-style
datasources over HTTP (aiohttp)."""

import asyncio
import os
import logging
import uuid
import yaml
import json

from aiohttp import web

LOG = logging.getLogger(__name__)


class AWS:
    """EC2-style metadata datasource (currently stubbed out).

    layout:
        /2009-04-04/meta-data/instance-id
    """

    def __init__(self, server):
        # Back-reference to the owning Server (holds config and keys).
        self.server = server

    def instance_id(self, request):
        # Placeholder: the EC2 datasource is not implemented yet.
        return web.Response(body=b"not implemented")

    def meta_data(self, request):
        return web.Response(body=b"not implemented")

    def user_data(self, request):
        return web.Response(body=b"not implemented")

    def handler(self, request):
        """Dispatch /{version}/{path} requests to the stub endpoints."""
        path = request.match_info["path"]
        LOG.info("PATH %s" % path)
        if path == "instance-id":
            return self.instance_id(request)
        if path == "user-data":
            return self.user_data(request)
        if path == "meta-data/":
            return self.meta_data(request)
        return web.Response(body=b"Not found", status=404)

    def index(self, request):
        return web.Response(body=b"Not found", status=404)


class OpenStack:
    """OpenStack datasource.

    layout:
        /openstack/2012-08-10/user_data
        /openstack/2012-08-10/meta_data.json
        /openstack/latest/ -- alias for 2012-08-10
    """

    def __init__(self, server):
        self.server = server

    def get_metadata(self, request):
        """Build the meta_data.json payload for the requesting peer.

        Returns UTF-8 encoded JSON bytes; the hostname is derived from the
        client's IP address so each VM gets a stable, unique-looking name.
        """
        remote_host = request.transport.get_extra_info("peername")[0]
        LOG.info("Metadata request from %s" % remote_host)
        keys = {}
        for i, key in enumerate(self.server.ssh_public_keys):
            keys["key-" + str(i)] = key
        return json.dumps({
            "uuid": str(uuid.uuid4()),
            "availability_zone": "nova",
            "hostname": "vm-%s" % remote_host.replace(".", "-"),
            "launch_index": 0,
            "meta": {
                "priority": "low",
                "role": "vm-on-localhost",
            },
            "public_keys": keys,
            "name": "test"
        }).encode("utf8")

    def user_data(self, request):
        version = request.match_info["version"]
        if version not in ("2012-08-10", "latest"):
            return web.Response(body=b"Not found", status=404)
        return web.Response(body=self.server.user_data)

    def meta_data(self, request):
        version = request.match_info["version"]
        if version not in ("2012-08-10", "latest"):
            return web.Response(body=b"Not found", status=404)
        return web.Response(body=self.get_metadata(request),
                            content_type="application/json")

    @asyncio.coroutine
    def handler(self, request):
        """Dispatch /openstack/{version}/{path} requests."""
        if request.match_info["version"] not in ("2012-08-10", "latest"):
            # BUG FIX: ``body`` is a keyword-only argument of web.Response;
            # the original passed the bytes positionally, which raised
            # TypeError instead of returning a 404 (every other call in
            # this module already uses ``body=``).
            return web.Response(body=b"Not found\n", status=404)
        path = request.match_info["path"]
        if path == "meta_data.json":
            return self.meta_data(request)
        if path == "user_data":
            return self.user_data(request)
        if path == "":
            return self.request_index(request)
        return web.Response(body=b"Not found", status=404)

    def request_index(self, request):
        # Directory listing for /openstack/{version}/.
        return web.Response(body=b"user_data\nmeta_data.json")

    @asyncio.coroutine
    def index(self, request):
        return web.Response(body=b"2012-08-10/\nlatest")


class Server:
    """Metadata server for cloud-init."""

    def __init__(self, loop, **config):
        self.loop = loop
        self.config = config
        # Collect public keys from the configured files; missing files
        # are silently skipped (best effort).
        self.ssh_public_keys = []
        for f in config.get("ssh_keys", []):
            if not os.path.isfile(f):
                continue
            with open(f) as kf:
                for line in kf:
                    if line:
                        self.ssh_public_keys.append(line.strip())
        self.user_data = {
            "manage_etc_hosts": True,
            "disable_root": 0,
            "ssh_pwauth": True,
            "ssh_authorized_keys": self.ssh_public_keys,
        }
        # Pre-render the cloud-config document once; served verbatim.
        self.user_data = b"#cloud-config\n" + yaml.safe_dump(
            self.user_data, default_flow_style=False).encode("utf8")
        self._cache = {}

    @asyncio.coroutine
    def run(self):
        """Start the HTTP server and block until it is closed."""
        openstack = OpenStack(self)
        aws = AWS(self)
        self.app = web.Application(loop=self.loop)
        r = self.app.router.add_route
        r("GET", "/", self.index)
        r("GET", "/openstack", openstack.index)
        r("GET", "/openstack/", openstack.index)
        r("GET", "/openstack/{version}/{path:.*}", openstack.handler)
        # AWS routes are the catch-all fallback.
        r("GET", "/{version:.+}/{path:.+}", aws.handler)
        r("GET", "/{path:.+}", aws.index)
        self.handler = self.app.make_handler()
        addr = self.config.get("listen_addr", "0.0.0.0")
        port = self.config.get("listen_port", 8088)
        self.srv = yield from self.loop.create_server(self.handler,
                                                      addr, port)
        LOG.info("Metadata server started at %s:%s" % (addr, port))
        try:
            yield from self.srv.wait_closed()
        except asyncio.CancelledError:
            pass
        except KeyboardInterrupt:
            return
        # Graceful shutdown: drain connections, then close the listener.
        yield from self.handler.finish_connections(1.0)
        self.srv.close()
        yield from self.srv.wait_closed()
        yield from self.app.finish()

    @asyncio.coroutine
    def index(self, request):
        return web.Response(body=b"/openstack/")
{ "repo_name": "redixin/clis", "path": "clis/clis.py", "copies": "1", "size": "5903", "license": "apache-2.0", "hash": -1871241283596994800, "line_mean": 32.3502824859, "line_max": 79, "alpha_frac": 0.5712349653, "autogenerated": false, "ratio": 3.833116883116883, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9904351848416884, "avg_score": 0, "num_lines": 177 }
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Tests for threadlocal dict module."""

import random
import string
import threading
import unittest

from six.moves import queue
from six.moves import xrange

from simpl import threadlocal


class TestThreadLocal(unittest.TestCase):

    def setUp(self):
        # Every test starts from an empty default thread-local dict.
        threadlocal.default().clear()

    def get_some_text(self, length=16):
        """Return a random ASCII-letter string of the given length."""
        letters = (random.choice(string.ascii_letters)
                   for _ in xrange(length))
        return ''.join(letters)

    def test_default(self):
        self.assertIs(threadlocal.default(), threadlocal.CONTEXT)

    def test_threadlocal_dict_repr(self):
        ctx = threadlocal.default()
        ctx['good'] = 'glob'
        self.assertEqual(repr(ctx), "<ThreadLocalDict {'good': 'glob'}>")

    def test_correct_threadlocal_dicts(self):
        key = 'value'
        num_threads = 5

        def collect(sink):
            # Each thread stores a unique random value, then hands its
            # backing per-thread dict over for inspection.
            current = threadlocal.default()
            current[key] = self.get_some_text()
            sink.put(current._get_local_dict())

        harvested = queue.Queue()
        workers = []
        for _ in xrange(num_threads):
            worker = threading.Thread(target=collect, args=(harvested,))
            worker.start()
            workers.append(worker)

        # and one to grow on: the main thread contributes a dict too.
        threadlocal.default()[key] = self.get_some_text()
        harvested.put(threadlocal.default()._get_local_dict())

        for worker in workers:
            worker.join()

        seen = set()
        while not harvested.empty():
            local = harvested.get(block=False)
            seen.add(local[key])

        # One distinct dict (hence value) per worker, plus the main thread.
        self.assertEqual(len(seen), num_threads + 1)

    def test_non_default_namespace(self):
        default_dict = threadlocal.default()
        custom = threadlocal.ThreadLocalDict('custom_namespace')
        self.assertIsNot(default_dict._get_local_dict(),
                         custom._get_local_dict())

    def test_default_namespace(self):
        default_dict = threadlocal.default()
        alias = threadlocal.ThreadLocalDict(threadlocal.DEFAULT_NAMESPACE)
        self.assertIs(default_dict._get_local_dict(),
                      alias._get_local_dict())

    def test_equality_same_namespace(self):
        namespace = self.get_some_text()
        first = threadlocal.ThreadLocalDict(namespace)
        second = threadlocal.ThreadLocalDict(namespace)
        self.assertEqual(first, second)

    def test_equality_different_namespace(self):
        key, value = 'key', 'value'
        first = threadlocal.ThreadLocalDict('foo')
        first[key] = value
        second = threadlocal.ThreadLocalDict('bar')
        second[key] = value
        self.assertEqual(first, second)

    def test_not_same(self):
        namespace = self.get_some_text()
        first = threadlocal.ThreadLocalDict(namespace)
        second = threadlocal.ThreadLocalDict(namespace)
        self.assertIsNot(first, second)


if __name__ == '__main__':
    unittest.main()
{ "repo_name": "larsbutler/simpl", "path": "tests/test_threadlocal.py", "copies": "4", "size": "3688", "license": "apache-2.0", "hash": 4771722350548773000, "line_mean": 32.8348623853, "line_max": 78, "alpha_frac": 0.6480477223, "autogenerated": false, "ratio": 3.9655913978494626, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6613639120149464, "avg_score": null, "num_lines": null }
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock

from neutron.agent.linux import ip_conntrack
from neutron.tests import base


class IPConntrackTestCase(base.BaseTestCase):
    """Exercise IpConntrackManager conntrack-state deletion."""

    def setUp(self):
        super(IPConntrackTestCase, self).setUp()
        self.execute = mock.Mock()
        self.mgr = ip_conntrack.IpConntrackManager(self._zone_lookup,
                                                   self.execute)

    def _zone_lookup(self, dev):
        # All devices resolve to the same conntrack zone in these tests.
        return 100

    def test_delete_conntrack_state_dedupes(self):
        # Ten identical device entries must collapse to one execute call.
        rule = {'ethertype': 'IPv4', 'direction': 'ingress'}
        dev_info = {'device': 'device', 'fixed_ips': ['1.2.3.4']}
        dev_info_list = [dev_info] * 10
        self.mgr._delete_conntrack_state(dev_info_list, rule)
        self.assertEqual(1, len(self.execute.mock_calls))
{ "repo_name": "MaximNevrov/neutron", "path": "neutron/tests/unit/agent/linux/test_ip_conntrack.py", "copies": "7", "size": "1381", "license": "apache-2.0", "hash": -8875293384320000000, "line_mean": 36.3243243243, "line_max": 77, "alpha_frac": 0.6618392469, "autogenerated": false, "ratio": 3.7425474254742546, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.7904386672374255, "avg_score": null, "num_lines": null }
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Round-trip checks: v03 Event properties vs their legacy accessors."""

from cloudevents.sdk.event import v03


def test_v03_time_property():
    # Setting via the property must be visible through the legacy getter,
    # and vice versa.
    event = v03.Event()

    first = "1234"
    event.time = first
    assert event.EventTime() == first

    second = "4321"
    event.SetEventTime(second)
    assert event.time == second


def test_v03_subject_property():
    event = v03.Event()

    first = "<my-subject>"
    event.subject = first
    assert event.Subject() == first

    second = "<my-subject2>"
    event.SetSubject(second)
    assert event.subject == second


def test_v03_schema_url_property():
    event = v03.Event()

    first = "<my-schema>"
    event.schema_url = first
    assert event.SchemaURL() == first

    second = "<my-schema2>"
    event.SetSchemaURL(second)
    assert event.schema_url == second


def test_v03_datacontentencoding_property():
    event = v03.Event()

    first = "<my-datacontentencoding>"
    event.datacontentencoding = first
    assert event.ContentEncoding() == first

    second = "<my-datacontentencoding2>"
    event.SetContentEncoding(second)
    assert event.datacontentencoding == second
{ "repo_name": "cloudevents/sdk-python", "path": "cloudevents/tests/test_v03_event.py", "copies": "1", "size": "1804", "license": "apache-2.0", "hash": -7771867772360658000, "line_mean": 28.0967741935, "line_max": 78, "alpha_frac": 0.7045454545, "autogenerated": false, "ratio": 3.6152304609218437, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9819775915421844, "avg_score": 0, "num_lines": 62 }
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Template declaration for a dummy neutron extension.  It deliberately
# defines every constant an API-definition module can carry, documenting
# each one as fully as possible.

# Alias under which the extension is registered.
ALIAS = 'dummy'

# True when the extension only signals behaviour to the user rather than
# actively modifying the attribute map (mandatory).
IS_SHIM_EXTENSION = False

# True when the extension marks adoption of the standardattr model for
# legacy resources, or introduces new standardattr attributes; False or
# None when the model has been in place since the resource extension was
# introduced (mandatory).  When True, the alias should be prefixed with
# 'standard-attr-'.
IS_STANDARD_ATTR_EXTENSION = False

# Human-readable name of the extension (mandatory).
NAME = 'Foo Extension'

# Prefix for the API resources; an empty prefix exposes the API at the
# v2/ level like any other core resource (mandatory).
API_PREFIX = ''

# Human-readable description of the extension (mandatory).
DESCRIPTION = "Provides support for foo"

# Timestamp of when the extension was introduced (mandatory).
UPDATED_TIMESTAMP = "2000-00-01T00:00:00-00:00"

# Specific resources and/or attributes for the extension (optional).
# Simple single-resource extensions can rely on the RESOURCE_NAME and
# COLLECTION_NAME constants below; otherwise inline string literals are
# fine.

# Singular name of the resource introduced or extended (the resource may
# come from another extension or be a core resource).
RESOURCE_NAME = 'foo'

# Plural name of that same resource.
COLLECTION_NAME = 'fooes'

# Resource attribute map: together with ACTION_MAP this is effectively
# the bulk of the API contract (mandatory).
RESOURCE_ATTRIBUTE_MAP = {
    COLLECTION_NAME: {
        'id': {'allow_post': False,
               'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
    }
}

# Sub-resource attribute map: attaches child resources to the main
# resource.  Each entry must carry 'parent' and 'parameters' keys; use
# None when no sub-resources are needed (mandatory).  When an existing
# sub-resource is being extended, the attributes being added must appear
# under 'parameters'.
SUB_RESOURCE_ATTRIBUTE_MAP = {
    'subfoo': {
        'parent': {'collection_name': COLLECTION_NAME,
                   'member_name': RESOURCE_NAME},
        'parameters': {
            'id': {'allow_post': False,
                   'allow_put': False,
                   'validate': {'type:uuid': None},
                   'is_visible': True,
                   'primary_key': True},
        },
    },
}

# Action map: associates custom verbs with the HTTP methods performed on
# the API resource (mandatory).  Example:
#
#   ACTION_MAP = {
#       RESOURCE_NAME: {
#           'add_my_foo_bars': 'PUT',
#           'remove_my_foo_bars': 'PUT',
#           'get_my_foo_bars': 'GET',
#       }
#   }
ACTION_MAP = {
}

# Action status: associates response statuses with methods performed on
# the API resource (mandatory).  Example:
#
#   ACTION_STATUS = {
#       'create': 201,
#       'delete': 204,
#   }
ACTION_STATUS = {
}

# Extensions this one requires (mandatory).
REQUIRED_EXTENSIONS = [
]

# Extensions this one can optionally use (mandatory).
OPTIONAL_EXTENSIONS = [
]

# TODO(armax): add support for modeling custom queries
{ "repo_name": "openstack/neutron-lib", "path": "neutron_lib/api/definitions/_dummy.py", "copies": "1", "size": "4228", "license": "apache-2.0", "hash": -1190290167132132000, "line_mean": 33.0967741935, "line_max": 78, "alpha_frac": 0.6960737938, "autogenerated": false, "ratio": 3.861187214611872, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 124 }
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Base Manager class.

Managers are responsible for a certain aspect of the system.  It is a
logical grouping of code relating to a portion of the system.  In general
other components should be using the manager to make changes to the
components that it is responsible for.

We have adopted a basic strategy of Smart managers and dumb data, which
means rather than attaching methods to data objects, components should
call manager methods that act on the data.

Methods on managers that can be executed locally should be called directly.
If a particular method must execute on a remote host, this should be done
via rpc to the service that wraps the manager.

Managers should be responsible for most of the db access, and
non-implementation specific data.  Anything implementation specific that
can't be generalized should be done by the Driver.

Managers will often provide methods for initial setup of a host or periodic
tasks to a wrapping service.

This module provides Manager, a base class for managers.

"""

from oslo_config import cfg
import oslo_messaging as messaging
from oslo_service import periodic_task

from karbor.db import base
from karbor import version


CONF = cfg.CONF


class PeriodicTasks(periodic_task.PeriodicTasks):
    """Periodic-task mixin bound to the global configuration object."""

    def __init__(self):
        super(PeriodicTasks, self).__init__(CONF)


class Manager(base.Base, PeriodicTasks):
    """Base class every karbor manager derives from."""

    # Set RPC API version to 1.0 by default.
    RPC_API_VERSION = '1.0'

    target = messaging.Target(version=RPC_API_VERSION)

    def __init__(self, host=None, db_driver=None):
        # Fall back to the configured host when none was given.
        self.host = host or CONF.host
        self.additional_endpoints = []
        super(Manager, self).__init__(db_driver)

    def periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        return self.run_periodic_tasks(context,
                                       raise_on_error=raise_on_error)

    def init_host(self, **kwargs):
        """Handle initialization if this is a standalone service.

        A hook point for services to execute tasks before the services
        are made available (i.e. showing up on RPC and starting to accept
        RPC calls) to other components.  Child classes should override
        this method.
        """
        pass

    def cleanup_host(self):
        """Hook to do cleanup work when the service shuts down.

        Child classes should override this method.
        """
        pass

    def init_host_with_rpc(self):
        """A hook for service to do jobs after RPC is ready.

        Like init_host(), this method is a hook where services get a
        chance to execute tasks that *need* RPC.  Child classes should
        override this method.
        """
        pass

    def service_version(self):
        """Return the running karbor version string."""
        return version.version_string()

    def service_config(self):
        """Return the effective configuration as a plain dict."""
        return {key: CONF.get(key, None) for key in CONF}

    def is_working(self):
        """Method indicating if service is working correctly.

        This method is supposed to be overridden by subclasses and return
        if manager is working correctly.
        """
        return True
{ "repo_name": "openstack/smaug", "path": "karbor/manager.py", "copies": "1", "size": "3771", "license": "apache-2.0", "hash": 573795395200449800, "line_mean": 31.2307692308, "line_max": 79, "alpha_frac": 0.6950411032, "autogenerated": false, "ratio": 4.299885974914481, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5494927078114481, "avg_score": null, "num_lines": null }
# All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.api.v2 import resource_helper from neutron.common import exceptions as nexception from neutron import manager from neutron.plugins.common import constants # Flavor Exceptions class FlavorNotFound(nexception.NotFound): message = _("Flavor %(flavor_id)s could not be found.") class FlavorInUse(nexception.InUse): message = _("Flavor %(flavor_id)s is used by some service instance.") class ServiceProfileNotFound(nexception.NotFound): message = _("Service Profile %(sp_id)s could not be found.") class ServiceProfileInUse(nexception.InUse): message = _("Service Profile %(sp_id)s is used by some service instance.") class FlavorServiceProfileBindingExists(nexception.Conflict): message = _("Service Profile %(sp_id)s is already associated " "with flavor %(fl_id)s.") class FlavorServiceProfileBindingNotFound(nexception.NotFound): message = _("Service Profile %(sp_id)s is not associated " "with flavor %(fl_id)s.") class ServiceProfileDriverNotFound(nexception.NotFound): message = _("Service Profile driver %(driver)s could not be found.") class ServiceProfileEmpty(nexception.InvalidInput): message = _("Service Profile needs either a driver or metainfo.") class FlavorDisabled(nexception.ServiceUnavailable): message = _("Flavor is not enabled.") class ServiceProfileDisabled(nexception.ServiceUnavailable): 
message = _("Service Profile is not enabled.") class InvalidFlavorServiceType(nexception.InvalidInput): message = _("Invalid service type %(service_type)s.") def _validate_flavor_service_type(validate_type, valid_values=None): """Ensure requested flavor service type plugin is loaded.""" plugins = manager.NeutronManager.get_service_plugins() if validate_type not in plugins: raise InvalidFlavorServiceType(service_type=validate_type) attr.validators['type:validate_flavor_service_type'] = ( _validate_flavor_service_type) FLAVORS = 'flavors' SERVICE_PROFILES = 'service_profiles' FLAVORS_PREFIX = "" RESOURCE_ATTRIBUTE_MAP = { FLAVORS: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.NAME_MAX_LEN}, 'is_visible': True, 'default': ''}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string_or_none': attr.LONG_DESCRIPTION_MAX_LEN}, 'is_visible': True, 'default': ''}, 'service_type': {'allow_post': True, 'allow_put': False, 'validate': {'type:validate_flavor_service_type': None}, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, 'service_profiles': {'allow_post': True, 'allow_put': True, 'validate': {'type:uuid_list': None}, 'is_visible': True, 'default': []}, 'enabled': {'allow_post': True, 'allow_put': True, 'convert_to': attr.convert_to_boolean_if_not_none, 'default': True, 'is_visible': True}, }, SERVICE_PROFILES: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string_or_none': attr.LONG_DESCRIPTION_MAX_LEN}, 'is_visible': True, 'default': ''}, 'driver': {'allow_post': True, 'allow_put': True, 'validate': 
{'type:string': attr.LONG_DESCRIPTION_MAX_LEN}, 'is_visible': True, 'default': ''}, 'metainfo': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': ''}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, 'enabled': {'allow_post': True, 'allow_put': True, 'convert_to': attr.convert_to_boolean_if_not_none, 'is_visible': True, 'default': True}, }, } SUB_RESOURCE_ATTRIBUTE_MAP = { 'next_providers': { 'parent': {'collection_name': 'flavors', 'member_name': 'flavor'}, 'parameters': {'provider': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'driver': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'metainfo': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}} }, 'service_profiles': { 'parent': {'collection_name': 'flavors', 'member_name': 'flavor'}, 'parameters': {'id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}} } } class Flavors(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Neutron Service Flavors" @classmethod def get_alias(cls): return "flavors" @classmethod def get_description(cls): return "Flavor specification for Neutron advanced services" @classmethod def get_updated(cls): return "2015-09-17T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) attr.PLURALS.update(plural_mappings) resources = resource_helper.build_resource_info( plural_mappings, RESOURCE_ATTRIBUTE_MAP, constants.FLAVORS) plugin = 
manager.NeutronManager.get_service_plugins()[ constants.FLAVORS] for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP: # Special handling needed for sub-resources with 'y' ending # (e.g. proxies -> proxy) resource_name = collection_name[:-1] parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent') params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( 'parameters') controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, parent=parent) resource = extensions.ResourceExtension( collection_name, controller, parent, path_prefix=FLAVORS_PREFIX, attr_map=params) resources.append(resource) return resources def update_attributes_map(self, attributes): super(Flavors, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {}
{ "repo_name": "chitr/neutron", "path": "neutron/extensions/flavors.py", "copies": "1", "size": "9315", "license": "apache-2.0", "hash": 3693083552446385000, "line_mean": 38.6382978723, "line_max": 78, "alpha_frac": 0.5405260333, "autogenerated": false, "ratio": 4.423076923076923, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00007465472191116088, "num_lines": 235 }
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.common import rpc as n_rpc from neutron import manager # TODO(amotoki): Move security group RPC API and agent callback # from neutron/agent/securitygroups_rpc.py. class SecurityGroupServerRpcCallback(n_rpc.RpcCallback): """Callback for SecurityGroup agent RPC in plugin implementations.""" # API version history: # 1.1 - Initial version # 1.2 - security_group_info_for_devices introduced as an optimization # NOTE: RPC_API_VERSION must not be overridden in subclasses # to keep RPC API version consistent across plugins. RPC_API_VERSION = '1.2' @property def plugin(self): return manager.NeutronManager.get_plugin() def _get_devices_info(self, devices): return dict( (port['id'], port) for port in self.plugin.get_ports_from_devices(devices) if port and not port['device_owner'].startswith('network:') ) def security_group_rules_for_devices(self, context, **kwargs): """Callback method to return security group rules for each port. also convert remote_group_id rule to source_ip_prefix and dest_ip_prefix rule :params devices: list of devices :returns: port correspond to the devices with security group rules """ devices_info = kwargs.get('devices') ports = self._get_devices_info(devices_info) return self.plugin.security_group_rules_for_ports(context, ports) def security_group_info_for_devices(self, context, **kwargs): """Return security group information for requested devices. 
:params devices: list of devices :returns: sg_info{ 'security_groups': {sg_id: [rule1, rule2]} 'sg_member_ips': {sg_id: {'IPv4': [], 'IPv6': []}} 'devices': {device_id: {device_info}} } """ devices_info = kwargs.get('devices') ports = self._get_devices_info(devices_info) return self.plugin.security_group_info_for_ports(context, ports)
{ "repo_name": "nash-x/hws", "path": "neutron/api/rpc/handlers/securitygroups_rpc.py", "copies": "5", "size": "2631", "license": "apache-2.0", "hash": -7872055193864543000, "line_mean": 36.0563380282, "line_max": 78, "alpha_frac": 0.6647662486, "autogenerated": false, "ratio": 4.04147465437788, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.7206240902977881, "avg_score": null, "num_lines": null }
# All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.api.v2 import resource_helper from neutron.common import exceptions as nexception from neutron import manager from neutron.plugins.common import constants # Flavor Exceptions class FlavorNotFound(nexception.NotFound): message = _("Flavor %(flavor_id)s could not be found.") class FlavorInUse(nexception.InUse): message = _("Flavor %(flavor_id)s is used by some service instance.") class ServiceProfileNotFound(nexception.NotFound): message = _("Service Profile %(sp_id)s could not be found.") class ServiceProfileInUse(nexception.InUse): message = _("Service Profile %(sp_id)s is used by some service instance.") class FlavorServiceProfileBindingExists(nexception.Conflict): message = _("Service Profile %(sp_id)s is already associated " "with flavor %(fl_id)s.") class FlavorServiceProfileBindingNotFound(nexception.NotFound): message = _("Service Profile %(sp_id)s is not associated " "with flavor %(fl_id)s.") class ServiceProfileDriverNotFound(nexception.NotFound): message = _("Service Profile driver %(driver)s could not be found.") class ServiceProfileEmpty(nexception.InvalidInput): message = _("Service Profile needs either a driver or metainfo.") class FlavorDisabled(nexception.ServiceUnavailable): message = _("Flavor is not enabled.") class 
ServiceProfileDisabled(nexception.ServiceUnavailable): message = _("Service Profile is not enabled.") class InvalidFlavorServiceType(nexception.InvalidInput): message = _("Invalid service type %(service_type)s.") def _validate_flavor_service_type(validate_type, valid_values=None): """Ensure requested flavor service type plugin is loaded.""" plugins = manager.NeutronManager.get_service_plugins() if validate_type not in plugins: raise InvalidFlavorServiceType(service_type=validate_type) attr.validators['type:validate_flavor_service_type'] = ( _validate_flavor_service_type) FLAVORS = 'flavors' SERVICE_PROFILES = 'service_profiles' FLAVORS_PREFIX = "" RESOURCE_ATTRIBUTE_MAP = { FLAVORS: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.NAME_MAX_LEN}, 'is_visible': True, 'default': ''}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string_or_none': attr.LONG_DESCRIPTION_MAX_LEN}, 'is_visible': True, 'default': ''}, 'service_type': {'allow_post': True, 'allow_put': False, 'validate': {'type:validate_flavor_service_type': None}, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, 'service_profiles': {'allow_post': True, 'allow_put': True, 'validate': {'type:uuid_list': None}, 'is_visible': True, 'default': []}, 'enabled': {'allow_post': True, 'allow_put': True, 'convert_to': attr.convert_to_boolean_if_not_none, 'default': True, 'is_visible': True}, }, SERVICE_PROFILES: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string_or_none': attr.LONG_DESCRIPTION_MAX_LEN}, 'is_visible': True, 'default': ''}, 'driver': 
{'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.LONG_DESCRIPTION_MAX_LEN}, 'is_visible': True, 'default': ''}, 'metainfo': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': ''}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, 'enabled': {'allow_post': True, 'allow_put': True, 'convert_to': attr.convert_to_boolean_if_not_none, 'is_visible': True, 'default': True}, }, } SUB_RESOURCE_ATTRIBUTE_MAP = { 'next_providers': { 'parent': {'collection_name': 'flavors', 'member_name': 'flavor'}, 'parameters': {'provider': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'driver': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'metainfo': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}} }, 'service_profiles': { 'parent': {'collection_name': 'flavors', 'member_name': 'flavor'}, 'parameters': {'id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}} } } class Flavors(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Neutron Service Flavors" @classmethod def get_alias(cls): return "flavors" @classmethod def get_description(cls): return "Flavor specification for Neutron advanced services" @classmethod def get_updated(cls): return "2015-09-17T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) attr.PLURALS.update(plural_mappings) resources = resource_helper.build_resource_info( plural_mappings, 
RESOURCE_ATTRIBUTE_MAP, constants.FLAVORS) plugin = manager.NeutronManager.get_service_plugins()[ constants.FLAVORS] for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP: # Special handling needed for sub-resources with 'y' ending # (e.g. proxies -> proxy) resource_name = collection_name[:-1] parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent') params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( 'parameters') controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, parent=parent) resource = extensions.ResourceExtension( collection_name, controller, parent, path_prefix=FLAVORS_PREFIX, attr_map=params) resources.append(resource) return resources def update_attributes_map(self, attributes): super(Flavors, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {}
{ "repo_name": "dims/neutron", "path": "neutron/extensions/flavors.py", "copies": "5", "size": "9343", "license": "apache-2.0", "hash": 7898715286617979000, "line_mean": 38.5889830508, "line_max": 78, "alpha_frac": 0.541153805, "autogenerated": false, "ratio": 4.417494089834515, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.7458647894834515, "avg_score": null, "num_lines": null }
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron._i18n import _ class _FeatureFlag(object): def is_compatible(self, value): if value == self.requires: return True if value and self.supports: return True return False def __init__(self, supports, requires): self.supports = supports self.requires = requires if requires and not supports: raise RuntimeError(_("A driver can't require a feature and not " "support it.")) UNSUPPORTED = _FeatureFlag(supports=False, requires=False) OPTIONAL = _FeatureFlag(supports=True, requires=False) MANDATORY = _FeatureFlag(supports=True, requires=True) class L3ServiceProvider(object): """Base class for L3 service provider drivers. On __init__ this will be given a handle to the l3 plugin. It is then the responsibility of the driver to subscribe to the events it is interested in (e.g. router_create, router_update, router_delete, etc). The 'ha' and 'distributed' attributes below are used to determine if a router request with the 'ha' or 'distributed' attribute can be supported by this particular driver. These attributes must be present. The 'use_integrated_agent_scheduler' flag indicates whether or not routers which belong to the driver should be automatically scheduled using the L3 agent scheduler integrated into Neutron. """ ha_support = UNSUPPORTED distributed_support = UNSUPPORTED use_integrated_agent_scheduler = False def __init__(self, l3plugin): self.l3plugin = l3plugin
{ "repo_name": "sebrandon1/neutron", "path": "neutron/services/l3_router/service_providers/base.py", "copies": "4", "size": "2191", "license": "apache-2.0", "hash": -6780426277668985000, "line_mean": 35.5166666667, "line_max": 78, "alpha_frac": 0.6932907348, "autogenerated": false, "ratio": 4.279296875, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0002824858757062147, "num_lines": 60 }
# All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import dns # The alias of the extension. ALIAS = 'dns-integration-domain-keywords' # Whether or not this extension is simply signaling behavior to the user # or it actively modifies the attribute map (mandatory). IS_SHIM_EXTENSION = True # Whether the extension is marking the adoption of standardattr model for # legacy resources, or introducing new standardattr attributes. False or # None if the standardattr model is adopted since the introduction of # resource extension (mandatory). # If this is True, the alias for the extension should be prefixed with # 'standard-attr-'. IS_STANDARD_ATTR_EXTENSION = False # The name of the extension (mandatory). NAME = 'DNS domain names with keywords allowed' # A prefix for API resources. An empty prefix means that the API is going # to be exposed at the v2/ level as any other core resource (mandatory). API_PREFIX = '' # The description of the extension (mandatory). DESCRIPTION = ("Allows to use keywords like <project_id>, <project_name>, " "<user_id> and <user_name> as DNS domain name") # A timestamp of when the extension was introduced (mandatory). UPDATED_TIMESTAMP = "2020-06-15T18:00:00-00:00" # The resource attribute map for the extension. It is effectively the # bulk of the API contract alongside ACTION_MAP (mandatory). RESOURCE_ATTRIBUTE_MAP = {} # The subresource attribute map for the extension. 
It adds child resources # to main extension's resource. The subresource map must have a parent and # a parameters entry. If an extension does not need such a map, None can # be specified (mandatory). For example: SUB_RESOURCE_ATTRIBUTE_MAP = {} # The action map: it associates verbs with methods to be performed on # the API resource (mandatory). ACTION_MAP = {} # The action status: it associates response statuses with methods to be # performed on the API resource (mandatory). ACTION_STATUS = {} # The list of required extensions (mandatory). REQUIRED_EXTENSIONS = [dns.ALIAS] # The list of optional extensions (mandatory). OPTIONAL_EXTENSIONS = []
{ "repo_name": "openstack/neutron-lib", "path": "neutron_lib/api/definitions/dns_domain_keywords.py", "copies": "1", "size": "2672", "license": "apache-2.0", "hash": 3802518491030245400, "line_mean": 38.2941176471, "line_max": 78, "alpha_frac": 0.7481287425, "autogenerated": false, "ratio": 4.012012012012012, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5260140754512013, "avg_score": null, "num_lines": null }
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import fip64 from neutron_lib.api import extensions from neutron_lib import constants as lib_const from neutron_lib.db import utils as lib_db_utils from neutron_lib.plugins import directory from neutron.extensions import floatingip_pools as fip_pools_ext from neutron.objects import base as base_obj from neutron.objects import network as net_obj from neutron.objects import subnet as subnet_obj class FloatingIPPoolsDbMixin(object): """Class to support floating IP pool.""" _is_v6_supported = None @staticmethod def _make_floatingip_pool_dict(context, subnet, fields=None): res = {'subnet_id': subnet.id, 'subnet_name': subnet.name, 'tenant_id': context.tenant_id, 'network_id': subnet.network_id, 'cidr': str(subnet.cidr)} return lib_db_utils.resource_fields(res, fields) def get_floatingip_pools(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Return information for available floating IP pools""" pager = base_obj.Pager(sorts, limit, page_reverse, marker) net_ids = [n.network_id for n in net_obj.ExternalNetwork.get_objects(context)] # NOTE(hongbin): Use elevated context to make sure we have enough # permission to retrieve subnets that are not in current tenant # but belongs to external networks shared with current tenant. 
admin_context = context.elevated() subnet_objs = subnet_obj.Subnet.get_objects(admin_context, _pager=pager, network_id=net_ids) return [self._make_floatingip_pool_dict(context, obj, fields) for obj in subnet_objs if (obj.ip_version == lib_const.IP_VERSION_4 or self.is_v6_supported)] @property def is_v6_supported(self): supported = self._is_v6_supported if supported is None: supported = False for plugin in directory.get_plugins().values(): if extensions.is_extension_supported(plugin, fip64.ALIAS): supported = True break self._is_v6_supported = supported return supported class FloatingIPPoolsMixin(FloatingIPPoolsDbMixin, fip_pools_ext.FloatingIPPoolPluginBase): pass
{ "repo_name": "noironetworks/neutron", "path": "neutron/db/l3_fip_pools_db.py", "copies": "3", "size": "3154", "license": "apache-2.0", "hash": 3611935847788735000, "line_mean": 39.961038961, "line_max": 78, "alpha_frac": 0.634115409, "autogenerated": false, "ratio": 4.326474622770919, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.646059003177092, "avg_score": null, "num_lines": null }
# All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import flavors as apidef from neutron_lib.api import extensions as api_extensions from neutron_lib.plugins import constants from neutron_lib.plugins import directory from neutron.api import extensions from neutron.api.v2 import base from neutron.api.v2 import resource_helper class Flavors(api_extensions.APIExtensionDescriptor): api_definition = apidef @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, apidef.RESOURCE_ATTRIBUTE_MAP) resources = resource_helper.build_resource_info( plural_mappings, apidef.RESOURCE_ATTRIBUTE_MAP, constants.FLAVORS) plugin = directory.get_plugin(constants.FLAVORS) for collection_name in apidef.SUB_RESOURCE_ATTRIBUTE_MAP: # Special handling needed for sub-resources with 'y' ending # (e.g. proxies -> proxy) resource_name = collection_name[:-1] parent = apidef.SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( 'parent') params = apidef.SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( 'parameters') controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, parent=parent) resource = extensions.ResourceExtension( collection_name, controller, parent, path_prefix=apidef.API_PREFIX, attr_map=params) resources.append(resource) return resources
{ "repo_name": "mahak/neutron", "path": "neutron/extensions/flavors.py", "copies": "5", "size": "2373", "license": "apache-2.0", "hash": -7204958039180643000, "line_mean": 38.55, "line_max": 78, "alpha_frac": 0.6342182891, "autogenerated": false, "ratio": 4.5899419729206965, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.7724160262020697, "avg_score": null, "num_lines": null }
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.api import converters
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import provider_net as provider
from neutron_lib.api import validators
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api

from neutron._i18n import _
from neutron.common import utils as n_utils
from neutron.objects import trunk as trunk_objects
from neutron.services.trunk import constants
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk import utils

# This layer is introduced for keeping business logic and
# data persistence decoupled.


def trunk_can_be_managed(context, trunk):
    """Validate that the trunk can be managed.

    :param context: request context (unused here; kept for rule signature
        uniformity).
    :param trunk: trunk object to check.
    :raises trunk_exc.TrunkDisabled: if the trunk's admin_state_up is False.
    """
    if not trunk.admin_state_up:
        raise trunk_exc.TrunkDisabled(trunk_id=trunk.id)


def enforce_port_deletion_rules(resource, event, trigger, **kwargs):
    """Prohibit the deletion of a port that's used in a trunk.

    Callback handler: expects ``context`` and ``port_id`` in kwargs.

    :raises trunk_exc.PortInUseAsSubPort: if the port is a subport of a trunk.
    :raises trunk_exc.PortInUseAsTrunkParent: if the port is a trunk's parent.
    """
    # NOTE: the ML2 plugin properly catches these exceptions when raised, but
    # non-ML2 plugins might not. To address this we should move the callback
    # registry notification emitted in the ML2 plugin's delete_port() higher
    # up in the plugin hierarchy.
    context = kwargs['context']
    port_id = kwargs['port_id']
    # A port may be referenced either as a subport...
    subport_obj = trunk_objects.SubPort.get_object(context, port_id=port_id)
    if subport_obj:
        raise trunk_exc.PortInUseAsSubPort(port_id=port_id,
                                           trunk_id=subport_obj.trunk_id)
    # ...or as the parent port of a trunk; both block deletion.
    trunk_obj = trunk_objects.Trunk.get_object(context, port_id=port_id)
    if trunk_obj:
        raise trunk_exc.PortInUseAsTrunkParent(port_id=port_id,
                                               trunk_id=trunk_obj.id)


class TrunkPortValidator(object):
    """Validates that a port may be used as a trunk parent or subport."""

    def __init__(self, port_id):
        self.port_id = port_id
        # Port dict is fetched lazily by is_bound()/check_not_in_use().
        self._port = None

    def validate(self, context, parent_port=True):
        """Validate that the port can be used in a trunk.

        :param context: request context.
        :param parent_port: True if the port is intended for use
                            as parent in a trunk.
        :returns: the validated port_id.
        :raises trunk_exc.TrunkPortInUse: if the port is already a subport.
        :raises trunk_exc.ParentPortInUse: if the port is already a trunk
            parent, or (parent_port=True) it is bound and cannot be trunked.
        :raises n_exc.PortInUse: if (parent_port=False) the port has a
            device_id set.
        """
        # TODO(tidwellr): there is a chance of a race between the
        # time these checks are performed and the time the trunk
        # creation is executed. To be revisited, if it bites.

        # Validate that the given port_id is not used by a subport.
        subports = trunk_objects.SubPort.get_objects(
            context, port_id=self.port_id)
        if subports:
            raise trunk_exc.TrunkPortInUse(port_id=self.port_id)

        # Validate that the given port_id is not used by a trunk.
        trunks = trunk_objects.Trunk.get_objects(context, port_id=self.port_id)
        if trunks:
            raise trunk_exc.ParentPortInUse(port_id=self.port_id)

        if parent_port:
            # if the port is being used as a parent in a trunk, check if
            # it can be trunked, i.e. if it is already associated to physical
            # resources (namely it is bound). Bound ports may be used as
            # trunk parents, but that depends on the underlying driver in
            # charge.
            if not self.can_be_trunked(context):
                raise trunk_exc.ParentPortInUse(port_id=self.port_id)
        else:
            # if the port is being used as subport in a trunk, check if it is a
            # port that is not actively used for other purposes, e.g. a router
            # port, compute port, DHCP port etc. We have no clue what the side
            # effects of connecting the port to a trunk would be, and it is
            # better to err on the side of caution and prevent the operation.
            self.check_not_in_use(context)

        return self.port_id

    def is_bound(self, context):
        """Return true if the port is bound, false otherwise."""
        # Validate that the given port_id does not have a port binding.
        core_plugin = directory.get_plugin()
        # Cache the port dict for later use by can_be_trunked().
        self._port = core_plugin.get_port(context, self.port_id)
        return bool(self._port.get(portbindings.HOST_ID))

    def can_be_trunked(self, context):
        """Return true if a port can be trunked."""
        if not self.is_bound(context):
            # An unbound port can be trunked, always.
            return True

        trunk_plugin = directory.get_plugin('trunk')
        vif_type = self._port.get(portbindings.VIF_TYPE)
        binding_host = self._port.get(portbindings.HOST_ID)

        # Determine the driver that will be in charge of the trunk: this
        # can be determined based on the vif type, whether or not the
        # driver is agent-based, and whether the host is running the agent
        # associated to the driver itself.
        host_agent_types = utils.get_agent_types_by_host(context, binding_host)
        drivers = [
            driver for driver in trunk_plugin.registered_drivers
            if utils.is_driver_compatible(
                context, driver, vif_type, host_agent_types)
        ]
        if len(drivers) > 1:
            # Ambiguity: more than one driver claims this bound port.
            raise trunk_exc.TrunkPluginDriverConflict()
        elif len(drivers) == 1:
            return drivers[0].can_trunk_bound_port
        else:
            # No compatible driver: a bound port cannot be trunked.
            return False

    def check_not_in_use(self, context):
        """Raises PortInUse for ports assigned for device purposes."""
        core_plugin = directory.get_plugin()
        self._port = core_plugin.get_port(context, self.port_id)
        # NOTE(armax): the trunk extension itself does not make use of the
        # device_id field, because it has no reason to. If need be, this
        # check can be altered to accommodate the change in logic.
        if self._port['device_id']:
            raise n_exc.PortInUse(net_id=self._port['network_id'],
                                  port_id=self._port['id'],
                                  device_id=self._port['device_id'])


class SubPortsValidator(object):
    """Validates subport dicts before they are added to a trunk."""

    def __init__(self, segmentation_types, subports, trunk_port_id=None):
        # Mapping of segmentation_type name -> callable validating the ID
        # range (see _raise_if_segmentation_details_invalid).
        self._segmentation_types = segmentation_types
        self.subports = subports
        self.trunk_port_id = trunk_port_id

    def validate(self, context,
                 basic_validation=False, trunk_validation=True):
        """Validate that subports can be used in a trunk.

        :param basic_validation: also run API-level schema validation on the
            subport dicts (needed when they bypassed the API layer).
        :param trunk_validation: run the full per-subport trunk checks
            (MTU, segmentation details, usage in other trunks).
        :returns: the list of validated subport dicts.
        """
        # Perform basic validation on subports, in case subports
        # are not automatically screened by the API layer.
        if basic_validation:
            msg = validators.validate_subports(self.subports)
            if msg:
                raise n_exc.InvalidInput(error_message=msg)
        if trunk_validation:
            trunk_port_mtu = self._get_port_mtu(context, self.trunk_port_id)
            # Resolve INHERIT segmentation requests before validating.
            self._prepare_subports(context)
            return [self._validate(context, s, trunk_port_mtu)
                    for s in self.subports]
        else:
            return self.subports

    def _prepare_subports(self, context):
        """Update subports segmentation details if INHERIT is requested."""
        # Map port_id -> index in self.subports so resolved entries can be
        # written back in place.
        port_ids = {
            s['port_id']: i
            for i, s in enumerate(self.subports)
            if s.get('segmentation_type') == constants.INHERIT
        }
        core_plugin = directory.get_plugin()
        if not port_ids:
            # Nothing to inherit; leave subports untouched.
            return
        elif not n_utils.is_extension_supported(core_plugin, provider.ALIAS):
            # INHERIT needs the provider extension to read network
            # segmentation details.
            msg = _("Cannot accept segmentation type %s") % constants.INHERIT
            raise n_exc.InvalidInput(error_message=msg)

        ports = core_plugin.get_ports(context, filters={'id': port_ids})
        # this assumes a user does not try to trunk the same network
        # more than once.
        network_port_map = {
            x['network_id']: {'port_id': x['id']}
            for x in ports
        }
        # Elevated context: provider attributes are admin-only.
        networks = core_plugin.get_networks(
            context.elevated(), filters={'id': network_port_map})

        for net in networks:
            port = network_port_map[net['id']]
            port.update({'segmentation_id': net[provider.SEGMENTATION_ID],
                         'segmentation_type': net[provider.NETWORK_TYPE]})
            self.subports[port_ids[port['port_id']]] = port

    def _get_port_mtu(self, context, port_id):
        """Return MTU for the network where the given port belongs to.

        If the network or port cannot be obtained, or if MTU is not
        defined, returns None.
        """
        core_plugin = directory.get_plugin()

        if not n_utils.is_extension_supported(core_plugin, 'net-mtu'):
            # MTU extension not loaded: MTU checks cannot be enforced.
            return

        try:
            port = core_plugin.get_port(context, port_id)
            return core_plugin.get_network(
                context, port['network_id'])[api.MTU]
        except (n_exc.PortNotFound, n_exc.NetworkNotFound):
            # A concurrent request might have made the port or network
            # disappear; though during DB insertion, the subport request
            # will fail on integrity constraint, it is safer to return
            # a None MTU here.
            return

    def _raise_subport_is_parent_port(self, context, subport):
        """Reject a subport that is the trunk's own parent port."""
        if subport['port_id'] == self.trunk_port_id:
            raise trunk_exc.ParentPortInUse(port_id=subport['port_id'])

    def _raise_subport_invalid_mtu(self, context, subport, trunk_port_mtu):
        """Reject a subport whose network MTU exceeds the trunk port MTU."""
        # Check MTU sanity - subport MTU must not exceed trunk MTU.
        # If for whatever reason trunk_port_mtu is not available,
        # the MTU sanity check cannot be enforced.
        if trunk_port_mtu:
            port_mtu = self._get_port_mtu(context, subport['port_id'])
            if port_mtu and port_mtu > trunk_port_mtu:
                raise trunk_exc.SubPortMtuGreaterThanTrunkPortMtu(
                    port_id=subport['port_id'],
                    port_mtu=port_mtu,
                    trunk_id=self.trunk_port_id,
                    trunk_mtu=trunk_port_mtu
                )

    def _raise_if_segmentation_details_missing(self, subport):
        """Return (type, id) from the subport dict or raise InvalidInput."""
        try:
            segmentation_type = subport["segmentation_type"]
            segmentation_id = (
                converters.convert_to_int(subport["segmentation_id"]))
            return (segmentation_type, segmentation_id)
        except KeyError:
            # Either key absent: both must be supplied together.
            msg = _("Invalid subport details '%s': missing segmentation "
                    "information. Must specify both segmentation_id and "
                    "segmentation_type") % subport
            raise n_exc.InvalidInput(error_message=msg)
        except n_exc.InvalidInput:
            # convert_to_int failed: re-raise with a subport-specific message.
            msg = _("Invalid subport details: segmentation_id '%s' is "
                    "not an integer") % subport["segmentation_id"]
            raise n_exc.InvalidInput(error_message=msg)

    def _raise_if_segmentation_details_invalid(self,
                                               segmentation_type,
                                               segmentation_id):
        """Raise InvalidInput for an unknown type or out-of-range ID."""
        if segmentation_type not in self._segmentation_types:
            msg = _("Unknown segmentation_type '%s'") % segmentation_type
            raise n_exc.InvalidInput(error_message=msg)

        # The per-type callable validates the ID range (e.g. VLAN 1-4094).
        if not self._segmentation_types[segmentation_type](segmentation_id):
            msg = _("Segmentation ID '%s' is not in range") % segmentation_id
            raise n_exc.InvalidInput(error_message=msg)

    def _raise_if_subport_is_used_in_other_trunk(self, context, subport):
        """Reject a subport already used as parent/subport of another trunk."""
        trunk_validator = TrunkPortValidator(subport['port_id'])
        # parent_port=False: validate the port for use as a subport.
        trunk_validator.validate(context, parent_port=False)

    def _validate(self, context, subport, trunk_port_mtu):
        """Run all per-subport checks; return the subport if all pass."""
        self._raise_subport_is_parent_port(context, subport)

        self._raise_subport_invalid_mtu(context, subport, trunk_port_mtu)

        segmentation_type, segmentation_id = (
            self._raise_if_segmentation_details_missing(subport))

        self._raise_if_segmentation_details_invalid(
            segmentation_type, segmentation_id)

        self._raise_if_subport_is_used_in_other_trunk(context, subport)

        return subport
{ "repo_name": "eayunstack/neutron", "path": "neutron/services/trunk/rules.py", "copies": "1", "size": "12625", "license": "apache-2.0", "hash": -4665204423603559000, "line_mean": 42.8368055556, "line_max": 79, "alpha_frac": 0.6228910891, "autogenerated": false, "ratio": 4.062097812097812, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5184988901197811, "avg_score": null, "num_lines": null }