docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Joins terms using OR operator. Args: terms (list): terms to join Examples: self._or_join(['term1', 'term2']) -> 'term1 | term2' Returns: str
def _or_join(self, terms): from six import text_type if isinstance(terms, (tuple, list)): if len(terms) > 1: return ' | '.join(text_type(t) for t in terms) else: return terms[0] else: return terms
988,068
Finds datasets by search phrase. Args: search_phrase (str or unicode): limit (int, optional): how many results to return. None means without limit. Returns: list of DatasetSearchResult instances.
def search(self, search_phrase, limit=None): query, query_params = self._make_query_from_terms(search_phrase, limit=limit) self._parsed_query = (str(query), query_params) assert isinstance(query, TextClause) datasets = {} def make_result(vid=None, b_score=0, p_score...
988,070
Creates a query for dataset from decomposed search terms. Args: terms (dict or unicode or string): Returns: tuple of (TextClause, dict): First element is FTS query, second is parameters of the query. Element of the execution of the query is pair: (vid, score).
def _make_query_from_terms(self, terms, limit=None): expanded_terms = self._expand_terms(terms) if expanded_terms['doc']: # create query with real score. query_parts = ["SELECT vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) as score"] if expanded_terms['...
988,074
Deletes given dataset from index. Args: vid (str): dataset vid.
def _delete(self, vid=None): assert vid is not None query = text() self.execute(query, vid=vid)
988,075
Creates a query for partition from decomposed search terms. Args: terms (dict or unicode or string): Returns: tuple of (TextClause, dict): First element is FTS query, second is parameters of the query. Element of the execution of the query is tuple of th...
def _make_query_from_terms(self, terms, limit=None): expanded_terms = self._expand_terms(terms) terms_used = 0 if expanded_terms['doc']: # create query with real score. query_parts = ["SELECT vid, dataset_vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) as ...
988,076
Finds partitions by search phrase. Args: search_phrase (str or unicode): limit (int, optional): how many results to generate. None means without limit. Generates: PartitionSearchResult instances.
def search(self, search_phrase, limit=None): query, query_params = self._make_query_from_terms(search_phrase, limit=limit) self._parsed_query = (str(query), query_params) if query is not None: self.backend.library.database.set_connection_search_path() results...
988,077
Converts partition to document indexed by to FTS index. Args: partition (orm.Partition): partition to convert. Returns: dict with structure matches to BasePartitionIndex._schema.
def _as_document(self, partition): doc = super(self.__class__, self)._as_document(partition) # pass time_coverage to the _index_document. doc['time_coverage'] = partition.time_coverage return doc
988,078
Finds identifiers by search phrase. Args: search_phrase (str or unicode): limit (int, optional): how many results to return. None means without limit. Returns: list of IdentifierSearchResult instances.
def search(self, search_phrase, limit=None): query_parts = [ 'SELECT identifier, type, name, similarity(name, :word) AS sml', 'FROM identifier_index', 'WHERE name % :word', 'ORDER BY sml DESC, name'] query_params = { 'word': search_p...
988,082
Deletes given identifier from index. Args: identifier (str): identifier of the document to delete.
def _delete(self, identifier=None): query = text() self.execute(query, identifier=identifier)
988,084
Creates new partition and returns it. Args: table (orm.Table): Returns: orm.Partition
def new_partition(self, table, **kwargs): from . import Partition # Create the basic partition record, with a sequence ID. if isinstance(table, string_types): table = self.table(table) if 'sequence_id' in kwargs: sequence_id = kwargs['sequence_id'] ...
988,155
Finds appropriate term in the prop_tree and sets its value from config_instance. Args: configs_map (dict): key is id of the config, value is Config instance (AKA cache of the configs) prop_tree (PropertyDictTree): poperty tree to populate. config_instance (Config):
def _set_value(instance_to_path_map, path_to_instance_map, prop_tree, config_instance): path = instance_to_path_map[config_instance] # find group group = prop_tree for elem in path[:-1]: group = getattr(group, elem) assert group._key == config_instance.parent.key setattr(group, co...
988,170
Get or create sqlalchemy instance. Args: session (Sqlalchemy session): model (sqlalchemy model): kwargs (dict): kwargs to lookup or create instance. Returns: Tuple: first element is found or created instance, second is boolean - True if instance created, False if in...
def get_or_create(session, model, **kwargs): instance = session.query(model).filter_by(**kwargs).first() if instance: return instance, False else: instance = model(**kwargs) if 'dataset' in kwargs: instance.update_sequence_id(session, kwargs['dataset']) sessi...
988,171
Finds appropriate config instance and returns it. Args: group_or_term (Group or Term): session (Sqlalchemy session): kwargs (dict): kwargs to pass to get_or_create. Returns: tuple of (Config, bool):
def _get_config_instance(group_or_term, session, **kwargs): path = group_or_term._get_path() cached = group_or_term._top._cached_configs.get(path) if cached: config = cached created = False else: # does not exist or not yet cached config, created = get_or_create(sess...
988,172
Creates materialized view for each indexed partition found in the query. Args: asql_query (str): asql query library (ambry.Library): backend (SQLiteBackend): connection (apsw.Connection): Returns: str: converted asql if it contains index query. If not, returns asql_quer...
def _preprocess_sqlite_index(asql_query, library, backend, connection): new_query = None if asql_query.strip().lower().startswith('index'): logger.debug( '_preprocess_index: create index query found.\n asql query: {}' .format(asql_query)) index = parse_index(a...
988,415
Creates virtual table or read-only table for gion. Args: ref (str): id, vid, name or versioned name of the partition. materialize (boolean): if True, create read-only table. If False create virtual table. Returns: str: name of the created table.
def install(self, connection, partition, table_name = None, index_columns=None, materialize=False, logger = None): virtual_table = partition.vid table = partition.vid if not table_name else table_name if self._relation_exists(connection, table): if logger: ...
988,416
Create an index on the columns. Args: connection (apsw.Connection): connection to sqlite database who stores mpr table or view. partition (orm.Partition): columns (list of str):
def index(self, connection, partition, columns): import hashlib query_tmpl = if not isinstance(columns,(list,tuple)): columns = [columns] col_list = ','.join('"{}"'.format(col) for col in columns) col_hash = hashlib.md5(col_list).hexdigest() tr...
988,417
Finds and returns view name in the sqlite db represented by given connection. Args: connection: connection to sqlite db where to look for partition table. table (orm.Table): Raises: MissingViewError: if database does not have partition table. Returns: ...
def _get_mpr_view(self, connection, table): logger.debug( 'Looking for view of the table.\n table: {}'.format(table.vid)) view = self.get_view_name(table) view_exists = self._relation_exists(connection, view) if view_exists: logger.debug( ...
988,419
Returns name of the sqlite table who stores mpr data. Args: connection (apsw.Connection): connection to sqlite database who stores mpr data. partition (orm.Partition): Returns: str: Raises: MissingTableError: if partition table not found in the ...
def _get_mpr_table(self, connection, partition): # TODO: This is the first candidate for optimization. Add field to partition # with table name and update it while table creation. # Optimized version. # # return partition.mpr_table or raise exception # Not optim...
988,420
Returns True if relation (table or view) exists in the sqlite db. Otherwise returns False. Args: connection (apsw.Connection): connection to sqlite database who stores mpr data. partition (orm.Partition): Returns: boolean: True if relation exists, False otherwise.
def _relation_exists(self, connection, relation): query = 'SELECT 1 FROM sqlite_master WHERE (type=\'table\' OR type=\'view\') AND name=?;' cursor = connection.cursor() cursor.execute(query, [relation]) result = cursor.fetchall() return result == [(1,)]
988,421
Creates and returns `CREATE TABLE ...` sql statement for given mprows. Args: partition (orm.Partition): tablename (str): name of the table in the return create query. include (list of str, optional): list of columns to include to query. Returns: str: cre...
def _get_create_query(partition, tablename, include=None): TYPE_MAP = { 'int': 'INTEGER', 'float': 'REAL', six.binary_type.__name__: 'TEXT', six.text_type.__name__: 'TEXT', 'date': 'DATE', 'datetime': 'TIMESTAMP WITHOUT TIME ZONE' ...
988,422
Creates sqlite virtual table for mpr file of the given partition. Args: connection: connection to the sqlite db who stores mpr data. partition (orm.Partition):
def _add_partition(self, connection, partition): logger.debug('Creating virtual table for partition.\n partition: {}'.format(partition.name)) sqlite_med.add_partition(connection, partition.datafile, partition.vid+'_vt')
988,424
Executes given query using given connection. Args: connection (apsw.Connection): connection to the sqlite db who stores mpr data. query (str): sql query fetch (boolean, optional): if True, fetch query result and return it. If False, do not fetch. Returns: ...
def _execute(self, connection, query, fetch=True): cursor = connection.cursor() try: cursor.execute(query) except Exception as e: from ambry.mprlib.exceptions import BadSQLError raise BadSQLError("Failed to execute query: {}; {}".format(query, e)) ...
988,425
Authenticate user by any means and return either true or false. Args: Returns: tuple (is_valid, username): True is valid user, False if not
def authenticate(self): basic_auth = request.authorization is_valid = False user = None if basic_auth: is_valid, user = self.check_basic_auth( basic_auth.username, basic_auth.password ) else: # Try token auth token = r...
988,452
Decode a Base X encoded string into the number. Arguments: - `string`: The encoded string - `alphabet`: The alphabet to use for encoding Stolen from: http://stackoverflow.com/a/1119769/1144479
def base62_decode(cls, string): alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" base = len(alphabet) strlen = len(string) num = 0 idx = 0 for char in string: power = (strlen - (idx + 1)) try: ...
988,485
Returns list of tuple containg columns of the table. Args: connection: sqlalchemy connection to sqlite database. table (str): name of the table Returns: list of (name, datatype, position): where name is column name, datatype is python type of the column, position is ordinal...
def _get_sqlite_columns(connection, table): # TODO: Move to the sqlite wrapper. # TODO: Consider sqlalchemy mapping. SQL_TO_PYTHON_TYPES = { 'INT': int, 'INTEGER': int, 'TINYINT': int, 'SMALLINT': int, 'MEDIUMINT': int, 'BIGINT': int, 'UNSIGNED BI...
988,703
Converts ambry bundle to dict ready to send to CKAN API. Args: bundle (ambry.bundle.Bundle): bundle to convert. Returns: dict: dict to send to CKAN to create dataset. See http://docs.ckan.org/en/latest/api/#ckan.logic.action.create.package_create
def _convert_bundle(bundle): # shortcut for metadata meta = bundle.dataset.config.metadata notes = '' for f in bundle.dataset.files: if f.path.endswith('documentation.md'): contents = f.unpacked_contents if isinstance(contents, six.binary_type): con...
988,764
Finds table by ref and returns it. Args: ref (str): id, vid (versioned id) or name of the table Raises: NotFoundError: if table with given ref not found. Returns: orm.Table
def table(self, ref): try: obj_number = ObjectNumber.parse(ref) ds_obj_number = obj_number.as_dataset dataset = self._db.dataset(ds_obj_number) # Could do it in on SQL query, but this is easier. table = dataset.table(ref) except NotObjectNumbe...
988,885
Indexes all datasets of the library. Args: tick_f (callable, optional): callable of one argument. Gets string with index state.
def index_library_datasets(self, tick_f=None): dataset_n = 0 partition_n = 0 def tick(d, p): if tick_f: tick_f('datasets: {} partitions: {}'.format(d, p)) for dataset in self.library.datasets: if self.backend.dataset_index.index_one(da...
989,054
Finds partition by reference and installs it to warehouse db. Args: ref (str): id, vid (versioned id), name or vname (versioned name) of the partition.
def install(self, ref, table_name=None, index_columns=None,logger=None): try: obj_number = ObjectNumber.parse(ref) if isinstance(obj_number, TableNumber): table = self._library.table(ref) connection = self._backend._get_connection() ...
989,187
Creates materialized table for given partition reference. Args: ref (str): id, vid, name or vname of the partition. Returns: str: name of the partition table in the database.
def materialize(self, ref, table_name=None, index_columns=None, logger=None): from ambry.library import Library assert isinstance(self._library, Library) logger.debug('Materializing warehouse partition.\n partition: {}'.format(ref)) partition = self._library.partition(ref) ...
989,188
Create an index on the columns. Args: ref (str): id, vid, name or versioned name of the partition. columns (list of str): names of the columns needed indexes.
def index(self, ref, columns): from ambry.orm.exc import NotFoundError logger.debug('Creating index for partition.\n ref: {}, columns: {}'.format(ref, columns)) connection = self._backend._get_connection() try: table_or_partition = self._library.partition(ref) ...
989,189
Executes all sql statements from asql. Args: library (library.Library): asql (str): ambry sql query - see https://github.com/CivicKnowledge/ambry/issues/140 for details.
def parse_sql(self, asql): import sqlparse statements = sqlparse.parse(sqlparse.format(asql, strip_comments=True)) parsed_statements = [] for statement in statements: statement_str = statement.to_unicode().strip() for preprocessor in self._backend.sql_...
989,190
Return url parameter with given index. Args: - index: starts from zero, and come after controller and action names in url.
def get_url_param(self, index, default=None): params = self.get_url_params() return params[index] if index < len(params) else default
989,195
Gets a tzinfo object from a string. Args: tzinfo: A string (or string like) object, or a datetime.tzinfo object. Returns: An datetime.tzinfo object. Raises: UnknownTimeZoneError: If the timezone given can't be decoded.
def _tzinfome(tzinfo): if not isinstance(tzinfo, datetime.tzinfo): try: tzinfo = pytz.timezone(tzinfo) assert tzinfo.zone in pytz.all_timezones except AttributeError: raise pytz.UnknownTimeZoneError("Unknown timezone! %s" % tzinfo) return tzinfo
989,286
Localize a datetime to the local timezone. If dt is naive, returns the same datetime with the local timezone, otherwise uses astimezone to convert. Args: dt: datetime object. force_to_local: Force all results to be in local time. Returns: A datetime_tz object.
def localize(dt, force_to_local=True): if not isinstance(dt, datetime_tz): if not dt.tzinfo: return datetime_tz(dt, tzinfo=localtz()) dt = datetime_tz(dt) if force_to_local: return dt.astimezone(localtz()) return dt
989,287
Gets a naive datetime from a datetime. datetime_tz objects can't just have tzinfo replaced with None, you need to call asdatetime. Args: dt: datetime object. Returns: datetime object without any timezone information.
def get_naive(dt): if not dt.tzinfo: return dt if hasattr(dt, "asdatetime"): return dt.asdatetime() return dt.replace(tzinfo=None)
989,288
Wrap a method. Patch a method which might return a datetime.datetime to return a datetime_tz.datetime_tz instead. Args: name: The name of the method to patch
def _wrap_method(name): method = getattr(datetime.datetime, name) # Have to give the second argument as method has no __module__ option. @functools.wraps(method, ("__name__", "__doc__"), ()) def wrapper(self, *args, **kw): r = method(self, *args, **kw) if isinstance(r, datetime.datetime) and not is...
989,295
Return this datetime_tz as a datetime object. Args: naive: Return *without* any tz info. Returns: This datetime_tz as a datetime object.
def asdatetime(self, naive=True): args = list(self.timetuple()[0:6])+[self.microsecond] if not naive: args.append(self.tzinfo) return datetime.datetime(*args)
989,299
Returns a version of this timestamp converted to the given timezone. Args: tzinfo: Either a datetime.tzinfo object or a string (which will be looked up in pytz. Returns: A datetime_tz object in the given timezone.
def astimezone(self, tzinfo): # Assert we are not a naive datetime object assert self.tzinfo is not None tzinfo = _tzinfome(tzinfo) d = self.asdatetime(naive=False).astimezone(tzinfo) return type(self)(d)
989,301
Return datetime with new specified fields given as arguments. For example, dt.replace(days=4) would return a new datetime_tz object with exactly the same as dt but with the days attribute equal to 4. Any attribute can be replaced, but tzinfo can not be set to None. Args: Any datetime_tz attribu...
def replace(self, **kw): if "tzinfo" in kw: if kw["tzinfo"] is None: raise TypeError("Can not remove the timezone use asdatetime()") else: tzinfo = kw["tzinfo"] del kw["tzinfo"] else: tzinfo = None is_dst = None if "is_dst" in kw: is_dst = kw["is_dst...
989,302
Iterate over the days between the given datetime_tzs. Args: start: datetime_tz to start from. end: (Optional) Date to end at, if not given the iterator will never terminate. Returns: An iterator which generates datetime_tz objects a day apart.
def days(start, end=None): return iterate.between(start, datetime.timedelta(days=1), end)
989,310
Iterate over the hours between the given datetime_tzs. Args: start: datetime_tz to start from. end: (Optional) Date to end at, if not given the iterator will never terminate. Returns: An iterator which generates datetime_tz objects a hour apart.
def hours(start, end=None): return iterate.between(start, datetime.timedelta(hours=1), end)
989,311
Iterate over the minutes between the given datetime_tzs. Args: start: datetime_tz to start from. end: (Optional) Date to end at, if not given the iterator will never terminate. Returns: An iterator which generates datetime_tz objects a minute apart.
def minutes(start, end=None): return iterate.between(start, datetime.timedelta(minutes=1), end)
989,312
Parses asql query to view object. Args: query (str): asql query Returns: View instance: parsed view.
def parse_view(query): try: idx = query.lower().index('where') query = query[:idx] except ValueError: pass if not query.endswith(';'): query = query.strip() query += ';' result = _view_stmt.parseString(query) return View(result)
989,391
Updates current record. Args: rec (FIMRecord):
def update(self, rec=None, drop=None, tables=None, install=None, materialize=None, indexes=None, joins=0, views=0): if not drop: drop = [] if not tables: tables = set() if not install: install = set() if not materialize: ...
989,404
Create a three letter acronym from the input string s. Args: past: A set object, for storing acronyms that have already been created prefix: A prefix added to the acronym before storing in the set s: The string to create the acronym from.
def make_acro(past, prefix, s): # pragma: no cover def _make_acro(s, t=0): # Really should cache these ... v = ['a', 'e', 'i', 'o', 'u', 'y'] c = [chr(x) for x in six_xrange(ord('a'), ord('z') + 1) if chr(x) not in v] s = re.sub(r'\W+', '', s.lower()) vx = ...
989,774
Initalizes partition search result fields. Args: dataset_vid (str): vid of the partition's dataset. vid (str): partition vid. score (int): score of the search result.
def __init__(self, dataset_vid=None, vid=None, score=None): assert vid is not None, 'vid can not be None.' assert dataset_vid is not None, 'dataset_vid can not be None.' assert score is not None, 'score can not be None.' self.dataset_vid = dataset_vid self.vid = vid ...
989,947
Joins terms using OR operator. Args: terms (list): terms to join Examples: self._or_join(['term1', 'term2']) -> 'term1 OR term2' Returns: str
def _or_join(self, terms): if isinstance(terms, (tuple, list)): if len(terms) > 1: return '(' + ' OR '.join(terms) + ')' else: return terms[0] else: return terms
989,950
Joins terms using AND operator. Args: terms (list): terms to join Examples: self._and_join(['term1']) -> 'term1' self._and_join(['term1', 'term2']) -> 'term1 AND term2' self._and_join(['term1', 'term2', 'term3']) -> 'term1 AND term2 AND term3' R...
def _and_join(self, terms): if len(terms) > 1: return ' AND '.join([self._or_join(t) for t in terms]) else: return self._or_join(terms[0])
989,952
Indexes exactly one object of the Ambry system. Args: instance (any): instance to index. force (boolean): if True replace document in the index. Returns: boolean: True if document added to index, False if document already exists in the index.
def index_one(self, instance, force=False): if not self.is_indexed(instance) and not force: doc = self._as_document(instance) self._index_document(doc, force=force) logger.debug('{} indexed as\n {}'.format(instance.__class__, pformat(doc))) return True ...
989,953
Converts dataset to document indexed by to FTS index. Args: dataset (orm.Dataset): dataset to convert. Returns: dict with structure matches to BaseDatasetIndex._schema.
def _as_document(self, dataset): # find tables. assert isinstance(dataset, Dataset) execute = object_session(dataset).connection().execute query = text() columns = u('\n').join( [u(' ').join(list(text_type(e) for e in t)) for t in execute(query, dataset_...
989,954
Expands terms of the dataset to the appropriate fields. It will parse the search phrase and return only the search term components that are applicable to a Dataset query. Args: terms (dict or str): Returns: dict: keys are field names, values are query strings
def _expand_terms(self, terms): ret = { 'keywords': list(), 'doc': list()} if not isinstance(terms, dict): stp = SearchTermParser() terms = stp.parse(terms, term_join=self.backend._and_join) if 'about' in terms: ret['doc'].a...
989,955
Converts given partition to the document indexed by FTS backend. Args: partition (orm.Partition): partition to convert. Returns: dict with structure matches to BasePartitionIndex._schema.
def _as_document(self, partition): schema = ' '.join( u'{} {} {} {} {}'.format( c.id, c.vid, c.name, c.altname, c.description) for c in partition.table.columns) values = '' for stat in partiti...
989,956
Expands partition terms to the appropriate fields. Args: terms (dict or str): Returns: dict: keys are field names, values are query strings
def _expand_terms(self, terms): ret = { 'keywords': list(), 'doc': list(), 'from': None, 'to': None} if not isinstance(terms, dict): stp = SearchTermParser() terms = stp.parse(terms, term_join=self.backend._and_join) ...
989,957
Lookups all of the place identifiers to get gvids Args: terms (str or unicode): terms to lookup Returns: str or list: given terms if no identifiers found, otherwise list of identifiers.
def _expand_place_ids(self, terms): place_vids = [] first_type = None for result in self.backend.identifier_index.search(terms): if not first_type: first_type = result.type if result.type != first_type: # Ignore ones that aren'...
989,958
Converts given identifier to the document indexed by FTS backend. Args: identifier (dict): identifier to convert. Dict contains at least 'identifier', 'type' and 'name' keys. Returns: dict with structure matches to BaseIdentifierIndex._schema.
def _as_document(self, identifier): return { 'identifier': u('{}').format(identifier['identifier']), 'type': u('{}').format(identifier['type']), 'name': u('{}').format(identifier['name']) }
989,959
Parses search term to Args: s (str): string with search term. or_join (callable): function to join 'OR' terms. Returns: dict: all of the terms grouped by marker. Key is a marker, value is a term. Example: >>> SearchTermParser().parse('table2 fro...
def parse(self, s, term_join=None): if not term_join: term_join = lambda x: '(' + ' OR '.join(x) + ')' toks = self.scan(s) # Examples: starting with this query: # diabetes from 2014 to 2016 source healthindicators.gov # Assume the first term is ABOUT, if ...
989,963
Returns table names found in the query. NOTE. This routine would use the sqlparse parse tree, but vnames don't parse very well. Args: statement (sqlparse.sql.Statement): parsed by sqlparse sql statement. Returns: list of str
def _get_table_names(statement): parts = statement.to_unicode().split() tables = set() for i, token in enumerate(parts): if token.lower() == 'from' or token.lower().endswith('join'): tables.add(parts[i + 1].rstrip(';')) return list(tables)
989,981
Installs partition's mpr to the database to allow to execute sql queries over mpr. Args: connection: partition (orm.Partition): materialize (boolean): if True, create generic table. If False create MED over mpr. Returns: str: name of the created table.
def install(self, connection, partition, table_name=None, index_columns=None, materialize=False, logger=None): raise NotImplementedError
989,983
Installs all partitons of the table and create view with union of all partitons. Args: connection: connection to database who stores mpr data. table (orm.Table):
def install_table(self, connection, table, logger = None): # first install all partitions of the table queries = [] query_tmpl = 'SELECT * FROM {}' for partition in table.partitions: partition.localize() installed_name = self.install(connection, partitio...
989,984
Creates virtual tables for all partitions found in the query and executes query. Args: query (str): sql query fetch (bool): fetch result from database if True, do not fetch overwise.
def query(self, connection, query, fetch=True): self.install_module(connection) statements = sqlparse.parse(sqlparse.format(query, strip_comments=True)) # install all partitions and replace table names in the query. # logger.debug('Finding and installing all partition...
989,985
Creates new index or opens existing. Args: root_dir (str): root dir where to find or create index. schema (whoosh.fields.Schema): schema of the index to create or open. index_name (str): name of the index. Returns: tuple ((whoosh.index.FileIndex, str)): first element is index, ...
def _init_index(root_dir, schema, index_name): index_dir = os.path.join(root_dir, index_name) try: if not os.path.exists(index_dir): os.makedirs(index_dir) return create_in(index_dir, schema), index_dir else: return open_dir(index_dir), index_dir exc...
990,147
Finds datasets by search phrase. Args: search_phrase (str or unicode): limit (int, optional): how many results to return. None means without limit. Returns: list of DatasetSearchResult instances.
def search(self, search_phrase, limit=None): query_string = self._make_query_from_terms(search_phrase) self._parsed_query = query_string schema = self._get_generic_schema() parser = QueryParser('doc', schema=schema) query = parser.parse(query_string) datasets ...
990,151
Creates a query for dataset from decomposed search terms. Args: terms (dict or unicode or string): Returns: tuple: First element is str with FTS query, second is parameters of the query.
def _make_query_from_terms(self, terms): expanded_terms = self._expand_terms(terms) cterms = '' if expanded_terms['doc']: cterms = self.backend._and_join(expanded_terms['doc']) if expanded_terms['keywords']: if cterms: cterms = self.ba...
990,152
Deletes given dataset from index. Args: vid (str): dataset vid.
def _delete(self, vid=None): assert vid is not None, 'vid argument can not be None.' writer = self.index.writer() writer.delete_by_term('vid', vid) writer.commit()
990,154
Deletes given identifier from index. Args: identifier (str): identifier of the document to delete.
def _delete(self, identifier=None): assert identifier is not None, 'identifier argument can not be None.' writer = self.index.writer() writer.delete_by_term('identifier', identifier) writer.commit()
990,159
Finds partitions by search phrase. Args: search_phrase (str or unicode): limit (int, optional): how many results to generate. None means without limit. Yields: PartitionSearchResult instances.
def search(self, search_phrase, limit=None): query_string = self._make_query_from_terms(search_phrase) self._parsed_query = query_string schema = self._get_generic_schema() parser = QueryParser('doc', schema=schema) query = parser.parse(query_string) logger.debu...
990,163
returns a FTS query for partition created from decomposed search terms. args: terms (dict or str): returns: str containing fts query.
def _make_query_from_terms(self, terms): expanded_terms = self._expand_terms(terms) cterms = '' if expanded_terms['doc']: cterms = self.backend._or_join(expanded_terms['doc']) keywords = expanded_terms['keywords'] frm_to = self._from_to_as_term(expanded_t...
990,164
Turns from and to into the query format. Args: frm (str): from year to (str): to year Returns: FTS query str with years range.
def _from_to_as_term(self, frm, to): # The wackiness with the conversion to int and str, and adding ' ', is because there # can't be a space between the 'TO' and the brackets in the time range # when one end is open from_year = '' to_year = '' def year_or_empty...
990,165
Converts given string to ascii ignoring non ascii. Args: s (text or binary): Returns: str:
def _to_ascii(s): # TODO: Always use unicode within ambry. from six import text_type, binary_type if isinstance(s, text_type): ascii_ = s.encode('ascii', 'ignore') elif isinstance(s, binary_type): ascii_ = s.decode('utf-8').encode('ascii', 'ignore') else: raise Exception...
990,214
Creates FDW or materialize view for given partition. Args: connection: connection to postgresql partition (orm.Partition): materialize (boolean): if True, create read-only table. If False create virtual table. Returns: str: name of the created table.
def install(self, connection, partition, table_name=None, columns=None, materialize=False, logger=None): partition.localize() self._add_partition(connection, partition) fdw_table = partition.vid view_table = '{}_v'.format(fdw_table) if materialize: ...
990,327
Create an index on the columns. Args: connection: partition (orm.Partition): columns (list of str):
def index(self, connection, partition, columns): query_tmpl = 'CREATE INDEX ON {table_name} ({column});' table_name = '{}_v'.format(partition.vid) for column in columns: query = query_tmpl.format(table_name=table_name, column=column) logger.debug('Creating postgr...
990,328
Returns name of the postgres table who stores mpr data. Args: connection: connection to postgres db who stores mpr data. partition (orm.Partition): Returns: str: Raises: MissingTableError: if partition table not found in the db.
def _get_mpr_table(self, connection, partition): # TODO: This is the first candidate for optimization. Add field to partition # with table name and update it while table creation. # Optimized version. # # return partition.mpr_table or raise exception # Not optim...
990,330
Creates FDW for the partition. Args: connection: partition (orm.Partition):
def _add_partition(self, connection, partition): logger.debug('Creating foreign table for partition.\n partition: {}'.format(partition.name)) with connection.cursor() as cursor: postgres_med.add_partition(cursor, partition.datafile, partition.vid)
990,331
Executes given query and returns result. Args: connection: connection to postgres database who stores mpr data. query (str): sql query fetch (boolean, optional): if True, fetch query result and return it. If False, do not fetch. Returns: iterable with qu...
def _execute(self, connection, query, fetch=True): # execute query with connection.cursor() as cursor: cursor.execute(query) if fetch: return cursor.fetchall() else: cursor.execute('COMMIT;')
990,333
Returns True if the relation exists in the postgres db. Otherwise returns False. Args: connection: connection to the postgres database that stores mpr data. relation (str): name of the table, view or materialized view. Note: relation means table, view or materialized view he...
def _relation_exists(cls, connection, relation): schema_name, table_name = relation.split('.') exists_query = with connection.cursor() as cursor: cursor.execute(exists_query, [schema_name, table_name]) result = cursor.fetchall() return result == [(1...
990,334
Creates a map between Windows and Olson timezone names. Args: windows_zones_xml: The CLDR XML mapping. Yields: (win32_name, olson_name, comment)
def create_win32tz_map(windows_zones_xml): coming_comment = None win32_name = None territory = None parser = genshi.input.XMLParser(StringIO(windows_zones_xml)) map_zones = {} zone_comments = {} for kind, data, _ in parser: if kind == genshi.core.START and str(data[0]) == "mapZone": attrs = ...
990,553
Collects all migrations and applies missed. Args: connection (sqlalchemy connection):
def migrate(connection, dsn): all_migrations = _get_all_migrations() logger.debug('Collected migrations: {}'.format(all_migrations)) for version, modname in all_migrations: if _is_missed(connection, version) and version <= SCHEMA_VERSION: logger.info('Missed migration: {} migration...
990,690
Creates migration file. Returns created file name. Args: name (str): name of the migration. Returns: str: name of the migration file.
def create_migration_template(name): assert name, 'Name of the migration can not be empty.' from . import migrations # Find next number # package = migrations prefix = package.__name__ + '.' all_versions = [] for importer, modname, ispkg in pkgutil.iter_modules(package.__path__, pr...
990,691
Returns database version. Args: connection (sqlalchemy connection): Raises: Assuming user_version pragma (sqlite case) and user_version table (postgresql case) exist because they created with the database creation. Returns: int: version of the database.
def get_stored_version(connection): if connection.engine.name == 'sqlite': version = connection.execute('PRAGMA user_version').fetchone()[0] if version == 0: raise VersionIsNotStored return version elif connection.engine.name == 'postgresql': try: r ...
990,692
Updates version in the db to the given version. Args: connection (sqlalchemy connection): sqlalchemy session where to update version. version (int): version of the migration.
def _update_version(connection, version): if connection.engine.name == 'sqlite': connection.execute('PRAGMA user_version = {}'.format(version)) elif connection.engine.name == 'postgresql': connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_SCHEMA_NAME))) conn...
990,695
Initializes database. Args: dsn (str): database connect string, 'sqlite://' for example. echo (boolean): echo parameter of the create_engine. engine_kwargs (dict): parameters to pass to the create_engine method of the Sqlalchemy.
def __init__(self, dsn, echo=False, foreign_keys=True, engine_kwargs=None, application_prefix='ambry'): self.dsn = dsn d = parse_url_to_dict(self.dsn) self.path = d['path'].replace('//', '/') self.driver = d['scheme'] self.engine_kwargs = engine_kwargs or {} ...
990,697
Get a partition by the id number. Arguments: id_ -- a partition id value Returns: A partitions.Partition object Throws: a Sqlalchemy exception if the partition either does not exist or is not unique Because this method works on the bund...
def partition(self, id_): from ..orm import Partition as OrmPartition from sqlalchemy import or_ from ..identity import PartialPartitionName if isinstance(id_, PartitionIdentity): id_ = id_.id_ elif isinstance(id_, PartialPartitionName): id_ = id...
990,835
Finds datasets by search phrase. Args: search_phrase (str or unicode): limit (int, optional): how many results to return. None means without limit. Returns: list of DatasetSearchResult instances.
def search(self, search_phrase, limit=None): # SQLite FTS can't find terms with `-`, therefore all hyphens were replaced with underscore # before save. Now to get appropriate result we need to replace all hyphens in the search phrase. # See http://stackoverflow.com/questions/3865733/how...
991,006
Converts dataset to document indexed by to FTS index. Args: dataset (orm.Dataset): dataset to convert. Returns: dict with structure matches to BaseDatasetIndex._schema.
def _as_document(self, dataset): assert isinstance(dataset, Dataset) doc = super(self.__class__, self)._as_document(dataset) # SQLite FTS can't find terms with `-`, replace it with underscore here and while searching. # See http://stackoverflow.com/questions/3865733/how-do-i-e...
991,007
Deletes given dataset from index. Args: vid (str): dataset vid.
def _delete(self, vid=None):
    """Deletes the dataset with the given vid from the FTS index.

    Args:
        vid (str): dataset vid.
    """
    # NOTE(review): the SQL literal inside text() appears to have been
    # stripped during extraction -- text() with no argument is invalid in
    # SQLAlchemy. Presumably the original was a
    # 'DELETE FROM dataset_index WHERE vid = :vid;' statement; confirm
    # against the repository before relying on this code.
    query = text()
    self.backend.library.database.connection.execute(query, vid=vid)
991,009
Generates vids of all indexed identifiers. Args: limit (int, optional): If not empty, the maximum number of results to return Generates: str: vid of the document.
def list_documents(self, limit=None): limit_str = '' if limit: try: limit_str = 'LIMIT {}'.format(int(limit)) except (TypeError, ValueError): pass query = ('SELECT identifier FROM identifier_index ' + limit_str) for row i...
991,013
Deletes given identifier from index. Args: identifier (str): identifier of the document to delete.
def _delete(self, identifier=None):
    """Deletes the given identifier from the FTS index.

    Args:
        identifier (str): identifier of the document to delete.
    """
    # NOTE(review): the SQL literal inside text() appears to have been
    # stripped during extraction -- text() with no argument is invalid in
    # SQLAlchemy. Presumably the original deleted the matching row from
    # identifier_index using an :identifier bind parameter; confirm
    # against the repository before relying on this code.
    query = text()
    self.backend.library.database.connection.execute(query, identifier=identifier)
991,015
Finds partitions by search phrase. Args: search_phrase (str or unicode): limit (int, optional): how many results to generate. None means without limit. Generates: PartitionSearchResult instances.
def search(self, search_phrase, limit=None): # SQLite FTS can't find terms with `-`, therefore all hyphens replaced with underscore before save. # Now to make proper query we need to replace all hyphens in the search phrase. # See http://stackoverflow.com/questions/3865733/how-do-i-esc...
991,018
Converts partition to document indexed by to FTS index. Args: partition (orm.Partition): partition to convert. Returns: dict with structure matches to BasePartitionIndex._schema.
def _as_document(self, partition): doc = super(self.__class__, self)._as_document(partition) # SQLite FTS can't find terms with `-`, replace it with underscore here and while searching. # See http://stackoverflow.com/questions/3865733/how-do-i-escape-the-character-in-sqlite-fts3-querie...
991,019
Creates a query for partition from decomposed search terms. Args: terms (dict or unicode or string): Returns: tuple of (str, dict): First element is str with FTS query, second is parameters of the query.
def _make_query_from_terms(self, terms): match_query = '' expanded_terms = self._expand_terms(terms) if expanded_terms['doc']: match_query = self.backend._and_join(expanded_terms['doc']) if expanded_terms['keywords']: if match_query: ma...
991,020
Sends email. Args: recipients (list of str): subject (str): message (str): attachments (list of str): list containing full paths (txt files only) to attach to email.
def send_email(recipients, subject, message, attachments=None): if not attachments: attachments = [] if os.path.exists(EMAIL_SETTINGS_FILE): email_settings = json.load(open(EMAIL_SETTINGS_FILE)) sender = email_settings.get('sender', 'ambry@localhost') use_tls = email_settin...
991,082
A set of rules that applies to one or more directories within a Layout. Args: name (str): The name of the Domain. config (dict): The configuration dictionary that defines the entities and paths for the current domain.
def __init__(self, config): self.name = config['name'] self.config = config self.entities = {} self.files = [] self.include = listify(self.config.get('include', [])) self.exclude = listify(self.config.get('exclude', [])) if self.include and self.exclud...
991,420
Determine whether the passed file matches the Entity. Args: f (File): The File instance to match against. Returns: the matched value if a match was found, otherwise None.
def match_file(self, f, update_file=False):
    """Determine whether file *f* matches this Entity.

    Args:
        f (File): the File instance to match against.
        update_file (bool): kept for interface compatibility; not used here.

    Returns:
        The matched value (converted via _astype) if a match was found,
        otherwise None.
    """
    if self.map_func is None:
        # Fall back to regex extraction from the file path.
        match = self.regex.search(f.path)
        extracted = None if match is None else match.group(1)
    else:
        extracted = self.map_func(f)
    return self._astype(extracted)
991,422
Return a domain if one already exists, or create a new one if not. Args: domain (str, dict): Can be one of: - The name of the Domain to return (fails if none exists) - A path to the Domain configuration file - A dictionary containing configuration inf...
def _get_or_load_domain(self, domain): if isinstance(domain, six.string_types): if domain in self.domains: return self.domains[domain] elif exists(domain): with open(domain, 'r') as fobj: domain = json.load(fobj) el...
991,424
Save the current Layout's index to a .json file. Args: filename (str): Filename to write to. Note: At the moment, this won't serialize directory-specific config files. This means reconstructed indexes will only work properly in cases where there aren't multiple layout specs...
def save_index(self, filename): data = {} for f in self.files.values(): entities = {v.entity.id: v.value for k, v in f.tags.items()} data[f.path] = {'domains': f.domains, 'entities': entities} with open(filename, 'w') as outfile: json.dump(data, outfi...
991,431
Return the count of unique values or files for the named entity. Args: entity (str): The name of the entity. files (bool): If True, counts the number of filenames that contain at least one value of the entity, rather than the number of unique values of th...
def count(self, entity, files=False):
    """Return the count of unique values or files for the named entity.

    Args:
        entity (str): the name of the entity.
        files (bool): if True, count filenames containing at least one value
            of the entity rather than the number of unique values.

    Returns:
        int: the requested count.
    """
    target = self._find_entity(entity)
    return target.count(files)
991,435
Return information for all Files tracked in the Layout as a pandas DataFrame. Args: kwargs: Optional keyword arguments passed on to get(). This allows one to easily select only a subset of files for export. Returns: A pandas DataFrame, where each row is a...
def as_data_frame(self, **kwargs): try: import pandas as pd except ImportError: raise ImportError("What are you doing trying to export a Layout " "as a pandas DataFrame when you don't have " "pandas installed? E...
991,436
Build FM-index Params: <iterator> | <generator> docs <str> filename
def build(self, docs=None, filename=None): if docs: if hasattr(docs, 'items'): for (idx, doc) in sorted(getattr(docs, 'items')(), key=lambda x: x[0]): self.fm.push_back(doc) else: for do...
991,748
Merge of filter search results Params: <str> | <Sequential> query <bool> _or Return: <list> computed_dids
def _merge_search_result(self, search_results, _or=False): all_docids = reduce(add, [list(x.keys()) for x in search_results]) if _or: return sorted(set(all_docids), key=all_docids.index) return [docid for docid in set(all_docids) if all_docids.count(docid) > 1]
991,749