repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_iso3_country_code | python | def get_iso3_country_code(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
countriesdata = cls.countriesdata(use_live=use_live)
countryupper = country.upper()
len_countryupper = len(countryupper)
if len_countryupper == 3:
if countryupper in countriesdata['countries']:
return countryupper
elif len_countryupper == 2:
iso3 = countriesdata['iso2iso3'].get(countryupper)
if iso3 is not None:
return iso3
iso3 = countriesdata['countrynames2iso3'].get(countryupper)
if iso3 is not None:
return iso3
for candidate in cls.expand_countryname_abbrevs(countryupper):
iso3 = countriesdata['countrynames2iso3'].get(candidate)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None | Get ISO3 code for cls. Only exact matches or None are returned.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 country code or None | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L449-L483 | [
"def countriesdata(cls, use_live=True):\n # type: (bool) -> List[Dict[Dict]]\n \"\"\"\n Read countries data from OCHA countries feed (falling back to file)\n\n Args:\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n\n Returns:\n List[Dict[Dict]]: Countries dictionaries\n \"\"\"\n if cls._countriesdata is None:\n countries = None\n if use_live:\n try:\n countries = hxl.data(cls._ochaurl)\n except IOError:\n logger.exception('Download from OCHA feed failed! Falling back to stored file.')\n if countries is None:\n countries = hxl.data(\n script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',\n Country), allow_local=True)\n cls.set_countriesdata(countries)\n return cls._countriesdata\n",
"def expand_countryname_abbrevs(cls, country):\n # type: (str) -> List[str]\n \"\"\"Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)\n\n Args:\n country (str): Country with abbreviation(s)to expand\n\n Returns:\n List[str]: Uppercase country name with abbreviation(s) expanded in various ways\n \"\"\"\n def replace_ensure_space(word, replace, replacement):\n return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()\n countryupper = country.upper()\n for abbreviation in cls.abbreviations:\n countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])\n candidates = [countryupper]\n for abbreviation in cls.multiple_abbreviations:\n if abbreviation in countryupper:\n for expanded in cls.multiple_abbreviations[abbreviation]:\n candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))\n return candidates\n"
] | class Country(object):
"""Location class with various methods to help with countries and regions. Uses OCHA countries feed which
supplies data in form:
::
ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
#meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
"""
abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
'ISL.': ['ISLAND', 'ISLANDS'],
'S.': ['SOUTH', 'STATES'],
'TERR.': ['TERRITORY', 'TERRITORIES']}
simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
_countriesdata = None
_ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
_ochaurl = _ochaurl_int
@classmethod
def _add_countriesdata(cls, iso3, country):
# type: (str, hxl.Row) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
iso3 (str): ISO3 code for country
country (hxl.Row): Country information
Returns:
None
"""
countryname = country.get('#country+name+preferred')
cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
iso2 = country.get('#country+code+v_iso2')
if iso2:
cls._countriesdata['iso2iso3'][iso2] = iso3
# different types so keys won't clash
cls._countriesdata['iso2iso3'][iso3] = iso2
m49 = country.get('#country+code+num+v_m49')
if m49:
m49 = int(m49)
cls._countriesdata['m49iso3'][m49] = iso3
# different types so keys won't clash
cls._countriesdata['m49iso3'][iso3] = m49
cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
regionname = country.get('#region+main+name+preferred')
sub_regionname = country.get('#region+sub+name+preferred')
intermediate_regionname = country.get('#region+intermediate+name+preferred')
regionid = country.get('#region+main+code')
if regionid:
regionid = int(regionid)
sub_regionid = country.get('#region+sub+code')
if sub_regionid:
sub_regionid = int(sub_regionid)
intermediate_regionid = country.get('#region+intermediate+code')
if intermediate_regionid:
intermediate_regionid = int(intermediate_regionid)
# region, subregion and intermediate region codes do not clash so only need one dict
def add_country_to_set(colname, idval, iso3):
value = cls._countriesdata[colname].get(idval)
if value is None:
value = set()
cls._countriesdata['regioncodes2countries'][idval] = value
value.add(iso3)
if regionname:
add_country_to_set('regioncodes2countries', regionid, iso3)
cls._countriesdata['regioncodes2names'][regionid] = regionname
cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
if sub_regionname:
add_country_to_set('regioncodes2countries', sub_regionid, iso3)
cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
if intermediate_regionname:
add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
intermediate_regionid
@classmethod
def set_countriesdata(cls, countries):
# type: (str) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
countries (str): Countries data in HTML format provided by UNStats
Returns:
None
"""
cls._countriesdata = dict()
cls._countriesdata['countries'] = dict()
cls._countriesdata['iso2iso3'] = dict()
cls._countriesdata['m49iso3'] = dict()
cls._countriesdata['countrynames2iso3'] = dict()
cls._countriesdata['regioncodes2countries'] = dict()
cls._countriesdata['regioncodes2names'] = dict()
cls._countriesdata['regionnames2codes'] = dict()
cls._countriesdata['aliases'] = dict()
for country in countries:
iso3 = country.get('#country+code+v_iso3')
if not iso3:
continue
iso3 = iso3.upper()
cls._add_countriesdata(iso3, country)
cls._countriesdata['countries'][iso3] = country.dictionary
def sort_list(colname):
for idval in cls._countriesdata[colname]:
cls._countriesdata[colname][idval] = \
sorted(list(cls._countriesdata[colname][idval]))
sort_list('regioncodes2countries')
@classmethod
def countriesdata(cls, use_live=True):
# type: (bool) -> List[Dict[Dict]]
"""
Read countries data from OCHA countries feed (falling back to file)
Args:
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
Returns:
List[Dict[Dict]]: Countries dictionaries
"""
if cls._countriesdata is None:
countries = None
if use_live:
try:
countries = hxl.data(cls._ochaurl)
except IOError:
logger.exception('Download from OCHA feed failed! Falling back to stored file.')
if countries is None:
countries = hxl.data(
script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
Country), allow_local=True)
cls.set_countriesdata(countries)
return cls._countriesdata
@classmethod
def set_ocha_url(cls, url=None):
# type: (str) -> None
"""
Set World Bank url from which to retrieve countries data
Args:
url (str): World Bank url from which to retrieve countries data. Defaults to internal value.
Returns:
None
"""
if url is None:
url = cls._ochaurl_int
cls._ochaurl = url
@classmethod
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country information from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: country information
"""
countriesdata = cls.countriesdata(use_live=use_live)
country = countriesdata['countries'].get(iso3.upper())
if country is not None:
return country
if exception is not None:
raise exception
return None
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
if countryinfo is not None:
return countryinfo.get('#country+name+preferred')
return None
@classmethod
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO2 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get ISO2 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO2 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso2 = countriesdata['iso2iso3'].get(iso3.upper())
if iso2 is not None:
return iso2
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 from ISO2 code
Args:
iso2 (str): ISO2 code for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = countriesdata['iso2iso3'].get(iso2.upper())
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information
"""
iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
return None
@classmethod
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_name_from_iso3(iso3, exception=exception)
return None
@classmethod
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
"""Get M49 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get M49 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[int]: M49 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
m49 = countriesdata['m49iso3'].get(iso3)
if m49 is not None:
return m49
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 from M49 code
Args:
m49 (int): M49 numeric code for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = countriesdata['m49iso3'].get(m49)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country name from M49 code
Args:
m49 (int): M49 numeric code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information
"""
iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_info_from_iso3(iso3, exception=exception)
return None
@classmethod
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from M49 code
Args:
m49 (int): M49 numeric code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_name_from_iso3(iso3, exception=exception)
return None
@classmethod
def expand_countryname_abbrevs(cls, country):
# type: (str) -> List[str]
"""Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)
Args:
country (str): Country with abbreviation(s)to expand
Returns:
List[str]: Uppercase country name with abbreviation(s) expanded in various ways
"""
def replace_ensure_space(word, replace, replacement):
return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()
countryupper = country.upper()
for abbreviation in cls.abbreviations:
countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
candidates = [countryupper]
for abbreviation in cls.multiple_abbreviations:
if abbreviation in countryupper:
for expanded in cls.multiple_abbreviations[abbreviation]:
candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
return candidates
@classmethod
def simplify_countryname(cls, country):
# type: (str) -> (str, List[str])
"""Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.
Args:
country (str): Country name to simplify
Returns:
Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
"""
countryupper = country.upper()
words = get_words_in_sentence(countryupper)
index = countryupper.find(',')
if index != -1:
countryupper = countryupper[:index]
index = countryupper.find(':')
if index != -1:
countryupper = countryupper[:index]
regex = re.compile('\(.+?\)')
countryupper = regex.sub('', countryupper)
remove = copy.deepcopy(cls.simplifications)
for simplification1, simplification2 in cls.abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
remove.append(simplification2)
for simplification1, simplifications in cls.multiple_abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
for simplification2 in simplifications:
remove.append(simplification2)
remove = '|'.join(remove)
regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
countryupper = regex.sub('', countryupper)
countryupper = countryupper.strip()
countryupper_words = get_words_in_sentence(countryupper)
if len(countryupper_words) > 1:
countryupper = countryupper_words[0]
if countryupper:
words.remove(countryupper)
return countryupper, words
@classmethod
@classmethod
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
"""Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second
showing if the match is exact or not.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = cls.get_iso3_country_code(country,
use_live=use_live) # don't put exception param here as we don't want it to throw
if iso3 is not None:
return iso3, True
def remove_matching_from_list(wordlist, word_or_part):
for word in wordlist:
if word_or_part in word:
wordlist.remove(word)
# fuzzy matching
expanded_country_candidates = cls.expand_countryname_abbrevs(country)
match_strength = 0
matches = set()
for countryname in sorted(countriesdata['countrynames2iso3']):
for candidate in expanded_country_candidates:
simplified_country, removed_words = cls.simplify_countryname(candidate)
if simplified_country in countryname:
words = get_words_in_sentence(countryname)
new_match_strength = 0
if simplified_country:
remove_matching_from_list(words, simplified_country)
new_match_strength += 32
for word in removed_words:
if word in countryname:
remove_matching_from_list(words, word)
new_match_strength += 4
else:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
for word in words:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
iso3 = countriesdata['countrynames2iso3'][countryname]
if new_match_strength > match_strength:
match_strength = new_match_strength
matches = set()
if new_match_strength == match_strength:
matches.add(iso3)
if len(matches) == 1 and match_strength > 16:
return matches.pop(), False
# regex lookup
for iso3, regex in countriesdata['aliases'].items():
index = re.search(regex, country.upper())
if index is not None:
return iso3, False
if exception is not None:
raise exception
return None, False
@classmethod
def get_countries_in_region(cls, region, use_live=True, exception=None):
# type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
"""Get countries (ISO3 codes) in region
Args:
region (Union[int,str]): Three digit UNStats M49 region code or region name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.
Returns:
List(str): Sorted list of ISO3 country names
"""
countriesdata = cls.countriesdata(use_live=use_live)
if isinstance(region, int):
regioncode = region
else:
regionupper = region.upper()
regioncode = countriesdata['regionnames2codes'].get(regionupper)
if regioncode is not None:
return countriesdata['regioncodes2countries'][regioncode]
if exception is not None:
raise exception
return list()
|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_iso3_country_code_fuzzy | python | def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = cls.get_iso3_country_code(country,
use_live=use_live) # don't put exception param here as we don't want it to throw
if iso3 is not None:
return iso3, True
def remove_matching_from_list(wordlist, word_or_part):
for word in wordlist:
if word_or_part in word:
wordlist.remove(word)
# fuzzy matching
expanded_country_candidates = cls.expand_countryname_abbrevs(country)
match_strength = 0
matches = set()
for countryname in sorted(countriesdata['countrynames2iso3']):
for candidate in expanded_country_candidates:
simplified_country, removed_words = cls.simplify_countryname(candidate)
if simplified_country in countryname:
words = get_words_in_sentence(countryname)
new_match_strength = 0
if simplified_country:
remove_matching_from_list(words, simplified_country)
new_match_strength += 32
for word in removed_words:
if word in countryname:
remove_matching_from_list(words, word)
new_match_strength += 4
else:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
for word in words:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
iso3 = countriesdata['countrynames2iso3'][countryname]
if new_match_strength > match_strength:
match_strength = new_match_strength
matches = set()
if new_match_strength == match_strength:
matches.add(iso3)
if len(matches) == 1 and match_strength > 16:
return matches.pop(), False
# regex lookup
for iso3, regex in countriesdata['aliases'].items():
index = re.search(regex, country.upper())
if index is not None:
return iso3, False
if exception is not None:
raise exception
return None, False | Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second
showing if the match is exact or not.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False). | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L486-L556 | null | class Country(object):
"""Location class with various methods to help with countries and regions. Uses OCHA countries feed which
supplies data in form:
::
ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
#meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
"""
abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
'ISL.': ['ISLAND', 'ISLANDS'],
'S.': ['SOUTH', 'STATES'],
'TERR.': ['TERRITORY', 'TERRITORIES']}
simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
_countriesdata = None
_ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
_ochaurl = _ochaurl_int
@classmethod
def _add_countriesdata(cls, iso3, country):
# type: (str, hxl.Row) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
iso3 (str): ISO3 code for country
country (hxl.Row): Country information
Returns:
None
"""
countryname = country.get('#country+name+preferred')
cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
iso2 = country.get('#country+code+v_iso2')
if iso2:
cls._countriesdata['iso2iso3'][iso2] = iso3
# different types so keys won't clash
cls._countriesdata['iso2iso3'][iso3] = iso2
m49 = country.get('#country+code+num+v_m49')
if m49:
m49 = int(m49)
cls._countriesdata['m49iso3'][m49] = iso3
# different types so keys won't clash
cls._countriesdata['m49iso3'][iso3] = m49
cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
regionname = country.get('#region+main+name+preferred')
sub_regionname = country.get('#region+sub+name+preferred')
intermediate_regionname = country.get('#region+intermediate+name+preferred')
regionid = country.get('#region+main+code')
if regionid:
regionid = int(regionid)
sub_regionid = country.get('#region+sub+code')
if sub_regionid:
sub_regionid = int(sub_regionid)
intermediate_regionid = country.get('#region+intermediate+code')
if intermediate_regionid:
intermediate_regionid = int(intermediate_regionid)
# region, subregion and intermediate region codes do not clash so only need one dict
def add_country_to_set(colname, idval, iso3):
value = cls._countriesdata[colname].get(idval)
if value is None:
value = set()
cls._countriesdata['regioncodes2countries'][idval] = value
value.add(iso3)
if regionname:
add_country_to_set('regioncodes2countries', regionid, iso3)
cls._countriesdata['regioncodes2names'][regionid] = regionname
cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
if sub_regionname:
add_country_to_set('regioncodes2countries', sub_regionid, iso3)
cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
if intermediate_regionname:
add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
intermediate_regionid
@classmethod
def set_countriesdata(cls, countries):
# type: (str) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
countries (str): Countries data in HTML format provided by UNStats
Returns:
None
"""
cls._countriesdata = dict()
cls._countriesdata['countries'] = dict()
cls._countriesdata['iso2iso3'] = dict()
cls._countriesdata['m49iso3'] = dict()
cls._countriesdata['countrynames2iso3'] = dict()
cls._countriesdata['regioncodes2countries'] = dict()
cls._countriesdata['regioncodes2names'] = dict()
cls._countriesdata['regionnames2codes'] = dict()
cls._countriesdata['aliases'] = dict()
for country in countries:
iso3 = country.get('#country+code+v_iso3')
if not iso3:
continue
iso3 = iso3.upper()
cls._add_countriesdata(iso3, country)
cls._countriesdata['countries'][iso3] = country.dictionary
def sort_list(colname):
for idval in cls._countriesdata[colname]:
cls._countriesdata[colname][idval] = \
sorted(list(cls._countriesdata[colname][idval]))
sort_list('regioncodes2countries')
@classmethod
def countriesdata(cls, use_live=True):
# type: (bool) -> List[Dict[Dict]]
"""
Read countries data from OCHA countries feed (falling back to file)
Args:
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
Returns:
List[Dict[Dict]]: Countries dictionaries
"""
if cls._countriesdata is None:
countries = None
if use_live:
try:
countries = hxl.data(cls._ochaurl)
except IOError:
logger.exception('Download from OCHA feed failed! Falling back to stored file.')
if countries is None:
countries = hxl.data(
script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
Country), allow_local=True)
cls.set_countriesdata(countries)
return cls._countriesdata
@classmethod
def set_ocha_url(cls, url=None):
    # type: (Optional[str]) -> None
    """
    Set the OCHA feed url from which to retrieve countries data

    Args:
        url (Optional[str]): OCHA url from which to retrieve countries data. Defaults to internal value.

    Returns:
        None
    """
    if url is None:
        # Reset to the built-in default feed url
        url = cls._ochaurl_int
    cls._ochaurl = url
@classmethod
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Look up the full country record for an ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: country information
    """
    data = cls.countriesdata(use_live=use_live)
    # Lookup is case-insensitive: keys are stored uppercase
    record = data['countries'].get(iso3.upper())
    if record is None and exception is not None:
        raise exception
    return record
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Look up the preferred country name for an ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    info = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    # The preferred-name column of the country record, or None if not found
    return None if info is None else info.get('#country+name+preferred')
@classmethod
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Translate an ISO3 code to its ISO2 code

    Args:
        iso3 (str): ISO3 code for which to get ISO2 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO2 code
    """
    # iso2iso3 maps both directions; 3-letter keys yield the ISO2 value
    mapping = cls.countriesdata(use_live=use_live)['iso2iso3']
    iso2 = mapping.get(iso3.upper())
    if iso2 is None and exception is not None:
        raise exception
    return iso2
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Translate an ISO2 code to its ISO3 code

    Args:
        iso2 (str): ISO2 code for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code
    """
    # iso2iso3 maps both directions; 2-letter keys yield the ISO3 value
    mapping = cls.countriesdata(use_live=use_live)['iso2iso3']
    iso3 = mapping.get(iso2.upper())
    if iso3 is None and exception is not None:
        raise exception
    return iso3
@classmethod
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Look up the full country record for an ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    # Resolve via ISO3: that is the key of the countries table
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
@classmethod
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Forward use_live too (previously dropped), matching get_country_info_from_iso2,
        # so the live/stored data choice is honoured end to end
        return cls.get_country_name_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
    """Get M49 from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get M49 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[int]: M49 code
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # Uppercase the key (previously missing): m49iso3 is keyed by uppercase ISO3
    # codes, and the sibling ISO lookups all normalise case the same way
    m49 = countriesdata['m49iso3'].get(iso3.upper())
    if m49 is not None:
        return m49
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Translate an M49 numeric code to its ISO3 code

    Args:
        m49 (int): M49 numeric code for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code
    """
    # m49iso3 maps both directions; int keys yield the ISO3 string
    mapping = cls.countriesdata(use_live=use_live)['m49iso3']
    iso3 = mapping.get(m49)
    if iso3 is None and exception is not None:
        raise exception
    return iso3
@classmethod
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country information from M49 code

    Args:
        m49 (int): M49 numeric code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Forward use_live too (previously dropped) so the live/stored
        # data choice is honoured end to end
        return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from M49 code

    Args:
        m49 (int): M49 numeric code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Forward use_live too (previously dropped) so the live/stored
        # data choice is honoured end to end
        return cls.get_country_name_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def expand_countryname_abbrevs(cls, country):
    # type: (str) -> List[str]
    """Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)

    Args:
        country (str): Country with abbreviation(s) to expand

    Returns:
        List[str]: Uppercase country name with abbreviation(s) expanded in various ways
    """
    def substitute(text, abbrev, expansion):
        # Insert a trailing space so token boundaries survive, then tidy whitespace
        return text.replace(abbrev, '%s ' % expansion).replace('  ', ' ').strip()

    name = country.upper()
    # Unambiguous abbreviations have exactly one expansion each
    for abbrev, expansion in cls.abbreviations.items():
        name = substitute(name, abbrev, expansion)
    variants = [name]
    # Ambiguous abbreviations yield one extra candidate per possible expansion
    for abbrev, expansions in cls.multiple_abbreviations.items():
        if abbrev in name:
            variants.extend(substitute(name, abbrev, expansion) for expansion in expansions)
    return variants
@classmethod
def simplify_countryname(cls, country):
    # type: (str) -> (str, List[str])
    """Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.

    Args:
        country (str): Country name to simplify

    Returns:
        Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
    """
    countryupper = country.upper()
    words = get_words_in_sentence(countryupper)
    # Keep only the part before any comma or colon (eg. 'CONGO, DEM. REP.' -> 'CONGO')
    index = countryupper.find(',')
    if index != -1:
        countryupper = countryupper[:index]
    index = countryupper.find(':')
    if index != -1:
        countryupper = countryupper[:index]
    # Drop any parenthesised qualifiers. Raw string fixes the previous invalid
    # '\(' escape sequence warning; the pattern itself is unchanged.
    regex = re.compile(r'\(.+?\)')
    countryupper = regex.sub('', countryupper)
    # Collect every descriptive word to strip: static simplifications plus all
    # abbreviation expansions; also delete the abbreviated forms in place
    remove = copy.deepcopy(cls.simplifications)
    for simplification1, simplification2 in cls.abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        remove.append(simplification2)
    for simplification1, simplifications in cls.multiple_abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        for simplification2 in simplifications:
            remove.append(simplification2)
    # Strip all collected words in a single whole-word, case-insensitive pass
    remove = '|'.join(remove)
    regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
    countryupper = regex.sub('', countryupper)
    countryupper = countryupper.strip()
    countryupper_words = get_words_in_sentence(countryupper)
    if len(countryupper_words) > 1:
        # More than one word survived: keep just the first as the simplified name
        countryupper = countryupper_words[0]
    if countryupper:
        # Report all original words except the one kept as the simplified name
        words.remove(countryupper)
    return countryupper, words
@classmethod
def get_iso3_country_code(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 code for cls. Only exact matches or None are returned.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 country code or None
    """
    data = cls.countriesdata(use_live=use_live)
    name = country.upper()
    # A 3-letter input may itself be an ISO3 code
    if len(name) == 3 and name in data['countries']:
        return name
    # A 2-letter input may be an ISO2 code
    if len(name) == 2:
        iso3 = data['iso2iso3'].get(name)
        if iso3 is not None:
            return iso3
    # Otherwise try exact country-name matches, verbatim first
    iso3 = data['countrynames2iso3'].get(name)
    if iso3 is not None:
        return iso3
    # ... then with abbreviations expanded in every supported way
    for candidate in cls.expand_countryname_abbrevs(name):
        iso3 = data['countrynames2iso3'].get(candidate)
        if iso3 is not None:
            return iso3
    if exception is not None:
        raise exception
    return None
@classmethod
def get_countries_in_region(cls, region, use_live=True, exception=None):
    # type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
    """Get countries (ISO3 codes) in region

    Fixes: the decorator was duplicated (@classmethod applied twice), and an
    unknown numeric region code raised KeyError instead of following the
    documented not-found behaviour (raise the given exception or return []).

    Args:
        region (Union[int,str]): Three digit UNStats M49 region code or region name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.

    Returns:
        List[str]: Sorted list of ISO3 country names
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    if isinstance(region, int):
        regioncode = region
    else:
        # Region names are stored uppercase
        regioncode = countriesdata['regionnames2codes'].get(region.upper())
    if regioncode is not None:
        countries = countriesdata['regioncodes2countries'].get(regioncode)
        if countries is not None:
            return countries
    if exception is not None:
        raise exception
    return list()
|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_countries_in_region | python | def get_countries_in_region(cls, region, use_live=True, exception=None):
# type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
countriesdata = cls.countriesdata(use_live=use_live)
if isinstance(region, int):
regioncode = region
else:
regionupper = region.upper()
regioncode = countriesdata['regionnames2codes'].get(regionupper)
if regioncode is not None:
return countriesdata['regioncodes2countries'][regioncode]
if exception is not None:
raise exception
return list() | Get countries (ISO3 codes) in region
Args:
region (Union[int,str]): Three digit UNStats M49 region code or region name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.
Returns:
List(str): Sorted list of ISO3 country names | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L559-L583 | [
"def countriesdata(cls, use_live=True):\n # type: (bool) -> List[Dict[Dict]]\n \"\"\"\n Read countries data from OCHA countries feed (falling back to file)\n\n Args:\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n\n Returns:\n List[Dict[Dict]]: Countries dictionaries\n \"\"\"\n if cls._countriesdata is None:\n countries = None\n if use_live:\n try:\n countries = hxl.data(cls._ochaurl)\n except IOError:\n logger.exception('Download from OCHA feed failed! Falling back to stored file.')\n if countries is None:\n countries = hxl.data(\n script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',\n Country), allow_local=True)\n cls.set_countriesdata(countries)\n return cls._countriesdata\n"
] | class Country(object):
"""Location class with various methods to help with countries and regions. Uses OCHA countries feed which
supplies data in form:
::
ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
#meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
"""
abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
'ISL.': ['ISLAND', 'ISLANDS'],
'S.': ['SOUTH', 'STATES'],
'TERR.': ['TERRITORY', 'TERRITORIES']}
simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
_countriesdata = None
_ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
_ochaurl = _ochaurl_int
@classmethod
def _add_countriesdata(cls, iso3, country):
# type: (str, hxl.Row) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
iso3 (str): ISO3 code for country
country (hxl.Row): Country information
Returns:
None
"""
countryname = country.get('#country+name+preferred')
cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
iso2 = country.get('#country+code+v_iso2')
if iso2:
cls._countriesdata['iso2iso3'][iso2] = iso3
# different types so keys won't clash
cls._countriesdata['iso2iso3'][iso3] = iso2
m49 = country.get('#country+code+num+v_m49')
if m49:
m49 = int(m49)
cls._countriesdata['m49iso3'][m49] = iso3
# different types so keys won't clash
cls._countriesdata['m49iso3'][iso3] = m49
cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
regionname = country.get('#region+main+name+preferred')
sub_regionname = country.get('#region+sub+name+preferred')
intermediate_regionname = country.get('#region+intermediate+name+preferred')
regionid = country.get('#region+main+code')
if regionid:
regionid = int(regionid)
sub_regionid = country.get('#region+sub+code')
if sub_regionid:
sub_regionid = int(sub_regionid)
intermediate_regionid = country.get('#region+intermediate+code')
if intermediate_regionid:
intermediate_regionid = int(intermediate_regionid)
# region, subregion and intermediate region codes do not clash so only need one dict
def add_country_to_set(colname, idval, iso3):
value = cls._countriesdata[colname].get(idval)
if value is None:
value = set()
cls._countriesdata['regioncodes2countries'][idval] = value
value.add(iso3)
if regionname:
add_country_to_set('regioncodes2countries', regionid, iso3)
cls._countriesdata['regioncodes2names'][regionid] = regionname
cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
if sub_regionname:
add_country_to_set('regioncodes2countries', sub_regionid, iso3)
cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
if intermediate_regionname:
add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
intermediate_regionid
@classmethod
def set_countriesdata(cls, countries):
# type: (str) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
countries (str): Countries data in HTML format provided by UNStats
Returns:
None
"""
cls._countriesdata = dict()
cls._countriesdata['countries'] = dict()
cls._countriesdata['iso2iso3'] = dict()
cls._countriesdata['m49iso3'] = dict()
cls._countriesdata['countrynames2iso3'] = dict()
cls._countriesdata['regioncodes2countries'] = dict()
cls._countriesdata['regioncodes2names'] = dict()
cls._countriesdata['regionnames2codes'] = dict()
cls._countriesdata['aliases'] = dict()
for country in countries:
iso3 = country.get('#country+code+v_iso3')
if not iso3:
continue
iso3 = iso3.upper()
cls._add_countriesdata(iso3, country)
cls._countriesdata['countries'][iso3] = country.dictionary
def sort_list(colname):
for idval in cls._countriesdata[colname]:
cls._countriesdata[colname][idval] = \
sorted(list(cls._countriesdata[colname][idval]))
sort_list('regioncodes2countries')
@classmethod
def countriesdata(cls, use_live=True):
# type: (bool) -> List[Dict[Dict]]
"""
Read countries data from OCHA countries feed (falling back to file)
Args:
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
Returns:
List[Dict[Dict]]: Countries dictionaries
"""
if cls._countriesdata is None:
countries = None
if use_live:
try:
countries = hxl.data(cls._ochaurl)
except IOError:
logger.exception('Download from OCHA feed failed! Falling back to stored file.')
if countries is None:
countries = hxl.data(
script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
Country), allow_local=True)
cls.set_countriesdata(countries)
return cls._countriesdata
@classmethod
def set_ocha_url(cls, url=None):
# type: (str) -> None
"""
Set World Bank url from which to retrieve countries data
Args:
url (str): World Bank url from which to retrieve countries data. Defaults to internal value.
Returns:
None
"""
if url is None:
url = cls._ochaurl_int
cls._ochaurl = url
@classmethod
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country information from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: country information
"""
countriesdata = cls.countriesdata(use_live=use_live)
country = countriesdata['countries'].get(iso3.upper())
if country is not None:
return country
if exception is not None:
raise exception
return None
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
if countryinfo is not None:
return countryinfo.get('#country+name+preferred')
return None
@classmethod
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO2 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get ISO2 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO2 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso2 = countriesdata['iso2iso3'].get(iso3.upper())
if iso2 is not None:
return iso2
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 from ISO2 code
Args:
iso2 (str): ISO2 code for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = countriesdata['iso2iso3'].get(iso2.upper())
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information
"""
iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
return None
@classmethod
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_name_from_iso3(iso3, exception=exception)
return None
@classmethod
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
"""Get M49 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get M49 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[int]: M49 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
m49 = countriesdata['m49iso3'].get(iso3)
if m49 is not None:
return m49
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 from M49 code
Args:
m49 (int): M49 numeric code for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = countriesdata['m49iso3'].get(m49)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country name from M49 code
Args:
m49 (int): M49 numeric code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information
"""
iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_info_from_iso3(iso3, exception=exception)
return None
@classmethod
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from M49 code
Args:
m49 (int): M49 numeric code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_name_from_iso3(iso3, exception=exception)
return None
@classmethod
def expand_countryname_abbrevs(cls, country):
# type: (str) -> List[str]
"""Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)
Args:
country (str): Country with abbreviation(s)to expand
Returns:
List[str]: Uppercase country name with abbreviation(s) expanded in various ways
"""
def replace_ensure_space(word, replace, replacement):
return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()
countryupper = country.upper()
for abbreviation in cls.abbreviations:
countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
candidates = [countryupper]
for abbreviation in cls.multiple_abbreviations:
if abbreviation in countryupper:
for expanded in cls.multiple_abbreviations[abbreviation]:
candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
return candidates
@classmethod
def simplify_countryname(cls, country):
# type: (str) -> (str, List[str])
"""Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.
Args:
country (str): Country name to simplify
Returns:
Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
"""
countryupper = country.upper()
words = get_words_in_sentence(countryupper)
index = countryupper.find(',')
if index != -1:
countryupper = countryupper[:index]
index = countryupper.find(':')
if index != -1:
countryupper = countryupper[:index]
regex = re.compile('\(.+?\)')
countryupper = regex.sub('', countryupper)
remove = copy.deepcopy(cls.simplifications)
for simplification1, simplification2 in cls.abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
remove.append(simplification2)
for simplification1, simplifications in cls.multiple_abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
for simplification2 in simplifications:
remove.append(simplification2)
remove = '|'.join(remove)
regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
countryupper = regex.sub('', countryupper)
countryupper = countryupper.strip()
countryupper_words = get_words_in_sentence(countryupper)
if len(countryupper_words) > 1:
countryupper = countryupper_words[0]
if countryupper:
words.remove(countryupper)
return countryupper, words
@classmethod
def get_iso3_country_code(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 code for cls. Only exact matches or None are returned.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 country code or None
"""
countriesdata = cls.countriesdata(use_live=use_live)
countryupper = country.upper()
len_countryupper = len(countryupper)
if len_countryupper == 3:
if countryupper in countriesdata['countries']:
return countryupper
elif len_countryupper == 2:
iso3 = countriesdata['iso2iso3'].get(countryupper)
if iso3 is not None:
return iso3
iso3 = countriesdata['countrynames2iso3'].get(countryupper)
if iso3 is not None:
return iso3
for candidate in cls.expand_countryname_abbrevs(countryupper):
iso3 = countriesdata['countrynames2iso3'].get(candidate)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
    """Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second
    showing if the match is exact or not.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # Exact matching is tried first
    iso3 = cls.get_iso3_country_code(country,
                                     use_live=use_live)  # don't put exception param here as we don't want it to throw
    if iso3 is not None:
        return iso3, True

    def remove_matching_from_list(wordlist, word_or_part):
        # Drop every word that contains the given fragment.
        # NOTE(review): removes while iterating, so consecutive matches can be
        # skipped - presumably acceptable for this scoring heuristic; confirm.
        for word in wordlist:
            if word_or_part in word:
                wordlist.remove(word)

    # fuzzy matching: score every known country name against every
    # abbreviation-expanded form of the input, keeping the best scorers
    expanded_country_candidates = cls.expand_countryname_abbrevs(country)
    match_strength = 0
    matches = set()
    for countryname in sorted(countriesdata['countrynames2iso3']):
        for candidate in expanded_country_candidates:
            simplified_country, removed_words = cls.simplify_countryname(candidate)
            if simplified_country in countryname:
                words = get_words_in_sentence(countryname)
                new_match_strength = 0
                if simplified_country:
                    # Core name matched: large positive weight
                    remove_matching_from_list(words, simplified_country)
                    new_match_strength += 32
                for word in removed_words:
                    if word in countryname:
                        # Descriptive word also present in the known name: small bonus
                        remove_matching_from_list(words, word)
                        new_match_strength += 4
                    else:
                        # Missing word penalised heavily if it is a major
                        # differentiator (eg. NORTH vs SOUTH), lightly otherwise
                        if word in cls.major_differentiators:
                            new_match_strength -= 16
                        else:
                            new_match_strength -= 1
                for word in words:
                    # Words left over in the known name count against the match
                    if word in cls.major_differentiators:
                        new_match_strength -= 16
                    else:
                        new_match_strength -= 1
                iso3 = countriesdata['countrynames2iso3'][countryname]
                if new_match_strength > match_strength:
                    # New best score: restart the candidate set
                    match_strength = new_match_strength
                    matches = set()
                if new_match_strength == match_strength:
                    matches.add(iso3)
    # Accept only an unambiguous winner with a sufficiently strong score
    if len(matches) == 1 and match_strength > 16:
        return matches.pop(), False
    # regex lookup: fall back to each country's alias pattern
    for iso3, regex in countriesdata['aliases'].items():
        index = re.search(regex, country.upper())
        if index is not None:
            return iso3, False
    if exception is not None:
        raise exception
    return None, False
@classmethod
|
paylogic/halogen | halogen/exceptions.py | ValidationError.to_dict | python | def to_dict(self):
def exception_to_dict(e):
try:
return e.to_dict()
except AttributeError:
return {
"type": e.__class__.__name__,
"error": str(e),
}
result = {
"errors": [exception_to_dict(e) for e in self.errors]
}
if self.index is not None:
result["index"] = self.index
else:
result["attr"] = self.attr if self.attr is not None else "<root>"
return result | Return a dictionary representation of the error.
:return: A dict with the keys:
- attr: Attribute which contains the error, or "<root>" if it refers to the schema root.
- errors: A list of dictionary representations of the errors. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/exceptions.py#L18-L41 | null | class ValidationError(Exception):
"""Validation failed."""
def __init__(self, errors, attr=None, index=None):
self.attr = attr
self.index = index
if isinstance(errors, list):
self.errors = errors
else:
self.errors = [errors]
def __str__(self):
return json.dumps(self.to_dict())
|
paylogic/halogen | halogen/schema.py | _get_context | python | def _get_context(argspec, kwargs):
if argspec.keywords is not None:
return kwargs
return dict((arg, kwargs[arg]) for arg in argspec.args if arg in kwargs) | Prepare a context for the serialization.
:param argspec: The argspec of the serialization function.
:param kwargs: Dict with context
:return: Keywords arguments that function can accept. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/schema.py#L29-L38 | null | """Halogen schema primitives."""
import sys
import inspect
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
from ordereddict import OrderedDict # noqa
from cached_property import cached_property
from halogen import types
from halogen import exceptions
PY2 = sys.version_info[0] == 2
if not PY2: # pragma: no cover
string_types = (str,)
else: # pragma: no cover
string_types = (str, unicode)
def BYPASS(value):
"""Bypass getter."""
return value
class Accessor(object):
"""Object that encapsulates the getter and the setter of the attribute."""
def __init__(self, getter=None, setter=None):
"""Initialize an Accessor object."""
self.getter = getter
self.setter = setter
@cached_property
def _getter_argspec(self):
return inspect.getargspec(self.getter)
def get(self, obj, **kwargs):
"""Get an attribute from a value.
:param obj: Object to get the attribute value from.
:return: Value of object's attribute.
"""
assert self.getter is not None, "Getter accessor is not specified."
if callable(self.getter):
return self.getter(obj, **_get_context(self._getter_argspec, kwargs))
assert isinstance(self.getter, string_types), "Accessor must be a function or a dot-separated string."
for attr in self.getter.split("."):
if isinstance(obj, dict):
obj = obj[attr]
else:
obj = getattr(obj, attr)
if callable(obj):
return obj()
return obj
def set(self, obj, value):
"""Set value for obj's attribute.
:param obj: Result object or dict to assign the attribute to.
:param value: Value to be assigned.
"""
assert self.setter is not None, "Setter accessor is not specified."
if callable(self.setter):
return self.setter(obj, value)
assert isinstance(self.setter, string_types), "Accessor must be a function or a dot-separated string."
def _set(obj, attr, value):
if isinstance(obj, dict):
obj[attr] = value
else:
setattr(obj, attr, value)
return value
path = self.setter.split(".")
for attr in path[:-1]:
obj = _set(obj, attr, {})
_set(obj, path[-1], value)
def __repr__(self):
"""Accessor representation."""
return "<{0} getter='{1}', setter='{2}'>".format(
self.__class__.__name__,
self.getter,
self.setter,
)
class Attr(object):
"""Schema attribute."""
creation_counter = 0
def __init__(self, attr_type=None, attr=None, required=True, **kwargs):
"""Attribute constructor.
:param attr_type: Type, Schema or constant that does the type conversion of the attribute.
:param attr: Attribute name, dot-separated attribute path or an `Accessor` instance.
:param required: Is attribute required to be present.
"""
self.attr_type = attr_type or types.Type()
self.attr = attr
self.required = required
if "default" in kwargs:
self.default = kwargs["default"]
self.creation_counter = Attr.creation_counter
Attr.creation_counter += 1
@property
def compartment(self):
"""The key of the compartment this attribute will be placed into (for example: _links or _embedded)."""
return None
@property
def key(self):
"""The key of the this attribute will be placed into (within it's compartment)."""
return self.name
@cached_property
def accessor(self):
"""Get an attribute's accessor with the getter and the setter.
:return: `Accessor` instance.
"""
if isinstance(self.attr, Accessor):
return self.attr
if callable(self.attr):
return Accessor(getter=self.attr)
attr = self.attr or self.name
return Accessor(getter=attr, setter=attr)
@cached_property
def _attr_type_serialize_argspec(self):
return inspect.getargspec(self.attr_type.serialize)
def serialize(self, value, **kwargs):
"""Serialize the attribute of the input data.
Gets the attribute value with accessor and converts it using the
type serialization. Schema will place this serialized value into
corresponding compartment of the HAL structure with the name of the
attribute as a key.
:param value: Value to get the attribute value from.
:return: Serialized attribute value.
"""
if types.Type.is_type(self.attr_type):
try:
value = self.accessor.get(value, **kwargs)
except (AttributeError, KeyError):
if not hasattr(self, "default") and self.required:
raise
value = self.default() if callable(self.default) else self.default
return self.attr_type.serialize(value, **_get_context(self._attr_type_serialize_argspec, kwargs))
return self.attr_type
def deserialize(self, value, **kwargs):
"""Deserialize the attribute from a HAL structure.
Get the value from the HAL structure from the attribute's compartment
using the attribute's name as a key, convert it using the attribute's
type. Schema will either return it to parent schema or will assign
to the output value if specified using the attribute's accessor setter.
:param value: HAL structure to get the value from.
:return: Deserialized attribute value.
:raises: ValidationError.
"""
compartment = value
if self.compartment is not None:
compartment = value[self.compartment]
try:
value = self.accessor.get(compartment, **kwargs)
except (KeyError, AttributeError):
if not hasattr(self, "default") and self.required:
raise
return self.default() if callable(self.default) else self.default
return self.attr_type.deserialize(value, **kwargs)
def __repr__(self):
"""Attribute representation."""
return "<{0} '{1}'>".format(
self.__class__.__name__,
self.name,
)
def setter(self, setter):
"""Set an attribute setter accessor function.
Can be used as a decorator:
@total.setter
def set_total(obj, value):
obj.total = value
"""
self.accessor.setter = setter
def __call__(self, getter):
"""Decorate a getter accessor function."""
self.name = getter.__name__
self.accessor.getter = getter
return self
def attr(*args, **kwargs):
"""Attribute as a decorator alias.
Decorates the getter function like:
@halogen.attr(AmountType(), default=None)
def total(obj):
return sum((item.amount for item in obj.items), 0)
This is identical to using attr with a lambda, but more practical in case of larger functions:
total = halogen.Attr(AmountType(), default=None, attr=lambda obj: sum((item.amount for item in obj.items), 0))
"""
return Attr(*args, **kwargs)
class Link(Attr):
"""Link attribute of a schema."""
def __init__(self, attr_type=None, attr=None, key=None, required=True,
curie=None, templated=None, type=None, deprecation=None):
"""Link constructor.
:param attr_type: Type, Schema or constant that does the type conversion of the attribute.
:param attr: Attribute name, dot-separated attribute path or an `Accessor` instance.
:param key: Key of the link in the _links compartment, defaults to name.
:param required: Is this link required to be present.
:param curie: Link namespace prefix (e.g. "<prefix>:<name>") or Curie object.
:param templated: Is this link templated.
:param deprecation: Link deprecation URL.
:param type: Its value is a string used as a hint to indicate the media type expected when dereferencing
the target resource.
"""
if not types.Type.is_type(attr_type):
if attr_type is not None:
attr = BYPASS
attrs = {
'templated': templated,
'type': type,
'deprecation': deprecation,
}
class LinkSchema(Schema):
href = Attr(attr_type=attr_type, attr=BYPASS)
if attrs['templated'] is not None:
templated = Attr(attr=lambda value: templated)
if attrs['type'] is not None:
type = Attr(attr=lambda value: type)
if attrs['deprecation'] is not None:
deprecation = Attr(attr=lambda value: deprecation)
attr_type = LinkSchema
super(Link, self).__init__(attr_type=attr_type, attr=attr, required=required)
self.curie = curie
self._key = key
@property
def compartment(self):
"""Return the compartment in which Links are placed (_links)."""
return "_links"
@property
def key(self):
"""The key of the this attribute will be placed into (within it's compartment).
:note: Links support curies.
"""
name = self._key or self.name
if self.curie is None:
return name
return ":".join((self.curie.name, name))
def deserialize(self, value):
"""Link doesn't support deserialization."""
raise NotImplementedError
class LinkList(Link):
"""List of links attribute of a schema."""
def __init__(self, attr_type=None, attr=None, required=True, curie=None):
"""LinkList constructor.
:param attr_type: Type, Schema or constant that does item type conversion of the attribute.
:param attr: Attribute name, dot-separated attribute path or an `Accessor` instance.
:param required: Is this list of links required to be present.
:param curie: Link namespace prefix (e.g. "<prefix>:<name>") or Curie object.
"""
super(LinkList, self).__init__(attr_type=attr_type, attr=attr, required=required, curie=curie)
self.attr_type = types.List(self.attr_type)
class Curie(object):
"""Curie object."""
def __init__(self, name, href, templated=None, type=None):
"""Curie constructor.
:param href: Curie link href value.
:param templated: Is this curie link templated.
:param type: Its value is a string used as a hint to indicate the media type expected when dereferencing
the target resource.
"""
self.name = name
self.href = href
if templated is not None:
self.templated = templated
if type is not None:
self.type = type
class Embedded(Attr):
"""Embedded attribute of schema."""
def __init__(self, attr_type=None, attr=None, curie=None, required=True):
"""Embedded constructor.
:param attr_type: Type, Schema or constant that does the type conversion of the attribute.
:param attr: Attribute name, dot-separated attribute path or an `Accessor` instance.
:param curie: The curie used for this embedded attribute.
"""
super(Embedded, self).__init__(attr_type=attr_type, attr=attr, required=required)
self.curie = curie
@property
def compartment(self):
"""Embedded objects are placed in the _objects."""
return "_embedded"
@property
def key(self):
"""Embedded supports curies."""
if self.curie is None:
return self.name
return ":".join((self.curie.name, self.name))
class _Schema(types.Type):
"""Type for creating schema."""
def __new__(cls, **kwargs):
"""Create schema from keyword arguments."""
schema = type("Schema", (cls, ), {"__doc__": cls.__doc__})
schema.__class_attrs__ = OrderedDict()
schema.__attrs__ = OrderedDict()
for name, attr in kwargs.items():
if not hasattr(attr, "name"):
attr.name = name
schema.__class_attrs__[attr.name] = attr
schema.__attrs__[attr.name] = attr
return schema
@classmethod
def serialize(cls, value, **kwargs):
result = OrderedDict()
for attr in cls.__attrs__.values():
compartment = result
if attr.compartment is not None:
compartment = result.setdefault(attr.compartment, OrderedDict())
try:
compartment[attr.key] = attr.serialize(value, **kwargs)
except (AttributeError, KeyError):
if attr.required:
raise
if attr.compartment is not None and len(compartment) == 0:
del result[attr.compartment]
return result
@classmethod
def deserialize(cls, value, output=None, **kwargs):
"""Deserialize the HAL structure into the output value.
:param value: Dict of already loaded json which will be deserialized by schema attributes.
:param output: If present, the output object will be updated instead of returning the deserialized data.
:returns: Dict of deserialized value for attributes. Where key is name of schema's attribute and value is
deserialized value from value dict.
:raises: ValidationError.
"""
errors = []
result = {}
for attr in cls.__attrs__.values():
try:
result[attr.name] = attr.deserialize(value, **kwargs)
except NotImplementedError:
# Links don't support deserialization
continue
except ValueError as e:
errors.append(exceptions.ValidationError(e, attr.name))
except exceptions.ValidationError as e:
e.attr = attr.name
errors.append(e)
except (KeyError, AttributeError):
if attr.required:
errors.append(exceptions.ValidationError("Missing attribute.", attr.name))
if errors:
raise exceptions.ValidationError(errors)
if output is None:
return result
for attr in cls.__attrs__.values():
if attr.name in result:
attr.accessor.set(output, result[attr.name])
class _SchemaType(type):
"""A type used to create Schemas."""
def __init__(cls, name, bases, clsattrs):
"""Create a new _SchemaType."""
cls.__class_attrs__ = OrderedDict()
curies = set([])
attrs = [(key, value) for key, value in clsattrs.items() if isinstance(value, Attr)]
attrs.sort(key=lambda attr: attr[1].creation_counter)
# Collect the attributes and set their names.
for name, attr in attrs:
delattr(cls, name)
cls.__class_attrs__[name] = attr
if not hasattr(attr, "name"):
attr.name = name
if isinstance(attr, (Link, Embedded)):
curie = getattr(attr, "curie", None)
if curie is not None:
curies.add(curie)
# Collect CURIEs and create the link attribute
if curies:
link = LinkList(
Schema(
href=Attr(),
name=Attr(),
templated=Attr(required=False),
type=Attr(required=False),
),
attr=lambda value: list(curies),
required=False,
)
link.name = "curies"
cls.__class_attrs__[link.name] = link
cls.__attrs__ = OrderedDict()
for base in reversed(cls.__mro__):
cls.__attrs__.update(getattr(base, "__class_attrs__", OrderedDict()))
Schema = _SchemaType("Schema", (_Schema, ), {"__doc__": _Schema.__doc__})
"""Schema is the basic class used for setting up schemas."""
|
paylogic/halogen | halogen/schema.py | Accessor.get | python | def get(self, obj, **kwargs):
assert self.getter is not None, "Getter accessor is not specified."
if callable(self.getter):
return self.getter(obj, **_get_context(self._getter_argspec, kwargs))
assert isinstance(self.getter, string_types), "Accessor must be a function or a dot-separated string."
for attr in self.getter.split("."):
if isinstance(obj, dict):
obj = obj[attr]
else:
obj = getattr(obj, attr)
if callable(obj):
return obj()
return obj | Get an attribute from a value.
:param obj: Object to get the attribute value from.
:return: Value of object's attribute. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/schema.py#L54-L75 | [
"def _get_context(argspec, kwargs):\n \"\"\"Prepare a context for the serialization.\n\n :param argspec: The argspec of the serialization function.\n :param kwargs: Dict with context\n :return: Keywords arguments that function can accept.\n \"\"\"\n if argspec.keywords is not None:\n return kwargs\n return dict((arg, kwargs[arg]) for arg in argspec.args if arg in kwargs)\n",
"def basic_obj_get(obj):\n \"\"\"Return the value of attribute \"value\" of \"obj\".\"\"\"\n return obj.value\n",
"def basic_dict_get(dic):\n \"\"\"Return the value of the key \"value\" from the dict.\"\"\"\n return dic[\"value\"]\n"
] | class Accessor(object):
"""Object that encapsulates the getter and the setter of the attribute."""
def __init__(self, getter=None, setter=None):
"""Initialize an Accessor object."""
self.getter = getter
self.setter = setter
@cached_property
def _getter_argspec(self):
return inspect.getargspec(self.getter)
def set(self, obj, value):
"""Set value for obj's attribute.
:param obj: Result object or dict to assign the attribute to.
:param value: Value to be assigned.
"""
assert self.setter is not None, "Setter accessor is not specified."
if callable(self.setter):
return self.setter(obj, value)
assert isinstance(self.setter, string_types), "Accessor must be a function or a dot-separated string."
def _set(obj, attr, value):
if isinstance(obj, dict):
obj[attr] = value
else:
setattr(obj, attr, value)
return value
path = self.setter.split(".")
for attr in path[:-1]:
obj = _set(obj, attr, {})
_set(obj, path[-1], value)
def __repr__(self):
"""Accessor representation."""
return "<{0} getter='{1}', setter='{2}'>".format(
self.__class__.__name__,
self.getter,
self.setter,
)
|
paylogic/halogen | halogen/schema.py | Accessor.set | python | def set(self, obj, value):
assert self.setter is not None, "Setter accessor is not specified."
if callable(self.setter):
return self.setter(obj, value)
assert isinstance(self.setter, string_types), "Accessor must be a function or a dot-separated string."
def _set(obj, attr, value):
if isinstance(obj, dict):
obj[attr] = value
else:
setattr(obj, attr, value)
return value
path = self.setter.split(".")
for attr in path[:-1]:
obj = _set(obj, attr, {})
_set(obj, path[-1], value) | Set value for obj's attribute.
:param obj: Result object or dict to assign the attribute to.
:param value: Value to be assigned. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/schema.py#L77-L100 | [
"def basic_set_object(obj, value):\n \"\"\"Set the value of attribute \"value\" of \"obj\".\"\"\"\n obj.value = value\n",
"def basic_set_dict(dic, value):\n \"\"\"Set the value of attribute \"value\" of \"obj\".\"\"\"\n dic[\"value\"] = value\n",
"def _set(obj, attr, value):\n if isinstance(obj, dict):\n obj[attr] = value\n else:\n setattr(obj, attr, value)\n return value\n"
] | class Accessor(object):
"""Object that encapsulates the getter and the setter of the attribute."""
def __init__(self, getter=None, setter=None):
"""Initialize an Accessor object."""
self.getter = getter
self.setter = setter
@cached_property
def _getter_argspec(self):
return inspect.getargspec(self.getter)
def get(self, obj, **kwargs):
"""Get an attribute from a value.
:param obj: Object to get the attribute value from.
:return: Value of object's attribute.
"""
assert self.getter is not None, "Getter accessor is not specified."
if callable(self.getter):
return self.getter(obj, **_get_context(self._getter_argspec, kwargs))
assert isinstance(self.getter, string_types), "Accessor must be a function or a dot-separated string."
for attr in self.getter.split("."):
if isinstance(obj, dict):
obj = obj[attr]
else:
obj = getattr(obj, attr)
if callable(obj):
return obj()
return obj
def __repr__(self):
"""Accessor representation."""
return "<{0} getter='{1}', setter='{2}'>".format(
self.__class__.__name__,
self.getter,
self.setter,
)
|
paylogic/halogen | halogen/schema.py | Attr.accessor | python | def accessor(self):
if isinstance(self.attr, Accessor):
return self.attr
if callable(self.attr):
return Accessor(getter=self.attr)
attr = self.attr or self.name
return Accessor(getter=attr, setter=attr) | Get an attribute's accessor with the getter and the setter.
:return: `Accessor` instance. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/schema.py#L144-L156 | null | class Attr(object):
"""Schema attribute."""
creation_counter = 0
def __init__(self, attr_type=None, attr=None, required=True, **kwargs):
"""Attribute constructor.
:param attr_type: Type, Schema or constant that does the type conversion of the attribute.
:param attr: Attribute name, dot-separated attribute path or an `Accessor` instance.
:param required: Is attribute required to be present.
"""
self.attr_type = attr_type or types.Type()
self.attr = attr
self.required = required
if "default" in kwargs:
self.default = kwargs["default"]
self.creation_counter = Attr.creation_counter
Attr.creation_counter += 1
@property
def compartment(self):
"""The key of the compartment this attribute will be placed into (for example: _links or _embedded)."""
return None
@property
def key(self):
"""The key of the this attribute will be placed into (within it's compartment)."""
return self.name
@cached_property
@cached_property
def _attr_type_serialize_argspec(self):
return inspect.getargspec(self.attr_type.serialize)
def serialize(self, value, **kwargs):
"""Serialize the attribute of the input data.
Gets the attribute value with accessor and converts it using the
type serialization. Schema will place this serialized value into
corresponding compartment of the HAL structure with the name of the
attribute as a key.
:param value: Value to get the attribute value from.
:return: Serialized attribute value.
"""
if types.Type.is_type(self.attr_type):
try:
value = self.accessor.get(value, **kwargs)
except (AttributeError, KeyError):
if not hasattr(self, "default") and self.required:
raise
value = self.default() if callable(self.default) else self.default
return self.attr_type.serialize(value, **_get_context(self._attr_type_serialize_argspec, kwargs))
return self.attr_type
def deserialize(self, value, **kwargs):
"""Deserialize the attribute from a HAL structure.
Get the value from the HAL structure from the attribute's compartment
using the attribute's name as a key, convert it using the attribute's
type. Schema will either return it to parent schema or will assign
to the output value if specified using the attribute's accessor setter.
:param value: HAL structure to get the value from.
:return: Deserialized attribute value.
:raises: ValidationError.
"""
compartment = value
if self.compartment is not None:
compartment = value[self.compartment]
try:
value = self.accessor.get(compartment, **kwargs)
except (KeyError, AttributeError):
if not hasattr(self, "default") and self.required:
raise
return self.default() if callable(self.default) else self.default
return self.attr_type.deserialize(value, **kwargs)
def __repr__(self):
"""Attribute representation."""
return "<{0} '{1}'>".format(
self.__class__.__name__,
self.name,
)
def setter(self, setter):
"""Set an attribute setter accessor function.
Can be used as a decorator:
@total.setter
def set_total(obj, value):
obj.total = value
"""
self.accessor.setter = setter
def __call__(self, getter):
"""Decorate a getter accessor function."""
self.name = getter.__name__
self.accessor.getter = getter
return self
|
paylogic/halogen | halogen/schema.py | Attr.serialize | python | def serialize(self, value, **kwargs):
if types.Type.is_type(self.attr_type):
try:
value = self.accessor.get(value, **kwargs)
except (AttributeError, KeyError):
if not hasattr(self, "default") and self.required:
raise
value = self.default() if callable(self.default) else self.default
return self.attr_type.serialize(value, **_get_context(self._attr_type_serialize_argspec, kwargs))
return self.attr_type | Serialize the attribute of the input data.
Gets the attribute value with accessor and converts it using the
type serialization. Schema will place this serialized value into
corresponding compartment of the HAL structure with the name of the
attribute as a key.
:param value: Value to get the attribute value from.
:return: Serialized attribute value. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/schema.py#L162-L183 | [
"def _get_context(argspec, kwargs):\n \"\"\"Prepare a context for the serialization.\n\n :param argspec: The argspec of the serialization function.\n :param kwargs: Dict with context\n :return: Keywords arguments that function can accept.\n \"\"\"\n if argspec.keywords is not None:\n return kwargs\n return dict((arg, kwargs[arg]) for arg in argspec.args if arg in kwargs)\n",
"def is_type(value):\n \"\"\"Determine if value is an instance or subclass of the class Type.\"\"\"\n if isinstance(value, type):\n return issubclass(value, Type)\n return isinstance(value, Type)\n"
] | class Attr(object):
"""Schema attribute."""
creation_counter = 0
def __init__(self, attr_type=None, attr=None, required=True, **kwargs):
"""Attribute constructor.
:param attr_type: Type, Schema or constant that does the type conversion of the attribute.
:param attr: Attribute name, dot-separated attribute path or an `Accessor` instance.
:param required: Is attribute required to be present.
"""
self.attr_type = attr_type or types.Type()
self.attr = attr
self.required = required
if "default" in kwargs:
self.default = kwargs["default"]
self.creation_counter = Attr.creation_counter
Attr.creation_counter += 1
@property
def compartment(self):
"""The key of the compartment this attribute will be placed into (for example: _links or _embedded)."""
return None
@property
def key(self):
"""The key of the this attribute will be placed into (within it's compartment)."""
return self.name
@cached_property
def accessor(self):
"""Get an attribute's accessor with the getter and the setter.
:return: `Accessor` instance.
"""
if isinstance(self.attr, Accessor):
return self.attr
if callable(self.attr):
return Accessor(getter=self.attr)
attr = self.attr or self.name
return Accessor(getter=attr, setter=attr)
@cached_property
def _attr_type_serialize_argspec(self):
return inspect.getargspec(self.attr_type.serialize)
def deserialize(self, value, **kwargs):
"""Deserialize the attribute from a HAL structure.
Get the value from the HAL structure from the attribute's compartment
using the attribute's name as a key, convert it using the attribute's
type. Schema will either return it to parent schema or will assign
to the output value if specified using the attribute's accessor setter.
:param value: HAL structure to get the value from.
:return: Deserialized attribute value.
:raises: ValidationError.
"""
compartment = value
if self.compartment is not None:
compartment = value[self.compartment]
try:
value = self.accessor.get(compartment, **kwargs)
except (KeyError, AttributeError):
if not hasattr(self, "default") and self.required:
raise
return self.default() if callable(self.default) else self.default
return self.attr_type.deserialize(value, **kwargs)
def __repr__(self):
"""Attribute representation."""
return "<{0} '{1}'>".format(
self.__class__.__name__,
self.name,
)
def setter(self, setter):
"""Set an attribute setter accessor function.
Can be used as a decorator:
@total.setter
def set_total(obj, value):
obj.total = value
"""
self.accessor.setter = setter
def __call__(self, getter):
"""Decorate a getter accessor function."""
self.name = getter.__name__
self.accessor.getter = getter
return self
|
paylogic/halogen | halogen/schema.py | Attr.deserialize | python | def deserialize(self, value, **kwargs):
compartment = value
if self.compartment is not None:
compartment = value[self.compartment]
try:
value = self.accessor.get(compartment, **kwargs)
except (KeyError, AttributeError):
if not hasattr(self, "default") and self.required:
raise
return self.default() if callable(self.default) else self.default
return self.attr_type.deserialize(value, **kwargs) | Deserialize the attribute from a HAL structure.
Get the value from the HAL structure from the attribute's compartment
using the attribute's name as a key, convert it using the attribute's
type. Schema will either return it to parent schema or will assign
to the output value if specified using the attribute's accessor setter.
:param value: HAL structure to get the value from.
:return: Deserialized attribute value.
:raises: ValidationError. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/schema.py#L185-L209 | null | class Attr(object):
"""Schema attribute."""
creation_counter = 0
def __init__(self, attr_type=None, attr=None, required=True, **kwargs):
"""Attribute constructor.
:param attr_type: Type, Schema or constant that does the type conversion of the attribute.
:param attr: Attribute name, dot-separated attribute path or an `Accessor` instance.
:param required: Is attribute required to be present.
"""
self.attr_type = attr_type or types.Type()
self.attr = attr
self.required = required
if "default" in kwargs:
self.default = kwargs["default"]
self.creation_counter = Attr.creation_counter
Attr.creation_counter += 1
@property
def compartment(self):
"""The key of the compartment this attribute will be placed into (for example: _links or _embedded)."""
return None
@property
def key(self):
"""The key of the this attribute will be placed into (within it's compartment)."""
return self.name
@cached_property
def accessor(self):
"""Get an attribute's accessor with the getter and the setter.
:return: `Accessor` instance.
"""
if isinstance(self.attr, Accessor):
return self.attr
if callable(self.attr):
return Accessor(getter=self.attr)
attr = self.attr or self.name
return Accessor(getter=attr, setter=attr)
@cached_property
def _attr_type_serialize_argspec(self):
return inspect.getargspec(self.attr_type.serialize)
def serialize(self, value, **kwargs):
"""Serialize the attribute of the input data.
Gets the attribute value with accessor and converts it using the
type serialization. Schema will place this serialized value into
corresponding compartment of the HAL structure with the name of the
attribute as a key.
:param value: Value to get the attribute value from.
:return: Serialized attribute value.
"""
if types.Type.is_type(self.attr_type):
try:
value = self.accessor.get(value, **kwargs)
except (AttributeError, KeyError):
if not hasattr(self, "default") and self.required:
raise
value = self.default() if callable(self.default) else self.default
return self.attr_type.serialize(value, **_get_context(self._attr_type_serialize_argspec, kwargs))
return self.attr_type
def __repr__(self):
"""Attribute representation."""
return "<{0} '{1}'>".format(
self.__class__.__name__,
self.name,
)
def setter(self, setter):
"""Set an attribute setter accessor function.
Can be used as a decorator:
@total.setter
def set_total(obj, value):
obj.total = value
"""
self.accessor.setter = setter
def __call__(self, getter):
"""Decorate a getter accessor function."""
self.name = getter.__name__
self.accessor.getter = getter
return self
|
paylogic/halogen | halogen/schema.py | Embedded.key | python | def key(self):
if self.curie is None:
return self.name
return ":".join((self.curie.name, self.name)) | Embedded supports curies. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/schema.py#L376-L380 | null | class Embedded(Attr):
"""Embedded attribute of schema."""
def __init__(self, attr_type=None, attr=None, curie=None, required=True):
    """Embedded constructor.

    :param attr_type: Type, Schema or constant that does the type conversion of the attribute.
    :param attr: Attribute name, dot-separated attribute path or an `Accessor` instance.
    :param curie: The curie used for this embedded attribute.
    :param required: Whether the attribute is required (forwarded to ``Attr``).
    """
    super(Embedded, self).__init__(attr_type=attr_type, attr=attr, required=required)
    self.curie = curie
@property
def compartment(self):
    """Name of the compartment that holds embedded objects.

    Embedded objects are placed under the ``_embedded`` key of the HAL
    structure.

    :return: The string ``"_embedded"``.
    """
    return "_embedded"
@property
|
paylogic/halogen | halogen/schema.py | _Schema.deserialize | python | def deserialize(cls, value, output=None, **kwargs):
errors = []
result = {}
for attr in cls.__attrs__.values():
try:
result[attr.name] = attr.deserialize(value, **kwargs)
except NotImplementedError:
# Links don't support deserialization
continue
except ValueError as e:
errors.append(exceptions.ValidationError(e, attr.name))
except exceptions.ValidationError as e:
e.attr = attr.name
errors.append(e)
except (KeyError, AttributeError):
if attr.required:
errors.append(exceptions.ValidationError("Missing attribute.", attr.name))
if errors:
raise exceptions.ValidationError(errors)
if output is None:
return result
for attr in cls.__attrs__.values():
if attr.name in result:
attr.accessor.set(output, result[attr.name]) | Deserialize the HAL structure into the output value.
:param value: Dict of already loaded json which will be deserialized by schema attributes.
:param output: If present, the output object will be updated instead of returning the deserialized data.
:returns: Dict of deserialized value for attributes. Where key is name of schema's attribute and value is
deserialized value from value dict.
:raises: ValidationError. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/schema.py#L416-L450 | null | class _Schema(types.Type):
"""Type for creating schema."""
def __new__(cls, **kwargs):
    """Create schema from keyword arguments.

    Each keyword maps an attribute name to an ``Attr``-like object; a
    fresh ``Schema`` subclass is returned with those attributes
    registered.
    """
    schema = type("Schema", (cls, ), {"__doc__": cls.__doc__})
    schema.__class_attrs__ = OrderedDict()
    schema.__attrs__ = OrderedDict()
    for name, attr in kwargs.items():
        # Attributes declared without an explicit name take the kwarg name.
        if not hasattr(attr, "name"):
            attr.name = name
        schema.__class_attrs__[attr.name] = attr
        schema.__attrs__[attr.name] = attr
    return schema
@classmethod
def serialize(cls, value, **kwargs):
    """Serialize ``value`` into a HAL-shaped ``OrderedDict``.

    Each registered attribute serializes itself; the result is stored
    either at the top level or inside the attribute's compartment
    (e.g. ``_embedded``).

    :param value: Object to serialize.
    :return: ``OrderedDict`` with the serialized attributes.
    """
    result = OrderedDict()
    for attr in cls.__attrs__.values():
        compartment = result
        if attr.compartment is not None:
            compartment = result.setdefault(attr.compartment, OrderedDict())
        try:
            compartment[attr.key] = attr.serialize(value, **kwargs)
        except (AttributeError, KeyError):
            # Optional attributes that are absent are simply omitted.
            if attr.required:
                raise
        if attr.compartment is not None and len(compartment) == 0:
            # Drop a compartment that ended up empty.
            del result[attr.compartment]
    return result
@classmethod
|
paylogic/halogen | halogen/vnd/error.py | Error.from_validation_exception | python | def from_validation_exception(cls, exception, **kwargs):
errors = []
def flatten(error, path=""):
if isinstance(error, halogen.exceptions.ValidationError):
if not path.endswith("/"):
path += "/"
if error.attr is not None:
path += error.attr
elif error.index is not None:
path += six.text_type(error.index)
for e in error.errors:
flatten(e, path)
else:
message = error
if isinstance(error, Exception):
try:
message = error.message
except AttributeError:
message = six.text_type(error)
# TODO: i18n
errors.append(Error(message=message, path=path))
flatten(exception)
message = kwargs.pop("message", "Validation error.")
return cls(message=message, errors=sorted(errors, key=lambda error: error.path or ""), **kwargs) | Create an error from validation exception. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/vnd/error.py#L26-L53 | [
"def flatten(error, path=\"\"):\n if isinstance(error, halogen.exceptions.ValidationError):\n if not path.endswith(\"/\"):\n path += \"/\"\n if error.attr is not None:\n path += error.attr\n elif error.index is not None:\n path += six.text_type(error.index)\n\n for e in error.errors:\n flatten(e, path)\n else:\n message = error\n if isinstance(error, Exception):\n try:\n message = error.message\n except AttributeError:\n message = six.text_type(error)\n # TODO: i18n\n errors.append(Error(message=message, path=path))\n"
] | class Error(Exception):
"""Base exception."""
def __init__(self, message, path=None, errors=None):
    """Create an error.

    :param message: Error message.
    :param path: Optional JSON Pointer path.
    :param errors: Optional nested errors.
    """
    self.message = message
    # Optional fields are only set when provided, so absent values do not
    # shadow attributes that may be defined at class level.
    if path is not None:
        self.path = path
    if errors is not None:
        self.errors = errors
@classmethod
|
paylogic/halogen | halogen/types.py | Type.deserialize | python | def deserialize(self, value, **kwargs):
for validator in self.validators:
validator.validate(value, **kwargs)
return value | Deserialization of value.
:return: Deserialized value.
:raises: :class:`halogen.exception.ValidationError` exception if value is not valid. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/types.py#L29-L38 | null | class Type(object):
"""Base class for creating types."""
def __init__(self, validators=None, *args, **kwargs):
    """Type constructor.

    :param validators: A list of :class:`halogen.validators.Validator` objects that check the validity of the
        deserialized value. Validators raise :class:`halogen.exception.ValidationError` exceptions when
        value is not valid.
    """
    # ``or []`` also normalizes any falsy argument to an empty list.
    self.validators = validators or []
def serialize(self, value, **kwargs):
    """Serialization of value: the base type returns the value as-is."""
    result = value
    return result
@staticmethod
def is_type(value):
    """Determine if value is an instance or subclass of the class Type."""
    if not isinstance(value, type):
        return isinstance(value, Type)
    return issubclass(value, Type)
|
paylogic/halogen | halogen/types.py | Type.is_type | python | def is_type(value):
if isinstance(value, type):
return issubclass(value, Type)
return isinstance(value, Type) | Determine if value is an instance or subclass of the class Type. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/types.py#L41-L45 | null | class Type(object):
"""Base class for creating types."""
def __init__(self, validators=None, *args, **kwargs):
"""Type constructor.
:param validators: A list of :class:`halogen.validators.Validator` objects that check the validity of the
deserialized value. Validators raise :class:`halogen.exception.ValidationError` exceptions when
value is not valid.
"""
self.validators = validators or []
def serialize(self, value, **kwargs):
"""Serialization of value."""
return value
def deserialize(self, value, **kwargs):
"""Deserialization of value.
:return: Deserialized value.
:raises: :class:`halogen.exception.ValidationError` exception if value is not valid.
"""
for validator in self.validators:
validator.validate(value, **kwargs)
return value
@staticmethod
|
paylogic/halogen | halogen/types.py | List.serialize | python | def serialize(self, value, **kwargs):
return [self.item_type.serialize(val, **kwargs) for val in value] | Serialize every item of the list. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/types.py#L61-L63 | null | class List(Type):
"""List type for Halogen schema attribute."""
def __init__(self, item_type=None, allow_scalar=False, *args, **kwargs):
    """Create a new List.

    :param item_type: Item type or schema.
    :param allow_scalar: Automatically convert scalar value to the list.
    """
    super(List, self).__init__(*args, **kwargs)
    # Items pass through a no-op base ``Type`` when no item type is given.
    self.item_type = item_type or Type()
    self.allow_scalar = allow_scalar
def deserialize(self, value, **kwargs):
    """Deserialize every item of the list.

    :param value: List/tuple of raw items (or a scalar when
        ``allow_scalar`` is enabled).
    :return: List of deserialized items.
    :raises ValidationError: aggregated item errors, each annotated with
        the index of the failing item.
    """
    if self.allow_scalar and not isinstance(value, (list, tuple)):
        value = [value]
    # Run the list-level validators first.
    value = super(List, self).deserialize(value)
    result = []
    errors = []
    for index, val in enumerate(value):
        try:
            result.append(self.item_type.deserialize(val, **kwargs))
        except ValidationError as exc:
            # Collect instead of aborting so all failures are reported at
            # once; remember which item failed.
            exc.index = index
            errors.append(exc)
    if errors:
        raise ValidationError(errors)
    return result
|
paylogic/halogen | halogen/types.py | List.deserialize | python | def deserialize(self, value, **kwargs):
if self.allow_scalar and not isinstance(value, (list, tuple)):
value = [value]
value = super(List, self).deserialize(value)
result = []
errors = []
for index, val in enumerate(value):
try:
result.append(self.item_type.deserialize(val, **kwargs))
except ValidationError as exc:
exc.index = index
errors.append(exc)
if errors:
raise ValidationError(errors)
return result | Deserialize every item of the list. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/types.py#L65-L81 | [
"def deserialize(self, value, **kwargs):\n \"\"\"Deserialization of value.\n\n :return: Deserialized value.\n :raises: :class:`halogen.exception.ValidationError` exception if value is not valid.\n \"\"\"\n for validator in self.validators:\n validator.validate(value, **kwargs)\n\n return value\n"
] | class List(Type):
"""List type for Halogen schema attribute."""
def __init__(self, item_type=None, allow_scalar=False, *args, **kwargs):
"""Create a new List.
:param item_type: Item type or schema.
:param allow_scalar: Automatically convert scalar value to the list.
"""
super(List, self).__init__(*args, **kwargs)
self.item_type = item_type or Type()
self.allow_scalar = allow_scalar
def serialize(self, value, **kwargs):
    """Serialize every item of the list.

    :param value: Iterable of items.
    :return: List of items converted by ``item_type``.
    """
    serialize_item = self.item_type.serialize
    return [serialize_item(item, **kwargs) for item in value]
|
paylogic/halogen | halogen/types.py | ISOUTCDateTime.format_as_utc | python | def format_as_utc(self, value):
if isinstance(value, datetime.datetime):
if value.tzinfo is not None:
value = value.astimezone(pytz.UTC)
value = value.replace(microsecond=0)
return value.isoformat().replace('+00:00', 'Z') | Format UTC times. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/types.py#L110-L116 | null | class ISOUTCDateTime(Type):
"""ISO-8601 datetime schema type in UTC timezone."""
type = "datetime"
message = u"'{val}' is not a valid ISO-8601 datetime"
def serialize(self, value, **kwargs):
    """Format a truthy value via ``format_as_utc``; falsy values map to None."""
    if not value:
        return None
    return self.format_as_utc(value)
def deserialize(self, value, **kwargs):
    """Parse an ISO-8601 datetime string.

    :param value: ISO-8601 string, or a callable producing one.
    :return: Parsed value, run through the base-class validators.
    :raises ValueError: when the value is not valid ISO-8601.
    """
    value = value() if callable(value) else value
    try:
        # dateutil acts as a lenient sanity check; isodate then performs
        # the strict ISO-8601 parse (``parse_datetime`` via ``self.type``).
        dateutil.parser.parse(value)
        value = getattr(isodate, "parse_{0}".format(self.type))(value)
    except (isodate.ISO8601Error, ValueError):
        raise ValueError(self.message.format(val=value))
    return super(ISOUTCDateTime, self).deserialize(value)
|
paylogic/halogen | halogen/types.py | Amount.amount_object_to_dict | python | def amount_object_to_dict(self, amount):
currency, amount = (
amount.as_quantized(digits=2).as_tuple()
if not isinstance(amount, dict)
else (amount["currency"], amount["amount"])
)
if currency not in self.currencies:
raise ValueError(self.err_unknown_currency.format(currency=currency))
return {
"amount": str(amount),
"currency": str(currency),
} | Return the dictionary representation of an Amount object.
Amount object must have amount and currency properties and as_tuple method which will return (currency, amount)
and as_quantized method to quantize amount property.
:param amount: instance of Amount object
:return: dict with amount and currency keys. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/types.py#L205-L225 | null | class Amount(Type):
"""Amount (money) schema type."""
err_unknown_currency = u"'{currency}' is not a valid currency."
def __init__(self, currencies, amount_class, **kwargs):
    """Initialize new instance of Amount.

    :param currencies: list of all possible currency codes.
    :param amount_class: class for the Amount deserialized value.
    """
    self.currencies = currencies
    self.amount_class = amount_class
    # Remaining kwargs (e.g. validators) go to the base Type constructor.
    super(Amount, self).__init__(**kwargs)
def serialize(self, value, **kwargs):
"""Serialize amount.
:param value: Amount value.
:return: Converted amount.
"""
if value is None:
return None
return self.amount_object_to_dict(value)
def deserialize(self, value, **kwargs):
    """Deserialize the amount.

    :param value: Amount in CURRENCYAMOUNT or {"currency": CURRENCY, "amount": AMOUNT} format. For example EUR35.50
        or {"currency": "EUR", "amount": "35.50"}

    :return: A paylogic Amount object (``None`` passes through).

    :raises ValueError: when the amount cannot be deserialized.
    :raises ValueError: when the amount has more than 2 decimal places.
    """
    if value is None:
        return None
    if isinstance(value, six.string_types):
        # "EUR35.50": 3-char currency prefix followed by the amount.
        currency = value[:3]
        amount = value[3:]
    elif isinstance(value, dict):
        if set(value.keys()) != set(("currency", "amount")):
            raise ValueError("Amount object has to have currency and amount fields.")
        amount = value["amount"]
        currency = value["currency"]
    else:
        raise ValueError("Value cannot be parsed to Amount.")
    if currency not in self.currencies:
        raise ValueError(self.err_unknown_currency.format(currency=currency))
    try:
        amount = decimal.Decimal(amount).normalize()
    except decimal.InvalidOperation:
        raise ValueError(u"'{amount}' cannot be parsed to decimal.".format(amount=amount))
    # An exponent below -2 means more than two decimal places.
    if amount.as_tuple().exponent < - 2:
        raise ValueError(u"'{amount}' has more than 2 decimal places.".format(amount=amount))
    value = self.amount_class(currency=currency, amount=amount)
    return super(Amount, self).deserialize(value)
|
paylogic/halogen | halogen/types.py | Amount.deserialize | python | def deserialize(self, value, **kwargs):
if value is None:
return None
if isinstance(value, six.string_types):
currency = value[:3]
amount = value[3:]
elif isinstance(value, dict):
if set(value.keys()) != set(("currency", "amount")):
raise ValueError("Amount object has to have currency and amount fields.")
amount = value["amount"]
currency = value["currency"]
else:
raise ValueError("Value cannot be parsed to Amount.")
if currency not in self.currencies:
raise ValueError(self.err_unknown_currency.format(currency=currency))
try:
amount = decimal.Decimal(amount).normalize()
except decimal.InvalidOperation:
raise ValueError(u"'{amount}' cannot be parsed to decimal.".format(amount=amount))
if amount.as_tuple().exponent < - 2:
raise ValueError(u"'{amount}' has more than 2 decimal places.".format(amount=amount))
value = self.amount_class(currency=currency, amount=amount)
return super(Amount, self).deserialize(value) | Deserialize the amount.
:param value: Amount in CURRENCYAMOUNT or {"currency": CURRENCY, "amount": AMOUNT} format. For example EUR35.50
or {"currency": "EUR", "amount": "35.50"}
:return: A paylogic Amount object.
:raises ValidationError: when amount can"t be deserialzied
:raises ValidationError: when amount has more than 2 decimal places | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/types.py#L238-L274 | [
"def deserialize(self, value, **kwargs):\n \"\"\"Deserialization of value.\n\n :return: Deserialized value.\n :raises: :class:`halogen.exception.ValidationError` exception if value is not valid.\n \"\"\"\n for validator in self.validators:\n validator.validate(value, **kwargs)\n\n return value\n"
] | class Amount(Type):
"""Amount (money) schema type."""
err_unknown_currency = u"'{currency}' is not a valid currency."
def __init__(self, currencies, amount_class, **kwargs):
"""Initialize new instance of Amount.
:param currencies: list of all possible currency codes.
:param amount_class: class for the Amount deserialized value.
"""
self.currencies = currencies
self.amount_class = amount_class
super(Amount, self).__init__(**kwargs)
def amount_object_to_dict(self, amount):
    """Return the dictionary representation of an Amount object.

    Amount object must have amount and currency properties and as_tuple method which will return (currency, amount)
    and as_quantized method to quantize amount property.

    :param amount: instance of Amount object, or an already-built dict
        with ``currency`` and ``amount`` keys.
    :return: dict with amount and currency keys.
    :raises ValueError: when the currency is not a known currency.
    """
    currency, amount = (
        # Quantize real Amount objects to 2 digits; dicts pass through.
        amount.as_quantized(digits=2).as_tuple()
        if not isinstance(amount, dict)
        else (amount["currency"], amount["amount"])
    )
    if currency not in self.currencies:
        raise ValueError(self.err_unknown_currency.format(currency=currency))
    return {
        "amount": str(amount),
        "currency": str(currency),
    }
def serialize(self, value, **kwargs):
    """Serialize amount.

    :param value: Amount value, or None.
    :return: Dict representation of the amount, or None.
    """
    return None if value is None else self.amount_object_to_dict(value)
|
paylogic/halogen | halogen/validators.py | Length.validate | python | def validate(self, value):
try:
length = len(value)
except TypeError:
length = 0
if self.min_length is not None:
min_length = self.min_length() if callable(self.min_length) else self.min_length
if length < min_length:
raise exceptions.ValidationError(self.min_err.format(min_length))
if self.max_length is not None:
max_length = self.max_length() if callable(self.max_length) else self.max_length
if length > max_length:
raise exceptions.ValidationError(self.max_err.format(max_length)) | Validate the length of a list.
:param value: List of values.
:raises: :class:`halogen.exception.ValidationError` exception when length of the list is less than
minimum or greater than maximum. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/validators.py#L92-L113 | null | class Length(Validator):
"""Length validator that checks the length of a List-like type."""
min_err = "Length is less than {0}"
max_err = "Length is greater than {0}"
def __init__(self, min_length=None, max_length=None, min_err=None, max_err=None):
    """Length validator constructor.

    :param min_length: Minimum length (constant or callable), optional.
    :param max_length: Maximum length (constant or callable), optional.
    :param min_err: ValidationError message if length is less than minimal value.
    :param max_err: ValidationError message if length is greater than maximal value.
    """
    self.min_length = min_length
    self.max_length = max_length
    # Custom messages override the class-level defaults only when given.
    if min_err is not None:
        self.min_err = min_err
    if max_err is not None:
        self.max_err = max_err
|
paylogic/halogen | halogen/validators.py | Range.validate | python | def validate(self, value):
if self.min is not None:
min_value = self.min() if callable(self.min) else self.min
if value < min_value:
raise exceptions.ValidationError(self.min_err.format(val=value, min=min_value))
if self.max is not None:
max_value = self.max() if callable(self.max) else self.max
if value > max_value:
raise exceptions.ValidationError(self.max_err.format(val=value, max=max_value)) | Validate value.
:param value: Value which should be validated.
:raises: :class:`halogen.exception.ValidationError` exception when either if value less than min in case when
min is not None or if value greater than max in case when max is not None. | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/validators.py#L143-L159 | null | class Range(object):
"""Range validator.
Validator which succeeds if the value it is passed is greater or equal to ``min`` and less than or equal to
``max``. If ``min`` is not specified, or is specified as ``None``, no lower bound exists. If ``max`` is not
specified, or is specified as ``None``, no upper bound exists.
"""
min_err = u'{val} is less than minimum value {min}'
max_err = u'{val} is greater than maximum value {max}'
def __init__(self, min=None, max=None, min_err=None, max_err=None):
    """Range validator constructor.

    :param min: Minimal value of range (constant or callable), optional.
    :param max: Maximal value of range (constant or callable), optional.
    :param min_err: ValidationError message if value is less than minimal value of range.
    :param max_err: ValidationError message if value is greater than maximal value of range.
    """
    self.min = min
    self.max = max
    # Custom messages override the class-level defaults only when given.
    if min_err is not None:
        self.min_err = min_err
    if max_err is not None:
        self.max_err = max_err
|
visualfabriq/bquery | bquery/benchmarks/bench_groupby.py | ctime | python | def ctime(message=None):
"Counts the time spent in some context"
global t_elapsed
t_elapsed = 0.0
print('\n')
t = time.time()
yield
if message:
print(message + ": ", end='')
t_elapsed = time.time() - t
print(round(t_elapsed, 4), "sec") | Counts the time spent in some context | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/benchmarks/bench_groupby.py#L29-L39 | null | from __future__ import print_function
# bench related imports
import numpy as np
import shutil
import bquery
import pandas as pd
import itertools as itt
import cytoolz
import cytoolz.dicttoolz
from toolz import valmap, compose
from cytoolz.curried import pluck
import blaze as blz
# other imports
import contextlib
import os
import time
try:
# Python 2
from itertools import izip
except ImportError:
# Python 3
izip = zip
t_elapsed = 0.0
@contextlib.contextmanager
ga = itt.cycle(['ES', 'NL'])
gb = itt.cycle(['b1', 'b2', 'b3', 'b4', 'b5'])
gx = itt.cycle([1, 2])
gy = itt.cycle([-1, -2])
rootdir = 'bench-data.bcolz'
if os.path.exists(rootdir):
shutil.rmtree(rootdir)
n_rows = 1000000
print('Rows: ', n_rows)
# -- data
z = np.fromiter(((a, b, x, y) for a, b, x, y in izip(ga, gb, gx, gy)),
dtype='S2,S2,i8,i8', count=n_rows)
ct = bquery.ctable(z, rootdir=rootdir, )
print(ct)
# -- pandas --
df = pd.DataFrame(z)
with ctime(message='pandas'):
result = df.groupby(['f0'])['f2'].sum()
print(result)
t_pandas = t_elapsed
# -- cytoolz --
with ctime(message='cytoolz over bcolz'):
# In Memory Split-Apply-Combine
# http://toolz.readthedocs.org/en/latest/streaming-analytics.html?highlight=reduce#split-apply-combine-with-groupby-and-reduceby
r = cytoolz.groupby(lambda row: row.f0, ct)
result = valmap(compose(sum, pluck(2)), r)
print('x{0} slower than pandas'.format(round(t_elapsed / t_pandas, 2)))
print(result)
# -- blaze + bcolz --
blaze_data = blz.Data(ct.rootdir)
expr = blz.by(blaze_data.f0, sum_f2=blaze_data.f2.sum())
with ctime(message='blaze over bcolz'):
result = blz.compute(expr)
print('x{0} slower than pandas'.format(round(t_elapsed / t_pandas, 2)))
print(result)
# -- bquery --
with ctime(message='bquery over bcolz'):
result = ct.groupby(['f0'], ['f2'])
print('x{0} slower than pandas'.format(round(t_elapsed / t_pandas, 2)))
print(result)
ct.cache_factor(['f0'], refresh=True)
with ctime(message='bquery over bcolz (factorization cached)'):
result = ct.groupby(['f0'], ['f2'])
print('x{0} slower than pandas'.format(round(t_elapsed / t_pandas, 2)))
print(result)
shutil.rmtree(rootdir)
|
visualfabriq/bquery | bquery/ctable.py | rm_file_or_dir | python | def rm_file_or_dir(path, ignore_errors=True):
if os.path.exists(path):
if os.path.isdir(path):
if os.path.islink(path):
os.unlink(path)
else:
shutil.rmtree(path, ignore_errors=ignore_errors)
else:
if os.path.islink(path):
os.unlink(path)
else:
os.remove(path) | Helper function to clean a certain filepath
Parameters
----------
path
Returns
------- | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L12-L34 | null | import os
import shutil
import tempfile
import uuid
import bcolz
import numpy as np
from bquery import ctable_ext
class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
    """Construct a bquery ctable; arguments are those of :class:`bcolz.ctable`.

    On-disk tables (``rootdir`` set) get factorization auto-caching when
    requested explicitly, or implicitly when opened writable with no
    preference given.
    """
    super(ctable, self).__init__(*args, **kwargs)
    # check autocaching
    if self.rootdir and kwargs.get('auto_cache') is True:
        # explicit auto_cache
        self.auto_cache = True
    elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
        # implicit auto_cache
        self.auto_cache = True
    else:
        self.auto_cache = False
    # NOTE(review): debug leftover -- unconditionally overrides the
    # auto_cache decision above (including for in-memory tables); confirm
    # whether this line should be removed.
    self.auto_cache = True  # debug
    self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
    """Build the canonical cache name for a group of columns.

    Columns are sorted first, so the name does not depend on input order.

    :param col_list: Column names.
    :return: Sorted names joined with ``_``.
    """
    return '_'.join(sorted(col_list))
def cache_valid(self, col):
    """
    Checks whether the column has a factorization that exists and is not older than the source

    :param col: Column name.
    :return: True when a valid on-disk factorization cache exists.
    """
    # NOTE(review): only existence is checked here, not modification
    # time, despite what the summary says -- confirm intent.
    cache_valid = False
    if self.rootdir:
        # Both the source column and its ``.values`` companion carray
        # must be present on disk.
        col_org_file_check = self[col].rootdir + '/__attrs__'
        col_values_file_check = self[col].rootdir + '.values/__attrs__'
        cache_valid = os.path.exists(col_org_file_check) and os.path.exists(col_values_file_check)
    return cache_valid
def group_cache_valid(self, col_list):
    """
    Checks whether the column group has a factorization that exists on disk.

    :param col_list: Column names making up the group.
    :return: True when the group index exists and no member column is missing.
    """
    cache_valid = False
    if self.rootdir:
        col_values_file_check = os.path.join(self.rootdir, self.create_group_base_name(col_list)) + \
                                '.values/__attrs__'
        exists_group_index = os.path.exists(col_values_file_check)
        # Any member column without an on-disk presence invalidates the group.
        missing_col_check = [1 for col in col_list if not os.path.exists(self[col].rootdir + '/__attrs__')]
        cache_valid = (exists_group_index and not missing_col_check)
    return cache_valid
def cache_factor(self, col_list, refresh=False):
    """
    Existing todos here are: these should be hidden helper carrays
    As in: not normal columns that you would normally see as a user

    The factor (label index) carray is as long as the original carray
    (and the rest of the table therefore)
    But the (unique) values carray is not as long (as long as the number
    of unique values)

    :param col_list: Column name or list of column names to factorize.
    :param refresh: When True, discard and rebuild all existing caches.
    :return: None
    """
    if not self.rootdir:
        raise TypeError('Only out-of-core ctables can have '
                        'factorization caching at the moment')
    if not isinstance(col_list, list):
        col_list = [col_list]
    if refresh:
        # Wipe every cached factor/values carray for the whole table.
        kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
        for kill_dir in kill_list:
            rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
    for col in col_list:
        # create cache if needed
        if refresh or not self.cache_valid(col):
            # todo: also add locking mechanism here

            # create directories
            col_rootdir = self[col].rootdir
            col_factor_rootdir = col_rootdir + '.factor'
            col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
            col_values_rootdir = col_rootdir + '.values'
            col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')

            # create factor
            carray_factor = \
                bcolz.carray([], dtype='int64', expectedlen=self.size,
                             rootdir=col_factor_rootdir_tmp, mode='w')
            _, values = \
                ctable_ext.factorize(self[col], labels=carray_factor)
            carray_factor.flush()
            # Built in a temp dir first, then swapped into place.
            rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
            shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)

            # create values
            carray_values = \
                bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
                             rootdir=col_values_rootdir_tmp, mode='w')
            carray_values.flush()
            rm_file_or_dir(col_values_rootdir, ignore_errors=True)
            shutil.move(col_values_rootdir_tmp, col_values_rootdir)
def unique(self, col_or_col_list):
    """
    Return a list of unique values of a column or a list of lists of column list

    :param col_or_col_list: a column or a list of columns
    :return: Unique values; one list per input column when a list was given,
        a flat list otherwise.
    """
    if isinstance(col_or_col_list, list):
        col_is_list = True
        col_list = col_or_col_list
    else:
        col_is_list = False
        col_list = [col_or_col_list]
    output = []
    for col in col_list:
        if self.auto_cache or self.cache_valid(col):
            # create factorization cache
            if not self.cache_valid(col):
                self.cache_factor([col])
            # retrieve values from existing disk-based factorization
            col_values_rootdir = self[col].rootdir + '.values'
            carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
            values = list(carray_values)
        else:
            # factorize on-the-fly
            _, values = ctable_ext.factorize(self[col])
            values = values.values()
        output.append(values)
    # Unwrap the single-column case so callers get a flat list back.
    if not col_is_list:
        output = output[0]
    return output
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
                     carray_factor, groupby_cols, agg_ops,
                     dtype_dict, bool_arr=None):
    '''Perform aggregation and place the result in the given ctable.

    Args:
        ct_agg (ctable): the table to hold the aggregation
        nr_groups (int): the number of groups (number of rows in output table)
        skip_key (int): index of the output row to remove from results (used for filtering)
        carray_factor: the carray for each row in the table a reference to the the unique group index
        groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
        agg_ops (list): list of tuples of the form: (input_col, output_col, agg_op)
            input_col (string): name of the column to act on
            output_col (string): name of the output column
            agg_op (string): aggregation operation to perform
        dtype_dict (dict): mapping of output column name to its dtype
        bool_arr: a boolean array containing the filter
    '''
    # this creates the groupby columns
    for col in groupby_cols:
        result_array = ctable_ext.groupby_value(self[col], carray_factor,
                                                nr_groups, skip_key)
        if bool_arr is not None:
            # The ``skip_key`` row collects filtered-out input; drop it.
            result_array = np.delete(result_array, skip_key)
        ct_agg.addcol(result_array, name=col)
        del result_array
    # this creates the aggregation columns
    for input_col_name, output_col_name, agg_op in agg_ops:
        input_col = self[input_col_name]
        output_col_dtype = dtype_dict[output_col_name]
        # Reusable chunk-sized input buffer plus one output slot per group.
        input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
        output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
        if agg_op == 'sum':
            ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
                                     skip_key, input_buffer, output_buffer)
        elif agg_op == 'mean':
            ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
                                      skip_key, input_buffer, output_buffer)
        elif agg_op == 'std':
            ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
                                     skip_key, input_buffer, output_buffer)
        elif agg_op == 'count':
            ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
                                       skip_key, input_buffer, output_buffer)
        elif agg_op == 'count_distinct':
            ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
                                                skip_key, input_buffer, output_buffer)
        elif agg_op == 'sorted_count_distinct':
            ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
                                                       skip_key, input_buffer, output_buffer)
        else:
            raise KeyError('Unknown aggregation operation ' + str(agg_op))
        if bool_arr is not None:
            output_buffer = np.delete(output_buffer, skip_key)
        ct_agg.addcol(output_buffer, name=output_col_name)
        del output_buffer
    # Remove the temporary bootstrap column from the output table.
    ct_agg.delcol('tmp_col_bquery__')
def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
    """
    Aggregate the ctable

    groupby_cols: a list of columns to groupby over
    agg_list: the aggregation operations, which can be:
     - a list of column names (output has same name and sum is performed)
       ['m1', 'm2', ...]
     - a list of lists, each list contains input column name and operation
       [['m1', 'sum'], ['m2', 'mean'], ...]
     - a list of lists, each list contains input column name, operation and
       output column name
       [['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]

    Currently supported aggregation operations are:
        - 'sum'
        - 'count'
        - 'count_na'
        - 'count_distinct'
        - 'sorted_count_distinct', data should have been
          previously presorted
        - 'mean', arithmetic mean (average)
        - 'std', standard deviation

    boolarr: to be added (filtering the groupby factorization input)
    rootdir: the aggregation ctable rootdir
    """
    carray_factor, nr_groups, skip_key = \
        self.make_group_index(groupby_cols, bool_arr)
    # check if the bool_arr actually filters
    if bool_arr is not None and np.all(bool_arr):
        bool_arr = None
    if bool_arr is None:
        expectedlen = nr_groups
    else:
        # One group holds the filtered-out rows and is removed later.
        expectedlen = nr_groups - 1
    ct_agg, dtype_dict, agg_ops = \
        self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
    # perform aggregation
    self.aggregate_groups(ct_agg, nr_groups, skip_key,
                          carray_factor, groupby_cols,
                          agg_ops, dtype_dict,
                          bool_arr=bool_arr)
    # clean up everything that was used
    self.clean_tmp_rootdir()
    return ct_agg
# groupby helper functions
def factorize_groupby_cols(self, groupby_cols):
    """
    factorizes all columns that are used in the groupby
    it will use cache carrays if available
    if not yet auto_cache is valid, it will create cache carrays

    :param groupby_cols: Column names to factorize.
    :return: Tuple ``(factor_list, values_list)`` of parallel carray lists.
    """
    # first check if the factorized arrays already exist
    # unless we need to refresh the cache
    factor_list = []
    values_list = []
    # factorize the groupby columns
    for col in groupby_cols:
        if self.auto_cache or self.cache_valid(col):
            # create factorization cache if needed
            if not self.cache_valid(col):
                self.cache_factor([col])
            col_rootdir = self[col].rootdir
            col_factor_rootdir = col_rootdir + '.factor'
            col_values_rootdir = col_rootdir + '.values'
            col_carray_factor = \
                bcolz.carray(rootdir=col_factor_rootdir, mode='r')
            col_carray_values = \
                bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # In-memory fallback: factorize on-the-fly.
            col_carray_factor, values = ctable_ext.factorize(self[col])
            col_carray_values = \
                bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype))
        factor_list.append(col_carray_factor)
        values_list.append(col_carray_values)
    return factor_list, values_list
@staticmethod
def _int_array_hash(input_list):
    """
    Calculate a combined hash value over multiple equal-length integer
    arrays (one hash per row); not used at the moment.

    The mixing constants mirror CPython's classic tuple-hash scheme.

    Parameters
    ----------
    input_list : list of 1-d integer arrays, all of the same length

    Returns
    -------
    bcolz.carray of int64 hash values, one per row
    """
    list_len = len(input_list)
    arr_len = len(input_list[0])
    # np.long was removed in NumPy 1.24 (and was never a py3 builtin);
    # use the explicit 64-bit integer type instead
    mult_arr = np.full(arr_len, 1000003, dtype=np.int64)
    value_arr = np.full(arr_len, 0x345678, dtype=np.int64)
    for i, current_arr in enumerate(input_list):
        index = list_len - i - 1
        value_arr ^= current_arr
        value_arr *= mult_arr
        mult_arr += (82520 + index + index)
    value_arr += 97531
    result_carray = bcolz.carray(value_arr)
    # release the intermediate numpy buffer before returning
    del value_arr
    return result_carray
def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
    """
    Create a unique, factorized column out of several individual columns.

    The per-column factors in `factor_list` are combined row-wise into one
    group index, which is then factorized again so every distinct column
    combination gets a single compact label.

    Parameters
    ----------
    factor_list : list of factor carrays, one per groupby column
    groupby_cols : list of the column names that were factorized
    cache : bool — when True, persist the result under self.rootdir
        (guarded by a lock file so concurrent processes don't clash)

    Returns
    -------
    (carray_factor, carray_values) : per-row combined factor and the
        unique combination values
    """
    if not self.rootdir:
        # in-memory scenario
        input_rootdir = None
        col_rootdir = None
        col_factor_rootdir = None
        col_values_rootdir = None
        col_factor_rootdir_tmp = None
        col_values_rootdir_tmp = None
    else:
        # temporary working dirs; moved into place later when caching
        input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
        col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
        col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
    # create combination of groupby columns
    group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
    factor_table = bcolz.ctable(factor_list, names=groupby_cols)
    ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
    ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)
    # now factorize the combined index
    carray_factor = \
        bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
    carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
    carray_factor.flush()
    carray_values = \
        bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
    carray_values.flush()
    del group_array
    if cache:
        # clean up the temporary combination input file
        rm_file_or_dir(input_rootdir, ignore_errors=True)
    if cache:
        # official end destination of the cached factor/values
        col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
        col_factor_rootdir = col_rootdir + '.factor'
        col_values_rootdir = col_rootdir + '.values'
        lock_file = col_rootdir + '.lock'
        # best-effort advisory lock: the process whose uuid ends up as the
        # first line of the lock file owns the cache (only works for linux)
        if not os.path.exists(lock_file):
            uid = str(uuid.uuid4())
            try:
                with open(lock_file, 'a+') as fn:
                    fn.write(uid + '\n')
                with open(lock_file, 'r') as fn:
                    temp = fn.read().splitlines()
                    if temp[0] == uid:
                        lock = True
                    else:
                        lock = False
                    del temp
            except:
                # NOTE(review): bare except also hides programming errors;
                # any failure is treated as "someone else owns the cache"
                lock = False
        else:
            lock = False
        if lock:
            # we own the lock: move the temp dirs into their final place
            rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
            shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')
            rm_file_or_dir(col_values_rootdir, ignore_errors=False)
            shutil.move(col_values_rootdir_tmp, col_values_rootdir)
            carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # another process has a lock, we will work with our current files and clean up later
            self._dir_clean_list.append(col_factor_rootdir)
            self._dir_clean_list.append(col_values_rootdir)
    return carray_factor, carray_values
def make_group_index(self, groupby_cols, bool_arr):
    '''Create the per-row group factor that drives the groupby loop.

    Args:
        groupby_cols (list): columns to group over (empty list = one grand total)
        bool_arr: optional boolean row filter, or None

    Returns:
        carray: (carray_factor) per-row group label
        int: (nr_groups) the number of resulting groups
        int: (skip_key) output row to skip (the filtered-out group), or
            nr_groups when nothing has to be skipped
    '''
    factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
    # create unique groups for groupby loop
    if len(factor_list) == 0:
        # no columns to groupby over, so directly aggregate the measure
        # columns to 1 total
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
        carray_values = ['Total']
    elif len(factor_list) == 1:
        # single column groupby, the groupby output column
        # here is 1:1 to the values
        carray_factor = factor_list[0]
        carray_values = values_list[0]
    else:
        # multi column groupby
        # first combine the factorized columns to single values
        if self.group_cache_valid(col_list=groupby_cols):
            # there is a group cache that we can use
            col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
            col_factor_rootdir = col_rootdir + '.factor'
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
            col_values_rootdir = col_rootdir + '.values'
            carray_values = bcolz.carray(rootdir=col_values_rootdir)
        else:
            # create a brand new groupby col combination
            carray_factor, carray_values = \
                self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
    nr_groups = len(carray_values)
    skip_key = None
    if bool_arr is not None:
        # make all non relevant combinations -1 (their own pseudo-group)
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.eval(
            '(factor + 1) * bool - 1',
            user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
        # now check how many unique values there are left
        tmp_rootdir = self.create_tmp_rootdir()
        labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
        carray_factor, values = ctable_ext.factorize(carray_factor, labels)
        # values might contain one value too much (-1) (no direct lookup
        # possible because values is a reversed dict)
        filter_check = \
            [key for key, value in values.items() if value == -1]
        if filter_check:
            skip_key = filter_check[0]
        # the new nr of groups depends on the outcome after filtering
        nr_groups = len(values)
    # using nr_groups as a total length might be one one off due to the skip_key
    # (skipping a row in aggregation)
    # but that is okay normally
    if skip_key is None:
        # if we shouldn't skip a row, set it at the first row after the total number of groups
        skip_key = nr_groups
    return carray_factor, nr_groups, skip_key
def create_tmp_rootdir(self):
    """Create a temporary rootdir registered for later cleanup.

    Returns the new directory path for on-disk tables, or None for
    in-memory tables (which need no temporary storage).
    """
    if not self.rootdir:
        return None
    tmp_rootdir = tempfile.mkdtemp(prefix='bcolz-')
    self._dir_clean_list.append(tmp_rootdir)
    return tmp_rootdir
def clean_tmp_rootdir(self):
    """Delete every registered temporary rootdir and forget about it."""
    # drain the list front-to-back so already-removed dirs are not retried
    while self._dir_clean_list:
        tmp_rootdir = self._dir_clean_list[0]
        rm_file_or_dir(tmp_rootdir)
        self._dir_clean_list.remove(tmp_rootdir)
def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
    '''Create a container for the output table, a dictionary describing its
    columns and a list of tuples describing aggregation operations to perform.

    Args:
        groupby_cols (list): a list of columns to groupby over
        agg_list (list): the aggregation operations (see groupby for more info)
        expectedlen (int): expected length of output table
        rootdir (string): the directory to write the table to

    Returns:
        ctable: A table in the correct format for containing the output of
            the specified aggregation operations.
        dict: (dtype_dict) dictionary describing columns to create
        list: (agg_ops) list of tuples of the form:
            (input_col_name, output_col_name, agg_op)
            input_col_name (string): name of the column to act on
            output_col_name (string): name of the column to output to
            agg_op (string): aggregation operation to perform

    Raises:
        NotImplementedError: for an unknown aggregation operation
    '''
    dtype_dict = {}
    # include all the groupby columns
    for col in groupby_cols:
        dtype_dict[col] = self[col].dtype
    agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
    agg_ops = []
    for agg_info in agg_list:
        if not isinstance(agg_info, list):
            # example: ['m1', 'm2', ...]
            # default operation (sum) and default output column name (same as input)
            output_col_name = agg_info
            input_col_name = agg_info
            agg_op = 'sum'
        else:
            input_col_name = agg_info[0]
            agg_op = agg_info[1]
            if len(agg_info) == 2:
                # example: [['m1', 'sum'], ['m2', 'mean'], ...]
                # default output column name
                output_col_name = input_col_name
            else:
                # example: [['m1', 'sum', 'mnew1'], ['m1', 'mean', 'mnew2'], ...]
                # fully specified
                output_col_name = agg_info[2]
        if agg_op not in agg_ops_list:
            raise NotImplementedError(
                'Unknown Aggregation Type: ' + str(agg_op))
        # choose output column dtype based on aggregation operation and
        # input column dtype
        # TODO: check if the aggregation columns is numeric
        # NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
        # separate operation
        if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
            output_col_dtype = np.dtype(np.int64)
        elif agg_op in ('mean', 'std'):
            output_col_dtype = np.dtype(np.float64)
        else:
            output_col_dtype = self[input_col_name].dtype
        dtype_dict[output_col_name] = output_col_dtype
        # save output
        agg_ops.append((input_col_name, output_col_name, agg_op))
    # create aggregation table; the placeholder column is removed again in
    # aggregate_groups once the real output columns have been added
    # (np.bool was removed in NumPy 1.24+; np.bool_ is the supported name)
    ct_agg = bcolz.ctable(
        np.zeros(expectedlen, [('tmp_col_bquery__', np.bool_)]),
        expectedlen=expectedlen,
        rootdir=rootdir)
    return ct_agg, dtype_dict, agg_ops
def where_terms(self, term_list, cache=False):
    """
    Create a boolean array that is True where all terms in `term_list` hold.

    A terms list has a [(col, operator, value), ..] construction, e.g.
    [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    :param term_list: list/set/tuple of (column, operator, value) triples
    :param cache: if True, write the result to a temporary rootdir that is
        cleaned up at the end of the groupby
    :return: boolean carray over the rows of the table
    :raise ValueError: term_list is not list-like, or an in/nin value list
        is empty or not list-like
    :raise KeyError: unknown column or unknown operator
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")
    col_list = []
    op_list = []
    value_list = []
    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]
        # check values (str() instead of the py2-only unicode())
        if filter_col not in self.cols:
            raise KeyError(str(filter_col) + ' not in table')
        # map the textual operator onto the numeric id ctable_ext expects
        if filter_operator in ['==', 'eq']:
            op_id = 1
        elif filter_operator in ['!=', 'neq']:
            op_id = 2
        elif filter_operator in ['in']:
            op_id = 3
        elif filter_operator in ['nin', 'not in']:
            op_id = 4
        elif filter_operator in ['>']:
            op_id = 5
        elif filter_operator in ['>=']:
            op_id = 6
        elif filter_operator in ['<']:
            op_id = 7
        elif filter_operator in ['<=']:
            op_id = 8
        else:
            raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
        if op_id in [3, 4]:
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")
            # optimize lists of 1 value into a plain (in)equality test
            if len(filter_value) == 1:
                if op_id == 3:
                    op_id = 1
                else:
                    op_id = 2
                filter_value = filter_value[0]
            else:
                filter_value = set(filter_value)
        # prepare input for filter creation
        col_list.append(filter_col)
        op_list.append(op_id)
        value_list.append(filter_value)
    # rootdir
    if cache:
        # nb: this directory is not destroyed until the end of the groupby
        rootdir = self.create_tmp_rootdir()
    else:
        rootdir = None
    # create boolean array and fill it
    # (np.bool was removed in NumPy 1.24+; np.bool_ is the supported name)
    boolarr = bcolz.carray(np.ones(0, dtype=np.bool_), expectedlen=self.len, rootdir=rootdir, mode='w')
    ctable_iter = self[col_list].iter(out_flavor='tuple')
    ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)
    return boolarr
def where_terms_factorization_check(self, term_list):
    """
    Cheaply check, using the cached factorization values, whether the terms
    in `term_list` can match any row at all.

    A terms list has a [(col, operator, value), ..] construction, e.g.
    [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    :param term_list: list/set/tuple of (column, operator, value) triples
    :return: False when a term provably matches nothing; True otherwise
        (including when no factorization cache is available to check)
    :raise ValueError: term_list is not list-like, or an in/nin value list
        is empty or not list-like
    :raise KeyError: unknown column or unknown operator
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")
    valid = True
    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]
        # check values (str() instead of the py2-only unicode())
        if filter_col not in self.cols:
            raise KeyError(str(filter_col) + ' not in table')
        col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
        if not os.path.exists(col_values_rootdir):
            # no factorization available; we cannot prove anything
            break
        col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        col_values = set(col_carray)
        if filter_operator in ['in', 'not in', 'nin']:
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")
            # optimize lists of 1 value into a plain (in)equality test
            if len(filter_value) == 1:
                filter_value = filter_value[0]
                if filter_operator == 'in':
                    filter_operator = '=='
                else:
                    filter_operator = '!='
            else:
                filter_value = set(filter_value)
        # NB: the predicate goes in the any() expression itself; the old
        # `any(val for val in ... if cond)` form wrongly treated falsy
        # matching values (0, '') as "no match"
        if filter_operator in ['==', 'eq']:
            valid = filter_value in col_values
        elif filter_operator in ['!=', 'neq']:
            valid = any(val != filter_value for val in col_values)
        elif filter_operator in ['in']:
            valid = any(val in col_values for val in filter_value)
        elif filter_operator in ['nin', 'not in']:
            valid = any(val not in filter_value for val in col_values)
        elif filter_operator in ['>']:
            valid = any(val > filter_value for val in col_values)
        elif filter_operator in ['>=']:
            valid = any(val >= filter_value for val in col_values)
        elif filter_operator in ['<']:
            valid = any(val < filter_value for val in col_values)
        elif filter_operator in ['<=']:
            # was `val >= filter_value` — copy-paste bug from the '>=' branch
            valid = any(val <= filter_value for val in col_values)
        else:
            raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
        # if one of the filters is blocking, we can stop
        if not valid:
            break
    return valid
def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
                            _max_len_subgroup=1000):
    """Expand `bool_arr` so that every subgroup of `basket_col` containing
    a selected row becomes fully selected.

    Parameters
    ----------
    basket_col : name of the column defining the (pre-sorted) subgroups; required
    bool_arr : boolean row filter to expand, or None (returns None)
    _max_len_subgroup : internal upper bound on subgroup length

    Returns
    -------
    The expanded boolean carray, or None when no filter was given.
    """
    assert basket_col is not None
    if bool_arr is None:
        return None
    # an on-disk filter gets an on-disk (temporary) result
    rootdir = None
    if self.auto_cache and bool_arr.rootdir is not None:
        rootdir = self.create_tmp_rootdir()
    return ctable_ext.is_in_ordered_subgroups(
        self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
        _max_len_subgroup=_max_len_subgroup)
# --- reconstructed from garbled dataset-metadata residue (visualfabriq/bquery, bquery/ctable.py) ---
def cache_valid(self, col):
    """
    Checks whether the column has a factorization that exists and is not older than the source
    :param col:
    :return:
    """
    cache_valid = False
    if self.rootdir:
        col_org_file_check = self[col].rootdir + '/__attrs__'
        col_values_file_check = self[col].rootdir + '.values/__attrs__'
        cache_valid = os.path.exists(col_org_file_check) and os.path.exists(col_values_file_check)
    return cache_valid


class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
    """
    bquery extension of a bcolz ctable.

    `auto_cache` (passed via kwargs) controls automatic creation of on-disk
    factorization caches:
    - explicitly requested with auto_cache=True (on-disk tables only)
    - implicitly enabled for writable on-disk tables when not specified
    - disabled otherwise (in-memory tables, read-only mode, auto_cache=False)
    """
    super(ctable, self).__init__(*args, **kwargs)
    # check autocaching
    if self.rootdir and kwargs.get('auto_cache') is True:
        # explicit auto_cache
        self.auto_cache = True
    elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
        # implicit auto_cache
        self.auto_cache = True
    else:
        self.auto_cache = False
    # NOTE: removed a leftover `self.auto_cache = True  # debug` override
    # that forced caching on for every table and made the branch above dead
    # temporary rootdirs created during groupby; cleaned by clean_tmp_rootdir()
    self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
group_name = '_'.join(sorted(col_list))
return group_name
def group_cache_valid(self, col_list):
    """
    Whether an on-disk group factorization cache exists for this column
    combination and all of its source columns are still present on disk.

    :param col_list: list of column names forming the group
    :return: bool
    """
    if not self.rootdir:
        # in-memory tables never have an on-disk cache
        return False
    group_attrs = os.path.join(
        self.rootdir, self.create_group_base_name(col_list)) + '.values/__attrs__'
    if not os.path.exists(group_attrs):
        return False
    # every source column must still exist on disk as well
    return all(os.path.exists(self[col].rootdir + '/__attrs__') for col in col_list)
def cache_factor(self, col_list, refresh=False):
    """
    Create on-disk factorization caches for the given columns.

    For each column two sibling carrays are written next to the column's
    rootdir: `<col>.factor` (per-row labels, as long as the table) and
    `<col>.values` (the unique values, one per label).

    Existing todos here are: these should be hidden helper carrays
    As in: not normal columns that you would normally see as a user

    :param col_list: a column name or list of column names to cache
    :param refresh: when True, wipe ALL existing factor/values caches of
        this table first and rebuild the requested ones
    :return: None
    :raise TypeError: for in-memory (rootdir-less) tables
    """
    if not self.rootdir:
        raise TypeError('Only out-of-core ctables can have '
                        'factorization caching at the moment')
    if not isinstance(col_list, list):
        col_list = [col_list]
    if refresh:
        # remove every cache dir, not just those of col_list
        kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
        for kill_dir in kill_list:
            rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
    for col in col_list:
        # create cache if needed
        if refresh or not self.cache_valid(col):
            # todo: also add locking mechanism here
            # build in temp dirs first, then move into place below
            col_rootdir = self[col].rootdir
            col_factor_rootdir = col_rootdir + '.factor'
            col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
            col_values_rootdir = col_rootdir + '.values'
            col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
            # create factor
            carray_factor = \
                bcolz.carray([], dtype='int64', expectedlen=self.size,
                             rootdir=col_factor_rootdir_tmp, mode='w')
            _, values = \
                ctable_ext.factorize(self[col], labels=carray_factor)
            carray_factor.flush()
            rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
            shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
            # create values
            carray_values = \
                bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
                             rootdir=col_values_rootdir_tmp, mode='w')
            carray_values.flush()
            rm_file_or_dir(col_values_rootdir, ignore_errors=True)
            shutil.move(col_values_rootdir_tmp, col_values_rootdir)
def unique(self, col_or_col_list):
    """
    Return a list of unique values of a column, or a list of such lists
    when a list of columns is given.

    :param col_or_col_list: a column name or a list of column names
    :return: list of unique values (or a list of such lists)
    """
    if isinstance(col_or_col_list, list):
        col_is_list = True
        col_list = col_or_col_list
    else:
        col_is_list = False
        col_list = [col_or_col_list]
    output = []
    for col in col_list:
        if self.auto_cache or self.cache_valid(col):
            # create factorization cache
            if not self.cache_valid(col):
                self.cache_factor([col])
            # retrieve values from existing disk-based factorization
            col_values_rootdir = self[col].rootdir + '.values'
            carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
            values = list(carray_values)
        else:
            # factorize on-the-fly
            _, values = ctable_ext.factorize(self[col])
            # materialize as a list for a consistent return type with the
            # cached branch (dict.values() is a non-indexable view on py3)
            values = list(values.values())
        output.append(values)
    if not col_is_list:
        output = output[0]
    return output
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
                     carray_factor, groupby_cols, agg_ops,
                     dtype_dict, bool_arr=None):
    '''Perform aggregation and place the result in the given ctable.

    Args:
        ct_agg (ctable): the table to hold the aggregation
        nr_groups (int): the number of groups (number of rows in output table)
        skip_key (int): index of the output row to remove from results (used for filtering)
        carray_factor: the carray for each row in the table a reference to the the unique group index
        groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
        agg_ops (list): list of tuples of the form: (input_col, output_col, agg_op)
            input_col (string): name of the column to act on
            output_col (string): name of the column to output to
            agg_op (string): aggregation operation to perform
        dtype_dict (dict): maps output column name to its numpy dtype
        bool_arr: a boolean array containing the filter; when given, the
            skip_key row (the filtered-out group) is dropped from the output

    Raises:
        KeyError: for an unknown aggregation operation
    '''
    # this creates the groupby columns
    for col in groupby_cols:
        result_array = ctable_ext.groupby_value(self[col], carray_factor,
                                                nr_groups, skip_key)
        if bool_arr is not None:
            # drop the pseudo-group holding the filtered-out rows
            result_array = np.delete(result_array, skip_key)
        ct_agg.addcol(result_array, name=col)
        del result_array
    # this creates the aggregation columns
    for input_col_name, output_col_name, agg_op in agg_ops:
        input_col = self[input_col_name]
        output_col_dtype = dtype_dict[output_col_name]
        # reusable chunk buffer plus one accumulator slot per group
        input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
        output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
        if agg_op == 'sum':
            ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
                                     skip_key, input_buffer, output_buffer)
        elif agg_op == 'mean':
            ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
                                      skip_key, input_buffer, output_buffer)
        elif agg_op == 'std':
            ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
                                     skip_key, input_buffer, output_buffer)
        elif agg_op == 'count':
            ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
                                       skip_key, input_buffer, output_buffer)
        elif agg_op == 'count_distinct':
            ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
                                                skip_key, input_buffer, output_buffer)
        elif agg_op == 'sorted_count_distinct':
            ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
                                                       skip_key, input_buffer, output_buffer)
        else:
            raise KeyError('Unknown aggregation operation ' + str(agg_op))
        if bool_arr is not None:
            output_buffer = np.delete(output_buffer, skip_key)
        ct_agg.addcol(output_buffer, name=output_col_name)
        del output_buffer
    # drop the placeholder column created by create_agg_ctable
    ct_agg.delcol('tmp_col_bquery__')
def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
    """
    Aggregate the ctable and return a new ctable with one row per group.

    groupby_cols: a list of columns to groupby over
    agg_list: the aggregation operations, which can be:
     - a list of column names (output has same name and sum is performed)
       ['m1', 'm2', ...]
     - a list of lists, each list contains input column name and operation
       [['m1', 'sum'], ['m2', 'mean'], ...]
     - a list of lists, each list contains input column name, operation and
       output column name
       [['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]

    Currently supported aggregation operations are:
    - 'sum'
    - 'count'
    - 'count_distinct'
    - 'sorted_count_distinct', data should have been previously presorted
    - 'mean', arithmetic mean (average)
    - 'std', standard deviation
    ('count_na' was listed here historically, but create_agg_ctable does
    not implement it and will raise NotImplementedError for it)

    bool_arr: optional boolean filter over the input rows
    rootdir: the aggregation ctable rootdir (None = in-memory result)
    """
    carray_factor, nr_groups, skip_key = \
        self.make_group_index(groupby_cols, bool_arr)
    # check if the bool_arr actually filters
    if bool_arr is not None and np.all(bool_arr):
        bool_arr = None
    if bool_arr is None:
        expectedlen = nr_groups
    else:
        # one group is the filtered-out pseudo-group; it is dropped
        expectedlen = nr_groups - 1
    ct_agg, dtype_dict, agg_ops = \
        self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
    # perform aggregation
    self.aggregate_groups(ct_agg, nr_groups, skip_key,
                          carray_factor, groupby_cols,
                          agg_ops, dtype_dict,
                          bool_arr=bool_arr)
    # clean up everything that was used
    self.clean_tmp_rootdir()
    return ct_agg
# groupby helper functions
def factorize_groupby_cols(self, groupby_cols):
    """
    Factorize all columns that are used in the groupby.

    Cached factor/values carrays are used when available; when auto_cache
    is enabled, a missing cache is created first. Otherwise the column is
    factorized on-the-fly in memory.

    :param groupby_cols: list of column names
    :return: (factor_list, values_list) — one factor carray and one
        unique-values carray per column
    """
    # first check if the factorized arrays already exist
    # unless we need to refresh the cache
    factor_list = []
    values_list = []
    # factorize the groupby columns
    for col in groupby_cols:
        if self.auto_cache or self.cache_valid(col):
            # create factorization cache if needed
            if not self.cache_valid(col):
                self.cache_factor([col])
            col_rootdir = self[col].rootdir
            col_factor_rootdir = col_rootdir + '.factor'
            col_values_rootdir = col_rootdir + '.values'
            col_carray_factor = \
                bcolz.carray(rootdir=col_factor_rootdir, mode='r')
            col_carray_values = \
                bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # in-memory factorization
            col_carray_factor, values = ctable_ext.factorize(self[col])
            col_carray_values = \
                bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype))
        factor_list.append(col_carray_factor)
        values_list.append(col_carray_values)
    return factor_list, values_list
@staticmethod
def _int_array_hash(input_list):
    """
    Calculate a combined hash value over multiple equal-length integer
    arrays (one hash per row); not used at the moment.

    The mixing constants mirror CPython's classic tuple-hash scheme.

    Parameters
    ----------
    input_list : list of 1-d integer arrays, all of the same length

    Returns
    -------
    bcolz.carray of int64 hash values, one per row
    """
    list_len = len(input_list)
    arr_len = len(input_list[0])
    # np.long was removed in NumPy 1.24 (and was never a py3 builtin);
    # use the explicit 64-bit integer type instead
    mult_arr = np.full(arr_len, 1000003, dtype=np.int64)
    value_arr = np.full(arr_len, 0x345678, dtype=np.int64)
    for i, current_arr in enumerate(input_list):
        index = list_len - i - 1
        value_arr ^= current_arr
        value_arr *= mult_arr
        mult_arr += (82520 + index + index)
    value_arr += 97531
    result_carray = bcolz.carray(value_arr)
    # release the intermediate numpy buffer before returning
    del value_arr
    return result_carray
def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
    """
    Create a unique, factorized column out of several individual columns.

    The per-column factors in `factor_list` are combined row-wise into one
    group index, which is then factorized again so every distinct column
    combination gets a single compact label.

    Parameters
    ----------
    factor_list : list of factor carrays, one per groupby column
    groupby_cols : list of the column names that were factorized
    cache : bool — when True, persist the result under self.rootdir
        (guarded by a lock file so concurrent processes don't clash)

    Returns
    -------
    (carray_factor, carray_values) : per-row combined factor and the
        unique combination values
    """
    if not self.rootdir:
        # in-memory scenario
        input_rootdir = None
        col_rootdir = None
        col_factor_rootdir = None
        col_values_rootdir = None
        col_factor_rootdir_tmp = None
        col_values_rootdir_tmp = None
    else:
        # temporary working dirs; moved into place later when caching
        input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
        col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
        col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
    # create combination of groupby columns
    group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
    factor_table = bcolz.ctable(factor_list, names=groupby_cols)
    ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
    ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)
    # now factorize the combined index
    carray_factor = \
        bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
    carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
    carray_factor.flush()
    carray_values = \
        bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
    carray_values.flush()
    del group_array
    if cache:
        # clean up the temporary combination input file
        rm_file_or_dir(input_rootdir, ignore_errors=True)
    if cache:
        # official end destination of the cached factor/values
        col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
        col_factor_rootdir = col_rootdir + '.factor'
        col_values_rootdir = col_rootdir + '.values'
        lock_file = col_rootdir + '.lock'
        # best-effort advisory lock: the process whose uuid ends up as the
        # first line of the lock file owns the cache (only works for linux)
        if not os.path.exists(lock_file):
            uid = str(uuid.uuid4())
            try:
                with open(lock_file, 'a+') as fn:
                    fn.write(uid + '\n')
                with open(lock_file, 'r') as fn:
                    temp = fn.read().splitlines()
                    if temp[0] == uid:
                        lock = True
                    else:
                        lock = False
                    del temp
            except:
                # NOTE(review): bare except also hides programming errors;
                # any failure is treated as "someone else owns the cache"
                lock = False
        else:
            lock = False
        if lock:
            # we own the lock: move the temp dirs into their final place
            rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
            shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')
            rm_file_or_dir(col_values_rootdir, ignore_errors=False)
            shutil.move(col_values_rootdir_tmp, col_values_rootdir)
            carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # another process has a lock, we will work with our current files and clean up later
            self._dir_clean_list.append(col_factor_rootdir)
            self._dir_clean_list.append(col_values_rootdir)
    return carray_factor, carray_values
def make_group_index(self, groupby_cols, bool_arr):
    '''Create the per-row group factor that drives the groupby loop.

    Args:
        groupby_cols (list): columns to group over (empty list = one grand total)
        bool_arr: optional boolean row filter, or None

    Returns:
        carray: (carray_factor) per-row group label
        int: (nr_groups) the number of resulting groups
        int: (skip_key) output row to skip (the filtered-out group), or
            nr_groups when nothing has to be skipped
    '''
    factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
    # create unique groups for groupby loop
    if len(factor_list) == 0:
        # no columns to groupby over, so directly aggregate the measure
        # columns to 1 total
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
        carray_values = ['Total']
    elif len(factor_list) == 1:
        # single column groupby, the groupby output column
        # here is 1:1 to the values
        carray_factor = factor_list[0]
        carray_values = values_list[0]
    else:
        # multi column groupby
        # first combine the factorized columns to single values
        if self.group_cache_valid(col_list=groupby_cols):
            # there is a group cache that we can use
            col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
            col_factor_rootdir = col_rootdir + '.factor'
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
            col_values_rootdir = col_rootdir + '.values'
            carray_values = bcolz.carray(rootdir=col_values_rootdir)
        else:
            # create a brand new groupby col combination
            carray_factor, carray_values = \
                self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
    nr_groups = len(carray_values)
    skip_key = None
    if bool_arr is not None:
        # make all non relevant combinations -1 (their own pseudo-group)
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.eval(
            '(factor + 1) * bool - 1',
            user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
        # now check how many unique values there are left
        tmp_rootdir = self.create_tmp_rootdir()
        labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
        carray_factor, values = ctable_ext.factorize(carray_factor, labels)
        # values might contain one value too much (-1) (no direct lookup
        # possible because values is a reversed dict)
        filter_check = \
            [key for key, value in values.items() if value == -1]
        if filter_check:
            skip_key = filter_check[0]
        # the new nr of groups depends on the outcome after filtering
        nr_groups = len(values)
    # using nr_groups as a total length might be one one off due to the skip_key
    # (skipping a row in aggregation)
    # but that is okay normally
    if skip_key is None:
        # if we shouldn't skip a row, set it at the first row after the total number of groups
        skip_key = nr_groups
    return carray_factor, nr_groups, skip_key
def create_tmp_rootdir(self):
    """Create a temporary rootdir registered for later cleanup.

    Returns the new directory path for on-disk tables, or None for
    in-memory tables (which need no temporary storage).
    """
    if not self.rootdir:
        return None
    tmp_rootdir = tempfile.mkdtemp(prefix='bcolz-')
    self._dir_clean_list.append(tmp_rootdir)
    return tmp_rootdir
def clean_tmp_rootdir(self):
    """Delete every registered temporary rootdir and forget about it."""
    # drain the list front-to-back so already-removed dirs are not retried
    while self._dir_clean_list:
        tmp_rootdir = self._dir_clean_list[0]
        rm_file_or_dir(tmp_rootdir)
        self._dir_clean_list.remove(tmp_rootdir)
def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
    '''Create a container for the output table, a dictionary describing its
    columns and a list of tuples describing aggregation operations to perform.

    Args:
        groupby_cols (list): a list of columns to groupby over
        agg_list (list): the aggregation operations (see groupby for more info)
        expectedlen (int): expected length of output table
        rootdir (string): the directory to write the table to

    Returns:
        ctable: A table in the correct format for containing the output of
            the specified aggregation operations.
        dict: (dtype_dict) dictionary describing columns to create
        list: (agg_ops) list of tuples of the form:
            (input_col_name, output_col_name, agg_op)
            input_col_name (string): name of the column to act on
            output_col_name (string): name of the column to output to
            agg_op (string): aggregation operation to perform

    Raises:
        NotImplementedError: for an unknown aggregation operation
    '''
    dtype_dict = {}
    # include all the groupby columns
    for col in groupby_cols:
        dtype_dict[col] = self[col].dtype
    agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
    agg_ops = []
    for agg_info in agg_list:
        if not isinstance(agg_info, list):
            # example: ['m1', 'm2', ...]
            # default operation (sum) and default output column name (same as input)
            output_col_name = agg_info
            input_col_name = agg_info
            agg_op = 'sum'
        else:
            input_col_name = agg_info[0]
            agg_op = agg_info[1]
            if len(agg_info) == 2:
                # example: [['m1', 'sum'], ['m2', 'mean'], ...]
                # default output column name
                output_col_name = input_col_name
            else:
                # example: [['m1', 'sum', 'mnew1'], ['m1', 'mean', 'mnew2'], ...]
                # fully specified
                output_col_name = agg_info[2]
        if agg_op not in agg_ops_list:
            raise NotImplementedError(
                'Unknown Aggregation Type: ' + str(agg_op))
        # choose output column dtype based on aggregation operation and
        # input column dtype
        # TODO: check if the aggregation columns is numeric
        # NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
        # separate operation
        if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
            output_col_dtype = np.dtype(np.int64)
        elif agg_op in ('mean', 'std'):
            output_col_dtype = np.dtype(np.float64)
        else:
            output_col_dtype = self[input_col_name].dtype
        dtype_dict[output_col_name] = output_col_dtype
        # save output
        agg_ops.append((input_col_name, output_col_name, agg_op))
    # create aggregation table; the placeholder column is removed again in
    # aggregate_groups once the real output columns have been added
    # (np.bool was removed in NumPy 1.24+; np.bool_ is the supported name)
    ct_agg = bcolz.ctable(
        np.zeros(expectedlen, [('tmp_col_bquery__', np.bool_)]),
        expectedlen=expectedlen,
        rootdir=rootdir)
    return ct_agg, dtype_dict, agg_ops
    def where_terms(self, term_list, cache=False):
        """
        Create a bcolz boolean carray that is True for the rows where all
        terms in `term_list` hold.

        A terms list has a [(col, operator, value), ..] construction.
        Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

        :param term_list: list/set/tuple of (column, operator, value) terms;
            supported operators: ==/eq, !=/neq, in, nin/'not in', >, >=, <, <=
        :param cache: if True, back the boolean carray with a temporary
            on-disk rootdir (not destroyed until the end of the groupby)
        :return: bcolz boolean carray with one entry per table row
        :raise ValueError: on a malformed term list
        :raise KeyError: on an unknown column or operator
        """
        if type(term_list) not in [list, set, tuple]:
            raise ValueError("Only term lists are supported")
        col_list = []
        op_list = []
        value_list = []
        for term in term_list:
            # unpack one (column, operator, value) term
            filter_col = term[0]
            filter_operator = term[1].lower().strip(' ')
            filter_value = term[2]
            # check values
            if filter_col not in self.cols:
                # NOTE(review): `unicode` is Python 2 only; this raise path breaks on Python 3
                raise KeyError(unicode(filter_col) + ' not in table')
            # translate the textual operator into the numeric op id expected by
            # the C extension (ctable_ext.apply_where_terms)
            if filter_operator in ['==', 'eq']:
                op_id = 1
            elif filter_operator in ['!=', 'neq']:
                op_id = 2
            elif filter_operator in ['in']:
                op_id = 3
            elif filter_operator in ['nin', 'not in']:
                op_id = 4
            elif filter_operator in ['>']:
                op_id = 5
            elif filter_operator in ['>=']:
                op_id = 6
            elif filter_operator in ['<']:
                op_id = 7
            elif filter_operator in ['<=']:
                op_id = 8
            else:
                raise KeyError(unicode(filter_operator) + ' is not an accepted operator for filtering')
            if op_id in [3, 4]:
                # membership operators require a non-empty collection
                if type(filter_value) not in [list, set, tuple]:
                    raise ValueError("In selections need lists, sets or tuples")
                if len(filter_value) < 1:
                    raise ValueError("A value list needs to have values")
                # optimize lists of 1 value: turn 'in' into '==' and 'nin' into '!='
                if len(filter_value) == 1:
                    if op_id == 3:
                        op_id = 1
                    else:
                        op_id = 2
                    filter_value = filter_value[0]
                else:
                    filter_value = set(filter_value)
            # prepare input for filter creation
            col_list.append(filter_col)
            op_list.append(op_id)
            value_list.append(filter_value)
        # rootdir
        if cache:
            # nb: this directory is not destroyed until the end of the groupby
            rootdir = self.create_tmp_rootdir()
        else:
            rootdir = None
        # create boolean array and fill it
        # NOTE(review): np.bool was removed in NumPy 1.24+; np.bool_ would be needed there
        boolarr = bcolz.carray(np.ones(0, dtype=np.bool), expectedlen=self.len, rootdir=rootdir, mode='w')
        ctable_iter = self[col_list].iter(out_flavor='tuple')
        ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)
        return boolarr
def where_terms_factorization_check(self, term_list):
"""
check for where terms if they are applicable
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError:
"""
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
valid = True
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
if not os.path.exists(col_values_rootdir):
# no factorization available
break
col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
col_values = set(col_carray)
if filter_operator in ['in', 'not in', 'nin']:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
filter_value = filter_value[0]
if filter_operator == 'in':
filter_operator = '=='
else:
filter_operator = '!='
else:
filter_value = set(filter_value)
if filter_operator in ['==', 'eq']:
valid = filter_value in col_values
elif filter_operator in ['!=', 'neq']:
valid = any(val for val in col_values if val != filter_value)
elif filter_operator in ['in']:
valid = any(val for val in filter_value if val in col_values)
elif filter_operator in ['nin', 'not in']:
valid = any(val for val in col_values if val not in filter_value)
elif filter_operator in ['>']:
valid = any(val for val in col_values if val > filter_value)
elif filter_operator in ['>=']:
valid = any(val for val in col_values if val >= filter_value)
elif filter_operator in ['<']:
valid = any(val for val in col_values if val < filter_value)
elif filter_operator in ['<=']:
valid = any(val for val in col_values if val >= filter_value)
else:
raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
# if one of the filters is blocking, we can stop
if not valid:
break
return valid
def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
_max_len_subgroup=1000):
"""
Expands the filter using a specified column
Parameters
----------
basket_col
bool_arr
_max_len_subgroup
Returns
-------
"""
assert basket_col is not None
if bool_arr is None:
return None
if self.auto_cache and bool_arr.rootdir is not None:
rootdir = self.create_tmp_rootdir()
else:
rootdir = None
return \
ctable_ext.is_in_ordered_subgroups(
self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
_max_len_subgroup=_max_len_subgroup)
|
visualfabriq/bquery | bquery/ctable.py | ctable.group_cache_valid | python | def group_cache_valid(self, col_list):
cache_valid = False
if self.rootdir:
col_values_file_check = os.path.join(self.rootdir, self.create_group_base_name(col_list)) + \
'.values/__attrs__'
exists_group_index = os.path.exists(col_values_file_check)
missing_col_check = [1 for col in col_list if not os.path.exists(self[col].rootdir + '/__attrs__')]
cache_valid = (exists_group_index and not missing_col_check)
return cache_valid | Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return: | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L76-L93 | null | class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
super(ctable, self).__init__(*args, **kwargs)
# check autocaching
if self.rootdir and kwargs.get('auto_cache') is True:
# explicit auto_cache
self.auto_cache = True
elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
# implicit auto_cache
self.auto_cache = True
else:
self.auto_cache = False
self.auto_cache = True # debug
self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
group_name = '_'.join(sorted(col_list))
return group_name
def cache_valid(self, col):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_org_file_check = self[col].rootdir + '/__attrs__'
col_values_file_check = self[col].rootdir + '.values/__attrs__'
cache_valid = os.path.exists(col_org_file_check) and os.path.exists(col_values_file_check)
return cache_valid
    def cache_factor(self, col_list, refresh=False):
        """
        Create (or refresh) the on-disk factorization cache for the given columns.

        For each column two companion carrays are written next to it:
        '<col>.factor' (label index, same length as the column) and
        '<col>.values' (the unique values, one per label).

        Existing todos here are: these should be hidden helper carrays
        As in: not normal columns that you would normally see as a user

        :param col_list: column name or list of column names to cache
        :param refresh: if True, wipe all existing .factor/.values caches first
        :raise TypeError: for in-memory (rootdir-less) tables
        """
        if not self.rootdir:
            raise TypeError('Only out-of-core ctables can have '
                            'factorization caching at the moment')
        if not isinstance(col_list, list):
            col_list = [col_list]
        if refresh:
            # wipe every existing factorization cache under the table rootdir
            kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
            for kill_dir in kill_list:
                rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
        for col in col_list:
            # create cache if needed
            if refresh or not self.cache_valid(col):
                # todo: also add locking mechanism here
                # build in temp dirs first, then swap into place below
                col_rootdir = self[col].rootdir
                col_factor_rootdir = col_rootdir + '.factor'
                col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
                col_values_rootdir = col_rootdir + '.values'
                col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
                # create factor
                carray_factor = \
                    bcolz.carray([], dtype='int64', expectedlen=self.size,
                                 rootdir=col_factor_rootdir_tmp, mode='w')
                _, values = \
                    ctable_ext.factorize(self[col], labels=carray_factor)
                carray_factor.flush()
                # replace any stale cache with the freshly built one
                rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
                shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
                # create values
                carray_values = \
                    bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
                                 rootdir=col_values_rootdir_tmp, mode='w')
                carray_values.flush()
                rm_file_or_dir(col_values_rootdir, ignore_errors=True)
                shutil.move(col_values_rootdir_tmp, col_values_rootdir)
def unique(self, col_or_col_list):
"""
Return a list of unique values of a column or a list of lists of column list
:param col_or_col_list: a column or a list of columns
:return:
"""
if isinstance(col_or_col_list, list):
col_is_list = True
col_list = col_or_col_list
else:
col_is_list = False
col_list = [col_or_col_list]
output = []
for col in col_list:
if self.auto_cache or self.cache_valid(col):
# create factorization cache
if not self.cache_valid(col):
self.cache_factor([col])
# retrieve values from existing disk-based factorization
col_values_rootdir = self[col].rootdir + '.values'
carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
values = list(carray_values)
else:
# factorize on-the-fly
_, values = ctable_ext.factorize(self[col])
values = values.values()
output.append(values)
if not col_is_list:
output = output[0]
return output
    def aggregate_groups(self, ct_agg, nr_groups, skip_key,
                         carray_factor, groupby_cols, agg_ops,
                         dtype_dict, bool_arr=None):
        '''Perform aggregation and place the result in the given ctable.

        Args:
            ct_agg (ctable): the table to hold the aggregation
            nr_groups (int): the number of groups (number of rows in output table)
            skip_key (int): index of the output row to remove from results (used for filtering)
            carray_factor: carray mapping each input row to its unique group index
            groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
            agg_ops (list): list of (input_col_name, output_col_name, agg_op) tuples
            dtype_dict (dict): maps output column name to its numpy dtype
            bool_arr: a boolean array containing the filter, or None
        '''
        # this creates the groupby columns
        for col in groupby_cols:
            result_array = ctable_ext.groupby_value(self[col], carray_factor,
                                                    nr_groups, skip_key)
            if bool_arr is not None:
                # drop the synthetic 'filtered out' group from the output
                result_array = np.delete(result_array, skip_key)
            ct_agg.addcol(result_array, name=col)
            del result_array
        # this creates the aggregation columns
        for input_col_name, output_col_name, agg_op in agg_ops:
            input_col = self[input_col_name]
            output_col_dtype = dtype_dict[output_col_name]
            # chunk-sized scratch buffer + one output slot per group
            input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
            output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
            # dispatch to the matching C-level aggregation kernel
            if agg_op == 'sum':
                ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
                                         skip_key, input_buffer, output_buffer)
            elif agg_op == 'mean':
                ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
                                          skip_key, input_buffer, output_buffer)
            elif agg_op == 'std':
                ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
                                         skip_key, input_buffer, output_buffer)
            elif agg_op == 'count':
                ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
                                           skip_key, input_buffer, output_buffer)
            elif agg_op == 'count_distinct':
                ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
                                                    skip_key, input_buffer, output_buffer)
            elif agg_op == 'sorted_count_distinct':
                ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
                                                           skip_key, input_buffer, output_buffer)
            else:
                raise KeyError('Unknown aggregation operation ' + str(agg_op))
            if bool_arr is not None:
                output_buffer = np.delete(output_buffer, skip_key)
            ct_agg.addcol(output_buffer, name=output_col_name)
            del output_buffer
        # remove the placeholder column that pre-sized the output table
        ct_agg.delcol('tmp_col_bquery__')
    def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
        """
        Aggregate the ctable.

        groupby_cols: a list of columns to groupby over
        agg_list: the aggregation operations, which can be:
         - a list of column names (output has same name and sum is performed)
           ['m1', 'm2', ...]
         - a list of lists, each list contains input column name and operation
           [['m1', 'sum'], ['m2', 'mean'], ...]
         - a list of lists, each list contains input column name, operation and
           output column name
           [['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]

        Currently supported aggregation operations are:
        - 'sum'
        - 'count'
        - 'count_distinct'
        - 'sorted_count_distinct', data should have been
          previously presorted
        - 'mean', arithmetic mean (average)
        - 'std', standard deviation

        bool_arr: optional boolean filter over the input rows
        rootdir: the aggregation ctable rootdir
        """
        # map every row onto a group index; skip_key marks the filtered-out group
        carray_factor, nr_groups, skip_key = \
            self.make_group_index(groupby_cols, bool_arr)
        # check if the bool_arr actually filters
        if bool_arr is not None and np.all(bool_arr):
            bool_arr = None
        if bool_arr is None:
            expectedlen = nr_groups
        else:
            # one group (the filtered-out rows) is dropped from the output
            expectedlen = nr_groups - 1
        ct_agg, dtype_dict, agg_ops = \
            self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
        # perform aggregation
        self.aggregate_groups(ct_agg, nr_groups, skip_key,
                              carray_factor, groupby_cols,
                              agg_ops, dtype_dict,
                              bool_arr=bool_arr)
        # clean up everything that was used
        self.clean_tmp_rootdir()
        return ct_agg
# groupby helper functions
    def factorize_groupby_cols(self, groupby_cols):
        """
        Factorize all columns used in the groupby, reusing cached carrays when
        available and creating them when auto_cache is enabled.

        :param groupby_cols: list of column names to factorize
        :return: (factor_list, values_list) — per column, the label-index
            carray and the unique-values carray
        """
        # first check if the factorized arrays already exist
        # unless we need to refresh the cache
        factor_list = []
        values_list = []
        # factorize the groupby columns
        for col in groupby_cols:
            if self.auto_cache or self.cache_valid(col):
                # create factorization cache if needed
                if not self.cache_valid(col):
                    self.cache_factor([col])
                # open the cached factor/values companions read-only
                col_rootdir = self[col].rootdir
                col_factor_rootdir = col_rootdir + '.factor'
                col_values_rootdir = col_rootdir + '.values'
                col_carray_factor = \
                    bcolz.carray(rootdir=col_factor_rootdir, mode='r')
                col_carray_values = \
                    bcolz.carray(rootdir=col_values_rootdir, mode='r')
            else:
                # no cache available: factorize in memory on-the-fly
                col_carray_factor, values = ctable_ext.factorize(self[col])
                col_carray_values = \
                    bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype))
            factor_list.append(col_carray_factor)
            values_list.append(col_carray_values)
        return factor_list, values_list
    @staticmethod
    def _int_array_hash(input_list):
        """
        Calculate a combined, vectorized hash value per row over multiple
        integer arrays; not used at the moment.

        :param input_list: list of equal-length integer numpy arrays
        :return: bcolz carray with one hash value per row
        """
        # NOTE(review): np.long was removed in NumPy 1.24+ — this would need
        # np.int_ (or np.int64) on modern NumPy; confirm target version.
        list_len = len(input_list)
        arr_len = len(input_list[0])
        mult_arr = np.full(arr_len, 1000003, dtype=np.long)
        value_arr = np.full(arr_len, 0x345678, dtype=np.long)
        for i, current_arr in enumerate(input_list):
            index = list_len - i - 1
            # xor-multiply mixing step, applied to all rows at once
            value_arr ^= current_arr
            value_arr *= mult_arr
            mult_arr += (82520 + index + index)
        value_arr += 97531
        result_carray = bcolz.carray(value_arr)
        del value_arr
        return result_carray
    def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
        """
        Create a unique, factorized column out of several individual columns.

        Parameters
        ----------
        factor_list : list of factorized carrays, one per groupby column
        groupby_cols : list of the corresponding column names
        cache : if True, persist the combined factorization next to the table

        Returns
        -------
        (carray_factor, carray_values) : per-row group index and unique values
        """
        if not self.rootdir:
            # in-memory scenario
            input_rootdir = None
            col_rootdir = None
            col_factor_rootdir = None
            col_values_rootdir = None
            col_factor_rootdir_tmp = None
            col_values_rootdir_tmp = None
        else:
            # temporary build locations; swapped into place at the end if cached
            input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
            col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
            col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
        # create combination of groupby columns
        group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
        factor_table = bcolz.ctable(factor_list, names=groupby_cols)
        ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
        ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)
        # now factorize the results
        carray_factor = \
            bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
        carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
        carray_factor.flush()
        carray_values = \
            bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
        carray_values.flush()
        del group_array
        if cache:
            # clean up the temporary file
            rm_file_or_dir(input_rootdir, ignore_errors=True)
        if cache:
            # official end destination
            col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
            col_factor_rootdir = col_rootdir + '.factor'
            col_values_rootdir = col_rootdir + '.values'
            lock_file = col_rootdir + '.lock'
            # only works for linux
            # NOTE(review): append-then-read lock file scheme is advisory and
            # racy — the first writer of its uuid wins; losers keep their tmp dirs
            if not os.path.exists(lock_file):
                uid = str(uuid.uuid4())
                try:
                    with open(lock_file, 'a+') as fn:
                        fn.write(uid + '\n')
                    with open(lock_file, 'r') as fn:
                        temp = fn.read().splitlines()
                    if temp[0] == uid:
                        lock = True
                    else:
                        lock = False
                    del temp
                except:
                    # NOTE(review): bare except silently downgrades any failure to 'no lock'
                    lock = False
            else:
                lock = False
            if lock:
                # we own the lock: swap the tmp dirs into the official location
                rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
                shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
                carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')
                rm_file_or_dir(col_values_rootdir, ignore_errors=False)
                shutil.move(col_values_rootdir_tmp, col_values_rootdir)
                carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
            else:
                # another process has a lock, we will work with our current files and clean up later
                self._dir_clean_list.append(col_factor_rootdir)
                self._dir_clean_list.append(col_values_rootdir)
        return carray_factor, carray_values
    def make_group_index(self, groupby_cols, bool_arr):
        '''Create unique groups for the groupby loop.

        Args:
            groupby_cols: list of columns to group over
            bool_arr: optional boolean filter array (or None)

        Returns:
            carray: (carray_factor) per-row group index
            int: (nr_groups) the number of resulting groups
            int: (skip_key) group index to skip during aggregation
        '''
        factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
        # create unique groups for groupby loop
        if len(factor_list) == 0:
            # no columns to groupby over, so directly aggregate the measure
            # columns to 1 total
            tmp_rootdir = self.create_tmp_rootdir()
            carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
            carray_values = ['Total']
        elif len(factor_list) == 1:
            # single column groupby, the groupby output column
            # here is 1:1 to the values
            carray_factor = factor_list[0]
            carray_values = values_list[0]
        else:
            # multi column groupby
            # first combine the factorized columns to single values
            if self.group_cache_valid(col_list=groupby_cols):
                # there is a group cache that we can use
                col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
                col_factor_rootdir = col_rootdir + '.factor'
                carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
                col_values_rootdir = col_rootdir + '.values'
                carray_values = bcolz.carray(rootdir=col_values_rootdir)
            else:
                # create a brand new groupby col combination
                carray_factor, carray_values = \
                    self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
        nr_groups = len(carray_values)
        skip_key = None
        if bool_arr is not None:
            # make all non relevant combinations -1
            tmp_rootdir = self.create_tmp_rootdir()
            carray_factor = bcolz.eval(
                '(factor + 1) * bool - 1',
                user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
            # now check how many unique values there are left
            tmp_rootdir = self.create_tmp_rootdir()
            labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
            carray_factor, values = ctable_ext.factorize(carray_factor, labels)
            # values might contain one value too much (-1) (no direct lookup
            # possible because values is a reversed dict)
            filter_check = \
                [key for key, value in values.items() if value == -1]
            if filter_check:
                skip_key = filter_check[0]
            # the new nr of groups depends on the outcome after filtering
            nr_groups = len(values)
        # using nr_groups as a total length might be one one off due to the skip_key
        # (skipping a row in aggregation)
        # but that is okay normally
        if skip_key is None:
            # if we shouldn't skip a row, set it at the first row after the total number of groups
            skip_key = nr_groups
        return carray_factor, nr_groups, skip_key
def create_tmp_rootdir(self):
"""
create a rootdir that we can destroy later again
Returns
-------
"""
if self.rootdir:
tmp_rootdir = tempfile.mkdtemp(prefix='bcolz-')
self._dir_clean_list.append(tmp_rootdir)
else:
tmp_rootdir = None
return tmp_rootdir
def clean_tmp_rootdir(self):
"""
clean up all used temporary rootdirs
Returns
-------
"""
for tmp_rootdir in list(self._dir_clean_list):
rm_file_or_dir(tmp_rootdir)
self._dir_clean_list.remove(tmp_rootdir)
    def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
        '''Create a container for the output table, a dictionary describing it's
        columns and a list of tuples describing aggregation
        operations to perform.

        Args:
            groupby_cols (list): a list of columns to groupby over
            agg_list (list): the aggregation operations (see groupby for more info)
            expectedlen (int): expected length of output table
            rootdir (string): the directory to write the table to

        Returns:
            ctable: A table in the correct format for containing the output of
                    the specified aggregation operations.
            dict: (dtype_dict) dictionary describing columns to create
            list: (agg_ops) list of tuples of the form:
                    (input_col_name, output_col_name, agg_op)
                    input_col_name (string): name of the column to act on
                    output_col_name (string): name of the column to output to
                    agg_op (string): aggregation operation to perform
        '''
        dtype_dict = {}
        # include all the groupby columns
        for col in groupby_cols:
            dtype_dict[col] = self[col].dtype
        agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
        agg_ops = []
        for agg_info in agg_list:
            if not isinstance(agg_info, list):
                # example: ['m1', 'm2', ...]
                # default operation (sum) and default output column name (same is input)
                output_col_name = agg_info
                input_col_name = agg_info
                agg_op = 'sum'
            else:
                input_col_name = agg_info[0]
                agg_op = agg_info[1]
                if len(agg_info) == 2:
                    # example: [['m1', 'sum'], ['m2', 'mean], ...]
                    # default output column name
                    output_col_name = input_col_name
                else:
                    # example: [['m1', 'sum', 'mnew1'], ['m1, 'mean','mnew2'], ...]
                    # fully specified
                    output_col_name = agg_info[2]
            if agg_op not in agg_ops_list:
                raise NotImplementedError(
                    'Unknown Aggregation Type: ' + str(agg_op))
            # choose output column dtype based on aggregation operation and
            # input column dtype
            # TODO: check if the aggregation columns is numeric
            # NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
            # separate operation
            if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
                output_col_dtype = np.dtype(np.int64)
            elif agg_op in ('mean', 'std'):
                output_col_dtype = np.dtype(np.float64)
            else:
                output_col_dtype = self[input_col_name].dtype
            dtype_dict[output_col_name] = output_col_dtype
            # save output
            agg_ops.append((input_col_name, output_col_name, agg_op))
        # create aggregation table, pre-sized via a placeholder column that is
        # dropped again in aggregate_groups
        # NOTE(review): np.bool was removed in NumPy 1.24+; np.bool_ would be needed there
        ct_agg = bcolz.ctable(
            np.zeros(expectedlen, [('tmp_col_bquery__', np.bool)]),
            expectedlen=expectedlen,
            rootdir=rootdir)
        return ct_agg, dtype_dict, agg_ops
def where_terms(self, term_list, cache=False):
"""
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError:
"""
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
col_list = []
op_list = []
value_list = []
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
if filter_operator in ['==', 'eq']:
op_id = 1
elif filter_operator in ['!=', 'neq']:
op_id = 2
elif filter_operator in ['in']:
op_id = 3
elif filter_operator in ['nin', 'not in']:
op_id = 4
elif filter_operator in ['>']:
op_id = 5
elif filter_operator in ['>=']:
op_id = 6
elif filter_operator in ['<']:
op_id = 7
elif filter_operator in ['<=']:
op_id = 8
else:
raise KeyError(unicode(filter_operator) + ' is not an accepted operator for filtering')
if op_id in [3, 4]:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
if op_id == 3:
op_id = 1
else:
op_id = 2
filter_value = filter_value[0]
else:
filter_value = set(filter_value)
# prepare input for filter creation
col_list.append(filter_col)
op_list.append(op_id)
value_list.append(filter_value)
# rootdir
if cache:
# nb: this directory is not destroyed until the end of the groupby
rootdir = self.create_tmp_rootdir()
else:
rootdir = None
# create boolean array and fill it
boolarr = bcolz.carray(np.ones(0, dtype=np.bool), expectedlen=self.len, rootdir=rootdir, mode='w')
ctable_iter = self[col_list].iter(out_flavor='tuple')
ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)
return boolarr
def where_terms_factorization_check(self, term_list):
"""
check for where terms if they are applicable
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError:
"""
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
valid = True
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
if not os.path.exists(col_values_rootdir):
# no factorization available
break
col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
col_values = set(col_carray)
if filter_operator in ['in', 'not in', 'nin']:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
filter_value = filter_value[0]
if filter_operator == 'in':
filter_operator = '=='
else:
filter_operator = '!='
else:
filter_value = set(filter_value)
if filter_operator in ['==', 'eq']:
valid = filter_value in col_values
elif filter_operator in ['!=', 'neq']:
valid = any(val for val in col_values if val != filter_value)
elif filter_operator in ['in']:
valid = any(val for val in filter_value if val in col_values)
elif filter_operator in ['nin', 'not in']:
valid = any(val for val in col_values if val not in filter_value)
elif filter_operator in ['>']:
valid = any(val for val in col_values if val > filter_value)
elif filter_operator in ['>=']:
valid = any(val for val in col_values if val >= filter_value)
elif filter_operator in ['<']:
valid = any(val for val in col_values if val < filter_value)
elif filter_operator in ['<=']:
valid = any(val for val in col_values if val >= filter_value)
else:
raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
# if one of the filters is blocking, we can stop
if not valid:
break
return valid
def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
_max_len_subgroup=1000):
"""
Expands the filter using a specified column
Parameters
----------
basket_col
bool_arr
_max_len_subgroup
Returns
-------
"""
assert basket_col is not None
if bool_arr is None:
return None
if self.auto_cache and bool_arr.rootdir is not None:
rootdir = self.create_tmp_rootdir()
else:
rootdir = None
return \
ctable_ext.is_in_ordered_subgroups(
self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
_max_len_subgroup=_max_len_subgroup)
|
visualfabriq/bquery | bquery/ctable.py | ctable.cache_factor | python | def cache_factor(self, col_list, refresh=False):
if not self.rootdir:
raise TypeError('Only out-of-core ctables can have '
'factorization caching at the moment')
if not isinstance(col_list, list):
col_list = [col_list]
if refresh:
kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
for kill_dir in kill_list:
rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
for col in col_list:
# create cache if needed
if refresh or not self.cache_valid(col):
# todo: also add locking mechanism here
# create directories
col_rootdir = self[col].rootdir
col_factor_rootdir = col_rootdir + '.factor'
col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
col_values_rootdir = col_rootdir + '.values'
col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
# create factor
carray_factor = \
bcolz.carray([], dtype='int64', expectedlen=self.size,
rootdir=col_factor_rootdir_tmp, mode='w')
_, values = \
ctable_ext.factorize(self[col], labels=carray_factor)
carray_factor.flush()
rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
# create values
carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
rootdir=col_values_rootdir_tmp, mode='w')
carray_values.flush()
rm_file_or_dir(col_values_rootdir, ignore_errors=True)
shutil.move(col_values_rootdir_tmp, col_values_rootdir) | Existing todos here are: these should be hidden helper carrays
As in: not normal columns that you would normally see as a user
The factor (label index) carray is as long as the original carray
(and the rest of the table therefore)
But the (unique) values carray is not as long (as long as the number
of unique values)
:param col_list:
:param refresh:
:return: | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L95-L152 | [
"def rm_file_or_dir(path, ignore_errors=True):\n \"\"\"\n Helper function to clean a certain filepath\n\n Parameters\n ----------\n path\n\n Returns\n -------\n\n \"\"\"\n if os.path.exists(path):\n if os.path.isdir(path):\n if os.path.islink(path):\n os.unlink(path)\n else:\n shutil.rmtree(path, ignore_errors=ignore_errors)\n else:\n if os.path.islink(path):\n os.unlink(path)\n else:\n os.remove(path)\n"
] | class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
super(ctable, self).__init__(*args, **kwargs)
# check autocaching
if self.rootdir and kwargs.get('auto_cache') is True:
# explicit auto_cache
self.auto_cache = True
elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
# implicit auto_cache
self.auto_cache = True
else:
self.auto_cache = False
self.auto_cache = True # debug
self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
group_name = '_'.join(sorted(col_list))
return group_name
def cache_valid(self, col):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_org_file_check = self[col].rootdir + '/__attrs__'
col_values_file_check = self[col].rootdir + '.values/__attrs__'
cache_valid = os.path.exists(col_org_file_check) and os.path.exists(col_values_file_check)
return cache_valid
    def group_cache_valid(self, col_list):
        """
        Check whether a cached combined factorization exists for the given
        group of columns and all of its source columns are still on disk.

        :param col_list: list of column names that make up the group
        :return: True if the on-disk group index can be reused
        """
        cache_valid = False
        if self.rootdir:
            # the combined group index lives next to the table as
            # '<colA>_<colB>_...'.values
            col_values_file_check = os.path.join(self.rootdir, self.create_group_base_name(col_list)) + \
                                    '.values/__attrs__'
            exists_group_index = os.path.exists(col_values_file_check)
            # every source column must still exist on disk
            missing_col_check = [1 for col in col_list if not os.path.exists(self[col].rootdir + '/__attrs__')]
            cache_valid = (exists_group_index and not missing_col_check)
        return cache_valid
def unique(self, col_or_col_list):
    """
    Return the unique values of a column, or a list of such results when
    a list of columns is given.

    :param col_or_col_list: a column name or a list of column names
    :return: unique values for the column, or one sequence per column
    """
    single_col = not isinstance(col_or_col_list, list)
    requested_cols = [col_or_col_list] if single_col else col_or_col_list

    results = []
    for col in requested_cols:
        if self.auto_cache or self.cache_valid(col):
            # make sure a disk-based factorization is available
            if not self.cache_valid(col):
                self.cache_factor([col])
            # read the unique values straight from the cached carray
            values_rootdir = self[col].rootdir + '.values'
            cached_values = bcolz.carray(rootdir=values_rootdir, mode='r')
            col_uniques = list(cached_values)
        else:
            # no cache available: factorize in memory
            _, value_map = ctable_ext.factorize(self[col])
            col_uniques = value_map.values()
        results.append(col_uniques)

    return results[0] if single_col else results
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
                     carray_factor, groupby_cols, agg_ops,
                     dtype_dict, bool_arr=None):
    '''Perform aggregation and place the result in the given ctable.

    Args:
        ct_agg (ctable): the table to hold the aggregation
        nr_groups (int): the number of groups (number of rows in output table)
        skip_key (int): index of the output row to remove from results (used for filtering)
        carray_factor: the carray for each row in the table a reference to the the unique group index
        groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
        agg_ops (list): list of tuples of the form: (input_col_name, output_col_name, agg_op)
            input_col_name (string): name of the column to act on
            output_col_name (string): name of the column to output to
            agg_op (string): aggregation operation to perform
        dtype_dict (dict): maps each output column name to its numpy dtype
        bool_arr: a boolean array containing the filter
    '''
    # this creates the groupby columns
    for col in groupby_cols:
        result_array = ctable_ext.groupby_value(self[col], carray_factor,
                                                nr_groups, skip_key)
        if bool_arr is not None:
            # when filtering, drop the row that collected filtered-out records
            result_array = np.delete(result_array, skip_key)
        ct_agg.addcol(result_array, name=col)
        del result_array
    # this creates the aggregation columns
    for input_col_name, output_col_name, agg_op in agg_ops:
        input_col = self[input_col_name]
        output_col_dtype = dtype_dict[output_col_name]
        # reusable chunk buffer plus one output slot per group
        input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
        output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
        if agg_op == 'sum':
            ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
                                     skip_key, input_buffer, output_buffer)
        elif agg_op == 'mean':
            ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
                                      skip_key, input_buffer, output_buffer)
        elif agg_op == 'std':
            ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
                                     skip_key, input_buffer, output_buffer)
        elif agg_op == 'count':
            ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
                                       skip_key, input_buffer, output_buffer)
        elif agg_op == 'count_distinct':
            ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
                                                skip_key, input_buffer, output_buffer)
        elif agg_op == 'sorted_count_distinct':
            ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
                                                       skip_key, input_buffer, output_buffer)
        else:
            raise KeyError('Unknown aggregation operation ' + str(agg_op))
        if bool_arr is not None:
            # drop the skip_key row from the aggregated output as well
            output_buffer = np.delete(output_buffer, skip_key)
        ct_agg.addcol(output_buffer, name=output_col_name)
        del output_buffer
    # remove the placeholder column that pre-sized the output table
    ct_agg.delcol('tmp_col_bquery__')
def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
    """
    Aggregate the ctable

    groupby_cols: a list of columns to groupby over
    agg_list: the aggregation operations, which can be:
     - a list of column names (output has same name and sum is performed)
       ['m1', 'm2', ...]
     - a list of lists, each list contains input column name and operation
       [['m1', 'sum'], ['m2', 'mean'], ...]
     - a list of lists, each list contains input column name, operation and
       output column name
       [['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]

    Currently supported aggregation operations are:
     - 'sum'
     - 'count'
     - 'count_distinct'
     - 'sorted_count_distinct', data should have been
       previously presorted
     - 'mean', arithmetic mean (average)
     - 'std', standard deviation
    (NOTE(review): 'count_na' used to be listed here but is not implemented
    in create_agg_ctable/aggregate_groups)

    bool_arr: optional boolean filter applied to the groupby factorization input
    rootdir: the aggregation ctable rootdir
    """
    # factorize the groupby columns into one group index per row
    carray_factor, nr_groups, skip_key = \
        self.make_group_index(groupby_cols, bool_arr)
    # check if the bool_arr actually filters; an all-True filter is a no-op
    if bool_arr is not None and np.all(bool_arr):
        bool_arr = None
    if bool_arr is None:
        expectedlen = nr_groups
    else:
        # one group (skip_key) collects the filtered-out rows and is dropped
        expectedlen = nr_groups - 1
    ct_agg, dtype_dict, agg_ops = \
        self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
    # perform aggregation
    self.aggregate_groups(ct_agg, nr_groups, skip_key,
                          carray_factor, groupby_cols,
                          agg_ops, dtype_dict,
                          bool_arr=bool_arr)
    # clean up everything that was used
    self.clean_tmp_rootdir()
    return ct_agg
# groupby helper functions
def factorize_groupby_cols(self, groupby_cols):
    """
    Factorize all columns that are used in the groupby.

    Uses the cached `.factor`/`.values` carrays when available; when
    auto_cache is on, missing caches are created first. Otherwise the
    columns are factorized in memory.

    :param groupby_cols: list of column names
    :return: (factor_list, values_list) — per column, the factor carray
        (group label per row) and the carray of unique values
    """
    # first check if the factorized arrays already exist
    # unless we need to refresh the cache
    factor_list = []
    values_list = []
    # factorize the groupby columns
    for col in groupby_cols:
        if self.auto_cache or self.cache_valid(col):
            # create factorization cache if needed
            if not self.cache_valid(col):
                self.cache_factor([col])
            col_rootdir = self[col].rootdir
            col_factor_rootdir = col_rootdir + '.factor'
            col_values_rootdir = col_rootdir + '.values'
            col_carray_factor = \
                bcolz.carray(rootdir=col_factor_rootdir, mode='r')
            col_carray_values = \
                bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # in-memory factorization; `values` comes back as a mapping of
            # label -> unique value
            col_carray_factor, values = ctable_ext.factorize(self[col])
            col_carray_values = \
                bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype))
        factor_list.append(col_carray_factor)
        values_list.append(col_carray_values)
    return factor_list, values_list
@staticmethod
def _int_array_hash(input_list):
    """
    Calculate a combined hash value over multiple equal-length integer
    arrays (tuple-hash style mixing); not used at the moment.

    :param input_list: list of equal-length integer numpy arrays
    :return: bcolz carray with one mixed hash value per row
    """
    list_len = len(input_list)
    arr_len = len(input_list[0])
    # np.long was deprecated in NumPy 1.20 and removed in 1.24;
    # use the explicit 64-bit integer type instead
    mult_arr = np.full(arr_len, 1000003, dtype=np.int64)
    value_arr = np.full(arr_len, 0x345678, dtype=np.int64)
    for i, current_arr in enumerate(input_list):
        index = list_len - i - 1
        value_arr ^= current_arr
        value_arr *= mult_arr
        mult_arr += (82520 + index + index)
    value_arr += 97531
    result_carray = bcolz.carray(value_arr)
    del value_arr
    return result_carray
def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
    """
    Create a unique, factorized column out of several individual columns.

    Combines the per-column factors into one group index, factorizes that
    index and (optionally) persists the result next to the table, guarded
    by a lock file so concurrent processes do not clobber each other.

    Parameters
    ----------
    factor_list : list of factor carrays, one per groupby column
    groupby_cols : list of the column names (used for the cache name)
    cache : bool, persist the combined factorization on disk when True

    Returns
    -------
    (carray_factor, carray_values)
    """
    if not self.rootdir:
        # in-memory scenario
        input_rootdir = None
        col_rootdir = None
        col_factor_rootdir = None
        col_values_rootdir = None
        col_factor_rootdir_tmp = None
        col_values_rootdir_tmp = None
    else:
        # temporary dirs; moved to their final location below when caching
        input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
        col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
        col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')

    # create combination of groupby columns
    group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
    factor_table = bcolz.ctable(factor_list, names=groupby_cols)
    ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
    ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)

    # now factorize the results
    carray_factor = \
        bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
    carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
    carray_factor.flush()

    carray_values = \
        bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
    carray_values.flush()
    del group_array

    if cache:
        # clean up the temporary file
        rm_file_or_dir(input_rootdir, ignore_errors=True)

    if cache:
        # official end destination
        col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
        col_factor_rootdir = col_rootdir + '.factor'
        col_values_rootdir = col_rootdir + '.values'
        lock_file = col_rootdir + '.lock'

        # best-effort file lock: append a uuid, read the file back, and we
        # hold the lock only if our uuid is the first line
        # only works for linux
        # NOTE(review): races between the exists() check and the append are
        # possible — this is not a robust lock
        if not os.path.exists(lock_file):
            uid = str(uuid.uuid4())
            try:
                with open(lock_file, 'a+') as fn:
                    fn.write(uid + '\n')
                with open(lock_file, 'r') as fn:
                    temp = fn.read().splitlines()
                if temp[0] == uid:
                    lock = True
                else:
                    lock = False
                del temp
            # NOTE(review): bare except downgrades ANY error (even
            # KeyboardInterrupt) to "no lock" — consider `except Exception`
            except:
                lock = False
        else:
            lock = False

        if lock:
            # replace the official cache dirs with the freshly built ones
            rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
            shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')

            rm_file_or_dir(col_values_rootdir, ignore_errors=False)
            shutil.move(col_values_rootdir_tmp, col_values_rootdir)
            carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # another process has a lock, we will work with our current files and clean up later
            # NOTE(review): this registers the *official* cache dirs for
            # cleanup while the carrays still point at the tmp dirs — it
            # looks like the tmp dirs were intended here; verify
            self._dir_clean_list.append(col_factor_rootdir)
            self._dir_clean_list.append(col_values_rootdir)

    return carray_factor, carray_values
def make_group_index(self, groupby_cols, bool_arr):
    '''Create the unique-group factorization used by the groupby loop.

    Args:
        groupby_cols: list of columns to group over
        bool_arr: optional boolean filter over the rows (or None)

    Returns:
        carray: (carray_factor) per-row group index
        int: (nr_groups) the number of resulting groups
        int: (skip_key) index of the group that collects filtered-out rows;
            set to nr_groups (i.e. out of range) when nothing is skipped
    '''
    factor_list, values_list = self.factorize_groupby_cols(groupby_cols)

    # create unique groups for groupby loop
    if len(factor_list) == 0:
        # no columns to groupby over, so directly aggregate the measure
        # columns to 1 total
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
        carray_values = ['Total']
    elif len(factor_list) == 1:
        # single column groupby, the groupby output column
        # here is 1:1 to the values
        carray_factor = factor_list[0]
        carray_values = values_list[0]
    else:
        # multi column groupby
        # first combine the factorized columns to single values
        if self.group_cache_valid(col_list=groupby_cols):
            # there is a group cache that we can use
            col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
            col_factor_rootdir = col_rootdir + '.factor'
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
            col_values_rootdir = col_rootdir + '.values'
            carray_values = bcolz.carray(rootdir=col_values_rootdir)
        else:
            # create a brand new groupby col combination
            carray_factor, carray_values = \
                self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)

    nr_groups = len(carray_values)
    skip_key = None

    if bool_arr is not None:
        # make all non relevant combinations -1
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.eval(
            '(factor + 1) * bool - 1',
            user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
        # now check how many unique values there are left
        tmp_rootdir = self.create_tmp_rootdir()
        labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
        carray_factor, values = ctable_ext.factorize(carray_factor, labels)
        # values might contain one value too much (-1) (no direct lookup
        # possible because values is a reversed dict)
        filter_check = \
            [key for key, value in values.items() if value == -1]
        if filter_check:
            skip_key = filter_check[0]
        # the new nr of groups depends on the outcome after filtering
        nr_groups = len(values)

    # using nr_groups as a total length might be one one off due to the skip_key
    # (skipping a row in aggregation)
    # but that is okay normally
    if skip_key is None:
        # if we shouldn't skip a row, set it at the first row after the total number of groups
        skip_key = nr_groups

    return carray_factor, nr_groups, skip_key
def create_tmp_rootdir(self):
    """
    Create a temporary rootdir (disk-based tables only) and register it so
    clean_tmp_rootdir() can remove it later.

    :return: path of the new temporary directory, or None for in-memory tables
    """
    if not self.rootdir:
        return None
    new_tmp_dir = tempfile.mkdtemp(prefix='bcolz-')
    self._dir_clean_list.append(new_tmp_dir)
    return new_tmp_dir
def clean_tmp_rootdir(self):
    """
    Remove every temporary rootdir registered via create_tmp_rootdir().

    Each directory is deleted first and only then dropped from the
    registration list, so a failed removal keeps its entry for a retry.
    """
    while self._dir_clean_list:
        pending_dir = self._dir_clean_list[0]
        rm_file_or_dir(pending_dir)
        self._dir_clean_list.remove(pending_dir)
def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
    '''Create a container for the output table, a dictionary describing it's
    columns and a list of tuples describing aggregation
    operations to perform.

    Args:
        groupby_cols (list): a list of columns to groupby over
        agg_list (list): the aggregation operations (see groupby for more info)
        expectedlen (int): expected length of output table
        rootdir (string): the directory to write the table to

    Returns:
        ctable: A table in the correct format for containing the output of
                the specified aggregation operations.
        dict: (dtype_dict) dictionary describing columns to create
        list: (agg_ops) list of tuples of the form:
                (input_col_name, output_col_name, agg_op)
                    input_col_name (string): name of the column to act on
                    output_col_name (string): name of the column to output to
                    agg_op (string): aggregation operation to perform

    Raises:
        NotImplementedError: when an unknown aggregation operation is given
    '''
    dtype_dict = {}

    # include all the groupby columns
    for col in groupby_cols:
        dtype_dict[col] = self[col].dtype

    agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
    agg_ops = []

    for agg_info in agg_list:

        if not isinstance(agg_info, list):
            # example: ['m1', 'm2', ...]
            # default operation (sum) and default output column name (same as input)
            output_col_name = agg_info
            input_col_name = agg_info
            agg_op = 'sum'
        else:
            input_col_name = agg_info[0]
            agg_op = agg_info[1]

            if len(agg_info) == 2:
                # example: [['m1', 'sum'], ['m2', 'mean'], ...]
                # default output column name
                output_col_name = input_col_name
            else:
                # example: [['m1', 'sum', 'mnew1'], ['m1', 'mean', 'mnew2'], ...]
                # fully specified
                output_col_name = agg_info[2]

        if agg_op not in agg_ops_list:
            raise NotImplementedError(
                'Unknown Aggregation Type: ' + str(agg_op))

        # choose output column dtype based on aggregation operation and
        # input column dtype
        # TODO: check if the aggregation columns is numeric
        # NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
        # separate operation
        if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
            output_col_dtype = np.dtype(np.int64)
        elif agg_op in ('mean', 'std'):
            output_col_dtype = np.dtype(np.float64)
        else:
            output_col_dtype = self[input_col_name].dtype

        dtype_dict[output_col_name] = output_col_dtype

        # save output
        agg_ops.append((input_col_name, output_col_name, agg_op))

    # create aggregation table; `np.bool` was removed in NumPy 1.24, so use
    # the canonical np.bool_ scalar type for the placeholder column
    ct_agg = bcolz.ctable(
        np.zeros(expectedlen, [('tmp_col_bquery__', np.bool_)]),
        expectedlen=expectedlen,
        rootdir=rootdir)

    return ct_agg, dtype_dict, agg_ops
def where_terms(self, term_list, cache=False):
    """
    Create a boolean array that is True where all terms in `term_list` hold.

    A terms list has a [(col, operator, value), ..] construction.
    Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    :param term_list: list of (column, operator, value) tuples
    :param cache: when True, back the result carray by a temporary rootdir
    :return: bcolz boolean carray, one entry per row
    :raise ValueError: on a malformed term list or value list
    :raise KeyError: on an unknown column or operator
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")

    # operator spelling -> internal op id used by ctable_ext.apply_where_terms
    op_id_map = {
        '==': 1, 'eq': 1,
        '!=': 2, 'neq': 2,
        'in': 3,
        'nin': 4, 'not in': 4,
        '>': 5,
        '>=': 6,
        '<': 7,
        '<=': 8,
    }

    col_list = []
    op_list = []
    value_list = []

    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]

        # check values
        if filter_col not in self.cols:
            # str() instead of the Python-2-only unicode() (NameError on Py3)
            raise KeyError(str(filter_col) + ' not in table')
        if filter_operator not in op_id_map:
            raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
        op_id = op_id_map[filter_operator]

        if op_id in [3, 4]:
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")

            # optimize lists of 1 value: 'in' becomes '==', 'nin' becomes '!='
            if len(filter_value) == 1:
                if op_id == 3:
                    op_id = 1
                else:
                    op_id = 2
                filter_value = filter_value[0]
            else:
                filter_value = set(filter_value)

        # prepare input for filter creation
        col_list.append(filter_col)
        op_list.append(op_id)
        value_list.append(filter_value)

    # rootdir
    if cache:
        # nb: this directory is not destroyed until the end of the groupby
        rootdir = self.create_tmp_rootdir()
    else:
        rootdir = None

    # create boolean array and fill it
    # (np.bool was removed in NumPy 1.24; use np.bool_)
    boolarr = bcolz.carray(np.ones(0, dtype=np.bool_), expectedlen=self.len, rootdir=rootdir, mode='w')
    ctable_iter = self[col_list].iter(out_flavor='tuple')
    ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)

    return boolarr
def where_terms_factorization_check(self, term_list):
    """
    Check whether a term list can possibly match anything, using the cached
    factorization values (`<col>.values`) of each filtered column.

    A terms list has a [(col, operator, value), ..] construction.
    Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    Columns without an on-disk factorization cannot be checked and are
    treated as (potentially) valid.

    :param term_list: list of (column, operator, value) tuples
    :return: bool - False when a term provably matches no row
    :raise ValueError: on a malformed term or value list
    :raise KeyError: on an unknown column or operator
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")

    valid = True

    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]

        # check values
        if filter_col not in self.cols:
            # str() instead of the Python-2-only unicode() (NameError on Py3)
            raise KeyError(str(filter_col) + ' not in table')

        col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
        if not os.path.exists(col_values_rootdir):
            # no factorization available; nothing more we can check
            break
        col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        col_values = set(col_carray)

        if filter_operator in ['in', 'not in', 'nin']:
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")

            # optimize lists of 1 value
            if len(filter_value) == 1:
                filter_value = filter_value[0]
                if filter_operator == 'in':
                    filter_operator = '=='
                else:
                    filter_operator = '!='
            else:
                filter_value = set(filter_value)

        # NOTE: the any() calls below iterate boolean expressions; the
        # previous implementation yielded the values themselves
        # (`any(val for val in ... if cond)`), so falsy values such as 0 or
        # '' were incorrectly ignored
        if filter_operator in ['==', 'eq']:
            valid = filter_value in col_values
        elif filter_operator in ['!=', 'neq']:
            valid = any(val != filter_value for val in col_values)
        elif filter_operator in ['in']:
            valid = any(val in col_values for val in filter_value)
        elif filter_operator in ['nin', 'not in']:
            valid = any(val not in filter_value for val in col_values)
        elif filter_operator in ['>']:
            valid = any(val > filter_value for val in col_values)
        elif filter_operator in ['>=']:
            valid = any(val >= filter_value for val in col_values)
        elif filter_operator in ['<']:
            valid = any(val < filter_value for val in col_values)
        elif filter_operator in ['<=']:
            # bug fix: this branch previously tested `val >= filter_value`
            valid = any(val <= filter_value for val in col_values)
        else:
            raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')

        # if one of the filters is blocking, we can stop
        if not valid:
            break

    return valid
def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
                            _max_len_subgroup=1000):
    """
    Expand the filter `bool_arr` using the subgroups defined by
    `basket_col` (presumably: select whole baskets as soon as one of their
    rows matches — confirm against ctable_ext.is_in_ordered_subgroups).

    Parameters
    ----------
    basket_col : str, required; the column defining the subgroups
    bool_arr : bcolz boolean carray to expand, or None
    _max_len_subgroup : int, internal limit passed to the C extension
    """
    assert basket_col is not None
    if bool_arr is None:
        return None
    # keep the expanded filter on disk when caching is on and the input
    # filter is itself disk-backed
    if self.auto_cache and bool_arr.rootdir is not None:
        rootdir = self.create_tmp_rootdir()
    else:
        rootdir = None
    return \
        ctable_ext.is_in_ordered_subgroups(
            self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
            _max_len_subgroup=_max_len_subgroup)
|
visualfabriq/bquery | bquery/ctable.py | ctable.unique | python | def unique(self, col_or_col_list):
if isinstance(col_or_col_list, list):
col_is_list = True
col_list = col_or_col_list
else:
col_is_list = False
col_list = [col_or_col_list]
output = []
for col in col_list:
if self.auto_cache or self.cache_valid(col):
# create factorization cache
if not self.cache_valid(col):
self.cache_factor([col])
# retrieve values from existing disk-based factorization
col_values_rootdir = self[col].rootdir + '.values'
carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
values = list(carray_values)
else:
# factorize on-the-fly
_, values = ctable_ext.factorize(self[col])
values = values.values()
output.append(values)
if not col_is_list:
output = output[0]
return output | Return a list of unique values of a column or a list of lists of column list
:param col_or_col_list: a column or a list of columns
:return: | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L154-L192 | null | class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
    """Initialize the ctable and decide whether factorization caching is on.

    auto_cache becomes True when the table is disk-based (has a rootdir) and
    the caller either requested caching explicitly (auto_cache=True) or left
    it unspecified while opening the table writable (mode != 'r').
    """
    super(ctable, self).__init__(*args, **kwargs)
    # check autocaching
    if self.rootdir and kwargs.get('auto_cache') is True:
        # explicit auto_cache
        self.auto_cache = True
    elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
        # implicit auto_cache: writable disk-based table
        self.auto_cache = True
    else:
        self.auto_cache = False
    # NOTE: a leftover `self.auto_cache = True  # debug` override was removed
    # here; it unconditionally forced caching on and defeated the logic above.
    # directories registered here are removed later by clean_tmp_rootdir()
    self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
    """Return the canonical cache name for a column combination:
    the column names sorted alphabetically and joined with underscores."""
    ordered_cols = sorted(col_list)
    return '_'.join(ordered_cols)
def cache_valid(self, col):
    """
    Check whether a disk-based factorization cache exists for the column.

    NOTE(review): despite wording elsewhere in this file, this only checks
    that the column dir and its `.values` cache dir exist on disk; it does
    NOT compare timestamps against the source data.

    :param col: column name
    :return: bool, True when both existence markers are present
    """
    cache_valid = False
    if self.rootdir:
        # presence of the per-carray __attrs__ file is used as the
        # existence marker for both the column and its values cache
        col_org_file_check = self[col].rootdir + '/__attrs__'
        col_values_file_check = self[col].rootdir + '.values/__attrs__'
        cache_valid = os.path.exists(col_org_file_check) and os.path.exists(col_values_file_check)
    return cache_valid
def group_cache_valid(self, col_list):
    """
    Check whether a combined (multi-column) factorization cache exists for
    the given column combination.

    NOTE(review): like cache_valid, this is purely an existence check — no
    staleness comparison against the source columns is performed.

    :param col_list: list of column names making up the group
    :return: bool, True when the group index exists and every member
        column is present on disk
    """
    cache_valid = False
    if self.rootdir:
        # the group cache dir is named after the sorted column combination
        col_values_file_check = os.path.join(self.rootdir, self.create_group_base_name(col_list)) + \
                                '.values/__attrs__'
        exists_group_index = os.path.exists(col_values_file_check)
        # any member column missing on disk invalidates the group cache
        missing_col_check = [1 for col in col_list if not os.path.exists(self[col].rootdir + '/__attrs__')]
        cache_valid = (exists_group_index and not missing_col_check)
    return cache_valid
def cache_factor(self, col_list, refresh=False):
    """
    Create or refresh the on-disk factorization (`<col>.factor` and
    `<col>.values` carrays) for the given columns.

    Existing todos here are: these should be hidden helper carrays
    As in: not normal columns that you would normally see as a user

    The factor (label index) carray is as long as the original carray
    (and the rest of the table therefore)
    But the (unique) values carray is not as long (as long as the number
    of unique values)

    :param col_list: a column name or a list of column names
    :param refresh: when True, wipe ALL existing .factor/.values caches of
        the table before rebuilding the requested ones
    :raise TypeError: for in-memory (rootdir-less) tables
    """
    if not self.rootdir:
        raise TypeError('Only out-of-core ctables can have '
                        'factorization caching at the moment')

    if not isinstance(col_list, list):
        col_list = [col_list]

    if refresh:
        # drop every cache dir of the table, not just those in col_list
        kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
        for kill_dir in kill_list:
            rm_file_or_dir(os.path.join(self.rootdir, kill_dir))

    for col in col_list:
        # create cache if needed
        if refresh or not self.cache_valid(col):
            # todo: also add locking mechanism here

            # create directories
            col_rootdir = self[col].rootdir
            col_factor_rootdir = col_rootdir + '.factor'
            col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
            col_values_rootdir = col_rootdir + '.values'
            col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')

            # create factor: build it in a tmp dir first, then move into place
            carray_factor = \
                bcolz.carray([], dtype='int64', expectedlen=self.size,
                             rootdir=col_factor_rootdir_tmp, mode='w')
            _, values = \
                ctable_ext.factorize(self[col], labels=carray_factor)
            carray_factor.flush()
            rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
            shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)

            # create values carray the same way
            carray_values = \
                bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
                             rootdir=col_values_rootdir_tmp, mode='w')
            carray_values.flush()
            rm_file_or_dir(col_values_rootdir, ignore_errors=True)
            shutil.move(col_values_rootdir_tmp, col_values_rootdir)
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
                     carray_factor, groupby_cols, agg_ops,
                     dtype_dict, bool_arr=None):
    '''Perform aggregation and place the result in the given ctable.

    Args:
        ct_agg (ctable): the table to hold the aggregation
        nr_groups (int): the number of groups (number of rows in output table)
        skip_key (int): index of the output row to remove from results (used for filtering)
        carray_factor: the carray for each row in the table a reference to the the unique group index
        groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
        agg_ops (list): list of tuples of the form: (input_col_name, output_col_name, agg_op)
            input_col_name (string): name of the column to act on
            output_col_name (string): name of the column to output to
            agg_op (string): aggregation operation to perform
        dtype_dict (dict): maps each output column name to its numpy dtype
        bool_arr: a boolean array containing the filter
    '''
    # this creates the groupby columns
    for col in groupby_cols:
        result_array = ctable_ext.groupby_value(self[col], carray_factor,
                                                nr_groups, skip_key)
        if bool_arr is not None:
            # when filtering, drop the row that collected filtered-out records
            result_array = np.delete(result_array, skip_key)
        ct_agg.addcol(result_array, name=col)
        del result_array
    # this creates the aggregation columns
    for input_col_name, output_col_name, agg_op in agg_ops:
        input_col = self[input_col_name]
        output_col_dtype = dtype_dict[output_col_name]
        # reusable chunk buffer plus one output slot per group
        input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
        output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
        if agg_op == 'sum':
            ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
                                     skip_key, input_buffer, output_buffer)
        elif agg_op == 'mean':
            ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
                                      skip_key, input_buffer, output_buffer)
        elif agg_op == 'std':
            ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
                                     skip_key, input_buffer, output_buffer)
        elif agg_op == 'count':
            ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
                                       skip_key, input_buffer, output_buffer)
        elif agg_op == 'count_distinct':
            ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
                                                skip_key, input_buffer, output_buffer)
        elif agg_op == 'sorted_count_distinct':
            ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
                                                       skip_key, input_buffer, output_buffer)
        else:
            raise KeyError('Unknown aggregation operation ' + str(agg_op))
        if bool_arr is not None:
            # drop the skip_key row from the aggregated output as well
            output_buffer = np.delete(output_buffer, skip_key)
        ct_agg.addcol(output_buffer, name=output_col_name)
        del output_buffer
    # remove the placeholder column that pre-sized the output table
    ct_agg.delcol('tmp_col_bquery__')
def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
    """
    Aggregate the ctable

    groupby_cols: a list of columns to groupby over
    agg_list: the aggregation operations, which can be:
     - a list of column names (output has same name and sum is performed)
       ['m1', 'm2', ...]
     - a list of lists, each list contains input column name and operation
       [['m1', 'sum'], ['m2', 'mean'], ...]
     - a list of lists, each list contains input column name, operation and
       output column name
       [['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]

    Currently supported aggregation operations are:
     - 'sum'
     - 'count'
     - 'count_distinct'
     - 'sorted_count_distinct', data should have been
       previously presorted
     - 'mean', arithmetic mean (average)
     - 'std', standard deviation
    (NOTE(review): 'count_na' used to be listed here but is not implemented
    in create_agg_ctable/aggregate_groups)

    bool_arr: optional boolean filter applied to the groupby factorization input
    rootdir: the aggregation ctable rootdir
    """
    # factorize the groupby columns into one group index per row
    carray_factor, nr_groups, skip_key = \
        self.make_group_index(groupby_cols, bool_arr)
    # check if the bool_arr actually filters; an all-True filter is a no-op
    if bool_arr is not None and np.all(bool_arr):
        bool_arr = None
    if bool_arr is None:
        expectedlen = nr_groups
    else:
        # one group (skip_key) collects the filtered-out rows and is dropped
        expectedlen = nr_groups - 1
    ct_agg, dtype_dict, agg_ops = \
        self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
    # perform aggregation
    self.aggregate_groups(ct_agg, nr_groups, skip_key,
                          carray_factor, groupby_cols,
                          agg_ops, dtype_dict,
                          bool_arr=bool_arr)
    # clean up everything that was used
    self.clean_tmp_rootdir()
    return ct_agg
# groupby helper functions
def factorize_groupby_cols(self, groupby_cols):
    """
    Factorize all columns that are used in the groupby.

    Uses the cached `.factor`/`.values` carrays when available; when
    auto_cache is on, missing caches are created first. Otherwise the
    columns are factorized in memory.

    :param groupby_cols: list of column names
    :return: (factor_list, values_list) — per column, the factor carray
        (group label per row) and the carray of unique values
    """
    # first check if the factorized arrays already exist
    # unless we need to refresh the cache
    factor_list = []
    values_list = []
    # factorize the groupby columns
    for col in groupby_cols:
        if self.auto_cache or self.cache_valid(col):
            # create factorization cache if needed
            if not self.cache_valid(col):
                self.cache_factor([col])
            col_rootdir = self[col].rootdir
            col_factor_rootdir = col_rootdir + '.factor'
            col_values_rootdir = col_rootdir + '.values'
            col_carray_factor = \
                bcolz.carray(rootdir=col_factor_rootdir, mode='r')
            col_carray_values = \
                bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # in-memory factorization; `values` comes back as a mapping of
            # label -> unique value
            col_carray_factor, values = ctable_ext.factorize(self[col])
            col_carray_values = \
                bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype))
        factor_list.append(col_carray_factor)
        values_list.append(col_carray_values)
    return factor_list, values_list
@staticmethod
def _int_array_hash(input_list):
    """
    Calculate a combined hash value over multiple equal-length integer
    arrays (tuple-hash style mixing); not used at the moment.

    :param input_list: list of equal-length integer numpy arrays
    :return: bcolz carray with one mixed hash value per row
    """
    list_len = len(input_list)
    arr_len = len(input_list[0])
    # np.long was deprecated in NumPy 1.20 and removed in 1.24;
    # use the explicit 64-bit integer type instead
    mult_arr = np.full(arr_len, 1000003, dtype=np.int64)
    value_arr = np.full(arr_len, 0x345678, dtype=np.int64)
    for i, current_arr in enumerate(input_list):
        index = list_len - i - 1
        value_arr ^= current_arr
        value_arr *= mult_arr
        mult_arr += (82520 + index + index)
    value_arr += 97531
    result_carray = bcolz.carray(value_arr)
    del value_arr
    return result_carray
def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
    """
    Create a unique, factorized column out of several individual columns.

    Combines the per-column factors into one group index, factorizes that
    index and (optionally) persists the result next to the table, guarded
    by a lock file so concurrent processes do not clobber each other.

    Parameters
    ----------
    factor_list : list of factor carrays, one per groupby column
    groupby_cols : list of the column names (used for the cache name)
    cache : bool, persist the combined factorization on disk when True

    Returns
    -------
    (carray_factor, carray_values)
    """
    if not self.rootdir:
        # in-memory scenario
        input_rootdir = None
        col_rootdir = None
        col_factor_rootdir = None
        col_values_rootdir = None
        col_factor_rootdir_tmp = None
        col_values_rootdir_tmp = None
    else:
        # temporary dirs; moved to their final location below when caching
        input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
        col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
        col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')

    # create combination of groupby columns
    group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
    factor_table = bcolz.ctable(factor_list, names=groupby_cols)
    ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
    ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)

    # now factorize the results
    carray_factor = \
        bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
    carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
    carray_factor.flush()

    carray_values = \
        bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
    carray_values.flush()
    del group_array

    if cache:
        # clean up the temporary file
        rm_file_or_dir(input_rootdir, ignore_errors=True)

    if cache:
        # official end destination
        col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
        col_factor_rootdir = col_rootdir + '.factor'
        col_values_rootdir = col_rootdir + '.values'
        lock_file = col_rootdir + '.lock'

        # best-effort file lock: append a uuid, read the file back, and we
        # hold the lock only if our uuid is the first line
        # only works for linux
        # NOTE(review): races between the exists() check and the append are
        # possible — this is not a robust lock
        if not os.path.exists(lock_file):
            uid = str(uuid.uuid4())
            try:
                with open(lock_file, 'a+') as fn:
                    fn.write(uid + '\n')
                with open(lock_file, 'r') as fn:
                    temp = fn.read().splitlines()
                if temp[0] == uid:
                    lock = True
                else:
                    lock = False
                del temp
            # NOTE(review): bare except downgrades ANY error (even
            # KeyboardInterrupt) to "no lock" — consider `except Exception`
            except:
                lock = False
        else:
            lock = False

        if lock:
            # replace the official cache dirs with the freshly built ones
            rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
            shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')

            rm_file_or_dir(col_values_rootdir, ignore_errors=False)
            shutil.move(col_values_rootdir_tmp, col_values_rootdir)
            carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # another process has a lock, we will work with our current files and clean up later
            # NOTE(review): this registers the *official* cache dirs for
            # cleanup while the carrays still point at the tmp dirs — it
            # looks like the tmp dirs were intended here; verify
            self._dir_clean_list.append(col_factor_rootdir)
            self._dir_clean_list.append(col_values_rootdir)

    return carray_factor, carray_values
def make_group_index(self, groupby_cols, bool_arr):
    '''Create the unique-group factorization used by the groupby loop.

    Args:
        groupby_cols (list): column names to group over; their combined
            values define the unique groups
        bool_arr: optional boolean filter array (or None); rows where it
            is False are excluded from the resulting groups

    Returns:
        carray: (carray_factor) per-row index of the group each row belongs to
        int: (nr_groups) the number of resulting groups
        int: (skip_key) index of the group to skip during aggregation
            (the filtered-out rows); equals nr_groups when nothing is skipped
    '''
    factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
    # create unique groups for groupby loop
    if len(factor_list) == 0:
        # no columns to groupby over, so directly aggregate the measure
        # columns to 1 total
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
        carray_values = ['Total']
    elif len(factor_list) == 1:
        # single column groupby, the groupby output column
        # here is 1:1 to the values
        carray_factor = factor_list[0]
        carray_values = values_list[0]
    else:
        # multi column groupby
        # first combine the factorized columns to single values
        if self.group_cache_valid(col_list=groupby_cols):
            # there is a group cache that we can use
            col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
            col_factor_rootdir = col_rootdir + '.factor'
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
            col_values_rootdir = col_rootdir + '.values'
            carray_values = bcolz.carray(rootdir=col_values_rootdir)
        else:
            # create a brand new groupby col combination
            carray_factor, carray_values = \
                self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
    nr_groups = len(carray_values)
    skip_key = None
    if bool_arr is not None:
        # make all non relevant combinations -1
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.eval(
            '(factor + 1) * bool - 1',
            user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
        # now check how many unique values there are left
        tmp_rootdir = self.create_tmp_rootdir()
        labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
        carray_factor, values = ctable_ext.factorize(carray_factor, labels)
        # values might contain one value too much (-1) (no direct lookup
        # possible because values is a reversed dict)
        filter_check = \
            [key for key, value in values.items() if value == -1]
        if filter_check:
            skip_key = filter_check[0]
        # the new nr of groups depends on the outcome after filtering
        nr_groups = len(values)
    # using nr_groups as a total length might be one one off due to the skip_key
    # (skipping a row in aggregation)
    # but that is okay normally
    if skip_key is None:
        # if we shouldn't skip a row, set it at the first row after the total number of groups
        skip_key = nr_groups
    return carray_factor, nr_groups, skip_key
def create_tmp_rootdir(self):
    """Create a temporary rootdir that can be destroyed again later.

    The directory is registered in ``self._dir_clean_list`` so that
    ``clean_tmp_rootdir`` can remove it afterwards.  In-memory tables
    (no ``rootdir``) do not need one, so ``None`` is returned for them.
    """
    if not self.rootdir:
        return None
    temp_dir = tempfile.mkdtemp(prefix='bcolz-')
    self._dir_clean_list.append(temp_dir)
    return temp_dir
def clean_tmp_rootdir(self):
    """Delete every temporary rootdir registered during this operation
    and unregister it from the cleanup list."""
    # iterate over a snapshot because the list shrinks while we work
    for pending_dir in self._dir_clean_list[:]:
        rm_file_or_dir(pending_dir)
        self._dir_clean_list.remove(pending_dir)
def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
    '''Create a container for the output table, a dictionary describing its
    columns and a list of tuples describing the aggregation operations.

    Args:
        groupby_cols (list): a list of columns to groupby over
        agg_list (list): the aggregation operations (see groupby for more info)
        expectedlen (int): expected length of output table
        rootdir (string): the directory to write the table to

    Returns:
        ctable: A table in the correct format for containing the output of
            the specified aggregation operations.
        dict: (dtype_dict) dictionary describing columns to create
        list: (agg_ops) list of tuples of the form:
            (input_col_name, output_col_name, agg_op)

    Raises:
        NotImplementedError: when an unknown aggregation operation is requested
    '''
    dtype_dict = {}
    # include all the groupby columns
    for col in groupby_cols:
        dtype_dict[col] = self[col].dtype
    agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
    agg_ops = []
    for agg_info in agg_list:
        if not isinstance(agg_info, list):
            # example: ['m1', 'm2', ...]
            # default operation (sum) and default output column name (same as input)
            output_col_name = agg_info
            input_col_name = agg_info
            agg_op = 'sum'
        else:
            input_col_name = agg_info[0]
            agg_op = agg_info[1]
            if len(agg_info) == 2:
                # example: [['m1', 'sum'], ['m2', 'mean'], ...]
                # default output column name
                output_col_name = input_col_name
            else:
                # example: [['m1', 'sum', 'mnew1'], ['m1', 'mean', 'mnew2'], ...]
                # fully specified
                output_col_name = agg_info[2]
        if agg_op not in agg_ops_list:
            raise NotImplementedError(
                'Unknown Aggregation Type: ' + str(agg_op))
        # choose output column dtype based on aggregation operation and
        # input column dtype
        # TODO: check if the aggregation columns is numeric
        if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
            output_col_dtype = np.dtype(np.int64)
        elif agg_op in ('mean', 'std'):
            output_col_dtype = np.dtype(np.float64)
        else:
            output_col_dtype = self[input_col_name].dtype
        dtype_dict[output_col_name] = output_col_dtype
        # save output
        agg_ops.append((input_col_name, output_col_name, agg_op))
    # create aggregation table; use the builtin bool dtype because the
    # np.bool alias was deprecated (numpy 1.20) and removed (numpy 1.24)
    ct_agg = bcolz.ctable(
        np.zeros(expectedlen, [('tmp_col_bquery__', bool)]),
        expectedlen=expectedlen,
        rootdir=rootdir)
    return ct_agg, dtype_dict, agg_ops
def where_terms(self, term_list, cache=False):
    """
    Create a boolean array where `term_list` is true.
    A terms list has a [(col, operator, value), ..] construction.
    Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    :param term_list: list of (column, operator, value) filter terms
    :param cache: when True, back the boolean array by a temporary rootdir
    :return: a boolean carray with one entry per row
    :raise ValueError: for a malformed term list
    :raise KeyError: for an unknown column or operator
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")
    # operator -> numeric id understood by ctable_ext.apply_where_terms
    op_id_map = {
        '==': 1, 'eq': 1,
        '!=': 2, 'neq': 2,
        'in': 3,
        'nin': 4, 'not in': 4,
        '>': 5,
        '>=': 6,
        '<': 7,
        '<=': 8,
    }
    col_list = []
    op_list = []
    value_list = []
    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]
        # check values (str() for py3 compat; was unicode(), and the sibling
        # where_terms_factorization_check already used str())
        if filter_col not in self.cols:
            raise KeyError(str(filter_col) + ' not in table')
        op_id = op_id_map.get(filter_operator)
        if op_id is None:
            raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
        if op_id in [3, 4]:
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")
            if len(filter_value) == 1:
                # optimize lists of 1 value into a plain (in)equality
                op_id = 1 if op_id == 3 else 2
                filter_value = filter_value[0]
            else:
                filter_value = set(filter_value)
        # prepare input for filter creation
        col_list.append(filter_col)
        op_list.append(op_id)
        value_list.append(filter_value)
    if cache:
        # nb: this directory is not destroyed until the end of the groupby
        rootdir = self.create_tmp_rootdir()
    else:
        rootdir = None
    # create boolean array and fill it; builtin bool instead of the
    # removed np.bool alias
    boolarr = bcolz.carray(np.ones(0, dtype=bool), expectedlen=self.len, rootdir=rootdir, mode='w')
    ctable_iter = self[col_list].iter(out_flavor='tuple')
    ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)
    return boolarr
def where_terms_factorization_check(self, term_list):
    """
    Check whether the given where terms can possibly match, using the
    cached factorization values of each column.

    A terms list has a [(col, operator, value), ..] construction.
    Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    Columns without an on-disk factorization cannot be checked and are
    treated as (potentially) valid.

    :param term_list: list of (column, operator, value) terms
    :return: False when some term can never match, True otherwise
    :raise ValueError: for a malformed term list or `in` selection
    :raise KeyError: for an unknown column or operator
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")
    valid = True
    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]
        # check values (str() for py3 compat, consistent with the
        # operator error below)
        if filter_col not in self.cols:
            raise KeyError(str(filter_col) + ' not in table')
        col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
        if not os.path.exists(col_values_rootdir):
            # no factorization available for this column: cannot check
            break
        col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        col_values = set(col_carray)
        if filter_operator in ['in', 'not in', 'nin']:
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")
            # optimize lists of 1 value into a direct comparison
            if len(filter_value) == 1:
                filter_value = filter_value[0]
                if filter_operator == 'in':
                    filter_operator = '=='
                else:
                    filter_operator = '!='
            else:
                filter_value = set(filter_value)
        # NB: the condition must be *inside* any(); yielding the raw values
        # (as the previous version did) wrongly ignored falsy column values
        # such as 0 or ''
        if filter_operator in ['==', 'eq']:
            valid = filter_value in col_values
        elif filter_operator in ['!=', 'neq']:
            valid = any(val != filter_value for val in col_values)
        elif filter_operator in ['in']:
            valid = any(val in col_values for val in filter_value)
        elif filter_operator in ['nin', 'not in']:
            valid = any(val not in filter_value for val in col_values)
        elif filter_operator in ['>']:
            valid = any(val > filter_value for val in col_values)
        elif filter_operator in ['>=']:
            valid = any(val >= filter_value for val in col_values)
        elif filter_operator in ['<']:
            valid = any(val < filter_value for val in col_values)
        elif filter_operator in ['<=']:
            # bugfix: this branch previously tested `val >= filter_value`
            valid = any(val <= filter_value for val in col_values)
        else:
            raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
        # if one of the filters is blocking, we can stop
        if not valid:
            break
    return valid
def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
                            _max_len_subgroup=1000):
    """
    Expands the filter using a specified column.

    Delegates to ctable_ext.is_in_ordered_subgroups.
    NOTE(review): presumably this selects every row whose `basket_col`
    subgroup contains at least one row selected by `bool_arr`, assuming
    the table is ordered by `basket_col` -- confirm in the extension.

    Parameters
    ----------
    basket_col : str
        column that defines the subgroups; required
    bool_arr : carray or None
        boolean filter to expand; when None, None is returned
    _max_len_subgroup : int
        tuning parameter passed through to the extension

    Returns
    -------
    carray or None
        the expanded boolean filter
    """
    assert basket_col is not None
    if bool_arr is None:
        return None
    # only use an on-disk tmp rootdir when the input filter is on-disk too
    if self.auto_cache and bool_arr.rootdir is not None:
        rootdir = self.create_tmp_rootdir()
    else:
        rootdir = None
    return \
        ctable_ext.is_in_ordered_subgroups(
            self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
            _max_len_subgroup=_max_len_subgroup)
|
visualfabriq/bquery | bquery/ctable.py | ctable.aggregate_groups | python | def aggregate_groups(self, ct_agg, nr_groups, skip_key,
carray_factor, groupby_cols, agg_ops,
dtype_dict, bool_arr=None):
'''Perform aggregation and place the result in the given ctable.
Args:
ct_agg (ctable): the table to hold the aggregation
nr_groups (int): the number of groups (number of rows in output table)
skip_key (int): index of the output row to remove from results (used for filtering)
carray_factor: the carray holding, for each row in the table, a reference to the unique group index
groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
output_agg_ops (list): list of tuples of the form: (input_col, agg_op)
input_col (string): name of the column to act on
agg_op (int): aggregation operation to perform
bool_arr: a boolean array containing the filter
'''
# this creates the groupby columns
for col in groupby_cols:
result_array = ctable_ext.groupby_value(self[col], carray_factor,
nr_groups, skip_key)
if bool_arr is not None:
result_array = np.delete(result_array, skip_key)
ct_agg.addcol(result_array, name=col)
del result_array
# this creates the aggregation columns
for input_col_name, output_col_name, agg_op in agg_ops:
input_col = self[input_col_name]
output_col_dtype = dtype_dict[output_col_name]
input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
if agg_op == 'sum':
ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'mean':
ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'std':
ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count':
ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count_distinct':
ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'sorted_count_distinct':
ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
else:
raise KeyError('Unknown aggregation operation ' + str(agg_op))
if bool_arr is not None:
output_buffer = np.delete(output_buffer, skip_key)
ct_agg.addcol(output_buffer, name=output_col_name)
del output_buffer
ct_agg.delcol('tmp_col_bquery__') | Perform aggregation and place the result in the given ctable.
Args:
ct_agg (ctable): the table to hold the aggregation
nr_groups (int): the number of groups (number of rows in output table)
skip_key (int): index of the output row to remove from results (used for filtering)
carray_factor: the carray holding, for each row in the table, a reference to the unique group index
groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
output_agg_ops (list): list of tuples of the form: (input_col, agg_op)
input_col (string): name of the column to act on
agg_op (int): aggregation operation to perform
bool_arr: a boolean array containing the filter | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L194-L260 | null | class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
    """Initialize the ctable and decide whether factorizations are
    automatically cached on disk.

    auto_cache is enabled for on-disk tables only: either explicitly via
    the `auto_cache` keyword, or implicitly for any on-disk table that is
    not opened read-only.
    """
    super(ctable, self).__init__(*args, **kwargs)
    # check autocaching
    if self.rootdir and kwargs.get('auto_cache') is True:
        # explicit auto_cache
        self.auto_cache = True
    elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
        # implicit auto_cache
        self.auto_cache = True
    else:
        self.auto_cache = False
    # NB: removed the leftover `self.auto_cache = True  # debug` override,
    # which forced caching on for in-memory and read-only tables as well
    # (cache_factor raises TypeError for in-memory tables)
    self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
group_name = '_'.join(sorted(col_list))
return group_name
def cache_valid(self, col):
    """
    Check whether a column has an on-disk factorization cache.

    Both the column's own attribute file and the cached values directory
    must exist; in-memory tables never have a cache.

    :param col: column name
    :return: bool
    """
    if not self.rootdir:
        return False
    attrs_file = self[col].rootdir + '/__attrs__'
    values_attrs_file = self[col].rootdir + '.values/__attrs__'
    return os.path.exists(attrs_file) and os.path.exists(values_attrs_file)
def group_cache_valid(self, col_list):
    """
    Check whether a combined factorization cache exists for a column group.

    Requires the group's cached values directory as well as the attribute
    file of every participating column; in-memory tables never have one.

    :param col_list: list of column names in the group
    :return: bool
    """
    if not self.rootdir:
        return False
    group_attrs = os.path.join(
        self.rootdir, self.create_group_base_name(col_list)) + '.values/__attrs__'
    group_exists = os.path.exists(group_attrs)
    # every column of the group must still have its own attribute file
    cols_cached = all([os.path.exists(self[col].rootdir + '/__attrs__')
                       for col in col_list])
    return group_exists and cols_cached
def cache_factor(self, col_list, refresh=False):
    """
    Create on-disk factorization caches for the given columns.

    For every column two helper carrays are written next to the column's
    rootdir: `<col>.factor` (one label per row, same length as the table)
    and `<col>.values` (the unique values, one entry per label).

    TODO: these should be hidden helper carrays, not something a user
    would normally see as columns.

    :param col_list: a column name or a list of column names
    :param refresh: when True, all existing .factor/.values caches of the
        table are removed first and rebuilt
    :raise TypeError: for in-memory tables, which cannot cache
    """
    if not self.rootdir:
        raise TypeError('Only out-of-core ctables can have '
                        'factorization caching at the moment')
    if not isinstance(col_list, list):
        col_list = [col_list]
    if refresh:
        # wipe every factorization cache of this table
        kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
        for kill_dir in kill_list:
            rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
    for col in col_list:
        # create cache if needed
        if refresh or not self.cache_valid(col):
            # todo: also add locking mechanism here
            # create directories
            col_rootdir = self[col].rootdir
            col_factor_rootdir = col_rootdir + '.factor'
            col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
            col_values_rootdir = col_rootdir + '.values'
            col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
            # create factor
            carray_factor = \
                bcolz.carray([], dtype='int64', expectedlen=self.size,
                             rootdir=col_factor_rootdir_tmp, mode='w')
            _, values = \
                ctable_ext.factorize(self[col], labels=carray_factor)
            carray_factor.flush()
            # build in a tmp dir first, then move into place, so a reader
            # never sees a half-written cache
            rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
            shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
            # create values
            carray_values = \
                bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
                             rootdir=col_values_rootdir_tmp, mode='w')
            carray_values.flush()
            rm_file_or_dir(col_values_rootdir, ignore_errors=True)
            shutil.move(col_values_rootdir_tmp, col_values_rootdir)
def unique(self, col_or_col_list):
    """
    Return the unique values of a column, or a list of unique-value lists
    when a list of columns is given.

    Uses (and, with auto_cache, creates) the on-disk factorization cache
    when available; otherwise factorizes on the fly in memory.

    :param col_or_col_list: a column name or a list of column names
    :return: list of unique values, or a list of such lists
    """
    if isinstance(col_or_col_list, list):
        col_is_list = True
        col_list = col_or_col_list
    else:
        col_is_list = False
        col_list = [col_or_col_list]
    output = []
    for col in col_list:
        if self.auto_cache or self.cache_valid(col):
            # create factorization cache
            if not self.cache_valid(col):
                self.cache_factor([col])
            # retrieve values from existing disk-based factorization
            col_values_rootdir = self[col].rootdir + '.values'
            carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
            values = list(carray_values)
        else:
            # factorize on-the-fly
            _, values = ctable_ext.factorize(self[col])
            values = values.values()
        output.append(values)
    if not col_is_list:
        # single column in, single list out
        output = output[0]
    return output
def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
    """
    Aggregate the ctable

    groupby_cols: a list of columns to groupby over
    agg_list: the aggregation operations, which can be:
     - a list of column names (output has same name and sum is performed)
       ['m1', 'm2', ...]
     - a list of lists, each list contains input column name and operation
       [['m1', 'sum'], ['m2', 'mean'], ...]
     - a list of lists, each list contains input column name, operation and
       output column name
       [['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]

    Currently supported aggregation operations are:
    - 'sum'
    - 'count'
    - 'count_na'  (NOTE(review): listed here but not accepted by
      create_agg_ctable -- confirm)
    - 'count_distinct'
    - 'sorted_count_distinct', data should have been
      previously presorted
    - 'mean', arithmetic mean (average)
    - 'std', standard deviation

    bool_arr: optional boolean filter array; only True rows take part
    rootdir: the aggregation ctable rootdir
    """
    carray_factor, nr_groups, skip_key = \
        self.make_group_index(groupby_cols, bool_arr)
    # check if the bool_arr actually filters; when it selects every row it
    # can be dropped entirely
    if bool_arr is not None and np.all(bool_arr):
        bool_arr = None
    if bool_arr is None:
        expectedlen = nr_groups
    else:
        # one group (the filtered-out rows) is removed from the output
        expectedlen = nr_groups - 1
    ct_agg, dtype_dict, agg_ops = \
        self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
    # perform aggregation
    self.aggregate_groups(ct_agg, nr_groups, skip_key,
                          carray_factor, groupby_cols,
                          agg_ops, dtype_dict,
                          bool_arr=bool_arr)
    # clean up everything that was used
    self.clean_tmp_rootdir()
    return ct_agg
# groupby helper functions
def factorize_groupby_cols(self, groupby_cols):
    """
    Factorize all columns used in the groupby.

    Cached carrays are used when available; when auto_cache is on, missing
    caches are created first.

    :param groupby_cols: list of column names
    :return: (factor_list, values_list) -- per column, a label carray (one
        label per row) and a carray with the unique values
    """
    # first check if the factorized arrays already exist
    # unless we need to refresh the cache
    factor_list = []
    values_list = []
    # factorize the groupby columns
    for col in groupby_cols:
        if self.auto_cache or self.cache_valid(col):
            # create factorization cache if needed
            if not self.cache_valid(col):
                self.cache_factor([col])
            col_rootdir = self[col].rootdir
            col_factor_rootdir = col_rootdir + '.factor'
            col_values_rootdir = col_rootdir + '.values'
            col_carray_factor = \
                bcolz.carray(rootdir=col_factor_rootdir, mode='r')
            col_carray_values = \
                bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # in-memory factorization
            col_carray_factor, values = ctable_ext.factorize(self[col])
            col_carray_values = \
                bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype))
        factor_list.append(col_carray_factor)
        values_list.append(col_carray_values)
    return factor_list, values_list
@staticmethod
def _int_array_hash(input_list):
    """
    Calculate a combined hash value over multiple integer arrays,
    element-wise. Not used at the moment.

    The per-element combination follows the classic tuple-hash recipe
    (xor with the value, multiply by 1000003, bump the multiplier).

    Parameters
    ----------
    input_list : list of equal-length integer arrays

    Returns
    -------
    carray with one hash value per row
    """
    list_len = len(input_list)
    arr_len = len(input_list[0])
    # np.long was deprecated and removed in numpy >= 1.24; int64 matches
    # its 64-bit layout on the platforms this ran on. Overflow wraps,
    # which is fine for hashing.
    mult_arr = np.full(arr_len, 1000003, dtype=np.int64)
    value_arr = np.full(arr_len, 0x345678, dtype=np.int64)
    for i, current_arr in enumerate(input_list):
        index = list_len - i - 1
        value_arr ^= current_arr
        value_arr *= mult_arr
        mult_arr += (82520 + index + index)
    value_arr += 97531
    result_carray = bcolz.carray(value_arr)
    del value_arr
    return result_carray
def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
    """
    Create a unique, factorized column out of several individual columns

    Parameters
    ----------
    factor_list : list of label carrays, one per groupby column
    groupby_cols : list of the corresponding column names
    cache : bool
        when True (and the table is on-disk), try to persist the result
        next to the table so later groupbys can reuse it

    Returns
    -------
    (carray_factor, carray_values): per-row group label and unique values
    """
    if not self.rootdir:
        # in-memory scenario
        input_rootdir = None
        col_rootdir = None
        col_factor_rootdir = None
        col_values_rootdir = None
        col_factor_rootdir_tmp = None
        col_values_rootdir_tmp = None
    else:
        # temporary
        input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
        col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
        col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
    # create combination of groupby columns
    group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
    factor_table = bcolz.ctable(factor_list, names=groupby_cols)
    ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
    ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)
    # now factorize the results
    carray_factor = \
        bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
    carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
    carray_factor.flush()
    carray_values = \
        bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
    carray_values.flush()
    del group_array
    if cache:
        # clean up the temporary file
        rm_file_or_dir(input_rootdir, ignore_errors=True)
    if cache:
        # official end destination
        col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
        col_factor_rootdir = col_rootdir + '.factor'
        col_values_rootdir = col_rootdir + '.values'
        lock_file = col_rootdir + '.lock'
        # poor-man's file lock: the process whose uid ends up on the first
        # line of the lock file wins; only works for linux
        if not os.path.exists(lock_file):
            uid = str(uuid.uuid4())
            try:
                with open(lock_file, 'a+') as fn:
                    fn.write(uid + '\n')
                with open(lock_file, 'r') as fn:
                    temp = fn.read().splitlines()
                if temp[0] == uid:
                    lock = True
                else:
                    lock = False
                del temp
            except:
                # NOTE(review): bare except silently downgrades any error
                # (including KeyboardInterrupt) to "no lock"
                lock = False
        else:
            lock = False
        if lock:
            # we own the lock: move the tmp dirs into their final place
            rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
            shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')
            rm_file_or_dir(col_values_rootdir, ignore_errors=False)
            shutil.move(col_values_rootdir_tmp, col_values_rootdir)
            carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # another process has a lock, we will work with our current files and clean up later
            self._dir_clean_list.append(col_factor_rootdir)
            self._dir_clean_list.append(col_values_rootdir)
    return carray_factor, carray_values
def make_group_index(self, groupby_cols, bool_arr):
    '''Create the unique-group factorization used by the groupby loop.

    Args:
        groupby_cols (list): column names to group over; their combined
            values define the unique groups
        bool_arr: optional boolean filter array (or None); rows where it
            is False are excluded from the resulting groups

    Returns:
        carray: (carray_factor) per-row index of the group each row belongs to
        int: (nr_groups) the number of resulting groups
        int: (skip_key) index of the group to skip during aggregation
            (the filtered-out rows); equals nr_groups when nothing is skipped
    '''
    factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
    # create unique groups for groupby loop
    if len(factor_list) == 0:
        # no columns to groupby over, so directly aggregate the measure
        # columns to 1 total
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
        carray_values = ['Total']
    elif len(factor_list) == 1:
        # single column groupby, the groupby output column
        # here is 1:1 to the values
        carray_factor = factor_list[0]
        carray_values = values_list[0]
    else:
        # multi column groupby
        # first combine the factorized columns to single values
        if self.group_cache_valid(col_list=groupby_cols):
            # there is a group cache that we can use
            col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
            col_factor_rootdir = col_rootdir + '.factor'
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
            col_values_rootdir = col_rootdir + '.values'
            carray_values = bcolz.carray(rootdir=col_values_rootdir)
        else:
            # create a brand new groupby col combination
            carray_factor, carray_values = \
                self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
    nr_groups = len(carray_values)
    skip_key = None
    if bool_arr is not None:
        # make all non relevant combinations -1
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.eval(
            '(factor + 1) * bool - 1',
            user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
        # now check how many unique values there are left
        tmp_rootdir = self.create_tmp_rootdir()
        labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
        carray_factor, values = ctable_ext.factorize(carray_factor, labels)
        # values might contain one value too much (-1) (no direct lookup
        # possible because values is a reversed dict)
        filter_check = \
            [key for key, value in values.items() if value == -1]
        if filter_check:
            skip_key = filter_check[0]
        # the new nr of groups depends on the outcome after filtering
        nr_groups = len(values)
    # using nr_groups as a total length might be one one off due to the skip_key
    # (skipping a row in aggregation)
    # but that is okay normally
    if skip_key is None:
        # if we shouldn't skip a row, set it at the first row after the total number of groups
        skip_key = nr_groups
    return carray_factor, nr_groups, skip_key
def create_tmp_rootdir(self):
    """
    Create a throwaway rootdir, registered for later cleanup via
    clean_tmp_rootdir.

    Returns the directory path for on-disk tables, or None for in-memory
    tables (which do not need one).
    """
    new_rootdir = None
    if self.rootdir:
        new_rootdir = tempfile.mkdtemp(prefix='bcolz-')
        self._dir_clean_list.append(new_rootdir)
    return new_rootdir
def clean_tmp_rootdir(self):
    """
    Delete all temporary rootdirs created during this operation, in the
    order they were registered, and empty the cleanup list.
    """
    while self._dir_clean_list:
        rm_file_or_dir(self._dir_clean_list.pop(0))
def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
    '''Create a container for the output table, a dictionary describing its
    columns and a list of tuples describing the aggregation operations.

    Args:
        groupby_cols (list): a list of columns to groupby over
        agg_list (list): the aggregation operations (see groupby for more info)
        expectedlen (int): expected length of output table
        rootdir (string): the directory to write the table to

    Returns:
        ctable: A table in the correct format for containing the output of
            the specified aggregation operations.
        dict: (dtype_dict) dictionary describing columns to create
        list: (agg_ops) list of tuples of the form:
            (input_col_name, output_col_name, agg_op)

    Raises:
        NotImplementedError: when an unknown aggregation operation is requested
    '''
    dtype_dict = {}
    # include all the groupby columns
    for col in groupby_cols:
        dtype_dict[col] = self[col].dtype
    agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
    agg_ops = []
    for agg_info in agg_list:
        if not isinstance(agg_info, list):
            # example: ['m1', 'm2', ...]
            # default operation (sum) and default output column name (same as input)
            output_col_name = agg_info
            input_col_name = agg_info
            agg_op = 'sum'
        else:
            input_col_name = agg_info[0]
            agg_op = agg_info[1]
            if len(agg_info) == 2:
                # example: [['m1', 'sum'], ['m2', 'mean'], ...]
                # default output column name
                output_col_name = input_col_name
            else:
                # example: [['m1', 'sum', 'mnew1'], ['m1', 'mean', 'mnew2'], ...]
                # fully specified
                output_col_name = agg_info[2]
        if agg_op not in agg_ops_list:
            raise NotImplementedError(
                'Unknown Aggregation Type: ' + str(agg_op))
        # choose output column dtype based on aggregation operation and
        # input column dtype
        # TODO: check if the aggregation columns is numeric
        if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
            output_col_dtype = np.dtype(np.int64)
        elif agg_op in ('mean', 'std'):
            output_col_dtype = np.dtype(np.float64)
        else:
            output_col_dtype = self[input_col_name].dtype
        dtype_dict[output_col_name] = output_col_dtype
        # save output
        agg_ops.append((input_col_name, output_col_name, agg_op))
    # create aggregation table; use the builtin bool dtype because the
    # np.bool alias was deprecated (numpy 1.20) and removed (numpy 1.24)
    ct_agg = bcolz.ctable(
        np.zeros(expectedlen, [('tmp_col_bquery__', bool)]),
        expectedlen=expectedlen,
        rootdir=rootdir)
    return ct_agg, dtype_dict, agg_ops
def where_terms(self, term_list, cache=False):
    """
    Create a boolean array where `term_list` is true.
    A terms list has a [(col, operator, value), ..] construction.
    Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    :param term_list: list of (column, operator, value) filter terms
    :param cache: when True, back the boolean array by a temporary rootdir
    :return: a boolean carray with one entry per row
    :raise ValueError: for a malformed term list
    :raise KeyError: for an unknown column or operator
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")
    # operator -> numeric id understood by ctable_ext.apply_where_terms
    op_id_map = {
        '==': 1, 'eq': 1,
        '!=': 2, 'neq': 2,
        'in': 3,
        'nin': 4, 'not in': 4,
        '>': 5,
        '>=': 6,
        '<': 7,
        '<=': 8,
    }
    col_list = []
    op_list = []
    value_list = []
    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]
        # check values (str() for py3 compat; was unicode(), and the sibling
        # where_terms_factorization_check already used str())
        if filter_col not in self.cols:
            raise KeyError(str(filter_col) + ' not in table')
        op_id = op_id_map.get(filter_operator)
        if op_id is None:
            raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
        if op_id in [3, 4]:
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")
            if len(filter_value) == 1:
                # optimize lists of 1 value into a plain (in)equality
                op_id = 1 if op_id == 3 else 2
                filter_value = filter_value[0]
            else:
                filter_value = set(filter_value)
        # prepare input for filter creation
        col_list.append(filter_col)
        op_list.append(op_id)
        value_list.append(filter_value)
    if cache:
        # nb: this directory is not destroyed until the end of the groupby
        rootdir = self.create_tmp_rootdir()
    else:
        rootdir = None
    # create boolean array and fill it; builtin bool instead of the
    # removed np.bool alias
    boolarr = bcolz.carray(np.ones(0, dtype=bool), expectedlen=self.len, rootdir=rootdir, mode='w')
    ctable_iter = self[col_list].iter(out_flavor='tuple')
    ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)
    return boolarr
def where_terms_factorization_check(self, term_list):
    """
    Pre-check whether a where-terms filter can possibly match, using the
    cached factorization (unique values) of each column.

    A terms list has a [(col, operator, value), ..] construction.
    Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    Returns True when every checkable term can match at least one of the
    column's known unique values.  When a column has no on-disk
    factorization we cannot prove anything, so we stop and report valid.

    :param term_list: list/set/tuple of (column, operator, value) terms
    :return: bool (False means the filter provably matches nothing)
    :raise ValueError: for non-list input or an empty 'in'/'nin' value list
    :raise KeyError: for an unknown column or operator
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")
    valid = True
    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]
        # check values
        if filter_col not in self.cols:
            raise KeyError(unicode(filter_col) + ' not in table')
        col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
        if not os.path.exists(col_values_rootdir):
            # no factorization available: cannot prove the filter empty
            break
        col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        col_values = set(col_carray)
        if filter_operator in ['in', 'not in', 'nin']:
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")
            # optimize lists of 1 value: rewrite 'in' -> '==', 'nin' -> '!='
            if len(filter_value) == 1:
                filter_value = filter_value[0]
                if filter_operator == 'in':
                    filter_operator = '=='
                else:
                    filter_operator = '!='
            else:
                filter_value = set(filter_value)
        # bug fix: the generators below must yield the *comparison result*;
        # the previous `any(val for val in ... if cond)` form yielded the raw
        # value, so a matching falsy value (0, '') was wrongly counted as a
        # miss.  Also fixed: the '<=' branch previously tested `val >= value`.
        if filter_operator in ['==', 'eq']:
            valid = filter_value in col_values
        elif filter_operator in ['!=', 'neq']:
            valid = any(val != filter_value for val in col_values)
        elif filter_operator in ['in']:
            valid = any(val in col_values for val in filter_value)
        elif filter_operator in ['nin', 'not in']:
            valid = any(val not in filter_value for val in col_values)
        elif filter_operator in ['>']:
            valid = any(val > filter_value for val in col_values)
        elif filter_operator in ['>=']:
            valid = any(val >= filter_value for val in col_values)
        elif filter_operator in ['<']:
            valid = any(val < filter_value for val in col_values)
        elif filter_operator in ['<=']:
            valid = any(val <= filter_value for val in col_values)
        else:
            raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
        # if one of the filters is blocking, we can stop
        if not valid:
            break
    return valid
def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
                            _max_len_subgroup=1000):
    """
    Expand the boolean filter `bool_arr` along the subgroups identified by
    `basket_col`; the actual expansion is delegated to
    ctable_ext.is_in_ordered_subgroups.

    Parameters
    ----------
    basket_col : name of the column identifying the subgroups (required)
    bool_arr : boolean filter to expand; None short-circuits to None
    _max_len_subgroup : internal size hint passed through to ctable_ext

    Returns
    -------
    The expanded filter produced by ctable_ext, or None when bool_arr is None
    """
    assert basket_col is not None
    if bool_arr is None:
        return None
    # keep the result on disk only when auto-caching and the input filter
    # itself is disk-backed; the temp dir is cleaned up after the groupby
    if self.auto_cache and bool_arr.rootdir is not None:
        rootdir = self.create_tmp_rootdir()
    else:
        rootdir = None
    return \
        ctable_ext.is_in_ordered_subgroups(
            self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
            _max_len_subgroup=_max_len_subgroup)
|
visualfabriq/bquery | bquery/ctable.py | ctable.factorize_groupby_cols | python | def factorize_groupby_cols(self, groupby_cols):
# first check if the factorized arrays already exist
# unless we need to refresh the cache
factor_list = []
values_list = []
# factorize the groupby columns
for col in groupby_cols:
if self.auto_cache or self.cache_valid(col):
# create factorization cache if needed
if not self.cache_valid(col):
self.cache_factor([col])
col_rootdir = self[col].rootdir
col_factor_rootdir = col_rootdir + '.factor'
col_values_rootdir = col_rootdir + '.values'
col_carray_factor = \
bcolz.carray(rootdir=col_factor_rootdir, mode='r')
col_carray_values = \
bcolz.carray(rootdir=col_values_rootdir, mode='r')
else:
col_carray_factor, values = ctable_ext.factorize(self[col])
col_carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype))
factor_list.append(col_carray_factor)
values_list.append(col_carray_values)
return factor_list, values_list | factorizes all columns that are used in the groupby
it will use cache carrays if available
if not yet auto_cache is valid, it will create cache carrays | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L318-L353 | null | class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
super(ctable, self).__init__(*args, **kwargs)
# check autocaching
if self.rootdir and kwargs.get('auto_cache') is True:
# explicit auto_cache
self.auto_cache = True
elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
# implicit auto_cache
self.auto_cache = True
else:
self.auto_cache = False
self.auto_cache = True # debug
self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
    """Build the canonical cache-file base name for a groupby column set."""
    # sort so that any ordering of the same columns maps to the same name
    return '_'.join(sorted(col_list))
def cache_valid(self, col):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_org_file_check = self[col].rootdir + '/__attrs__'
col_values_file_check = self[col].rootdir + '.values/__attrs__'
cache_valid = os.path.exists(col_org_file_check) and os.path.exists(col_values_file_check)
return cache_valid
def group_cache_valid(self, col_list):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_values_file_check = os.path.join(self.rootdir, self.create_group_base_name(col_list)) + \
'.values/__attrs__'
exists_group_index = os.path.exists(col_values_file_check)
missing_col_check = [1 for col in col_list if not os.path.exists(self[col].rootdir + '/__attrs__')]
cache_valid = (exists_group_index and not missing_col_check)
return cache_valid
def cache_factor(self, col_list, refresh=False):
"""
Existing todos here are: these should be hidden helper carrays
As in: not normal columns that you would normally see as a user
The factor (label index) carray is as long as the original carray
(and the rest of the table therefore)
But the (unique) values carray is not as long (as long as the number
of unique values)
:param col_list:
:param refresh:
:return:
"""
if not self.rootdir:
raise TypeError('Only out-of-core ctables can have '
'factorization caching at the moment')
if not isinstance(col_list, list):
col_list = [col_list]
if refresh:
kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
for kill_dir in kill_list:
rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
for col in col_list:
# create cache if needed
if refresh or not self.cache_valid(col):
# todo: also add locking mechanism here
# create directories
col_rootdir = self[col].rootdir
col_factor_rootdir = col_rootdir + '.factor'
col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
col_values_rootdir = col_rootdir + '.values'
col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
# create factor
carray_factor = \
bcolz.carray([], dtype='int64', expectedlen=self.size,
rootdir=col_factor_rootdir_tmp, mode='w')
_, values = \
ctable_ext.factorize(self[col], labels=carray_factor)
carray_factor.flush()
rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
# create values
carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
rootdir=col_values_rootdir_tmp, mode='w')
carray_values.flush()
rm_file_or_dir(col_values_rootdir, ignore_errors=True)
shutil.move(col_values_rootdir_tmp, col_values_rootdir)
def unique(self, col_or_col_list):
"""
Return a list of unique values of a column or a list of lists of column list
:param col_or_col_list: a column or a list of columns
:return:
"""
if isinstance(col_or_col_list, list):
col_is_list = True
col_list = col_or_col_list
else:
col_is_list = False
col_list = [col_or_col_list]
output = []
for col in col_list:
if self.auto_cache or self.cache_valid(col):
# create factorization cache
if not self.cache_valid(col):
self.cache_factor([col])
# retrieve values from existing disk-based factorization
col_values_rootdir = self[col].rootdir + '.values'
carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
values = list(carray_values)
else:
# factorize on-the-fly
_, values = ctable_ext.factorize(self[col])
values = values.values()
output.append(values)
if not col_is_list:
output = output[0]
return output
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
carray_factor, groupby_cols, agg_ops,
dtype_dict, bool_arr=None):
'''Perform aggregation and place the result in the given ctable.
Args:
ct_agg (ctable): the table to hold the aggregation
nr_groups (int): the number of groups (number of rows in output table)
skip_key (int): index of the output row to remove from results (used for filtering)
carray_factor: the carray for each row in the table a reference to the the unique group index
groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
output_agg_ops (list): list of tuples of the form: (input_col, agg_op)
input_col (string): name of the column to act on
agg_op (int): aggregation operation to perform
bool_arr: a boolean array containing the filter
'''
# this creates the groupby columns
for col in groupby_cols:
result_array = ctable_ext.groupby_value(self[col], carray_factor,
nr_groups, skip_key)
if bool_arr is not None:
result_array = np.delete(result_array, skip_key)
ct_agg.addcol(result_array, name=col)
del result_array
# this creates the aggregation columns
for input_col_name, output_col_name, agg_op in agg_ops:
input_col = self[input_col_name]
output_col_dtype = dtype_dict[output_col_name]
input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
if agg_op == 'sum':
ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'mean':
ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'std':
ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count':
ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count_distinct':
ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'sorted_count_distinct':
ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
else:
raise KeyError('Unknown aggregation operation ' + str(agg_op))
if bool_arr is not None:
output_buffer = np.delete(output_buffer, skip_key)
ct_agg.addcol(output_buffer, name=output_col_name)
del output_buffer
ct_agg.delcol('tmp_col_bquery__')
def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
"""
Aggregate the ctable
groupby_cols: a list of columns to groupby over
agg_list: the aggregation operations, which can be:
- a list of column names (output has same name and sum is performed)
['m1', 'm2', ...]
- a list of lists, each list contains input column name and operation
[['m1', 'sum'], ['m2', 'mean'], ...]
- a list of lists, each list contains input column name, operation and
output column name
[['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]
Currently supported aggregation operations are:
- 'sum'
- 'count'
- 'count_na'
- 'count_distinct'
- 'sorted_count_distinct', data should have been
previously presorted
- 'mean', arithmetic mean (average)
- 'std', standard deviation
boolarr: to be added (filtering the groupby factorization input)
rootdir: the aggregation ctable rootdir
"""
carray_factor, nr_groups, skip_key = \
self.make_group_index(groupby_cols, bool_arr)
# check if the bool_arr actually filters
if bool_arr is not None and np.all(bool_arr):
bool_arr = None
if bool_arr is None:
expectedlen = nr_groups
else:
expectedlen = nr_groups - 1
ct_agg, dtype_dict, agg_ops = \
self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
# perform aggregation
self.aggregate_groups(ct_agg, nr_groups, skip_key,
carray_factor, groupby_cols,
agg_ops, dtype_dict,
bool_arr=bool_arr)
# clean up everything that was used
self.clean_tmp_rootdir()
return ct_agg
# groupby helper functions
@staticmethod
def _int_array_hash(input_list):
    """
    Calculate an element-wise combined hash over multiple integer arrays
    (tuple-hash style mixing); not used at the moment.

    Parameters
    ----------
    input_list : list of equally-sized integer arrays, one per column

    Returns
    -------
    bcolz.carray with one hash value per row
    """
    list_len = len(input_list)
    arr_len = len(input_list[0])
    # bug fix: the np.long alias was removed in NumPy 1.24; use the
    # explicit 64-bit integer type instead
    mult_arr = np.full(arr_len, 1000003, dtype=np.int64)
    value_arr = np.full(arr_len, 0x345678, dtype=np.int64)
    for i, current_arr in enumerate(input_list):
        index = list_len - i - 1
        # xor/multiply mixing per input array, varying the multiplier
        value_arr ^= current_arr
        value_arr *= mult_arr
        mult_arr += (82520 + index + index)
    value_arr += 97531
    result_carray = bcolz.carray(value_arr)
    del value_arr
    return result_carray
def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
"""
Create a unique, factorized column out of several individual columns
Parameters
----------
factor_list
groupby_cols
cache
Returns
-------
"""
if not self.rootdir:
# in-memory scenario
input_rootdir = None
col_rootdir = None
col_factor_rootdir = None
col_values_rootdir = None
col_factor_rootdir_tmp = None
col_values_rootdir_tmp = None
else:
# temporary
input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
# create combination of groupby columns
group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
factor_table = bcolz.ctable(factor_list, names=groupby_cols)
ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)
# now factorize the results
carray_factor = \
bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
carray_factor.flush()
carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
carray_values.flush()
del group_array
if cache:
# clean up the temporary file
rm_file_or_dir(input_rootdir, ignore_errors=True)
if cache:
# official end destination
col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
col_factor_rootdir = col_rootdir + '.factor'
col_values_rootdir = col_rootdir + '.values'
lock_file = col_rootdir + '.lock'
# only works for linux
if not os.path.exists(lock_file):
uid = str(uuid.uuid4())
try:
with open(lock_file, 'a+') as fn:
fn.write(uid + '\n')
with open(lock_file, 'r') as fn:
temp = fn.read().splitlines()
if temp[0] == uid:
lock = True
else:
lock = False
del temp
except:
lock = False
else:
lock = False
if lock:
rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')
rm_file_or_dir(col_values_rootdir, ignore_errors=False)
shutil.move(col_values_rootdir_tmp, col_values_rootdir)
carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
else:
# another process has a lock, we will work with our current files and clean up later
self._dir_clean_list.append(col_factor_rootdir)
self._dir_clean_list.append(col_values_rootdir)
return carray_factor, carray_values
def make_group_index(self, groupby_cols, bool_arr):
'''Create unique groups for groupby loop
Args:
factor_list:
values_list:
groupby_cols:
bool_arr:
Returns:
carray: (carray_factor)
int: (nr_groups) the number of resulting groups
int: (skip_key)
'''
factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
# create unique groups for groupby loop
if len(factor_list) == 0:
# no columns to groupby over, so directly aggregate the measure
# columns to 1 total
tmp_rootdir = self.create_tmp_rootdir()
carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
carray_values = ['Total']
elif len(factor_list) == 1:
# single column groupby, the groupby output column
# here is 1:1 to the values
carray_factor = factor_list[0]
carray_values = values_list[0]
else:
# multi column groupby
# first combine the factorized columns to single values
if self.group_cache_valid(col_list=groupby_cols):
# there is a group cache that we can use
col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
col_factor_rootdir = col_rootdir + '.factor'
carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
col_values_rootdir = col_rootdir + '.values'
carray_values = bcolz.carray(rootdir=col_values_rootdir)
else:
# create a brand new groupby col combination
carray_factor, carray_values = \
self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
nr_groups = len(carray_values)
skip_key = None
if bool_arr is not None:
# make all non relevant combinations -1
tmp_rootdir = self.create_tmp_rootdir()
carray_factor = bcolz.eval(
'(factor + 1) * bool - 1',
user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
# now check how many unique values there are left
tmp_rootdir = self.create_tmp_rootdir()
labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
carray_factor, values = ctable_ext.factorize(carray_factor, labels)
# values might contain one value too much (-1) (no direct lookup
# possible because values is a reversed dict)
filter_check = \
[key for key, value in values.items() if value == -1]
if filter_check:
skip_key = filter_check[0]
# the new nr of groups depends on the outcome after filtering
nr_groups = len(values)
# using nr_groups as a total length might be one one off due to the skip_key
# (skipping a row in aggregation)
# but that is okay normally
if skip_key is None:
# if we shouldn't skip a row, set it at the first row after the total number of groups
skip_key = nr_groups
return carray_factor, nr_groups, skip_key
def create_tmp_rootdir(self):
    """
    Create a temporary rootdir that we can destroy later again.

    The directory is registered in self._dir_clean_list so that
    clean_tmp_rootdir() can remove it afterwards.

    Returns
    -------
    str or None: the temp directory path, or None for in-memory ctables
    """
    if self.rootdir:
        tmp_rootdir = tempfile.mkdtemp(prefix='bcolz-')
        # register for later cleanup
        self._dir_clean_list.append(tmp_rootdir)
    else:
        # in-memory ctable: no on-disk backing needed
        tmp_rootdir = None
    return tmp_rootdir
def clean_tmp_rootdir(self):
    """
    Remove every temporary rootdir registered via create_tmp_rootdir.

    Returns
    -------
    None
    """
    # drain the registry front-to-back, deleting each directory as we go
    while self._dir_clean_list:
        rm_file_or_dir(self._dir_clean_list.pop(0))
def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
'''Create a container for the output table, a dictionary describing it's
columns and a list of tuples describing aggregation
operations to perform.
Args:
groupby_cols (list): a list of columns to groupby over
agg_list (list): the aggregation operations (see groupby for more info)
expectedlen (int): expected length of output table
rootdir (string): the directory to write the table to
Returns:
ctable: A table in the correct format for containing the output of
the specified aggregation operations.
dict: (dtype_dict) dictionary describing columns to create
list: (agg_ops) list of tuples of the form:
(input_col_name, output_col_name, agg_op)
input_col_name (string): name of the column to act on
output_col_name (string): name of the column to output to
agg_op (int): aggregation operation to perform
'''
dtype_dict = {}
# include all the groupby columns
for col in groupby_cols:
dtype_dict[col] = self[col].dtype
agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
agg_ops = []
for agg_info in agg_list:
if not isinstance(agg_info, list):
# example: ['m1', 'm2', ...]
# default operation (sum) and default output column name (same is input)
output_col_name = agg_info
input_col_name = agg_info
agg_op = 'sum'
else:
input_col_name = agg_info[0]
agg_op = agg_info[1]
if len(agg_info) == 2:
# example: [['m1', 'sum'], ['m2', 'mean], ...]
# default output column name
output_col_name = input_col_name
else:
# example: [['m1', 'sum', 'mnew1'], ['m1, 'mean','mnew2'], ...]
# fully specified
output_col_name = agg_info[2]
if agg_op not in agg_ops_list:
raise NotImplementedError(
'Unknown Aggregation Type: ' + str(agg_op))
# choose output column dtype based on aggregation operation and
# input column dtype
# TODO: check if the aggregation columns is numeric
# NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
# separate operation
if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
output_col_dtype = np.dtype(np.int64)
elif agg_op in ('mean', 'std'):
output_col_dtype = np.dtype(np.float64)
else:
output_col_dtype = self[input_col_name].dtype
dtype_dict[output_col_name] = output_col_dtype
# save output
agg_ops.append((input_col_name, output_col_name, agg_op))
# create aggregation table
ct_agg = bcolz.ctable(
np.zeros(expectedlen, [('tmp_col_bquery__', np.bool)]),
expectedlen=expectedlen,
rootdir=rootdir)
return ct_agg, dtype_dict, agg_ops
def where_terms(self, term_list, cache=False):
"""
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError:
"""
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
col_list = []
op_list = []
value_list = []
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
if filter_operator in ['==', 'eq']:
op_id = 1
elif filter_operator in ['!=', 'neq']:
op_id = 2
elif filter_operator in ['in']:
op_id = 3
elif filter_operator in ['nin', 'not in']:
op_id = 4
elif filter_operator in ['>']:
op_id = 5
elif filter_operator in ['>=']:
op_id = 6
elif filter_operator in ['<']:
op_id = 7
elif filter_operator in ['<=']:
op_id = 8
else:
raise KeyError(unicode(filter_operator) + ' is not an accepted operator for filtering')
if op_id in [3, 4]:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
if op_id == 3:
op_id = 1
else:
op_id = 2
filter_value = filter_value[0]
else:
filter_value = set(filter_value)
# prepare input for filter creation
col_list.append(filter_col)
op_list.append(op_id)
value_list.append(filter_value)
# rootdir
if cache:
# nb: this directory is not destroyed until the end of the groupby
rootdir = self.create_tmp_rootdir()
else:
rootdir = None
# create boolean array and fill it
boolarr = bcolz.carray(np.ones(0, dtype=np.bool), expectedlen=self.len, rootdir=rootdir, mode='w')
ctable_iter = self[col_list].iter(out_flavor='tuple')
ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)
return boolarr
def where_terms_factorization_check(self, term_list):
    """
    Pre-check whether a where-terms filter can possibly match, using the
    cached factorization (unique values) of each column.

    A terms list has a [(col, operator, value), ..] construction.
    Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    Returns True when every checkable term can match at least one of the
    column's known unique values.  When a column has no on-disk
    factorization we cannot prove anything, so we stop and report valid.

    :param term_list: list/set/tuple of (column, operator, value) terms
    :return: bool (False means the filter provably matches nothing)
    :raise ValueError: for non-list input or an empty 'in'/'nin' value list
    :raise KeyError: for an unknown column or operator
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")
    valid = True
    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]
        # check values
        if filter_col not in self.cols:
            raise KeyError(unicode(filter_col) + ' not in table')
        col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
        if not os.path.exists(col_values_rootdir):
            # no factorization available: cannot prove the filter empty
            break
        col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        col_values = set(col_carray)
        if filter_operator in ['in', 'not in', 'nin']:
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")
            # optimize lists of 1 value: rewrite 'in' -> '==', 'nin' -> '!='
            if len(filter_value) == 1:
                filter_value = filter_value[0]
                if filter_operator == 'in':
                    filter_operator = '=='
                else:
                    filter_operator = '!='
            else:
                filter_value = set(filter_value)
        # bug fix: the generators below must yield the *comparison result*;
        # the previous `any(val for val in ... if cond)` form yielded the raw
        # value, so a matching falsy value (0, '') was wrongly counted as a
        # miss.  Also fixed: the '<=' branch previously tested `val >= value`.
        if filter_operator in ['==', 'eq']:
            valid = filter_value in col_values
        elif filter_operator in ['!=', 'neq']:
            valid = any(val != filter_value for val in col_values)
        elif filter_operator in ['in']:
            valid = any(val in col_values for val in filter_value)
        elif filter_operator in ['nin', 'not in']:
            valid = any(val not in filter_value for val in col_values)
        elif filter_operator in ['>']:
            valid = any(val > filter_value for val in col_values)
        elif filter_operator in ['>=']:
            valid = any(val >= filter_value for val in col_values)
        elif filter_operator in ['<']:
            valid = any(val < filter_value for val in col_values)
        elif filter_operator in ['<=']:
            valid = any(val <= filter_value for val in col_values)
        else:
            raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
        # if one of the filters is blocking, we can stop
        if not valid:
            break
    return valid
def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
                            _max_len_subgroup=1000):
    """
    Expand the boolean filter `bool_arr` along the subgroups identified by
    `basket_col`; the actual expansion is delegated to
    ctable_ext.is_in_ordered_subgroups.

    Parameters
    ----------
    basket_col : name of the column identifying the subgroups (required)
    bool_arr : boolean filter to expand; None short-circuits to None
    _max_len_subgroup : internal size hint passed through to ctable_ext

    Returns
    -------
    The expanded filter produced by ctable_ext, or None when bool_arr is None
    """
    assert basket_col is not None
    if bool_arr is None:
        return None
    # keep the result on disk only when auto-caching and the input filter
    # itself is disk-backed; the temp dir is cleaned up after the groupby
    if self.auto_cache and bool_arr.rootdir is not None:
        rootdir = self.create_tmp_rootdir()
    else:
        rootdir = None
    return \
        ctable_ext.is_in_ordered_subgroups(
            self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
            _max_len_subgroup=_max_len_subgroup)
|
visualfabriq/bquery | bquery/ctable.py | ctable._int_array_hash | python | def _int_array_hash(input_list):
list_len = len(input_list)
arr_len = len(input_list[0])
mult_arr = np.full(arr_len, 1000003, dtype=np.long)
value_arr = np.full(arr_len, 0x345678, dtype=np.long)
for i, current_arr in enumerate(input_list):
index = list_len - i - 1
value_arr ^= current_arr
value_arr *= mult_arr
mult_arr += (82520 + index + index)
value_arr += 97531
result_carray = bcolz.carray(value_arr)
del value_arr
return result_carray | A function to calculate a hash value of multiple integer values, not used at the moment
Parameters
----------
input_list
Returns
------- | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L356-L383 | null | class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
super(ctable, self).__init__(*args, **kwargs)
# check autocaching
if self.rootdir and kwargs.get('auto_cache') is True:
# explicit auto_cache
self.auto_cache = True
elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
# implicit auto_cache
self.auto_cache = True
else:
self.auto_cache = False
self.auto_cache = True # debug
self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
group_name = '_'.join(sorted(col_list))
return group_name
def cache_valid(self, col):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_org_file_check = self[col].rootdir + '/__attrs__'
col_values_file_check = self[col].rootdir + '.values/__attrs__'
cache_valid = os.path.exists(col_org_file_check) and os.path.exists(col_values_file_check)
return cache_valid
def group_cache_valid(self, col_list):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_values_file_check = os.path.join(self.rootdir, self.create_group_base_name(col_list)) + \
'.values/__attrs__'
exists_group_index = os.path.exists(col_values_file_check)
missing_col_check = [1 for col in col_list if not os.path.exists(self[col].rootdir + '/__attrs__')]
cache_valid = (exists_group_index and not missing_col_check)
return cache_valid
def cache_factor(self, col_list, refresh=False):
"""
Existing todos here are: these should be hidden helper carrays
As in: not normal columns that you would normally see as a user
The factor (label index) carray is as long as the original carray
(and the rest of the table therefore)
But the (unique) values carray is not as long (as long as the number
of unique values)
:param col_list:
:param refresh:
:return:
"""
if not self.rootdir:
raise TypeError('Only out-of-core ctables can have '
'factorization caching at the moment')
if not isinstance(col_list, list):
col_list = [col_list]
if refresh:
kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
for kill_dir in kill_list:
rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
for col in col_list:
# create cache if needed
if refresh or not self.cache_valid(col):
# todo: also add locking mechanism here
# create directories
col_rootdir = self[col].rootdir
col_factor_rootdir = col_rootdir + '.factor'
col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
col_values_rootdir = col_rootdir + '.values'
col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
# create factor
carray_factor = \
bcolz.carray([], dtype='int64', expectedlen=self.size,
rootdir=col_factor_rootdir_tmp, mode='w')
_, values = \
ctable_ext.factorize(self[col], labels=carray_factor)
carray_factor.flush()
rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
# create values
carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
rootdir=col_values_rootdir_tmp, mode='w')
carray_values.flush()
rm_file_or_dir(col_values_rootdir, ignore_errors=True)
shutil.move(col_values_rootdir_tmp, col_values_rootdir)
def unique(self, col_or_col_list):
"""
Return a list of unique values of a column or a list of lists of column list
:param col_or_col_list: a column or a list of columns
:return:
"""
if isinstance(col_or_col_list, list):
col_is_list = True
col_list = col_or_col_list
else:
col_is_list = False
col_list = [col_or_col_list]
output = []
for col in col_list:
if self.auto_cache or self.cache_valid(col):
# create factorization cache
if not self.cache_valid(col):
self.cache_factor([col])
# retrieve values from existing disk-based factorization
col_values_rootdir = self[col].rootdir + '.values'
carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
values = list(carray_values)
else:
# factorize on-the-fly
_, values = ctable_ext.factorize(self[col])
values = values.values()
output.append(values)
if not col_is_list:
output = output[0]
return output
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
carray_factor, groupby_cols, agg_ops,
dtype_dict, bool_arr=None):
'''Perform aggregation and place the result in the given ctable.
Args:
ct_agg (ctable): the table to hold the aggregation
nr_groups (int): the number of groups (number of rows in output table)
skip_key (int): index of the output row to remove from results (used for filtering)
carray_factor: the carray for each row in the table a reference to the the unique group index
groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
output_agg_ops (list): list of tuples of the form: (input_col, agg_op)
input_col (string): name of the column to act on
agg_op (int): aggregation operation to perform
bool_arr: a boolean array containing the filter
'''
# this creates the groupby columns
for col in groupby_cols:
result_array = ctable_ext.groupby_value(self[col], carray_factor,
nr_groups, skip_key)
if bool_arr is not None:
result_array = np.delete(result_array, skip_key)
ct_agg.addcol(result_array, name=col)
del result_array
# this creates the aggregation columns
for input_col_name, output_col_name, agg_op in agg_ops:
input_col = self[input_col_name]
output_col_dtype = dtype_dict[output_col_name]
input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
if agg_op == 'sum':
ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'mean':
ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'std':
ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count':
ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count_distinct':
ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'sorted_count_distinct':
ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
else:
raise KeyError('Unknown aggregation operation ' + str(agg_op))
if bool_arr is not None:
output_buffer = np.delete(output_buffer, skip_key)
ct_agg.addcol(output_buffer, name=output_col_name)
del output_buffer
ct_agg.delcol('tmp_col_bquery__')
def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
"""
Aggregate the ctable
groupby_cols: a list of columns to groupby over
agg_list: the aggregation operations, which can be:
- a list of column names (output has same name and sum is performed)
['m1', 'm2', ...]
- a list of lists, each list contains input column name and operation
[['m1', 'sum'], ['m2', 'mean'], ...]
- a list of lists, each list contains input column name, operation and
output column name
[['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]
Currently supported aggregation operations are:
- 'sum'
- 'count'
- 'count_na'
- 'count_distinct'
- 'sorted_count_distinct', data should have been
previously presorted
- 'mean', arithmetic mean (average)
- 'std', standard deviation
boolarr: to be added (filtering the groupby factorization input)
rootdir: the aggregation ctable rootdir
"""
carray_factor, nr_groups, skip_key = \
self.make_group_index(groupby_cols, bool_arr)
# check if the bool_arr actually filters
if bool_arr is not None and np.all(bool_arr):
bool_arr = None
if bool_arr is None:
expectedlen = nr_groups
else:
expectedlen = nr_groups - 1
ct_agg, dtype_dict, agg_ops = \
self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
# perform aggregation
self.aggregate_groups(ct_agg, nr_groups, skip_key,
carray_factor, groupby_cols,
agg_ops, dtype_dict,
bool_arr=bool_arr)
# clean up everything that was used
self.clean_tmp_rootdir()
return ct_agg
# groupby helper functions
    def factorize_groupby_cols(self, groupby_cols):
        """Factorize every column that takes part in the groupby.

        For each column this yields a factor carray (a per-row label index)
        and a values carray (the unique values).  When ``auto_cache`` is
        enabled an on-disk factorization cache is created as needed and
        reused; otherwise the factorization is done in memory on the fly.

        Args:
            groupby_cols (list): the columns to factorize

        Returns:
            (list, list): per-column factor carrays and values carrays,
            in the same order as ``groupby_cols``
        """
        # first check if the factorized arrays already exist
        # unless we need to refresh the cache
        factor_list = []
        values_list = []
        # factorize the groupby columns
        for col in groupby_cols:
            if self.auto_cache or self.cache_valid(col):
                # create factorization cache if needed
                if not self.cache_valid(col):
                    self.cache_factor([col])
                col_rootdir = self[col].rootdir
                col_factor_rootdir = col_rootdir + '.factor'
                col_values_rootdir = col_rootdir + '.values'
                col_carray_factor = \
                    bcolz.carray(rootdir=col_factor_rootdir, mode='r')
                col_carray_values = \
                    bcolz.carray(rootdir=col_values_rootdir, mode='r')
            else:
                # no cache available or wanted: factorize in memory
                col_carray_factor, values = ctable_ext.factorize(self[col])
                col_carray_values = \
                    bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype))
            factor_list.append(col_carray_factor)
            values_list.append(col_carray_values)
        return factor_list, values_list
@staticmethod
def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
"""
Create a unique, factorized column out of several individual columns
Parameters
----------
factor_list
groupby_cols
cache
Returns
-------
"""
if not self.rootdir:
# in-memory scenario
input_rootdir = None
col_rootdir = None
col_factor_rootdir = None
col_values_rootdir = None
col_factor_rootdir_tmp = None
col_values_rootdir_tmp = None
else:
# temporary
input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
# create combination of groupby columns
group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
factor_table = bcolz.ctable(factor_list, names=groupby_cols)
ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)
# now factorize the results
carray_factor = \
bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
carray_factor.flush()
carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
carray_values.flush()
del group_array
if cache:
# clean up the temporary file
rm_file_or_dir(input_rootdir, ignore_errors=True)
if cache:
# official end destination
col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
col_factor_rootdir = col_rootdir + '.factor'
col_values_rootdir = col_rootdir + '.values'
lock_file = col_rootdir + '.lock'
# only works for linux
if not os.path.exists(lock_file):
uid = str(uuid.uuid4())
try:
with open(lock_file, 'a+') as fn:
fn.write(uid + '\n')
with open(lock_file, 'r') as fn:
temp = fn.read().splitlines()
if temp[0] == uid:
lock = True
else:
lock = False
del temp
except:
lock = False
else:
lock = False
if lock:
rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')
rm_file_or_dir(col_values_rootdir, ignore_errors=False)
shutil.move(col_values_rootdir_tmp, col_values_rootdir)
carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
else:
# another process has a lock, we will work with our current files and clean up later
self._dir_clean_list.append(col_factor_rootdir)
self._dir_clean_list.append(col_values_rootdir)
return carray_factor, carray_values
    def make_group_index(self, groupby_cols, bool_arr):
        '''Create the unique-group factorization used by the groupby loop.

        Args:
            groupby_cols (list): the columns to group over; may be empty, in
                which case everything aggregates into a single 'Total' group
            bool_arr: optional boolean filter; filtered-out rows are remapped
                into one dedicated group that is later skipped

        Returns:
            carray: (carray_factor) per-row index into the unique groups
            int: (nr_groups) the number of resulting groups
            int: (skip_key) output row to drop when filtering; equals
                nr_groups when no row should be skipped
        '''
        factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
        # create unique groups for groupby loop
        if len(factor_list) == 0:
            # no columns to groupby over, so directly aggregate the measure
            # columns to 1 total
            tmp_rootdir = self.create_tmp_rootdir()
            carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
            carray_values = ['Total']
        elif len(factor_list) == 1:
            # single column groupby, the groupby output column
            # here is 1:1 to the values
            carray_factor = factor_list[0]
            carray_values = values_list[0]
        else:
            # multi column groupby
            # first combine the factorized columns to single values
            if self.group_cache_valid(col_list=groupby_cols):
                # there is a group cache that we can use
                col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
                col_factor_rootdir = col_rootdir + '.factor'
                carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
                col_values_rootdir = col_rootdir + '.values'
                carray_values = bcolz.carray(rootdir=col_values_rootdir)
            else:
                # create a brand new groupby col combination
                carray_factor, carray_values = \
                    self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
        nr_groups = len(carray_values)
        skip_key = None
        if bool_arr is not None:
            # make all non relevant combinations -1
            tmp_rootdir = self.create_tmp_rootdir()
            carray_factor = bcolz.eval(
                '(factor + 1) * bool - 1',
                user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
            # now check how many unique values there are left
            tmp_rootdir = self.create_tmp_rootdir()
            labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
            carray_factor, values = ctable_ext.factorize(carray_factor, labels)
            # values might contain one value too much (-1) (no direct lookup
            # possible because values is a reversed dict)
            filter_check = \
                [key for key, value in values.items() if value == -1]
            if filter_check:
                skip_key = filter_check[0]
            # the new nr of groups depends on the outcome after filtering
            nr_groups = len(values)
        # using nr_groups as a total length might be one one off due to the skip_key
        # (skipping a row in aggregation)
        # but that is okay normally
        if skip_key is None:
            # if we shouldn't skip a row, set it at the first row after the total number of groups
            skip_key = nr_groups
        return carray_factor, nr_groups, skip_key
def create_tmp_rootdir(self):
"""
create a rootdir that we can destroy later again
Returns
-------
"""
if self.rootdir:
tmp_rootdir = tempfile.mkdtemp(prefix='bcolz-')
self._dir_clean_list.append(tmp_rootdir)
else:
tmp_rootdir = None
return tmp_rootdir
def clean_tmp_rootdir(self):
"""
clean up all used temporary rootdirs
Returns
-------
"""
for tmp_rootdir in list(self._dir_clean_list):
rm_file_or_dir(tmp_rootdir)
self._dir_clean_list.remove(tmp_rootdir)
def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
'''Create a container for the output table, a dictionary describing it's
columns and a list of tuples describing aggregation
operations to perform.
Args:
groupby_cols (list): a list of columns to groupby over
agg_list (list): the aggregation operations (see groupby for more info)
expectedlen (int): expected length of output table
rootdir (string): the directory to write the table to
Returns:
ctable: A table in the correct format for containing the output of
the specified aggregation operations.
dict: (dtype_dict) dictionary describing columns to create
list: (agg_ops) list of tuples of the form:
(input_col_name, output_col_name, agg_op)
input_col_name (string): name of the column to act on
output_col_name (string): name of the column to output to
agg_op (int): aggregation operation to perform
'''
dtype_dict = {}
# include all the groupby columns
for col in groupby_cols:
dtype_dict[col] = self[col].dtype
agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
agg_ops = []
for agg_info in agg_list:
if not isinstance(agg_info, list):
# example: ['m1', 'm2', ...]
# default operation (sum) and default output column name (same is input)
output_col_name = agg_info
input_col_name = agg_info
agg_op = 'sum'
else:
input_col_name = agg_info[0]
agg_op = agg_info[1]
if len(agg_info) == 2:
# example: [['m1', 'sum'], ['m2', 'mean], ...]
# default output column name
output_col_name = input_col_name
else:
# example: [['m1', 'sum', 'mnew1'], ['m1, 'mean','mnew2'], ...]
# fully specified
output_col_name = agg_info[2]
if agg_op not in agg_ops_list:
raise NotImplementedError(
'Unknown Aggregation Type: ' + str(agg_op))
# choose output column dtype based on aggregation operation and
# input column dtype
# TODO: check if the aggregation columns is numeric
# NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
# separate operation
if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
output_col_dtype = np.dtype(np.int64)
elif agg_op in ('mean', 'std'):
output_col_dtype = np.dtype(np.float64)
else:
output_col_dtype = self[input_col_name].dtype
dtype_dict[output_col_name] = output_col_dtype
# save output
agg_ops.append((input_col_name, output_col_name, agg_op))
# create aggregation table
ct_agg = bcolz.ctable(
np.zeros(expectedlen, [('tmp_col_bquery__', np.bool)]),
expectedlen=expectedlen,
rootdir=rootdir)
return ct_agg, dtype_dict, agg_ops
def where_terms(self, term_list, cache=False):
"""
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError:
"""
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
col_list = []
op_list = []
value_list = []
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
if filter_operator in ['==', 'eq']:
op_id = 1
elif filter_operator in ['!=', 'neq']:
op_id = 2
elif filter_operator in ['in']:
op_id = 3
elif filter_operator in ['nin', 'not in']:
op_id = 4
elif filter_operator in ['>']:
op_id = 5
elif filter_operator in ['>=']:
op_id = 6
elif filter_operator in ['<']:
op_id = 7
elif filter_operator in ['<=']:
op_id = 8
else:
raise KeyError(unicode(filter_operator) + ' is not an accepted operator for filtering')
if op_id in [3, 4]:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
if op_id == 3:
op_id = 1
else:
op_id = 2
filter_value = filter_value[0]
else:
filter_value = set(filter_value)
# prepare input for filter creation
col_list.append(filter_col)
op_list.append(op_id)
value_list.append(filter_value)
# rootdir
if cache:
# nb: this directory is not destroyed until the end of the groupby
rootdir = self.create_tmp_rootdir()
else:
rootdir = None
# create boolean array and fill it
boolarr = bcolz.carray(np.ones(0, dtype=np.bool), expectedlen=self.len, rootdir=rootdir, mode='w')
ctable_iter = self[col_list].iter(out_flavor='tuple')
ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)
return boolarr
def where_terms_factorization_check(self, term_list):
"""
check for where terms if they are applicable
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError:
"""
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
valid = True
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
if not os.path.exists(col_values_rootdir):
# no factorization available
break
col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
col_values = set(col_carray)
if filter_operator in ['in', 'not in', 'nin']:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
filter_value = filter_value[0]
if filter_operator == 'in':
filter_operator = '=='
else:
filter_operator = '!='
else:
filter_value = set(filter_value)
if filter_operator in ['==', 'eq']:
valid = filter_value in col_values
elif filter_operator in ['!=', 'neq']:
valid = any(val for val in col_values if val != filter_value)
elif filter_operator in ['in']:
valid = any(val for val in filter_value if val in col_values)
elif filter_operator in ['nin', 'not in']:
valid = any(val for val in col_values if val not in filter_value)
elif filter_operator in ['>']:
valid = any(val for val in col_values if val > filter_value)
elif filter_operator in ['>=']:
valid = any(val for val in col_values if val >= filter_value)
elif filter_operator in ['<']:
valid = any(val for val in col_values if val < filter_value)
elif filter_operator in ['<=']:
valid = any(val for val in col_values if val >= filter_value)
else:
raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
# if one of the filters is blocking, we can stop
if not valid:
break
return valid
def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
_max_len_subgroup=1000):
"""
Expands the filter using a specified column
Parameters
----------
basket_col
bool_arr
_max_len_subgroup
Returns
-------
"""
assert basket_col is not None
if bool_arr is None:
return None
if self.auto_cache and bool_arr.rootdir is not None:
rootdir = self.create_tmp_rootdir()
else:
rootdir = None
return \
ctable_ext.is_in_ordered_subgroups(
self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
_max_len_subgroup=_max_len_subgroup)
|
visualfabriq/bquery | bquery/ctable.py | ctable.create_group_column_factor | python | def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
if not self.rootdir:
# in-memory scenario
input_rootdir = None
col_rootdir = None
col_factor_rootdir = None
col_values_rootdir = None
col_factor_rootdir_tmp = None
col_values_rootdir_tmp = None
else:
# temporary
input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
# create combination of groupby columns
group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
factor_table = bcolz.ctable(factor_list, names=groupby_cols)
ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)
# now factorize the results
carray_factor = \
bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
carray_factor.flush()
carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
carray_values.flush()
del group_array
if cache:
# clean up the temporary file
rm_file_or_dir(input_rootdir, ignore_errors=True)
if cache:
# official end destination
col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
col_factor_rootdir = col_rootdir + '.factor'
col_values_rootdir = col_rootdir + '.values'
lock_file = col_rootdir + '.lock'
# only works for linux
if not os.path.exists(lock_file):
uid = str(uuid.uuid4())
try:
with open(lock_file, 'a+') as fn:
fn.write(uid + '\n')
with open(lock_file, 'r') as fn:
temp = fn.read().splitlines()
if temp[0] == uid:
lock = True
else:
lock = False
del temp
except:
lock = False
else:
lock = False
if lock:
rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')
rm_file_or_dir(col_values_rootdir, ignore_errors=False)
shutil.move(col_values_rootdir_tmp, col_values_rootdir)
carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
else:
# another process has a lock, we will work with our current files and clean up later
self._dir_clean_list.append(col_factor_rootdir)
self._dir_clean_list.append(col_values_rootdir)
return carray_factor, carray_values | Create a unique, factorized column out of several individual columns
Parameters
----------
factor_list
groupby_cols
cache
Returns
------- | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L385-L472 | [
"def rm_file_or_dir(path, ignore_errors=True):\n \"\"\"\n Helper function to clean a certain filepath\n\n Parameters\n ----------\n path\n\n Returns\n -------\n\n \"\"\"\n if os.path.exists(path):\n if os.path.isdir(path):\n if os.path.islink(path):\n os.unlink(path)\n else:\n shutil.rmtree(path, ignore_errors=ignore_errors)\n else:\n if os.path.islink(path):\n os.unlink(path)\n else:\n os.remove(path)\n"
] | class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
super(ctable, self).__init__(*args, **kwargs)
# check autocaching
if self.rootdir and kwargs.get('auto_cache') is True:
# explicit auto_cache
self.auto_cache = True
elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
# implicit auto_cache
self.auto_cache = True
else:
self.auto_cache = False
self.auto_cache = True # debug
self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
group_name = '_'.join(sorted(col_list))
return group_name
def cache_valid(self, col):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_org_file_check = self[col].rootdir + '/__attrs__'
col_values_file_check = self[col].rootdir + '.values/__attrs__'
cache_valid = os.path.exists(col_org_file_check) and os.path.exists(col_values_file_check)
return cache_valid
def group_cache_valid(self, col_list):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_values_file_check = os.path.join(self.rootdir, self.create_group_base_name(col_list)) + \
'.values/__attrs__'
exists_group_index = os.path.exists(col_values_file_check)
missing_col_check = [1 for col in col_list if not os.path.exists(self[col].rootdir + '/__attrs__')]
cache_valid = (exists_group_index and not missing_col_check)
return cache_valid
    def cache_factor(self, col_list, refresh=False):
        """Create (or refresh) the on-disk factorization cache for columns.

        The factor (label index) carray is as long as the original carray
        (and therefore the rest of the table), while the unique values carray
        is only as long as the number of unique values.  Both are first
        written into temporary directories and then moved into place next to
        the column, so a partially written cache is never visible.

        TODO: these should be hidden helper carrays rather than sibling
        directories a user can see.

        Args:
            col_list: a column name or a list of column names
            refresh (bool): when True, all existing ``.factor``/``.values``
                caches of this table are removed first

        Raises:
            TypeError: for in-memory tables, which cannot be cached
        """
        if not self.rootdir:
            raise TypeError('Only out-of-core ctables can have '
                            'factorization caching at the moment')
        if not isinstance(col_list, list):
            col_list = [col_list]
        if refresh:
            # wipe every existing cache directory of this table
            kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
            for kill_dir in kill_list:
                rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
        for col in col_list:
            # create cache if needed
            if refresh or not self.cache_valid(col):
                # todo: also add locking mechanism here
                # create directories
                col_rootdir = self[col].rootdir
                col_factor_rootdir = col_rootdir + '.factor'
                col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
                col_values_rootdir = col_rootdir + '.values'
                col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
                # create factor
                carray_factor = \
                    bcolz.carray([], dtype='int64', expectedlen=self.size,
                                 rootdir=col_factor_rootdir_tmp, mode='w')
                _, values = \
                    ctable_ext.factorize(self[col], labels=carray_factor)
                carray_factor.flush()
                # swap the finished temp dir into the cache location
                rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
                shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
                # create values
                carray_values = \
                    bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
                                 rootdir=col_values_rootdir_tmp, mode='w')
                carray_values.flush()
                rm_file_or_dir(col_values_rootdir, ignore_errors=True)
                shutil.move(col_values_rootdir_tmp, col_values_rootdir)
    def unique(self, col_or_col_list):
        """Return the unique values of a column, or of each column in a list.

        Args:
            col_or_col_list: a single column name, or a list of column names

        Returns:
            The unique values of the column; for a list of columns, a list of
            such results in the same order.

        NOTE(review): the cached branch returns a ``list`` while the
        on-the-fly branch returns ``values.values()`` (a list on Python 2,
        a dict view on Python 3) — callers should not rely on the exact
        container type.
        """
        if isinstance(col_or_col_list, list):
            col_is_list = True
            col_list = col_or_col_list
        else:
            col_is_list = False
            col_list = [col_or_col_list]
        output = []
        for col in col_list:
            if self.auto_cache or self.cache_valid(col):
                # create factorization cache
                if not self.cache_valid(col):
                    self.cache_factor([col])
                # retrieve values from existing disk-based factorization
                col_values_rootdir = self[col].rootdir + '.values'
                carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
                values = list(carray_values)
            else:
                # factorize on-the-fly
                _, values = ctable_ext.factorize(self[col])
                values = values.values()
            output.append(values)
        # unwrap the result when a single column was requested
        if not col_is_list:
            output = output[0]
        return output
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
carray_factor, groupby_cols, agg_ops,
dtype_dict, bool_arr=None):
'''Perform aggregation and place the result in the given ctable.
Args:
ct_agg (ctable): the table to hold the aggregation
nr_groups (int): the number of groups (number of rows in output table)
skip_key (int): index of the output row to remove from results (used for filtering)
carray_factor: the carray for each row in the table a reference to the the unique group index
groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
output_agg_ops (list): list of tuples of the form: (input_col, agg_op)
input_col (string): name of the column to act on
agg_op (int): aggregation operation to perform
bool_arr: a boolean array containing the filter
'''
# this creates the groupby columns
for col in groupby_cols:
result_array = ctable_ext.groupby_value(self[col], carray_factor,
nr_groups, skip_key)
if bool_arr is not None:
result_array = np.delete(result_array, skip_key)
ct_agg.addcol(result_array, name=col)
del result_array
# this creates the aggregation columns
for input_col_name, output_col_name, agg_op in agg_ops:
input_col = self[input_col_name]
output_col_dtype = dtype_dict[output_col_name]
input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
if agg_op == 'sum':
ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'mean':
ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'std':
ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count':
ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count_distinct':
ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'sorted_count_distinct':
ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
else:
raise KeyError('Unknown aggregation operation ' + str(agg_op))
if bool_arr is not None:
output_buffer = np.delete(output_buffer, skip_key)
ct_agg.addcol(output_buffer, name=output_col_name)
del output_buffer
ct_agg.delcol('tmp_col_bquery__')
    def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
        """Aggregate the ctable.

        groupby_cols: a list of columns to groupby over
        agg_list: the aggregation operations, which can be:
         - a list of column names (output has same name and sum is performed)
           ['m1', 'm2', ...]
         - a list of lists, each list contains input column name and operation
           [['m1', 'sum'], ['m2', 'mean'], ...]
         - a list of lists, each list contains input column name, operation and
           output column name
           [['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]

        Currently supported aggregation operations are:
        - 'sum'
        - 'count'
        - 'count_distinct'
        - 'sorted_count_distinct', data should have been previously presorted
        - 'mean', arithmetic mean (average)
        - 'std', standard deviation
        (a previous version of this docstring also listed 'count_na', but
        create_agg_ctable rejects that operation)

        bool_arr: optional boolean filter over the input rows
        rootdir: the aggregation ctable rootdir

        Returns the aggregated ctable.
        """
        carray_factor, nr_groups, skip_key = \
            self.make_group_index(groupby_cols, bool_arr)
        # check if the bool_arr actually filters
        if bool_arr is not None and np.all(bool_arr):
            bool_arr = None
        if bool_arr is None:
            expectedlen = nr_groups
        else:
            # one output row (the filtered-out group) will be dropped
            expectedlen = nr_groups - 1
        ct_agg, dtype_dict, agg_ops = \
            self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
        # perform aggregation
        self.aggregate_groups(ct_agg, nr_groups, skip_key,
                              carray_factor, groupby_cols,
                              agg_ops, dtype_dict,
                              bool_arr=bool_arr)
        # clean up everything that was used
        self.clean_tmp_rootdir()
        return ct_agg
# groupby helper functions
def factorize_groupby_cols(self, groupby_cols):
"""
factorizes all columns that are used in the groupby
it will use cache carrays if available
if not yet auto_cache is valid, it will create cache carrays
"""
# first check if the factorized arrays already exist
# unless we need to refresh the cache
factor_list = []
values_list = []
# factorize the groupby columns
for col in groupby_cols:
if self.auto_cache or self.cache_valid(col):
# create factorization cache if needed
if not self.cache_valid(col):
self.cache_factor([col])
col_rootdir = self[col].rootdir
col_factor_rootdir = col_rootdir + '.factor'
col_values_rootdir = col_rootdir + '.values'
col_carray_factor = \
bcolz.carray(rootdir=col_factor_rootdir, mode='r')
col_carray_values = \
bcolz.carray(rootdir=col_values_rootdir, mode='r')
else:
col_carray_factor, values = ctable_ext.factorize(self[col])
col_carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype))
factor_list.append(col_carray_factor)
values_list.append(col_carray_values)
return factor_list, values_list
@staticmethod
def _int_array_hash(input_list):
"""
A function to calculate a hash value of multiple integer values, not used at the moment
Parameters
----------
input_list
Returns
-------
"""
list_len = len(input_list)
arr_len = len(input_list[0])
mult_arr = np.full(arr_len, 1000003, dtype=np.long)
value_arr = np.full(arr_len, 0x345678, dtype=np.long)
for i, current_arr in enumerate(input_list):
index = list_len - i - 1
value_arr ^= current_arr
value_arr *= mult_arr
mult_arr += (82520 + index + index)
value_arr += 97531
result_carray = bcolz.carray(value_arr)
del value_arr
return result_carray
    def make_group_index(self, groupby_cols, bool_arr):
        '''Create the unique-group factorization used by the groupby loop.

        Args:
            groupby_cols (list): the columns to group over; may be empty, in
                which case everything aggregates into a single 'Total' group
            bool_arr: optional boolean filter; filtered-out rows are remapped
                into one dedicated group that is later skipped

        Returns:
            carray: (carray_factor) per-row index into the unique groups
            int: (nr_groups) the number of resulting groups
            int: (skip_key) output row to drop when filtering; equals
                nr_groups when no row should be skipped
        '''
        factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
        # create unique groups for groupby loop
        if len(factor_list) == 0:
            # no columns to groupby over, so directly aggregate the measure
            # columns to 1 total
            tmp_rootdir = self.create_tmp_rootdir()
            carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
            carray_values = ['Total']
        elif len(factor_list) == 1:
            # single column groupby, the groupby output column
            # here is 1:1 to the values
            carray_factor = factor_list[0]
            carray_values = values_list[0]
        else:
            # multi column groupby
            # first combine the factorized columns to single values
            if self.group_cache_valid(col_list=groupby_cols):
                # there is a group cache that we can use
                col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
                col_factor_rootdir = col_rootdir + '.factor'
                carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
                col_values_rootdir = col_rootdir + '.values'
                carray_values = bcolz.carray(rootdir=col_values_rootdir)
            else:
                # create a brand new groupby col combination
                carray_factor, carray_values = \
                    self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
        nr_groups = len(carray_values)
        skip_key = None
        if bool_arr is not None:
            # make all non relevant combinations -1
            tmp_rootdir = self.create_tmp_rootdir()
            carray_factor = bcolz.eval(
                '(factor + 1) * bool - 1',
                user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
            # now check how many unique values there are left
            tmp_rootdir = self.create_tmp_rootdir()
            labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
            carray_factor, values = ctable_ext.factorize(carray_factor, labels)
            # values might contain one value too much (-1) (no direct lookup
            # possible because values is a reversed dict)
            filter_check = \
                [key for key, value in values.items() if value == -1]
            if filter_check:
                skip_key = filter_check[0]
            # the new nr of groups depends on the outcome after filtering
            nr_groups = len(values)
        # using nr_groups as a total length might be one one off due to the skip_key
        # (skipping a row in aggregation)
        # but that is okay normally
        if skip_key is None:
            # if we shouldn't skip a row, set it at the first row after the total number of groups
            skip_key = nr_groups
        return carray_factor, nr_groups, skip_key
    def create_tmp_rootdir(self):
        """Create a temporary rootdir and register it for later cleanup.

        Returns:
            str or None: the new directory path for on-disk tables, or None
            for in-memory tables (which need no scratch directory).
        """
        if self.rootdir:
            tmp_rootdir = tempfile.mkdtemp(prefix='bcolz-')
            # remember it so clean_tmp_rootdir can remove it afterwards
            self._dir_clean_list.append(tmp_rootdir)
        else:
            tmp_rootdir = None
        return tmp_rootdir
    def clean_tmp_rootdir(self):
        """Remove every temporary rootdir registered in ``_dir_clean_list``."""
        # iterate over a copy because the registry shrinks while we walk it
        for tmp_rootdir in list(self._dir_clean_list):
            rm_file_or_dir(tmp_rootdir)
            self._dir_clean_list.remove(tmp_rootdir)
def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
    '''Create a container for the output table, a dictionary describing its
    columns and a list of tuples describing aggregation
    operations to perform.

    Args:
        groupby_cols (list): a list of columns to groupby over
        agg_list (list): the aggregation operations (see groupby for more info)
        expectedlen (int): expected length of output table
        rootdir (string): the directory to write the table to

    Returns:
        ctable: A table in the correct format for containing the output of
            the specified aggregation operations.
        dict: (dtype_dict) dictionary mapping output column names to dtypes
        list: (agg_ops) list of tuples of the form:
            (input_col_name, output_col_name, agg_op)
            input_col_name (string): name of the column to act on
            output_col_name (string): name of the column to output to
            agg_op (string): aggregation operation to perform

    Raises:
        NotImplementedError: when an unknown aggregation operation is requested
    '''
    dtype_dict = {}
    # include all the groupby columns; they keep their source dtype
    for col in groupby_cols:
        dtype_dict[col] = self[col].dtype
    agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
    agg_ops = []
    for agg_info in agg_list:
        if not isinstance(agg_info, list):
            # example: ['m1', 'm2', ...]
            # default operation (sum) and default output column name (same as input)
            output_col_name = agg_info
            input_col_name = agg_info
            agg_op = 'sum'
        else:
            input_col_name = agg_info[0]
            agg_op = agg_info[1]
            if len(agg_info) == 2:
                # example: [['m1', 'sum'], ['m2', 'mean'], ...]
                # default output column name
                output_col_name = input_col_name
            else:
                # example: [['m1', 'sum', 'mnew1'], ['m1', 'mean', 'mnew2'], ...]
                # fully specified
                output_col_name = agg_info[2]
        if agg_op not in agg_ops_list:
            raise NotImplementedError(
                'Unknown Aggregation Type: ' + str(agg_op))
        # choose output column dtype based on aggregation operation and
        # input column dtype
        # TODO: check if the aggregation columns is numeric
        # NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
        # separate operation
        if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
            output_col_dtype = np.dtype(np.int64)
        elif agg_op in ('mean', 'std'):
            output_col_dtype = np.dtype(np.float64)
        else:
            output_col_dtype = self[input_col_name].dtype
        dtype_dict[output_col_name] = output_col_dtype
        # save output
        agg_ops.append((input_col_name, output_col_name, agg_op))
    # create aggregation table
    # NB: np.bool_ instead of the np.bool alias, which was deprecated in
    # NumPy 1.20 and removed in 1.24
    ct_agg = bcolz.ctable(
        np.zeros(expectedlen, [('tmp_col_bquery__', np.bool_)]),
        expectedlen=expectedlen,
        rootdir=rootdir)
    return ct_agg, dtype_dict, agg_ops
def where_terms(self, term_list, cache=False):
    """
    Create a boolean array where `term_list` is true.

    A terms list has a [(col, operator, value), ..] construction.
    Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    :param term_list: list of (column, operator, value) tuples
    :param cache: when True, back the boolean array by a temporary rootdir
        (nb: that directory is not destroyed until the end of the groupby)
    :return: bcolz boolean carray marking the matching rows
    :raise ValueError: on malformed term lists
    :raise KeyError: on unknown columns or operators
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")
    # operator symbol -> internal op id understood by ctable_ext
    op_id_map = {
        '==': 1, 'eq': 1,
        '!=': 2, 'neq': 2,
        'in': 3,
        'nin': 4, 'not in': 4,
        '>': 5,
        '>=': 6,
        '<': 7,
        '<=': 8,
    }
    col_list = []
    op_list = []
    value_list = []
    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]
        # check values
        if filter_col not in self.cols:
            raise KeyError(unicode(filter_col) + ' not in table')
        if filter_operator not in op_id_map:
            raise KeyError(unicode(filter_operator) + ' is not an accepted operator for filtering')
        op_id = op_id_map[filter_operator]
        if op_id in (3, 4):
            # membership tests need a collection of candidate values
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")
            if len(filter_value) == 1:
                # optimize lists of 1 value into a plain (in)equality test
                op_id = 1 if op_id == 3 else 2
                filter_value = filter_value[0]
            else:
                filter_value = set(filter_value)
        # prepare input for filter creation
        col_list.append(filter_col)
        op_list.append(op_id)
        value_list.append(filter_value)
    # rootdir
    if cache:
        # nb: this directory is not destroyed until the end of the groupby
        rootdir = self.create_tmp_rootdir()
    else:
        rootdir = None
    # create boolean array and fill it
    # NB: np.bool_ instead of the np.bool alias, which was deprecated in
    # NumPy 1.20 and removed in 1.24
    boolarr = bcolz.carray(np.ones(0, dtype=np.bool_), expectedlen=self.len, rootdir=rootdir, mode='w')
    ctable_iter = self[col_list].iter(out_flavor='tuple')
    ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)
    return boolarr
def where_terms_factorization_check(self, term_list):
    """
    Check whether the given filter terms can possibly match any row, using
    the cached factorizations (unique value lists) of the filter columns.

    A terms list has a [(col, operator, value), ..] construction.
    Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    Returns True when every term can match at least one known value (or
    when no factorization is available to decide); False when a term is
    guaranteed to filter everything out.

    :param term_list: list of (column, operator, value) tuples
    :return: bool
    :raise ValueError: on malformed term lists
    :raise KeyError: on unknown columns or operators
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")
    valid = True
    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]
        # check values
        if filter_col not in self.cols:
            raise KeyError(unicode(filter_col) + ' not in table')
        col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
        if not os.path.exists(col_values_rootdir):
            # no factorization available, so we cannot prove anything;
            # leave the terms as potentially valid
            break
        col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        col_values = set(col_carray)
        if filter_operator in ['in', 'not in', 'nin']:
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")
            # optimize lists of 1 value
            if len(filter_value) == 1:
                filter_value = filter_value[0]
                if filter_operator == 'in':
                    filter_operator = '=='
                else:
                    filter_operator = '!='
            else:
                filter_value = set(filter_value)
        # NB: the generator expressions below yield booleans on purpose;
        # the previous `any(val for val in ... if cond)` form tested the
        # truthiness of the values themselves, so falsy matches such as 0
        # or '' were wrongly treated as "no match"
        if filter_operator in ['==', 'eq']:
            valid = filter_value in col_values
        elif filter_operator in ['!=', 'neq']:
            valid = any(val != filter_value for val in col_values)
        elif filter_operator in ['in']:
            valid = any(val in col_values for val in filter_value)
        elif filter_operator in ['nin', 'not in']:
            valid = any(val not in filter_value for val in col_values)
        elif filter_operator in ['>']:
            valid = any(val > filter_value for val in col_values)
        elif filter_operator in ['>=']:
            valid = any(val >= filter_value for val in col_values)
        elif filter_operator in ['<']:
            valid = any(val < filter_value for val in col_values)
        elif filter_operator in ['<=']:
            # bugfix: this branch previously tested `val >= filter_value`
            # (copy-paste from the '>=' branch), inverting the check
            valid = any(val <= filter_value for val in col_values)
        else:
            raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
        # if one of the filters is blocking, we can stop
        if not valid:
            break
    return valid
def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
                            _max_len_subgroup=1000):
    """
    Expand the filter over whole ordered subgroups of the basket column.

    Parameters
    ----------
    basket_col : str
        column that defines the subgroups (required)
    bool_arr : bcolz carray or None
        the filter to expand; when None, None is returned
    _max_len_subgroup : int
        maximum expected subgroup length

    Returns
    -------
    bcolz carray or None
    """
    assert basket_col is not None
    if bool_arr is None:
        return None
    # only back the result by a temp dir when caching is on and the
    # input filter itself lives on disk
    use_disk = self.auto_cache and bool_arr.rootdir is not None
    rootdir = self.create_tmp_rootdir() if use_disk else None
    return ctable_ext.is_in_ordered_subgroups(
        self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
        _max_len_subgroup=_max_len_subgroup)
|
visualfabriq/bquery | bquery/ctable.py | ctable.make_group_index | python | def make_group_index(self, groupby_cols, bool_arr):
'''Create unique groups for groupby loop
Args:
factor_list:
values_list:
groupby_cols:
bool_arr:
Returns:
carray: (carray_factor)
int: (nr_groups) the number of resulting groups
int: (skip_key)
'''
factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
# create unique groups for groupby loop
if len(factor_list) == 0:
# no columns to groupby over, so directly aggregate the measure
# columns to 1 total
tmp_rootdir = self.create_tmp_rootdir()
carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
carray_values = ['Total']
elif len(factor_list) == 1:
# single column groupby, the groupby output column
# here is 1:1 to the values
carray_factor = factor_list[0]
carray_values = values_list[0]
else:
# multi column groupby
# first combine the factorized columns to single values
if self.group_cache_valid(col_list=groupby_cols):
# there is a group cache that we can use
col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
col_factor_rootdir = col_rootdir + '.factor'
carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
col_values_rootdir = col_rootdir + '.values'
carray_values = bcolz.carray(rootdir=col_values_rootdir)
else:
# create a brand new groupby col combination
carray_factor, carray_values = \
self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
nr_groups = len(carray_values)
skip_key = None
if bool_arr is not None:
# make all non relevant combinations -1
tmp_rootdir = self.create_tmp_rootdir()
carray_factor = bcolz.eval(
'(factor + 1) * bool - 1',
user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
# now check how many unique values there are left
tmp_rootdir = self.create_tmp_rootdir()
labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
carray_factor, values = ctable_ext.factorize(carray_factor, labels)
# values might contain one value too much (-1) (no direct lookup
# possible because values is a reversed dict)
filter_check = \
[key for key, value in values.items() if value == -1]
if filter_check:
skip_key = filter_check[0]
# the new nr of groups depends on the outcome after filtering
nr_groups = len(values)
# using nr_groups as a total length might be one one off due to the skip_key
# (skipping a row in aggregation)
# but that is okay normally
if skip_key is None:
# if we shouldn't skip a row, set it at the first row after the total number of groups
skip_key = nr_groups
return carray_factor, nr_groups, skip_key | Create unique groups for groupby loop
Args:
factor_list:
values_list:
groupby_cols:
bool_arr:
Returns:
carray: (carray_factor)
int: (nr_groups) the number of resulting groups
int: (skip_key) | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L474-L547 | null | class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
super(ctable, self).__init__(*args, **kwargs)
# check autocaching
if self.rootdir and kwargs.get('auto_cache') is True:
# explicit auto_cache
self.auto_cache = True
elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
# implicit auto_cache
self.auto_cache = True
else:
self.auto_cache = False
self.auto_cache = True # debug
self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
group_name = '_'.join(sorted(col_list))
return group_name
def cache_valid(self, col):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_org_file_check = self[col].rootdir + '/__attrs__'
col_values_file_check = self[col].rootdir + '.values/__attrs__'
cache_valid = os.path.exists(col_org_file_check) and os.path.exists(col_values_file_check)
return cache_valid
def group_cache_valid(self, col_list):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_values_file_check = os.path.join(self.rootdir, self.create_group_base_name(col_list)) + \
'.values/__attrs__'
exists_group_index = os.path.exists(col_values_file_check)
missing_col_check = [1 for col in col_list if not os.path.exists(self[col].rootdir + '/__attrs__')]
cache_valid = (exists_group_index and not missing_col_check)
return cache_valid
def cache_factor(self, col_list, refresh=False):
"""
Existing todos here are: these should be hidden helper carrays
As in: not normal columns that you would normally see as a user
The factor (label index) carray is as long as the original carray
(and the rest of the table therefore)
But the (unique) values carray is not as long (as long as the number
of unique values)
:param col_list:
:param refresh:
:return:
"""
if not self.rootdir:
raise TypeError('Only out-of-core ctables can have '
'factorization caching at the moment')
if not isinstance(col_list, list):
col_list = [col_list]
if refresh:
kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
for kill_dir in kill_list:
rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
for col in col_list:
# create cache if needed
if refresh or not self.cache_valid(col):
# todo: also add locking mechanism here
# create directories
col_rootdir = self[col].rootdir
col_factor_rootdir = col_rootdir + '.factor'
col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
col_values_rootdir = col_rootdir + '.values'
col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
# create factor
carray_factor = \
bcolz.carray([], dtype='int64', expectedlen=self.size,
rootdir=col_factor_rootdir_tmp, mode='w')
_, values = \
ctable_ext.factorize(self[col], labels=carray_factor)
carray_factor.flush()
rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
# create values
carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
rootdir=col_values_rootdir_tmp, mode='w')
carray_values.flush()
rm_file_or_dir(col_values_rootdir, ignore_errors=True)
shutil.move(col_values_rootdir_tmp, col_values_rootdir)
def unique(self, col_or_col_list):
"""
Return a list of unique values of a column or a list of lists of column list
:param col_or_col_list: a column or a list of columns
:return:
"""
if isinstance(col_or_col_list, list):
col_is_list = True
col_list = col_or_col_list
else:
col_is_list = False
col_list = [col_or_col_list]
output = []
for col in col_list:
if self.auto_cache or self.cache_valid(col):
# create factorization cache
if not self.cache_valid(col):
self.cache_factor([col])
# retrieve values from existing disk-based factorization
col_values_rootdir = self[col].rootdir + '.values'
carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
values = list(carray_values)
else:
# factorize on-the-fly
_, values = ctable_ext.factorize(self[col])
values = values.values()
output.append(values)
if not col_is_list:
output = output[0]
return output
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
carray_factor, groupby_cols, agg_ops,
dtype_dict, bool_arr=None):
'''Perform aggregation and place the result in the given ctable.
Args:
ct_agg (ctable): the table to hold the aggregation
nr_groups (int): the number of groups (number of rows in output table)
skip_key (int): index of the output row to remove from results (used for filtering)
carray_factor: the carray for each row in the table a reference to the the unique group index
groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
output_agg_ops (list): list of tuples of the form: (input_col, agg_op)
input_col (string): name of the column to act on
agg_op (int): aggregation operation to perform
bool_arr: a boolean array containing the filter
'''
# this creates the groupby columns
for col in groupby_cols:
result_array = ctable_ext.groupby_value(self[col], carray_factor,
nr_groups, skip_key)
if bool_arr is not None:
result_array = np.delete(result_array, skip_key)
ct_agg.addcol(result_array, name=col)
del result_array
# this creates the aggregation columns
for input_col_name, output_col_name, agg_op in agg_ops:
input_col = self[input_col_name]
output_col_dtype = dtype_dict[output_col_name]
input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
if agg_op == 'sum':
ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'mean':
ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'std':
ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count':
ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count_distinct':
ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'sorted_count_distinct':
ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
else:
raise KeyError('Unknown aggregation operation ' + str(agg_op))
if bool_arr is not None:
output_buffer = np.delete(output_buffer, skip_key)
ct_agg.addcol(output_buffer, name=output_col_name)
del output_buffer
ct_agg.delcol('tmp_col_bquery__')
def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
"""
Aggregate the ctable
groupby_cols: a list of columns to groupby over
agg_list: the aggregation operations, which can be:
- a list of column names (output has same name and sum is performed)
['m1', 'm2', ...]
- a list of lists, each list contains input column name and operation
[['m1', 'sum'], ['m2', 'mean'], ...]
- a list of lists, each list contains input column name, operation and
output column name
[['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]
Currently supported aggregation operations are:
- 'sum'
- 'count'
- 'count_na'
- 'count_distinct'
- 'sorted_count_distinct', data should have been
previously presorted
- 'mean', arithmetic mean (average)
- 'std', standard deviation
boolarr: to be added (filtering the groupby factorization input)
rootdir: the aggregation ctable rootdir
"""
carray_factor, nr_groups, skip_key = \
self.make_group_index(groupby_cols, bool_arr)
# check if the bool_arr actually filters
if bool_arr is not None and np.all(bool_arr):
bool_arr = None
if bool_arr is None:
expectedlen = nr_groups
else:
expectedlen = nr_groups - 1
ct_agg, dtype_dict, agg_ops = \
self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
# perform aggregation
self.aggregate_groups(ct_agg, nr_groups, skip_key,
carray_factor, groupby_cols,
agg_ops, dtype_dict,
bool_arr=bool_arr)
# clean up everything that was used
self.clean_tmp_rootdir()
return ct_agg
# groupby helper functions
def factorize_groupby_cols(self, groupby_cols):
"""
factorizes all columns that are used in the groupby
it will use cache carrays if available
if not yet auto_cache is valid, it will create cache carrays
"""
# first check if the factorized arrays already exist
# unless we need to refresh the cache
factor_list = []
values_list = []
# factorize the groupby columns
for col in groupby_cols:
if self.auto_cache or self.cache_valid(col):
# create factorization cache if needed
if not self.cache_valid(col):
self.cache_factor([col])
col_rootdir = self[col].rootdir
col_factor_rootdir = col_rootdir + '.factor'
col_values_rootdir = col_rootdir + '.values'
col_carray_factor = \
bcolz.carray(rootdir=col_factor_rootdir, mode='r')
col_carray_values = \
bcolz.carray(rootdir=col_values_rootdir, mode='r')
else:
col_carray_factor, values = ctable_ext.factorize(self[col])
col_carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype))
factor_list.append(col_carray_factor)
values_list.append(col_carray_values)
return factor_list, values_list
@staticmethod
def _int_array_hash(input_list):
"""
A function to calculate a hash value of multiple integer values, not used at the moment
Parameters
----------
input_list
Returns
-------
"""
list_len = len(input_list)
arr_len = len(input_list[0])
mult_arr = np.full(arr_len, 1000003, dtype=np.long)
value_arr = np.full(arr_len, 0x345678, dtype=np.long)
for i, current_arr in enumerate(input_list):
index = list_len - i - 1
value_arr ^= current_arr
value_arr *= mult_arr
mult_arr += (82520 + index + index)
value_arr += 97531
result_carray = bcolz.carray(value_arr)
del value_arr
return result_carray
def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
"""
Create a unique, factorized column out of several individual columns
Parameters
----------
factor_list
groupby_cols
cache
Returns
-------
"""
if not self.rootdir:
# in-memory scenario
input_rootdir = None
col_rootdir = None
col_factor_rootdir = None
col_values_rootdir = None
col_factor_rootdir_tmp = None
col_values_rootdir_tmp = None
else:
# temporary
input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
# create combination of groupby columns
group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
factor_table = bcolz.ctable(factor_list, names=groupby_cols)
ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)
# now factorize the results
carray_factor = \
bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
carray_factor.flush()
carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
carray_values.flush()
del group_array
if cache:
# clean up the temporary file
rm_file_or_dir(input_rootdir, ignore_errors=True)
if cache:
# official end destination
col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
col_factor_rootdir = col_rootdir + '.factor'
col_values_rootdir = col_rootdir + '.values'
lock_file = col_rootdir + '.lock'
# only works for linux
if not os.path.exists(lock_file):
uid = str(uuid.uuid4())
try:
with open(lock_file, 'a+') as fn:
fn.write(uid + '\n')
with open(lock_file, 'r') as fn:
temp = fn.read().splitlines()
if temp[0] == uid:
lock = True
else:
lock = False
del temp
except:
lock = False
else:
lock = False
if lock:
rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')
rm_file_or_dir(col_values_rootdir, ignore_errors=False)
shutil.move(col_values_rootdir_tmp, col_values_rootdir)
carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
else:
# another process has a lock, we will work with our current files and clean up later
self._dir_clean_list.append(col_factor_rootdir)
self._dir_clean_list.append(col_values_rootdir)
return carray_factor, carray_values
def create_tmp_rootdir(self):
"""
create a rootdir that we can destroy later again
Returns
-------
"""
if self.rootdir:
tmp_rootdir = tempfile.mkdtemp(prefix='bcolz-')
self._dir_clean_list.append(tmp_rootdir)
else:
tmp_rootdir = None
return tmp_rootdir
def clean_tmp_rootdir(self):
"""
clean up all used temporary rootdirs
Returns
-------
"""
for tmp_rootdir in list(self._dir_clean_list):
rm_file_or_dir(tmp_rootdir)
self._dir_clean_list.remove(tmp_rootdir)
def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
    '''Create a container for the output table, a dictionary describing its
    columns and a list of tuples describing aggregation
    operations to perform.

    Args:
        groupby_cols (list): a list of columns to groupby over
        agg_list (list): the aggregation operations (see groupby for more info)
        expectedlen (int): expected length of output table
        rootdir (string): the directory to write the table to

    Returns:
        ctable: A table in the correct format for containing the output of
            the specified aggregation operations.
        dict: (dtype_dict) dictionary mapping output column names to dtypes
        list: (agg_ops) list of tuples of the form:
            (input_col_name, output_col_name, agg_op)
            input_col_name (string): name of the column to act on
            output_col_name (string): name of the column to output to
            agg_op (string): aggregation operation to perform

    Raises:
        NotImplementedError: when an unknown aggregation operation is requested
    '''
    dtype_dict = {}
    # include all the groupby columns; they keep their source dtype
    for col in groupby_cols:
        dtype_dict[col] = self[col].dtype
    agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
    agg_ops = []
    for agg_info in agg_list:
        if not isinstance(agg_info, list):
            # example: ['m1', 'm2', ...]
            # default operation (sum) and default output column name (same as input)
            output_col_name = agg_info
            input_col_name = agg_info
            agg_op = 'sum'
        else:
            input_col_name = agg_info[0]
            agg_op = agg_info[1]
            if len(agg_info) == 2:
                # example: [['m1', 'sum'], ['m2', 'mean'], ...]
                # default output column name
                output_col_name = input_col_name
            else:
                # example: [['m1', 'sum', 'mnew1'], ['m1', 'mean', 'mnew2'], ...]
                # fully specified
                output_col_name = agg_info[2]
        if agg_op not in agg_ops_list:
            raise NotImplementedError(
                'Unknown Aggregation Type: ' + str(agg_op))
        # choose output column dtype based on aggregation operation and
        # input column dtype
        # TODO: check if the aggregation columns is numeric
        # NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
        # separate operation
        if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
            output_col_dtype = np.dtype(np.int64)
        elif agg_op in ('mean', 'std'):
            output_col_dtype = np.dtype(np.float64)
        else:
            output_col_dtype = self[input_col_name].dtype
        dtype_dict[output_col_name] = output_col_dtype
        # save output
        agg_ops.append((input_col_name, output_col_name, agg_op))
    # create aggregation table
    # NB: np.bool_ instead of the np.bool alias, which was deprecated in
    # NumPy 1.20 and removed in 1.24
    ct_agg = bcolz.ctable(
        np.zeros(expectedlen, [('tmp_col_bquery__', np.bool_)]),
        expectedlen=expectedlen,
        rootdir=rootdir)
    return ct_agg, dtype_dict, agg_ops
def where_terms(self, term_list, cache=False):
    """
    Create a boolean array where `term_list` is true.

    A terms list has a [(col, operator, value), ..] construction.
    Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    :param term_list: list of (column, operator, value) tuples
    :param cache: when True, back the boolean array by a temporary rootdir
        (nb: that directory is not destroyed until the end of the groupby)
    :return: bcolz boolean carray marking the matching rows
    :raise ValueError: on malformed term lists
    :raise KeyError: on unknown columns or operators
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")
    # operator symbol -> internal op id understood by ctable_ext
    op_id_map = {
        '==': 1, 'eq': 1,
        '!=': 2, 'neq': 2,
        'in': 3,
        'nin': 4, 'not in': 4,
        '>': 5,
        '>=': 6,
        '<': 7,
        '<=': 8,
    }
    col_list = []
    op_list = []
    value_list = []
    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]
        # check values
        if filter_col not in self.cols:
            raise KeyError(unicode(filter_col) + ' not in table')
        if filter_operator not in op_id_map:
            raise KeyError(unicode(filter_operator) + ' is not an accepted operator for filtering')
        op_id = op_id_map[filter_operator]
        if op_id in (3, 4):
            # membership tests need a collection of candidate values
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")
            if len(filter_value) == 1:
                # optimize lists of 1 value into a plain (in)equality test
                op_id = 1 if op_id == 3 else 2
                filter_value = filter_value[0]
            else:
                filter_value = set(filter_value)
        # prepare input for filter creation
        col_list.append(filter_col)
        op_list.append(op_id)
        value_list.append(filter_value)
    # rootdir
    if cache:
        # nb: this directory is not destroyed until the end of the groupby
        rootdir = self.create_tmp_rootdir()
    else:
        rootdir = None
    # create boolean array and fill it
    # NB: np.bool_ instead of the np.bool alias, which was deprecated in
    # NumPy 1.20 and removed in 1.24
    boolarr = bcolz.carray(np.ones(0, dtype=np.bool_), expectedlen=self.len, rootdir=rootdir, mode='w')
    ctable_iter = self[col_list].iter(out_flavor='tuple')
    ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)
    return boolarr
def where_terms_factorization_check(self, term_list):
    """
    Check whether the given filter terms can possibly match any row, using
    the cached factorizations (unique value lists) of the filter columns.

    A terms list has a [(col, operator, value), ..] construction.
    Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    Returns True when every term can match at least one known value (or
    when no factorization is available to decide); False when a term is
    guaranteed to filter everything out.

    :param term_list: list of (column, operator, value) tuples
    :return: bool
    :raise ValueError: on malformed term lists
    :raise KeyError: on unknown columns or operators
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")
    valid = True
    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]
        # check values
        if filter_col not in self.cols:
            raise KeyError(unicode(filter_col) + ' not in table')
        col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
        if not os.path.exists(col_values_rootdir):
            # no factorization available, so we cannot prove anything;
            # leave the terms as potentially valid
            break
        col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        col_values = set(col_carray)
        if filter_operator in ['in', 'not in', 'nin']:
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")
            # optimize lists of 1 value
            if len(filter_value) == 1:
                filter_value = filter_value[0]
                if filter_operator == 'in':
                    filter_operator = '=='
                else:
                    filter_operator = '!='
            else:
                filter_value = set(filter_value)
        # NB: the generator expressions below yield booleans on purpose;
        # the previous `any(val for val in ... if cond)` form tested the
        # truthiness of the values themselves, so falsy matches such as 0
        # or '' were wrongly treated as "no match"
        if filter_operator in ['==', 'eq']:
            valid = filter_value in col_values
        elif filter_operator in ['!=', 'neq']:
            valid = any(val != filter_value for val in col_values)
        elif filter_operator in ['in']:
            valid = any(val in col_values for val in filter_value)
        elif filter_operator in ['nin', 'not in']:
            valid = any(val not in filter_value for val in col_values)
        elif filter_operator in ['>']:
            valid = any(val > filter_value for val in col_values)
        elif filter_operator in ['>=']:
            valid = any(val >= filter_value for val in col_values)
        elif filter_operator in ['<']:
            valid = any(val < filter_value for val in col_values)
        elif filter_operator in ['<=']:
            # bugfix: this branch previously tested `val >= filter_value`
            # (copy-paste from the '>=' branch), inverting the check
            valid = any(val <= filter_value for val in col_values)
        else:
            raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
        # if one of the filters is blocking, we can stop
        if not valid:
            break
    return valid
def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
_max_len_subgroup=1000):
"""
Expands the filter using a specified column
Parameters
----------
basket_col
bool_arr
_max_len_subgroup
Returns
-------
"""
assert basket_col is not None
if bool_arr is None:
return None
if self.auto_cache and bool_arr.rootdir is not None:
rootdir = self.create_tmp_rootdir()
else:
rootdir = None
return \
ctable_ext.is_in_ordered_subgroups(
self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
_max_len_subgroup=_max_len_subgroup)
|
visualfabriq/bquery | bquery/ctable.py | ctable.create_tmp_rootdir | python | def create_tmp_rootdir(self):
if self.rootdir:
tmp_rootdir = tempfile.mkdtemp(prefix='bcolz-')
self._dir_clean_list.append(tmp_rootdir)
else:
tmp_rootdir = None
return tmp_rootdir | create a rootdir that we can destroy later again
Returns
------- | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L549-L562 | null | class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
super(ctable, self).__init__(*args, **kwargs)
# check autocaching
if self.rootdir and kwargs.get('auto_cache') is True:
# explicit auto_cache
self.auto_cache = True
elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
# implicit auto_cache
self.auto_cache = True
else:
self.auto_cache = False
self.auto_cache = True # debug
self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
group_name = '_'.join(sorted(col_list))
return group_name
def cache_valid(self, col):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_org_file_check = self[col].rootdir + '/__attrs__'
col_values_file_check = self[col].rootdir + '.values/__attrs__'
cache_valid = os.path.exists(col_org_file_check) and os.path.exists(col_values_file_check)
return cache_valid
def group_cache_valid(self, col_list):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_values_file_check = os.path.join(self.rootdir, self.create_group_base_name(col_list)) + \
'.values/__attrs__'
exists_group_index = os.path.exists(col_values_file_check)
missing_col_check = [1 for col in col_list if not os.path.exists(self[col].rootdir + '/__attrs__')]
cache_valid = (exists_group_index and not missing_col_check)
return cache_valid
    def cache_factor(self, col_list, refresh=False):
        """Create (or refresh) on-disk factorization caches for the given columns.

        For every column this writes two sibling carrays next to the column's
        rootdir: ``<col>.factor`` (a label index as long as the column itself)
        and ``<col>.values`` (the unique values, one entry per distinct label).

        Existing todos here are: these should be hidden helper carrays,
        not normal columns that you would normally see as a user.

        :param col_list: a column name or a list of column names to cache
        :param refresh: when True, wipe every existing .factor/.values cache
                        in the table rootdir first (not only those in col_list)
        :return: None
        :raise TypeError: when the table is in-memory (no rootdir)
        """
        if not self.rootdir:
            raise TypeError('Only out-of-core ctables can have '
                            'factorization caching at the moment')
        if not isinstance(col_list, list):
            col_list = [col_list]
        if refresh:
            # remove ALL cache directories in the table rootdir
            kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
            for kill_dir in kill_list:
                rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
        for col in col_list:
            # create cache if needed
            if refresh or not self.cache_valid(col):
                # todo: also add locking mechanism here
                # create directories; build into tmp dirs first and move into
                # place afterwards, so a crash cannot leave a half-written cache
                col_rootdir = self[col].rootdir
                col_factor_rootdir = col_rootdir + '.factor'
                col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
                col_values_rootdir = col_rootdir + '.values'
                col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
                # create factor (label index as long as the column)
                carray_factor = \
                    bcolz.carray([], dtype='int64', expectedlen=self.size,
                                 rootdir=col_factor_rootdir_tmp, mode='w')
                _, values = \
                    ctable_ext.factorize(self[col], labels=carray_factor)
                carray_factor.flush()
                rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
                shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
                # create values (one entry per unique label)
                carray_values = \
                    bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
                                 rootdir=col_values_rootdir_tmp, mode='w')
                carray_values.flush()
                rm_file_or_dir(col_values_rootdir, ignore_errors=True)
                shutil.move(col_values_rootdir_tmp, col_values_rootdir)
def unique(self, col_or_col_list):
"""
Return a list of unique values of a column or a list of lists of column list
:param col_or_col_list: a column or a list of columns
:return:
"""
if isinstance(col_or_col_list, list):
col_is_list = True
col_list = col_or_col_list
else:
col_is_list = False
col_list = [col_or_col_list]
output = []
for col in col_list:
if self.auto_cache or self.cache_valid(col):
# create factorization cache
if not self.cache_valid(col):
self.cache_factor([col])
# retrieve values from existing disk-based factorization
col_values_rootdir = self[col].rootdir + '.values'
carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
values = list(carray_values)
else:
# factorize on-the-fly
_, values = ctable_ext.factorize(self[col])
values = values.values()
output.append(values)
if not col_is_list:
output = output[0]
return output
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
carray_factor, groupby_cols, agg_ops,
dtype_dict, bool_arr=None):
'''Perform aggregation and place the result in the given ctable.
Args:
ct_agg (ctable): the table to hold the aggregation
nr_groups (int): the number of groups (number of rows in output table)
skip_key (int): index of the output row to remove from results (used for filtering)
carray_factor: the carray for each row in the table a reference to the the unique group index
groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
output_agg_ops (list): list of tuples of the form: (input_col, agg_op)
input_col (string): name of the column to act on
agg_op (int): aggregation operation to perform
bool_arr: a boolean array containing the filter
'''
# this creates the groupby columns
for col in groupby_cols:
result_array = ctable_ext.groupby_value(self[col], carray_factor,
nr_groups, skip_key)
if bool_arr is not None:
result_array = np.delete(result_array, skip_key)
ct_agg.addcol(result_array, name=col)
del result_array
# this creates the aggregation columns
for input_col_name, output_col_name, agg_op in agg_ops:
input_col = self[input_col_name]
output_col_dtype = dtype_dict[output_col_name]
input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
if agg_op == 'sum':
ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'mean':
ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'std':
ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count':
ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count_distinct':
ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'sorted_count_distinct':
ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
else:
raise KeyError('Unknown aggregation operation ' + str(agg_op))
if bool_arr is not None:
output_buffer = np.delete(output_buffer, skip_key)
ct_agg.addcol(output_buffer, name=output_col_name)
del output_buffer
ct_agg.delcol('tmp_col_bquery__')
    def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
        """
        Aggregate the ctable

        groupby_cols: a list of columns to groupby over
        agg_list: the aggregation operations, which can be:
         - a list of column names (output has same name and sum is performed)
           ['m1', 'm2', ...]
         - a list of lists, each list contains input column name and operation
           [['m1', 'sum'], ['m2', 'mean'], ...]
         - a list of lists, each list contains input column name, operation and
           output column name
           [['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]

        Currently supported aggregation operations are:
        - 'sum'
        - 'count'
        - 'count_na'  (NOTE(review): listed here but rejected by
          create_agg_ctable's agg_ops_list -- confirm intended support)
        - 'count_distinct'
        - 'sorted_count_distinct', data should have been
          previously presorted
        - 'mean', arithmetic mean (average)
        - 'std', standard deviation

        bool_arr: optional boolean carray filtering the input rows
        rootdir: the aggregation ctable rootdir
        """
        # build one factor column identifying each unique group combination
        carray_factor, nr_groups, skip_key = \
            self.make_group_index(groupby_cols, bool_arr)
        # check if the bool_arr actually filters; an all-True filter is a no-op
        if bool_arr is not None and np.all(bool_arr):
            bool_arr = None
        if bool_arr is None:
            expectedlen = nr_groups
        else:
            # one group (skip_key) collects the filtered-out rows; it is dropped
            expectedlen = nr_groups - 1
        ct_agg, dtype_dict, agg_ops = \
            self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
        # perform aggregation
        self.aggregate_groups(ct_agg, nr_groups, skip_key,
                              carray_factor, groupby_cols,
                              agg_ops, dtype_dict,
                              bool_arr=bool_arr)
        # clean up everything that was used
        self.clean_tmp_rootdir()
        return ct_agg
# groupby helper functions
def factorize_groupby_cols(self, groupby_cols):
"""
factorizes all columns that are used in the groupby
it will use cache carrays if available
if not yet auto_cache is valid, it will create cache carrays
"""
# first check if the factorized arrays already exist
# unless we need to refresh the cache
factor_list = []
values_list = []
# factorize the groupby columns
for col in groupby_cols:
if self.auto_cache or self.cache_valid(col):
# create factorization cache if needed
if not self.cache_valid(col):
self.cache_factor([col])
col_rootdir = self[col].rootdir
col_factor_rootdir = col_rootdir + '.factor'
col_values_rootdir = col_rootdir + '.values'
col_carray_factor = \
bcolz.carray(rootdir=col_factor_rootdir, mode='r')
col_carray_values = \
bcolz.carray(rootdir=col_values_rootdir, mode='r')
else:
col_carray_factor, values = ctable_ext.factorize(self[col])
col_carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype))
factor_list.append(col_carray_factor)
values_list.append(col_carray_values)
return factor_list, values_list
@staticmethod
def _int_array_hash(input_list):
"""
A function to calculate a hash value of multiple integer values, not used at the moment
Parameters
----------
input_list
Returns
-------
"""
list_len = len(input_list)
arr_len = len(input_list[0])
mult_arr = np.full(arr_len, 1000003, dtype=np.long)
value_arr = np.full(arr_len, 0x345678, dtype=np.long)
for i, current_arr in enumerate(input_list):
index = list_len - i - 1
value_arr ^= current_arr
value_arr *= mult_arr
mult_arr += (82520 + index + index)
value_arr += 97531
result_carray = bcolz.carray(value_arr)
del value_arr
return result_carray
def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
"""
Create a unique, factorized column out of several individual columns
Parameters
----------
factor_list
groupby_cols
cache
Returns
-------
"""
if not self.rootdir:
# in-memory scenario
input_rootdir = None
col_rootdir = None
col_factor_rootdir = None
col_values_rootdir = None
col_factor_rootdir_tmp = None
col_values_rootdir_tmp = None
else:
# temporary
input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
# create combination of groupby columns
group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
factor_table = bcolz.ctable(factor_list, names=groupby_cols)
ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)
# now factorize the results
carray_factor = \
bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
carray_factor.flush()
carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
carray_values.flush()
del group_array
if cache:
# clean up the temporary file
rm_file_or_dir(input_rootdir, ignore_errors=True)
if cache:
# official end destination
col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
col_factor_rootdir = col_rootdir + '.factor'
col_values_rootdir = col_rootdir + '.values'
lock_file = col_rootdir + '.lock'
# only works for linux
if not os.path.exists(lock_file):
uid = str(uuid.uuid4())
try:
with open(lock_file, 'a+') as fn:
fn.write(uid + '\n')
with open(lock_file, 'r') as fn:
temp = fn.read().splitlines()
if temp[0] == uid:
lock = True
else:
lock = False
del temp
except:
lock = False
else:
lock = False
if lock:
rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')
rm_file_or_dir(col_values_rootdir, ignore_errors=False)
shutil.move(col_values_rootdir_tmp, col_values_rootdir)
carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
else:
# another process has a lock, we will work with our current files and clean up later
self._dir_clean_list.append(col_factor_rootdir)
self._dir_clean_list.append(col_values_rootdir)
return carray_factor, carray_values
    def make_group_index(self, groupby_cols, bool_arr):
        '''Create the unique-group factor column for the groupby loop.

        Args:
            groupby_cols (list): columns to group over (may be empty)
            bool_arr: optional boolean filter array, or None

        Returns:
            carray: (carray_factor) per-row index of the group the row belongs to
            int: (nr_groups) the number of resulting groups
            int: (skip_key) group index to skip during aggregation (collects
                 filtered-out rows); set just past the last group when nothing
                 has to be skipped
        '''
        factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
        # create unique groups for groupby loop
        if len(factor_list) == 0:
            # no columns to groupby over, so directly aggregate the measure
            # columns to 1 total
            tmp_rootdir = self.create_tmp_rootdir()
            carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
            carray_values = ['Total']
        elif len(factor_list) == 1:
            # single column groupby, the groupby output column
            # here is 1:1 to the values
            carray_factor = factor_list[0]
            carray_values = values_list[0]
        else:
            # multi column groupby
            # first combine the factorized columns to single values
            if self.group_cache_valid(col_list=groupby_cols):
                # there is a group cache that we can use
                col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
                col_factor_rootdir = col_rootdir + '.factor'
                carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
                col_values_rootdir = col_rootdir + '.values'
                carray_values = bcolz.carray(rootdir=col_values_rootdir)
            else:
                # create a brand new groupby col combination
                carray_factor, carray_values = \
                    self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
        nr_groups = len(carray_values)
        skip_key = None
        if bool_arr is not None:
            # make all non relevant combinations -1
            tmp_rootdir = self.create_tmp_rootdir()
            carray_factor = bcolz.eval(
                '(factor + 1) * bool - 1',
                user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
            # now check how many unique values there are left
            tmp_rootdir = self.create_tmp_rootdir()
            labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
            carray_factor, values = ctable_ext.factorize(carray_factor, labels)
            # values might contain one value too much (-1) (no direct lookup
            # possible because values is a reversed dict)
            filter_check = \
                [key for key, value in values.items() if value == -1]
            if filter_check:
                skip_key = filter_check[0]
            # the new nr of groups depends on the outcome after filtering
            nr_groups = len(values)
        # using nr_groups as a total length might be one one off due to the skip_key
        # (skipping a row in aggregation)
        # but that is okay normally
        if skip_key is None:
            # if we shouldn't skip a row, set it at the first row after the total number of groups
            skip_key = nr_groups
        return carray_factor, nr_groups, skip_key
def clean_tmp_rootdir(self):
"""
clean up all used temporary rootdirs
Returns
-------
"""
for tmp_rootdir in list(self._dir_clean_list):
rm_file_or_dir(tmp_rootdir)
self._dir_clean_list.remove(tmp_rootdir)
def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
'''Create a container for the output table, a dictionary describing it's
columns and a list of tuples describing aggregation
operations to perform.
Args:
groupby_cols (list): a list of columns to groupby over
agg_list (list): the aggregation operations (see groupby for more info)
expectedlen (int): expected length of output table
rootdir (string): the directory to write the table to
Returns:
ctable: A table in the correct format for containing the output of
the specified aggregation operations.
dict: (dtype_dict) dictionary describing columns to create
list: (agg_ops) list of tuples of the form:
(input_col_name, output_col_name, agg_op)
input_col_name (string): name of the column to act on
output_col_name (string): name of the column to output to
agg_op (int): aggregation operation to perform
'''
dtype_dict = {}
# include all the groupby columns
for col in groupby_cols:
dtype_dict[col] = self[col].dtype
agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
agg_ops = []
for agg_info in agg_list:
if not isinstance(agg_info, list):
# example: ['m1', 'm2', ...]
# default operation (sum) and default output column name (same is input)
output_col_name = agg_info
input_col_name = agg_info
agg_op = 'sum'
else:
input_col_name = agg_info[0]
agg_op = agg_info[1]
if len(agg_info) == 2:
# example: [['m1', 'sum'], ['m2', 'mean], ...]
# default output column name
output_col_name = input_col_name
else:
# example: [['m1', 'sum', 'mnew1'], ['m1, 'mean','mnew2'], ...]
# fully specified
output_col_name = agg_info[2]
if agg_op not in agg_ops_list:
raise NotImplementedError(
'Unknown Aggregation Type: ' + str(agg_op))
# choose output column dtype based on aggregation operation and
# input column dtype
# TODO: check if the aggregation columns is numeric
# NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
# separate operation
if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
output_col_dtype = np.dtype(np.int64)
elif agg_op in ('mean', 'std'):
output_col_dtype = np.dtype(np.float64)
else:
output_col_dtype = self[input_col_name].dtype
dtype_dict[output_col_name] = output_col_dtype
# save output
agg_ops.append((input_col_name, output_col_name, agg_op))
# create aggregation table
ct_agg = bcolz.ctable(
np.zeros(expectedlen, [('tmp_col_bquery__', np.bool)]),
expectedlen=expectedlen,
rootdir=rootdir)
return ct_agg, dtype_dict, agg_ops
def where_terms(self, term_list, cache=False):
"""
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError:
"""
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
col_list = []
op_list = []
value_list = []
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
if filter_operator in ['==', 'eq']:
op_id = 1
elif filter_operator in ['!=', 'neq']:
op_id = 2
elif filter_operator in ['in']:
op_id = 3
elif filter_operator in ['nin', 'not in']:
op_id = 4
elif filter_operator in ['>']:
op_id = 5
elif filter_operator in ['>=']:
op_id = 6
elif filter_operator in ['<']:
op_id = 7
elif filter_operator in ['<=']:
op_id = 8
else:
raise KeyError(unicode(filter_operator) + ' is not an accepted operator for filtering')
if op_id in [3, 4]:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
if op_id == 3:
op_id = 1
else:
op_id = 2
filter_value = filter_value[0]
else:
filter_value = set(filter_value)
# prepare input for filter creation
col_list.append(filter_col)
op_list.append(op_id)
value_list.append(filter_value)
# rootdir
if cache:
# nb: this directory is not destroyed until the end of the groupby
rootdir = self.create_tmp_rootdir()
else:
rootdir = None
# create boolean array and fill it
boolarr = bcolz.carray(np.ones(0, dtype=np.bool), expectedlen=self.len, rootdir=rootdir, mode='w')
ctable_iter = self[col_list].iter(out_flavor='tuple')
ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)
return boolarr
def where_terms_factorization_check(self, term_list):
"""
check for where terms if they are applicable
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError:
"""
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
valid = True
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
if not os.path.exists(col_values_rootdir):
# no factorization available
break
col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
col_values = set(col_carray)
if filter_operator in ['in', 'not in', 'nin']:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
filter_value = filter_value[0]
if filter_operator == 'in':
filter_operator = '=='
else:
filter_operator = '!='
else:
filter_value = set(filter_value)
if filter_operator in ['==', 'eq']:
valid = filter_value in col_values
elif filter_operator in ['!=', 'neq']:
valid = any(val for val in col_values if val != filter_value)
elif filter_operator in ['in']:
valid = any(val for val in filter_value if val in col_values)
elif filter_operator in ['nin', 'not in']:
valid = any(val for val in col_values if val not in filter_value)
elif filter_operator in ['>']:
valid = any(val for val in col_values if val > filter_value)
elif filter_operator in ['>=']:
valid = any(val for val in col_values if val >= filter_value)
elif filter_operator in ['<']:
valid = any(val for val in col_values if val < filter_value)
elif filter_operator in ['<=']:
valid = any(val for val in col_values if val >= filter_value)
else:
raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
# if one of the filters is blocking, we can stop
if not valid:
break
return valid
def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
_max_len_subgroup=1000):
"""
Expands the filter using a specified column
Parameters
----------
basket_col
bool_arr
_max_len_subgroup
Returns
-------
"""
assert basket_col is not None
if bool_arr is None:
return None
if self.auto_cache and bool_arr.rootdir is not None:
rootdir = self.create_tmp_rootdir()
else:
rootdir = None
return \
ctable_ext.is_in_ordered_subgroups(
self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
_max_len_subgroup=_max_len_subgroup)
|
visualfabriq/bquery | bquery/ctable.py | ctable.clean_tmp_rootdir | python | def clean_tmp_rootdir(self):
for tmp_rootdir in list(self._dir_clean_list):
rm_file_or_dir(tmp_rootdir)
self._dir_clean_list.remove(tmp_rootdir) | clean up all used temporary rootdirs
Returns
------- | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L564-L574 | [
"def rm_file_or_dir(path, ignore_errors=True):\n \"\"\"\n Helper function to clean a certain filepath\n\n Parameters\n ----------\n path\n\n Returns\n -------\n\n \"\"\"\n if os.path.exists(path):\n if os.path.isdir(path):\n if os.path.islink(path):\n os.unlink(path)\n else:\n shutil.rmtree(path, ignore_errors=ignore_errors)\n else:\n if os.path.islink(path):\n os.unlink(path)\n else:\n os.remove(path)\n"
] | class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
super(ctable, self).__init__(*args, **kwargs)
# check autocaching
if self.rootdir and kwargs.get('auto_cache') is True:
# explicit auto_cache
self.auto_cache = True
elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
# implicit auto_cache
self.auto_cache = True
else:
self.auto_cache = False
self.auto_cache = True # debug
self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
group_name = '_'.join(sorted(col_list))
return group_name
def cache_valid(self, col):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_org_file_check = self[col].rootdir + '/__attrs__'
col_values_file_check = self[col].rootdir + '.values/__attrs__'
cache_valid = os.path.exists(col_org_file_check) and os.path.exists(col_values_file_check)
return cache_valid
def group_cache_valid(self, col_list):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_values_file_check = os.path.join(self.rootdir, self.create_group_base_name(col_list)) + \
'.values/__attrs__'
exists_group_index = os.path.exists(col_values_file_check)
missing_col_check = [1 for col in col_list if not os.path.exists(self[col].rootdir + '/__attrs__')]
cache_valid = (exists_group_index and not missing_col_check)
return cache_valid
    def cache_factor(self, col_list, refresh=False):
        """Create (or refresh) on-disk factorization caches for the given columns.

        For every column this writes two sibling carrays next to the column's
        rootdir: ``<col>.factor`` (a label index as long as the column itself)
        and ``<col>.values`` (the unique values, one entry per distinct label).

        Existing todos here are: these should be hidden helper carrays,
        not normal columns that you would normally see as a user.

        :param col_list: a column name or a list of column names to cache
        :param refresh: when True, wipe every existing .factor/.values cache
                        in the table rootdir first (not only those in col_list)
        :return: None
        :raise TypeError: when the table is in-memory (no rootdir)
        """
        if not self.rootdir:
            raise TypeError('Only out-of-core ctables can have '
                            'factorization caching at the moment')
        if not isinstance(col_list, list):
            col_list = [col_list]
        if refresh:
            # remove ALL cache directories in the table rootdir
            kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
            for kill_dir in kill_list:
                rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
        for col in col_list:
            # create cache if needed
            if refresh or not self.cache_valid(col):
                # todo: also add locking mechanism here
                # create directories; build into tmp dirs first and move into
                # place afterwards, so a crash cannot leave a half-written cache
                col_rootdir = self[col].rootdir
                col_factor_rootdir = col_rootdir + '.factor'
                col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
                col_values_rootdir = col_rootdir + '.values'
                col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
                # create factor (label index as long as the column)
                carray_factor = \
                    bcolz.carray([], dtype='int64', expectedlen=self.size,
                                 rootdir=col_factor_rootdir_tmp, mode='w')
                _, values = \
                    ctable_ext.factorize(self[col], labels=carray_factor)
                carray_factor.flush()
                rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
                shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
                # create values (one entry per unique label)
                carray_values = \
                    bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
                                 rootdir=col_values_rootdir_tmp, mode='w')
                carray_values.flush()
                rm_file_or_dir(col_values_rootdir, ignore_errors=True)
                shutil.move(col_values_rootdir_tmp, col_values_rootdir)
def unique(self, col_or_col_list):
"""
Return a list of unique values of a column or a list of lists of column list
:param col_or_col_list: a column or a list of columns
:return:
"""
if isinstance(col_or_col_list, list):
col_is_list = True
col_list = col_or_col_list
else:
col_is_list = False
col_list = [col_or_col_list]
output = []
for col in col_list:
if self.auto_cache or self.cache_valid(col):
# create factorization cache
if not self.cache_valid(col):
self.cache_factor([col])
# retrieve values from existing disk-based factorization
col_values_rootdir = self[col].rootdir + '.values'
carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
values = list(carray_values)
else:
# factorize on-the-fly
_, values = ctable_ext.factorize(self[col])
values = values.values()
output.append(values)
if not col_is_list:
output = output[0]
return output
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
carray_factor, groupby_cols, agg_ops,
dtype_dict, bool_arr=None):
'''Perform aggregation and place the result in the given ctable.
Args:
ct_agg (ctable): the table to hold the aggregation
nr_groups (int): the number of groups (number of rows in output table)
skip_key (int): index of the output row to remove from results (used for filtering)
carray_factor: the carray for each row in the table a reference to the the unique group index
groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
output_agg_ops (list): list of tuples of the form: (input_col, agg_op)
input_col (string): name of the column to act on
agg_op (int): aggregation operation to perform
bool_arr: a boolean array containing the filter
'''
# this creates the groupby columns
for col in groupby_cols:
result_array = ctable_ext.groupby_value(self[col], carray_factor,
nr_groups, skip_key)
if bool_arr is not None:
result_array = np.delete(result_array, skip_key)
ct_agg.addcol(result_array, name=col)
del result_array
# this creates the aggregation columns
for input_col_name, output_col_name, agg_op in agg_ops:
input_col = self[input_col_name]
output_col_dtype = dtype_dict[output_col_name]
input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
if agg_op == 'sum':
ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'mean':
ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'std':
ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count':
ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count_distinct':
ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'sorted_count_distinct':
ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
else:
raise KeyError('Unknown aggregation operation ' + str(agg_op))
if bool_arr is not None:
output_buffer = np.delete(output_buffer, skip_key)
ct_agg.addcol(output_buffer, name=output_col_name)
del output_buffer
ct_agg.delcol('tmp_col_bquery__')
    def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
        """
        Aggregate the ctable

        groupby_cols: a list of columns to groupby over
        agg_list: the aggregation operations, which can be:
         - a list of column names (output has same name and sum is performed)
           ['m1', 'm2', ...]
         - a list of lists, each list contains input column name and operation
           [['m1', 'sum'], ['m2', 'mean'], ...]
         - a list of lists, each list contains input column name, operation and
           output column name
           [['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]

        Currently supported aggregation operations are:
        - 'sum'
        - 'count'
        - 'count_na'  (NOTE(review): listed here but rejected by
          create_agg_ctable's agg_ops_list -- confirm intended support)
        - 'count_distinct'
        - 'sorted_count_distinct', data should have been
          previously presorted
        - 'mean', arithmetic mean (average)
        - 'std', standard deviation

        bool_arr: optional boolean carray filtering the input rows
        rootdir: the aggregation ctable rootdir
        """
        # build one factor column identifying each unique group combination
        carray_factor, nr_groups, skip_key = \
            self.make_group_index(groupby_cols, bool_arr)
        # check if the bool_arr actually filters; an all-True filter is a no-op
        if bool_arr is not None and np.all(bool_arr):
            bool_arr = None
        if bool_arr is None:
            expectedlen = nr_groups
        else:
            # one group (skip_key) collects the filtered-out rows; it is dropped
            expectedlen = nr_groups - 1
        ct_agg, dtype_dict, agg_ops = \
            self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
        # perform aggregation
        self.aggregate_groups(ct_agg, nr_groups, skip_key,
                              carray_factor, groupby_cols,
                              agg_ops, dtype_dict,
                              bool_arr=bool_arr)
        # clean up everything that was used
        self.clean_tmp_rootdir()
        return ct_agg
# groupby helper functions
def factorize_groupby_cols(self, groupby_cols):
"""
factorizes all columns that are used in the groupby
it will use cache carrays if available
if not yet auto_cache is valid, it will create cache carrays
"""
# first check if the factorized arrays already exist
# unless we need to refresh the cache
factor_list = []
values_list = []
# factorize the groupby columns
for col in groupby_cols:
if self.auto_cache or self.cache_valid(col):
# create factorization cache if needed
if not self.cache_valid(col):
self.cache_factor([col])
col_rootdir = self[col].rootdir
col_factor_rootdir = col_rootdir + '.factor'
col_values_rootdir = col_rootdir + '.values'
col_carray_factor = \
bcolz.carray(rootdir=col_factor_rootdir, mode='r')
col_carray_values = \
bcolz.carray(rootdir=col_values_rootdir, mode='r')
else:
col_carray_factor, values = ctable_ext.factorize(self[col])
col_carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype))
factor_list.append(col_carray_factor)
values_list.append(col_carray_values)
return factor_list, values_list
@staticmethod
def _int_array_hash(input_list):
"""
A function to calculate a hash value of multiple integer values, not used at the moment
Parameters
----------
input_list
Returns
-------
"""
list_len = len(input_list)
arr_len = len(input_list[0])
mult_arr = np.full(arr_len, 1000003, dtype=np.long)
value_arr = np.full(arr_len, 0x345678, dtype=np.long)
for i, current_arr in enumerate(input_list):
index = list_len - i - 1
value_arr ^= current_arr
value_arr *= mult_arr
mult_arr += (82520 + index + index)
value_arr += 97531
result_carray = bcolz.carray(value_arr)
del value_arr
return result_carray
def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
    """
    Create a unique, factorized column out of several individual columns.

    Combines the per-column factor carrays into a single group index and
    factorizes that index. With cache=True (on-disk tables only) the result
    is persisted next to the table as '<sorted cols>.factor' / '.values',
    guarded by a best-effort lock file against concurrent writers.

    Parameters
    ----------
    factor_list : per-column factor carrays, one per groupby column
    groupby_cols : names of the groupby columns
    cache : bool, persist the combined factorization to disk

    Returns
    -------
    (carray_factor, carray_values)
    """
    if not self.rootdir:
        # in-memory scenario
        input_rootdir = None
        col_rootdir = None
        col_factor_rootdir = None
        col_values_rootdir = None
        col_factor_rootdir_tmp = None
        col_values_rootdir_tmp = None
    else:
        # temporary dirs; results are moved into place only on success
        input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
        col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
        col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
    # create combination of groupby columns
    group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
    factor_table = bcolz.ctable(factor_list, names=groupby_cols)
    ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
    ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)
    # now factorize the results
    carray_factor = \
        bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
    carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
    carray_factor.flush()
    carray_values = \
        bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
    carray_values.flush()
    del group_array
    if cache:
        # clean up the temporary file
        rm_file_or_dir(input_rootdir, ignore_errors=True)
    if cache:
        # official end destination
        col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
        col_factor_rootdir = col_rootdir + '.factor'
        col_values_rootdir = col_rootdir + '.values'
        lock_file = col_rootdir + '.lock'
        # best-effort file lock; only works for linux
        if not os.path.exists(lock_file):
            uid = str(uuid.uuid4())
            try:
                with open(lock_file, 'a+') as fn:
                    fn.write(uid + '\n')
                with open(lock_file, 'r') as fn:
                    temp = fn.read().splitlines()
                # we own the lock only if our uid landed on the first line
                if temp[0] == uid:
                    lock = True
                else:
                    lock = False
                del temp
            except:
                # NOTE(review): bare except -- any failure (I/O, permissions,
                # KeyboardInterrupt even) silently means "lock not acquired"
                lock = False
        else:
            lock = False
        if lock:
            # we own the lock: swap the tmp dirs into their official place
            rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
            shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')
            rm_file_or_dir(col_values_rootdir, ignore_errors=False)
            shutil.move(col_values_rootdir_tmp, col_values_rootdir)
            carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # another process has a lock, we will work with our current files and clean up later
            # NOTE(review): this queues the *official* dirs (owned by the other
            # process) for cleanup, while our tmp dirs are never removed --
            # looks like it should queue the *_tmp dirs instead; verify.
            self._dir_clean_list.append(col_factor_rootdir)
            self._dir_clean_list.append(col_values_rootdir)
    return carray_factor, carray_values
def make_group_index(self, groupby_cols, bool_arr):
    '''Create unique groups for groupby loop.

    Builds one factor carray that maps every row to its group index,
    combining multiple groupby columns when needed and applying the
    optional boolean filter.

    Args:
        groupby_cols: list of columns to group over (may be empty)
        bool_arr: optional boolean filter array, or None
    Returns:
        carray: (carray_factor) per-row group index
        int: (nr_groups) the number of resulting groups
        int: (skip_key) group index to skip during aggregation; set to
            nr_groups (i.e. never hit) when nothing is filtered out
    '''
    factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
    # create unique groups for groupby loop
    if len(factor_list) == 0:
        # no columns to groupby over, so directly aggregate the measure
        # columns to 1 total
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
        carray_values = ['Total']
    elif len(factor_list) == 1:
        # single column groupby, the groupby output column
        # here is 1:1 to the values
        carray_factor = factor_list[0]
        carray_values = values_list[0]
    else:
        # multi column groupby
        # first combine the factorized columns to single values
        if self.group_cache_valid(col_list=groupby_cols):
            # there is a group cache that we can use
            col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
            col_factor_rootdir = col_rootdir + '.factor'
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
            col_values_rootdir = col_rootdir + '.values'
            carray_values = bcolz.carray(rootdir=col_values_rootdir)
        else:
            # create a brand new groupby col combination
            carray_factor, carray_values = \
                self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
    nr_groups = len(carray_values)
    skip_key = None
    if bool_arr is not None:
        # make all non relevant combinations -1 (the "+1 ... -1" trick maps
        # filtered-out rows to -1 while keeping kept rows' indices intact)
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.eval(
            '(factor + 1) * bool - 1',
            user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
        # now check how many unique values there are left
        tmp_rootdir = self.create_tmp_rootdir()
        labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
        carray_factor, values = ctable_ext.factorize(carray_factor, labels)
        # values might contain one value too much (-1) (no direct lookup
        # possible because values is a reversed dict)
        filter_check = \
            [key for key, value in values.items() if value == -1]
        if filter_check:
            skip_key = filter_check[0]
        # the new nr of groups depends on the outcome after filtering
        nr_groups = len(values)
    # using nr_groups as a total length might be one off due to the skip_key
    # (skipping a row in aggregation)
    # but that is okay normally
    if skip_key is None:
        # if we shouldn't skip a row, set it at the first row after the total number of groups
        skip_key = nr_groups
    return carray_factor, nr_groups, skip_key
def create_tmp_rootdir(self):
    """
    Make a temporary rootdir and register it for later cleanup.

    Returns the new directory path, or None for in-memory tables
    (which need no on-disk rootdir).
    """
    if not self.rootdir:
        return None
    tmp_rootdir = tempfile.mkdtemp(prefix='bcolz-')
    self._dir_clean_list.append(tmp_rootdir)
    return tmp_rootdir
def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
    '''Create a container for the output table, a dictionary describing its
    columns and a list of tuples describing aggregation operations to perform.

    Args:
        groupby_cols (list): a list of columns to groupby over
        agg_list (list): the aggregation operations (see groupby for more info)
        expectedlen (int): expected length of output table
        rootdir (string): the directory to write the table to

    Returns:
        ctable: A table in the correct format for containing the output of
            the specified aggregation operations.
        dict: (dtype_dict) dictionary describing columns to create
        list: (agg_ops) list of tuples of the form:
            (input_col_name, output_col_name, agg_op)
            input_col_name (string): name of the column to act on
            output_col_name (string): name of the column to output to
            agg_op (string): aggregation operation to perform

    Raises:
        NotImplementedError: for an unknown aggregation operation name
    '''
    dtype_dict = {}
    # the groupby (dimension) columns keep their original dtype
    for col in groupby_cols:
        dtype_dict[col] = self[col].dtype
    agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
    agg_ops = []
    for agg_info in agg_list:
        if not isinstance(agg_info, list):
            # example: ['m1', 'm2', ...]
            # default operation (sum) and default output column name (same as input)
            output_col_name = agg_info
            input_col_name = agg_info
            agg_op = 'sum'
        else:
            input_col_name = agg_info[0]
            agg_op = agg_info[1]
            if len(agg_info) == 2:
                # example: [['m1', 'sum'], ['m2', 'mean'], ...]
                # default output column name
                output_col_name = input_col_name
            else:
                # example: [['m1', 'sum', 'mnew1'], ['m1', 'mean', 'mnew2'], ...]
                # fully specified
                output_col_name = agg_info[2]
        if agg_op not in agg_ops_list:
            raise NotImplementedError(
                'Unknown Aggregation Type: ' + str(agg_op))
        # choose output column dtype based on aggregation operation and
        # input column dtype
        # TODO: check that the aggregation column is numeric
        # NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
        # separate operation
        if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
            output_col_dtype = np.dtype(np.int64)
        elif agg_op in ('mean', 'std'):
            output_col_dtype = np.dtype(np.float64)
        else:
            output_col_dtype = self[input_col_name].dtype
        dtype_dict[output_col_name] = output_col_dtype
        # save output
        agg_ops.append((input_col_name, output_col_name, agg_op))
    # create aggregation table seeded with a placeholder column that
    # aggregate_groups() removes at the end.
    # FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # np.bool_ is the drop-in replacement.
    ct_agg = bcolz.ctable(
        np.zeros(expectedlen, [('tmp_col_bquery__', np.bool_)]),
        expectedlen=expectedlen,
        rootdir=rootdir)
    return ct_agg, dtype_dict, agg_ops
def where_terms(self, term_list, cache=False):
    """
    Create a boolean array where `term_list` is true.

    A terms list has a [(col, operator, value), ..] construction.
    Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    :param term_list: list of (column, operator, value) tuples
    :param cache: back the result by a temporary rootdir (cleaned up at the
        end of the groupby)
    :return: a bcolz boolean carray, one entry per row
    :raise ValueError: on malformed term lists / value containers
    :raise KeyError: on unknown columns or operators
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")
    # map textual operators to the numeric ids ctable_ext expects
    op_ids = {'==': 1, 'eq': 1, '!=': 2, 'neq': 2, 'in': 3,
              'nin': 4, 'not in': 4, '>': 5, '>=': 6, '<': 7, '<=': 8}
    col_list = []
    op_list = []
    value_list = []
    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]
        # check values
        if filter_col not in self.cols:
            # FIX: was unicode(...), which is undefined on Python 3
            raise KeyError(str(filter_col) + ' not in table')
        if filter_operator not in op_ids:
            raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
        op_id = op_ids[filter_operator]
        if op_id in [3, 4]:
            # (not-)in filters need a non-empty container of values
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")
            # optimize lists of 1 value into plain (in)equality checks
            if len(filter_value) == 1:
                op_id = 1 if op_id == 3 else 2
                filter_value = filter_value[0]
            else:
                filter_value = set(filter_value)
        # prepare input for filter creation
        col_list.append(filter_col)
        op_list.append(op_id)
        value_list.append(filter_value)
    # rootdir
    if cache:
        # nb: this directory is not destroyed until the end of the groupby
        rootdir = self.create_tmp_rootdir()
    else:
        rootdir = None
    # create boolean array and fill it
    # FIX: np.bool was removed in NumPy 1.24; np.bool_ is equivalent
    boolarr = bcolz.carray(np.ones(0, dtype=np.bool_), expectedlen=self.len, rootdir=rootdir, mode='w')
    ctable_iter = self[col_list].iter(out_flavor='tuple')
    ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)
    return boolarr
def where_terms_factorization_check(self, term_list):
    """
    Check whether a filter can possibly match, using each column's cached
    factorization values ('<col>.values' carrays on disk).

    A terms list has a [(col, operator, value), ..] construction.
    Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    Returns True when every inspected term can match at least one known
    column value (or when a column has no factorization cache, in which
    case we cannot rule the filter out); False as soon as one term is
    proven impossible.

    :param term_list: list of (column, operator, value) tuples
    :return: bool
    :raise ValueError: on malformed value containers
    :raise KeyError: on unknown columns or operators
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")
    valid = True
    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]
        # check values
        if filter_col not in self.cols:
            # FIX: was unicode(...), which is undefined on Python 3
            raise KeyError(str(filter_col) + ' not in table')
        col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
        if not os.path.exists(col_values_rootdir):
            # no factorization available: cannot prove anything, stop checking
            break
        col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        col_values = set(col_carray)
        if filter_operator in ['in', 'not in', 'nin']:
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")
            # optimize lists of 1 value
            if len(filter_value) == 1:
                filter_value = filter_value[0]
                if filter_operator == 'in':
                    filter_operator = '=='
                else:
                    filter_operator = '!='
            else:
                filter_value = set(filter_value)
        # FIX: these checks previously used `any(val for val in ... if cond)`,
        # which returns False when the only matching value is falsy (e.g. 0);
        # the condition itself must be the any() argument.
        if filter_operator in ['==', 'eq']:
            valid = filter_value in col_values
        elif filter_operator in ['!=', 'neq']:
            valid = any(val != filter_value for val in col_values)
        elif filter_operator in ['in']:
            valid = any(val in col_values for val in filter_value)
        elif filter_operator in ['nin', 'not in']:
            valid = any(val not in filter_value for val in col_values)
        elif filter_operator in ['>']:
            valid = any(val > filter_value for val in col_values)
        elif filter_operator in ['>=']:
            valid = any(val >= filter_value for val in col_values)
        elif filter_operator in ['<']:
            valid = any(val < filter_value for val in col_values)
        elif filter_operator in ['<=']:
            # FIX: this branch previously tested `val >= filter_value`,
            # so '<=' filters were validated with the '>=' comparison
            valid = any(val <= filter_value for val in col_values)
        else:
            raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
        # if one of the filters is blocking, we can stop
        if not valid:
            break
    return valid
def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
                            _max_len_subgroup=1000):
    """
    Expand the boolean filter `bool_arr` over whole ordered subgroups of
    `basket_col` (delegates to ctable_ext.is_in_ordered_subgroups).

    Parameters
    ----------
    basket_col : required; column whose ordered subgroups define the expansion
    bool_arr : boolean filter array, or None (returns None unchanged)
    _max_len_subgroup : upper bound on subgroup length passed through

    Returns
    -------
    The expanded boolean array, or None when no filter was given.
    """
    assert basket_col is not None
    if bool_arr is None:
        return None
    rootdir = None
    if self.auto_cache and bool_arr.rootdir is not None:
        # disk-backed input: put the result in a cleanable tmp dir as well
        rootdir = self.create_tmp_rootdir()
    return ctable_ext.is_in_ordered_subgroups(
        self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
        _max_len_subgroup=_max_len_subgroup)
|
visualfabriq/bquery | bquery/ctable.py | ctable.create_agg_ctable | python | def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
'''Create a container for the output table, a dictionary describing it's
columns and a list of tuples describing aggregation
operations to perform.
Args:
groupby_cols (list): a list of columns to groupby over
agg_list (list): the aggregation operations (see groupby for more info)
expectedlen (int): expected length of output table
rootdir (string): the directory to write the table to
Returns:
ctable: A table in the correct format for containing the output of
the specified aggregation operations.
dict: (dtype_dict) dictionary describing columns to create
list: (agg_ops) list of tuples of the form:
(input_col_name, output_col_name, agg_op)
input_col_name (string): name of the column to act on
output_col_name (string): name of the column to output to
agg_op (int): aggregation operation to perform
'''
dtype_dict = {}
# include all the groupby columns
for col in groupby_cols:
dtype_dict[col] = self[col].dtype
agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
agg_ops = []
for agg_info in agg_list:
if not isinstance(agg_info, list):
# example: ['m1', 'm2', ...]
# default operation (sum) and default output column name (same is input)
output_col_name = agg_info
input_col_name = agg_info
agg_op = 'sum'
else:
input_col_name = agg_info[0]
agg_op = agg_info[1]
if len(agg_info) == 2:
# example: [['m1', 'sum'], ['m2', 'mean], ...]
# default output column name
output_col_name = input_col_name
else:
# example: [['m1', 'sum', 'mnew1'], ['m1, 'mean','mnew2'], ...]
# fully specified
output_col_name = agg_info[2]
if agg_op not in agg_ops_list:
raise NotImplementedError(
'Unknown Aggregation Type: ' + str(agg_op))
# choose output column dtype based on aggregation operation and
# input column dtype
# TODO: check if the aggregation columns is numeric
# NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
# separate operation
if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
output_col_dtype = np.dtype(np.int64)
elif agg_op in ('mean', 'std'):
output_col_dtype = np.dtype(np.float64)
else:
output_col_dtype = self[input_col_name].dtype
dtype_dict[output_col_name] = output_col_dtype
# save output
agg_ops.append((input_col_name, output_col_name, agg_op))
# create aggregation table
ct_agg = bcolz.ctable(
np.zeros(expectedlen, [('tmp_col_bquery__', np.bool)]),
expectedlen=expectedlen,
rootdir=rootdir)
return ct_agg, dtype_dict, agg_ops | Create a container for the output table, a dictionary describing it's
columns and a list of tuples describing aggregation
operations to perform.
Args:
groupby_cols (list): a list of columns to groupby over
agg_list (list): the aggregation operations (see groupby for more info)
expectedlen (int): expected length of output table
rootdir (string): the directory to write the table to
Returns:
ctable: A table in the correct format for containing the output of
the specified aggregation operations.
dict: (dtype_dict) dictionary describing columns to create
list: (agg_ops) list of tuples of the form:
(input_col_name, output_col_name, agg_op)
input_col_name (string): name of the column to act on
output_col_name (string): name of the column to output to
agg_op (int): aggregation operation to perform | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L576-L654 | null | class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
    """
    bquery ctable: a bcolz.ctable with factorization caching and groupby.

    auto_cache is enabled only for on-disk tables: explicitly via
    auto_cache=True, or implicitly when not opened read-only.
    In-memory tables never auto-cache.
    """
    super(ctable, self).__init__(*args, **kwargs)
    # check autocaching
    if self.rootdir and kwargs.get('auto_cache') is True:
        # explicit auto_cache
        self.auto_cache = True
    elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
        # implicit auto_cache
        self.auto_cache = True
    else:
        self.auto_cache = False
    # FIX: removed leftover debug override `self.auto_cache = True  # debug`
    # that forced caching on for every table (including in-memory and
    # read-only ones), defeating the logic above.
    # temporary rootdirs created during operations, removed by clean_tmp_rootdir()
    self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
    """Return the canonical cache-file base name for a column combination:
    the column names sorted alphabetically and joined with underscores."""
    return '_'.join(sorted(col_list))
def cache_valid(self, col):
    """
    Return True when a usable on-disk factorization exists for `col`,
    i.e. both the source column and its '.values' companion are present.

    NOTE(review): only existence is checked -- no freshness/age comparison
    against the source column is performed.
    """
    if not self.rootdir:
        return False
    src_attrs = self[col].rootdir + '/__attrs__'
    values_attrs = self[col].rootdir + '.values/__attrs__'
    return os.path.exists(src_attrs) and os.path.exists(values_attrs)
def group_cache_valid(self, col_list):
    """
    Return True when a combined (multi-column) factorization cache exists
    for this column combination and every source column is still on disk.

    NOTE(review): only existence is checked, not freshness.
    """
    if not self.rootdir:
        return False
    group_attrs = os.path.join(
        self.rootdir, self.create_group_base_name(col_list)) + '.values/__attrs__'
    if not os.path.exists(group_attrs):
        return False
    # every source column must still exist on disk
    return all(os.path.exists(self[col].rootdir + '/__attrs__') for col in col_list)
def cache_factor(self, col_list, refresh=False):
    """
    Create on-disk factorization caches for the given columns.

    For each column a '<col>.factor' carray (label index, as long as the
    original carray and thus the whole table) and a '<col>.values' carray
    (only as long as the number of unique values) are written next to the
    column's rootdir.

    Existing todos here are: these should be hidden helper carrays,
    as in: not normal columns that you would normally see as a user.

    :param col_list: a column name or list of column names to factorize
    :param refresh: if True, wipe ALL existing .factor/.values caches of
        the table first (not just those of col_list)
    :return: None
    :raises TypeError: for in-memory tables (no rootdir to cache into)
    """
    if not self.rootdir:
        raise TypeError('Only out-of-core ctables can have '
                        'factorization caching at the moment')
    if not isinstance(col_list, list):
        col_list = [col_list]
    if refresh:
        # NB: removes every cache dir of the whole table, not only col_list
        kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
        for kill_dir in kill_list:
            rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
    for col in col_list:
        # create cache if needed
        if refresh or not self.cache_valid(col):
            # todo: also add locking mechanism here
            # write into temp dirs first, then move into place, so readers
            # never observe a half-written cache
            col_rootdir = self[col].rootdir
            col_factor_rootdir = col_rootdir + '.factor'
            col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
            col_values_rootdir = col_rootdir + '.values'
            col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
            # create factor
            carray_factor = \
                bcolz.carray([], dtype='int64', expectedlen=self.size,
                             rootdir=col_factor_rootdir_tmp, mode='w')
            _, values = \
                ctable_ext.factorize(self[col], labels=carray_factor)
            carray_factor.flush()
            rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
            shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
            # create values
            carray_values = \
                bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
                             rootdir=col_values_rootdir_tmp, mode='w')
            carray_values.flush()
            rm_file_or_dir(col_values_rootdir, ignore_errors=True)
            shutil.move(col_values_rootdir_tmp, col_values_rootdir)
def unique(self, col_or_col_list):
    """
    Return the unique values of a column, or a list of unique-value
    collections when a list of columns is given.

    :param col_or_col_list: a column name or a list of column names
    :return: unique values (single column) or a list thereof (column list)
    """
    single = not isinstance(col_or_col_list, list)
    col_list = [col_or_col_list] if single else col_or_col_list
    output = []
    for col in col_list:
        if self.auto_cache or self.cache_valid(col):
            # make sure a disk-based factorization exists, then read it
            if not self.cache_valid(col):
                self.cache_factor([col])
            values_carray = bcolz.carray(
                rootdir=self[col].rootdir + '.values', mode='r')
            output.append(list(values_carray))
        else:
            # factorize on-the-fly
            _, value_dict = ctable_ext.factorize(self[col])
            output.append(value_dict.values())
    return output[0] if single else output
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
                     carray_factor, groupby_cols, agg_ops,
                     dtype_dict, bool_arr=None):
    '''Perform aggregation and place the result in the given ctable.

    Args:
        ct_agg (ctable): the table to hold the aggregation
        nr_groups (int): the number of groups (number of rows in output table)
        skip_key (int): index of the output row to remove from results (used for filtering)
        carray_factor: for each row a reference to its unique group index
        groupby_cols: the 'dimension' columns the groupby runs over
        agg_ops (list): tuples (input_col_name, output_col_name, agg_op)
        dtype_dict (dict): output column name -> dtype
        bool_arr: a boolean array containing the filter, or None
    '''
    # write the groupby (dimension) columns
    for col in groupby_cols:
        result_array = ctable_ext.groupby_value(self[col], carray_factor,
                                                nr_groups, skip_key)
        if bool_arr is not None:
            # drop the row that collects the filtered-out records
            result_array = np.delete(result_array, skip_key)
        ct_agg.addcol(result_array, name=col)
        del result_array
    # all aggregation kernels share one signature; dispatch via a table
    op_dispatch = {
        'sum': ctable_ext.aggregate_sum,
        'mean': ctable_ext.aggregate_mean,
        'std': ctable_ext.aggregate_std,
        'count': ctable_ext.aggregate_count,
        'count_distinct': ctable_ext.aggregate_count_distinct,
        'sorted_count_distinct': ctable_ext.aggregate_sorted_count_distinct,
    }
    # write the measure columns
    for input_col_name, output_col_name, agg_op in agg_ops:
        if agg_op not in op_dispatch:
            raise KeyError('Unknown aggregation operation ' + str(agg_op))
        input_col = self[input_col_name]
        input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
        output_buffer = np.zeros(nr_groups, dtype=dtype_dict[output_col_name])
        op_dispatch[agg_op](input_col, carray_factor, nr_groups,
                            skip_key, input_buffer, output_buffer)
        if bool_arr is not None:
            output_buffer = np.delete(output_buffer, skip_key)
        ct_agg.addcol(output_buffer, name=output_col_name)
        del output_buffer
    # remove the placeholder column created by create_agg_ctable
    ct_agg.delcol('tmp_col_bquery__')
def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
    """
    Aggregate the ctable.

    groupby_cols: a list of columns to groupby over
    agg_list: the aggregation operations, which can be:
     - a list of column names (output has same name and sum is performed)
       ['m1', 'm2', ...]
     - a list of lists, each list contains input column name and operation
       [['m1', 'sum'], ['m2', 'mean'], ...]
     - a list of lists, each list contains input column name, operation and
       output column name
       [['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]

    Currently supported aggregation operations are:
     - 'sum'
     - 'count'
     - 'count_distinct'
     - 'sorted_count_distinct', data should have been
       previously presorted
     - 'mean', arithmetic mean (average)
     - 'std', standard deviation
    (NOTE(review): 'count_na' was listed here previously, but
    create_agg_ctable rejects it, so it is not actually supported.)

    bool_arr: optional boolean array filtering the rows before aggregation
    rootdir: the aggregation ctable rootdir

    Returns the aggregated ctable.
    """
    # build the per-row group index (the filter, if any, is folded in here)
    carray_factor, nr_groups, skip_key = \
        self.make_group_index(groupby_cols, bool_arr)
    # check if the bool_arr actually filters; an all-True filter is a no-op
    if bool_arr is not None and np.all(bool_arr):
        bool_arr = None
    if bool_arr is None:
        expectedlen = nr_groups
    else:
        # one group (the filtered-out rows) is dropped from the output
        expectedlen = nr_groups - 1
    ct_agg, dtype_dict, agg_ops = \
        self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
    # perform aggregation
    self.aggregate_groups(ct_agg, nr_groups, skip_key,
                          carray_factor, groupby_cols,
                          agg_ops, dtype_dict,
                          bool_arr=bool_arr)
    # clean up everything that was used
    self.clean_tmp_rootdir()
    return ct_agg
# groupby helper functions
def factorize_groupby_cols(self, groupby_cols):
    """
    Factorize every column used in the groupby.

    Disk-backed factor/value carrays are reused when a valid cache exists;
    with auto_cache enabled a missing cache is created first. Otherwise the
    column is factorized on the fly in memory.

    Returns a tuple (factor_list, values_list) of carrays, one entry per
    groupby column.
    """
    factor_list = []
    values_list = []
    for col in groupby_cols:
        if self.auto_cache or self.cache_valid(col):
            # make sure the on-disk factorization exists before reading it
            if not self.cache_valid(col):
                self.cache_factor([col])
            base_dir = self[col].rootdir
            factor_carray = bcolz.carray(rootdir=base_dir + '.factor', mode='r')
            values_carray = bcolz.carray(rootdir=base_dir + '.values', mode='r')
        else:
            # in-memory fallback: factorize on the fly
            factor_carray, value_dict = ctable_ext.factorize(self[col])
            values_carray = bcolz.carray(
                np.fromiter(value_dict.values(), dtype=self[col].dtype))
        factor_list.append(factor_carray)
        values_list.append(values_carray)
    return factor_list, values_list
@staticmethod
def _int_array_hash(input_list):
    """
    Combine several equal-length integer arrays into one element-wise hash
    array (tuple-hash style mixing). Not used at the moment.

    Parameters
    ----------
    input_list : list of 1-d integer arrays, all of the same length

    Returns
    -------
    bcolz.carray
        One combined hash value per row.
    """
    list_len = len(input_list)
    arr_len = len(input_list[0])
    # FIX: np.long was deprecated in NumPy 1.20 and removed in 1.24;
    # np.int64 gives the same 64-bit behavior this code relied on.
    mult_arr = np.full(arr_len, 1000003, dtype=np.int64)
    value_arr = np.full(arr_len, 0x345678, dtype=np.int64)
    for i, current_arr in enumerate(input_list):
        index = list_len - i - 1
        value_arr ^= current_arr
        value_arr *= mult_arr
        # evolve the multiplier per element, CPython tuple-hash style
        mult_arr += (82520 + index + index)
    value_arr += 97531
    result_carray = bcolz.carray(value_arr)
    del value_arr
    return result_carray
def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
    """
    Create a unique, factorized column out of several individual columns.

    Combines the per-column factor carrays into a single group index and
    factorizes that index. With cache=True (on-disk tables only) the result
    is persisted next to the table as '<sorted cols>.factor' / '.values',
    guarded by a best-effort lock file against concurrent writers.

    Parameters
    ----------
    factor_list : per-column factor carrays, one per groupby column
    groupby_cols : names of the groupby columns
    cache : bool, persist the combined factorization to disk

    Returns
    -------
    (carray_factor, carray_values)
    """
    if not self.rootdir:
        # in-memory scenario
        input_rootdir = None
        col_rootdir = None
        col_factor_rootdir = None
        col_values_rootdir = None
        col_factor_rootdir_tmp = None
        col_values_rootdir_tmp = None
    else:
        # temporary dirs; results are moved into place only on success
        input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
        col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
        col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
    # create combination of groupby columns
    group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
    factor_table = bcolz.ctable(factor_list, names=groupby_cols)
    ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
    ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)
    # now factorize the results
    carray_factor = \
        bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
    carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
    carray_factor.flush()
    carray_values = \
        bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
    carray_values.flush()
    del group_array
    if cache:
        # clean up the temporary file
        rm_file_or_dir(input_rootdir, ignore_errors=True)
    if cache:
        # official end destination
        col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
        col_factor_rootdir = col_rootdir + '.factor'
        col_values_rootdir = col_rootdir + '.values'
        lock_file = col_rootdir + '.lock'
        # best-effort file lock; only works for linux
        if not os.path.exists(lock_file):
            uid = str(uuid.uuid4())
            try:
                with open(lock_file, 'a+') as fn:
                    fn.write(uid + '\n')
                with open(lock_file, 'r') as fn:
                    temp = fn.read().splitlines()
                # we own the lock only if our uid landed on the first line
                if temp[0] == uid:
                    lock = True
                else:
                    lock = False
                del temp
            except:
                # NOTE(review): bare except -- any failure (I/O, permissions,
                # KeyboardInterrupt even) silently means "lock not acquired"
                lock = False
        else:
            lock = False
        if lock:
            # we own the lock: swap the tmp dirs into their official place
            rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
            shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')
            rm_file_or_dir(col_values_rootdir, ignore_errors=False)
            shutil.move(col_values_rootdir_tmp, col_values_rootdir)
            carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # another process has a lock, we will work with our current files and clean up later
            # NOTE(review): this queues the *official* dirs (owned by the other
            # process) for cleanup, while our tmp dirs are never removed --
            # looks like it should queue the *_tmp dirs instead; verify.
            self._dir_clean_list.append(col_factor_rootdir)
            self._dir_clean_list.append(col_values_rootdir)
    return carray_factor, carray_values
def make_group_index(self, groupby_cols, bool_arr):
    '''Create unique groups for groupby loop.

    Builds one factor carray that maps every row to its group index,
    combining multiple groupby columns when needed and applying the
    optional boolean filter.

    Args:
        groupby_cols: list of columns to group over (may be empty)
        bool_arr: optional boolean filter array, or None
    Returns:
        carray: (carray_factor) per-row group index
        int: (nr_groups) the number of resulting groups
        int: (skip_key) group index to skip during aggregation; set to
            nr_groups (i.e. never hit) when nothing is filtered out
    '''
    factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
    # create unique groups for groupby loop
    if len(factor_list) == 0:
        # no columns to groupby over, so directly aggregate the measure
        # columns to 1 total
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
        carray_values = ['Total']
    elif len(factor_list) == 1:
        # single column groupby, the groupby output column
        # here is 1:1 to the values
        carray_factor = factor_list[0]
        carray_values = values_list[0]
    else:
        # multi column groupby
        # first combine the factorized columns to single values
        if self.group_cache_valid(col_list=groupby_cols):
            # there is a group cache that we can use
            col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
            col_factor_rootdir = col_rootdir + '.factor'
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
            col_values_rootdir = col_rootdir + '.values'
            carray_values = bcolz.carray(rootdir=col_values_rootdir)
        else:
            # create a brand new groupby col combination
            carray_factor, carray_values = \
                self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
    nr_groups = len(carray_values)
    skip_key = None
    if bool_arr is not None:
        # make all non relevant combinations -1 (the "+1 ... -1" trick maps
        # filtered-out rows to -1 while keeping kept rows' indices intact)
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.eval(
            '(factor + 1) * bool - 1',
            user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
        # now check how many unique values there are left
        tmp_rootdir = self.create_tmp_rootdir()
        labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
        carray_factor, values = ctable_ext.factorize(carray_factor, labels)
        # values might contain one value too much (-1) (no direct lookup
        # possible because values is a reversed dict)
        filter_check = \
            [key for key, value in values.items() if value == -1]
        if filter_check:
            skip_key = filter_check[0]
        # the new nr of groups depends on the outcome after filtering
        nr_groups = len(values)
    # using nr_groups as a total length might be one off due to the skip_key
    # (skipping a row in aggregation)
    # but that is okay normally
    if skip_key is None:
        # if we shouldn't skip a row, set it at the first row after the total number of groups
        skip_key = nr_groups
    return carray_factor, nr_groups, skip_key
def create_tmp_rootdir(self):
    """
    Make a temporary rootdir and register it for later cleanup.

    Returns the new directory path, or None for in-memory tables
    (which need no on-disk rootdir).
    """
    if not self.rootdir:
        return None
    tmp_rootdir = tempfile.mkdtemp(prefix='bcolz-')
    self._dir_clean_list.append(tmp_rootdir)
    return tmp_rootdir
def clean_tmp_rootdir(self):
    """
    Remove every temporary rootdir registered during this operation,
    unregistering each one as it is deleted (oldest first).
    """
    while self._dir_clean_list:
        rm_file_or_dir(self._dir_clean_list.pop(0))
    def where_terms(self, term_list, cache=False):
        """
        Create a boolean array where `term_list` is true.
        A terms list has a [(col, operator, value), ..] construction.
        Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

        Each term is translated to a numeric op_id and evaluated row-by-row
        by ``ctable_ext.apply_where_terms``; the result is a boolean carray
        with one entry per table row.  With ``cache=True`` the result is
        written to a temporary rootdir (cleaned up at the end of a groupby).

        :param term_list: list of (column, operator, value) tuples
        :param outcols:
        :param limit:
        :param skip:
        :return: :raise ValueError:
        """
        if type(term_list) not in [list, set, tuple]:
            raise ValueError("Only term lists are supported")

        col_list = []
        op_list = []
        value_list = []

        for term in term_list:
            # get terms
            filter_col = term[0]
            filter_operator = term[1].lower().strip(' ')
            filter_value = term[2]

            # check values
            if filter_col not in self.cols:
                raise KeyError(unicode(filter_col) + ' not in table')

            # map the textual operator onto the numeric op_id understood by
            # ctable_ext.apply_where_terms (1: ==, 2: !=, 3: in, 4: not in,
            # 5: >, 6: >=, 7: <, 8: <=) — keep this table in sync with the
            # Cython side
            if filter_operator in ['==', 'eq']:
                op_id = 1
            elif filter_operator in ['!=', 'neq']:
                op_id = 2
            elif filter_operator in ['in']:
                op_id = 3
            elif filter_operator in ['nin', 'not in']:
                op_id = 4
            elif filter_operator in ['>']:
                op_id = 5
            elif filter_operator in ['>=']:
                op_id = 6
            elif filter_operator in ['<']:
                op_id = 7
            elif filter_operator in ['<=']:
                op_id = 8
            else:
                raise KeyError(unicode(filter_operator) + ' is not an accepted operator for filtering')

            if op_id in [3, 4]:
                if type(filter_value) not in [list, set, tuple]:
                    raise ValueError("In selections need lists, sets or tuples")

                if len(filter_value) < 1:
                    raise ValueError("A value list needs to have values")

                # optimize lists of 1 value: degrade 'in'/'not in' to the
                # cheaper equality/inequality check on the single value
                if len(filter_value) == 1:
                    if op_id == 3:
                        op_id = 1
                    else:
                        op_id = 2
                    filter_value = filter_value[0]
                else:
                    # set gives O(1) membership tests during row evaluation
                    filter_value = set(filter_value)

            # prepare input for filter creation
            col_list.append(filter_col)
            op_list.append(op_id)
            value_list.append(filter_value)

        # rootdir
        if cache:
            # nb: this directory is not destroyed until the end of the groupby
            rootdir = self.create_tmp_rootdir()
        else:
            rootdir = None

        # create boolean array and fill it
        boolarr = bcolz.carray(np.ones(0, dtype=np.bool), expectedlen=self.len, rootdir=rootdir, mode='w')
        # only materialize the filtered columns; tuples per row are consumed
        # by the Cython routine which appends one bool per row to boolarr
        ctable_iter = self[col_list].iter(out_flavor='tuple')
        ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)

        return boolarr
def where_terms_factorization_check(self, term_list):
"""
check for where terms if they are applicable
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError:
"""
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
valid = True
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
if not os.path.exists(col_values_rootdir):
# no factorization available
break
col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
col_values = set(col_carray)
if filter_operator in ['in', 'not in', 'nin']:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
filter_value = filter_value[0]
if filter_operator == 'in':
filter_operator = '=='
else:
filter_operator = '!='
else:
filter_value = set(filter_value)
if filter_operator in ['==', 'eq']:
valid = filter_value in col_values
elif filter_operator in ['!=', 'neq']:
valid = any(val for val in col_values if val != filter_value)
elif filter_operator in ['in']:
valid = any(val for val in filter_value if val in col_values)
elif filter_operator in ['nin', 'not in']:
valid = any(val for val in col_values if val not in filter_value)
elif filter_operator in ['>']:
valid = any(val for val in col_values if val > filter_value)
elif filter_operator in ['>=']:
valid = any(val for val in col_values if val >= filter_value)
elif filter_operator in ['<']:
valid = any(val for val in col_values if val < filter_value)
elif filter_operator in ['<=']:
valid = any(val for val in col_values if val >= filter_value)
else:
raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
# if one of the filters is blocking, we can stop
if not valid:
break
return valid
def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
_max_len_subgroup=1000):
"""
Expands the filter using a specified column
Parameters
----------
basket_col
bool_arr
_max_len_subgroup
Returns
-------
"""
assert basket_col is not None
if bool_arr is None:
return None
if self.auto_cache and bool_arr.rootdir is not None:
rootdir = self.create_tmp_rootdir()
else:
rootdir = None
return \
ctable_ext.is_in_ordered_subgroups(
self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
_max_len_subgroup=_max_len_subgroup)
|
visualfabriq/bquery | bquery/ctable.py | ctable.where_terms | python | def where_terms(self, term_list, cache=False):
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
col_list = []
op_list = []
value_list = []
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
if filter_operator in ['==', 'eq']:
op_id = 1
elif filter_operator in ['!=', 'neq']:
op_id = 2
elif filter_operator in ['in']:
op_id = 3
elif filter_operator in ['nin', 'not in']:
op_id = 4
elif filter_operator in ['>']:
op_id = 5
elif filter_operator in ['>=']:
op_id = 6
elif filter_operator in ['<']:
op_id = 7
elif filter_operator in ['<=']:
op_id = 8
else:
raise KeyError(unicode(filter_operator) + ' is not an accepted operator for filtering')
if op_id in [3, 4]:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
if op_id == 3:
op_id = 1
else:
op_id = 2
filter_value = filter_value[0]
else:
filter_value = set(filter_value)
# prepare input for filter creation
col_list.append(filter_col)
op_list.append(op_id)
value_list.append(filter_value)
# rootdir
if cache:
# nb: this directory is not destroyed until the end of the groupby
rootdir = self.create_tmp_rootdir()
else:
rootdir = None
# create boolean array and fill it
boolarr = bcolz.carray(np.ones(0, dtype=np.bool), expectedlen=self.len, rootdir=rootdir, mode='w')
ctable_iter = self[col_list].iter(out_flavor='tuple')
ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)
return boolarr | Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError: | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L656-L740 | null | class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
super(ctable, self).__init__(*args, **kwargs)
# check autocaching
if self.rootdir and kwargs.get('auto_cache') is True:
# explicit auto_cache
self.auto_cache = True
elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
# implicit auto_cache
self.auto_cache = True
else:
self.auto_cache = False
self.auto_cache = True # debug
self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
group_name = '_'.join(sorted(col_list))
return group_name
def cache_valid(self, col):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_org_file_check = self[col].rootdir + '/__attrs__'
col_values_file_check = self[col].rootdir + '.values/__attrs__'
cache_valid = os.path.exists(col_org_file_check) and os.path.exists(col_values_file_check)
return cache_valid
def group_cache_valid(self, col_list):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_values_file_check = os.path.join(self.rootdir, self.create_group_base_name(col_list)) + \
'.values/__attrs__'
exists_group_index = os.path.exists(col_values_file_check)
missing_col_check = [1 for col in col_list if not os.path.exists(self[col].rootdir + '/__attrs__')]
cache_valid = (exists_group_index and not missing_col_check)
return cache_valid
def cache_factor(self, col_list, refresh=False):
"""
Existing todos here are: these should be hidden helper carrays
As in: not normal columns that you would normally see as a user
The factor (label index) carray is as long as the original carray
(and the rest of the table therefore)
But the (unique) values carray is not as long (as long as the number
of unique values)
:param col_list:
:param refresh:
:return:
"""
if not self.rootdir:
raise TypeError('Only out-of-core ctables can have '
'factorization caching at the moment')
if not isinstance(col_list, list):
col_list = [col_list]
if refresh:
kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
for kill_dir in kill_list:
rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
for col in col_list:
# create cache if needed
if refresh or not self.cache_valid(col):
# todo: also add locking mechanism here
# create directories
col_rootdir = self[col].rootdir
col_factor_rootdir = col_rootdir + '.factor'
col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
col_values_rootdir = col_rootdir + '.values'
col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
# create factor
carray_factor = \
bcolz.carray([], dtype='int64', expectedlen=self.size,
rootdir=col_factor_rootdir_tmp, mode='w')
_, values = \
ctable_ext.factorize(self[col], labels=carray_factor)
carray_factor.flush()
rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
# create values
carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
rootdir=col_values_rootdir_tmp, mode='w')
carray_values.flush()
rm_file_or_dir(col_values_rootdir, ignore_errors=True)
shutil.move(col_values_rootdir_tmp, col_values_rootdir)
def unique(self, col_or_col_list):
"""
Return a list of unique values of a column or a list of lists of column list
:param col_or_col_list: a column or a list of columns
:return:
"""
if isinstance(col_or_col_list, list):
col_is_list = True
col_list = col_or_col_list
else:
col_is_list = False
col_list = [col_or_col_list]
output = []
for col in col_list:
if self.auto_cache or self.cache_valid(col):
# create factorization cache
if not self.cache_valid(col):
self.cache_factor([col])
# retrieve values from existing disk-based factorization
col_values_rootdir = self[col].rootdir + '.values'
carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
values = list(carray_values)
else:
# factorize on-the-fly
_, values = ctable_ext.factorize(self[col])
values = values.values()
output.append(values)
if not col_is_list:
output = output[0]
return output
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
carray_factor, groupby_cols, agg_ops,
dtype_dict, bool_arr=None):
'''Perform aggregation and place the result in the given ctable.
Args:
ct_agg (ctable): the table to hold the aggregation
nr_groups (int): the number of groups (number of rows in output table)
skip_key (int): index of the output row to remove from results (used for filtering)
carray_factor: the carray for each row in the table a reference to the the unique group index
groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
output_agg_ops (list): list of tuples of the form: (input_col, agg_op)
input_col (string): name of the column to act on
agg_op (int): aggregation operation to perform
bool_arr: a boolean array containing the filter
'''
# this creates the groupby columns
for col in groupby_cols:
result_array = ctable_ext.groupby_value(self[col], carray_factor,
nr_groups, skip_key)
if bool_arr is not None:
result_array = np.delete(result_array, skip_key)
ct_agg.addcol(result_array, name=col)
del result_array
# this creates the aggregation columns
for input_col_name, output_col_name, agg_op in agg_ops:
input_col = self[input_col_name]
output_col_dtype = dtype_dict[output_col_name]
input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
if agg_op == 'sum':
ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'mean':
ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'std':
ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count':
ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count_distinct':
ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'sorted_count_distinct':
ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
else:
raise KeyError('Unknown aggregation operation ' + str(agg_op))
if bool_arr is not None:
output_buffer = np.delete(output_buffer, skip_key)
ct_agg.addcol(output_buffer, name=output_col_name)
del output_buffer
ct_agg.delcol('tmp_col_bquery__')
def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
"""
Aggregate the ctable
groupby_cols: a list of columns to groupby over
agg_list: the aggregation operations, which can be:
- a list of column names (output has same name and sum is performed)
['m1', 'm2', ...]
- a list of lists, each list contains input column name and operation
[['m1', 'sum'], ['m2', 'mean'], ...]
- a list of lists, each list contains input column name, operation and
output column name
[['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]
Currently supported aggregation operations are:
- 'sum'
- 'count'
- 'count_na'
- 'count_distinct'
- 'sorted_count_distinct', data should have been
previously presorted
- 'mean', arithmetic mean (average)
- 'std', standard deviation
boolarr: to be added (filtering the groupby factorization input)
rootdir: the aggregation ctable rootdir
"""
carray_factor, nr_groups, skip_key = \
self.make_group_index(groupby_cols, bool_arr)
# check if the bool_arr actually filters
if bool_arr is not None and np.all(bool_arr):
bool_arr = None
if bool_arr is None:
expectedlen = nr_groups
else:
expectedlen = nr_groups - 1
ct_agg, dtype_dict, agg_ops = \
self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
# perform aggregation
self.aggregate_groups(ct_agg, nr_groups, skip_key,
carray_factor, groupby_cols,
agg_ops, dtype_dict,
bool_arr=bool_arr)
# clean up everything that was used
self.clean_tmp_rootdir()
return ct_agg
# groupby helper functions
def factorize_groupby_cols(self, groupby_cols):
"""
factorizes all columns that are used in the groupby
it will use cache carrays if available
if not yet auto_cache is valid, it will create cache carrays
"""
# first check if the factorized arrays already exist
# unless we need to refresh the cache
factor_list = []
values_list = []
# factorize the groupby columns
for col in groupby_cols:
if self.auto_cache or self.cache_valid(col):
# create factorization cache if needed
if not self.cache_valid(col):
self.cache_factor([col])
col_rootdir = self[col].rootdir
col_factor_rootdir = col_rootdir + '.factor'
col_values_rootdir = col_rootdir + '.values'
col_carray_factor = \
bcolz.carray(rootdir=col_factor_rootdir, mode='r')
col_carray_values = \
bcolz.carray(rootdir=col_values_rootdir, mode='r')
else:
col_carray_factor, values = ctable_ext.factorize(self[col])
col_carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype))
factor_list.append(col_carray_factor)
values_list.append(col_carray_values)
return factor_list, values_list
@staticmethod
def _int_array_hash(input_list):
"""
A function to calculate a hash value of multiple integer values, not used at the moment
Parameters
----------
input_list
Returns
-------
"""
list_len = len(input_list)
arr_len = len(input_list[0])
mult_arr = np.full(arr_len, 1000003, dtype=np.long)
value_arr = np.full(arr_len, 0x345678, dtype=np.long)
for i, current_arr in enumerate(input_list):
index = list_len - i - 1
value_arr ^= current_arr
value_arr *= mult_arr
mult_arr += (82520 + index + index)
value_arr += 97531
result_carray = bcolz.carray(value_arr)
del value_arr
return result_carray
def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
"""
Create a unique, factorized column out of several individual columns
Parameters
----------
factor_list
groupby_cols
cache
Returns
-------
"""
if not self.rootdir:
# in-memory scenario
input_rootdir = None
col_rootdir = None
col_factor_rootdir = None
col_values_rootdir = None
col_factor_rootdir_tmp = None
col_values_rootdir_tmp = None
else:
# temporary
input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
# create combination of groupby columns
group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
factor_table = bcolz.ctable(factor_list, names=groupby_cols)
ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)
# now factorize the results
carray_factor = \
bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
carray_factor.flush()
carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
carray_values.flush()
del group_array
if cache:
# clean up the temporary file
rm_file_or_dir(input_rootdir, ignore_errors=True)
if cache:
# official end destination
col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
col_factor_rootdir = col_rootdir + '.factor'
col_values_rootdir = col_rootdir + '.values'
lock_file = col_rootdir + '.lock'
# only works for linux
if not os.path.exists(lock_file):
uid = str(uuid.uuid4())
try:
with open(lock_file, 'a+') as fn:
fn.write(uid + '\n')
with open(lock_file, 'r') as fn:
temp = fn.read().splitlines()
if temp[0] == uid:
lock = True
else:
lock = False
del temp
except:
lock = False
else:
lock = False
if lock:
rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')
rm_file_or_dir(col_values_rootdir, ignore_errors=False)
shutil.move(col_values_rootdir_tmp, col_values_rootdir)
carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
else:
# another process has a lock, we will work with our current files and clean up later
self._dir_clean_list.append(col_factor_rootdir)
self._dir_clean_list.append(col_values_rootdir)
return carray_factor, carray_values
def make_group_index(self, groupby_cols, bool_arr):
'''Create unique groups for groupby loop
Args:
factor_list:
values_list:
groupby_cols:
bool_arr:
Returns:
carray: (carray_factor)
int: (nr_groups) the number of resulting groups
int: (skip_key)
'''
factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
# create unique groups for groupby loop
if len(factor_list) == 0:
# no columns to groupby over, so directly aggregate the measure
# columns to 1 total
tmp_rootdir = self.create_tmp_rootdir()
carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
carray_values = ['Total']
elif len(factor_list) == 1:
# single column groupby, the groupby output column
# here is 1:1 to the values
carray_factor = factor_list[0]
carray_values = values_list[0]
else:
# multi column groupby
# first combine the factorized columns to single values
if self.group_cache_valid(col_list=groupby_cols):
# there is a group cache that we can use
col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
col_factor_rootdir = col_rootdir + '.factor'
carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
col_values_rootdir = col_rootdir + '.values'
carray_values = bcolz.carray(rootdir=col_values_rootdir)
else:
# create a brand new groupby col combination
carray_factor, carray_values = \
self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
nr_groups = len(carray_values)
skip_key = None
if bool_arr is not None:
# make all non relevant combinations -1
tmp_rootdir = self.create_tmp_rootdir()
carray_factor = bcolz.eval(
'(factor + 1) * bool - 1',
user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
# now check how many unique values there are left
tmp_rootdir = self.create_tmp_rootdir()
labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
carray_factor, values = ctable_ext.factorize(carray_factor, labels)
# values might contain one value too much (-1) (no direct lookup
# possible because values is a reversed dict)
filter_check = \
[key for key, value in values.items() if value == -1]
if filter_check:
skip_key = filter_check[0]
# the new nr of groups depends on the outcome after filtering
nr_groups = len(values)
# using nr_groups as a total length might be one one off due to the skip_key
# (skipping a row in aggregation)
# but that is okay normally
if skip_key is None:
# if we shouldn't skip a row, set it at the first row after the total number of groups
skip_key = nr_groups
return carray_factor, nr_groups, skip_key
def create_tmp_rootdir(self):
"""
create a rootdir that we can destroy later again
Returns
-------
"""
if self.rootdir:
tmp_rootdir = tempfile.mkdtemp(prefix='bcolz-')
self._dir_clean_list.append(tmp_rootdir)
else:
tmp_rootdir = None
return tmp_rootdir
def clean_tmp_rootdir(self):
"""
clean up all used temporary rootdirs
Returns
-------
"""
for tmp_rootdir in list(self._dir_clean_list):
rm_file_or_dir(tmp_rootdir)
self._dir_clean_list.remove(tmp_rootdir)
def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
'''Create a container for the output table, a dictionary describing it's
columns and a list of tuples describing aggregation
operations to perform.
Args:
groupby_cols (list): a list of columns to groupby over
agg_list (list): the aggregation operations (see groupby for more info)
expectedlen (int): expected length of output table
rootdir (string): the directory to write the table to
Returns:
ctable: A table in the correct format for containing the output of
the specified aggregation operations.
dict: (dtype_dict) dictionary describing columns to create
list: (agg_ops) list of tuples of the form:
(input_col_name, output_col_name, agg_op)
input_col_name (string): name of the column to act on
output_col_name (string): name of the column to output to
agg_op (int): aggregation operation to perform
'''
dtype_dict = {}
# include all the groupby columns
for col in groupby_cols:
dtype_dict[col] = self[col].dtype
agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
agg_ops = []
for agg_info in agg_list:
if not isinstance(agg_info, list):
# example: ['m1', 'm2', ...]
# default operation (sum) and default output column name (same is input)
output_col_name = agg_info
input_col_name = agg_info
agg_op = 'sum'
else:
input_col_name = agg_info[0]
agg_op = agg_info[1]
if len(agg_info) == 2:
# example: [['m1', 'sum'], ['m2', 'mean], ...]
# default output column name
output_col_name = input_col_name
else:
# example: [['m1', 'sum', 'mnew1'], ['m1, 'mean','mnew2'], ...]
# fully specified
output_col_name = agg_info[2]
if agg_op not in agg_ops_list:
raise NotImplementedError(
'Unknown Aggregation Type: ' + str(agg_op))
# choose output column dtype based on aggregation operation and
# input column dtype
# TODO: check if the aggregation columns is numeric
# NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
# separate operation
if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
output_col_dtype = np.dtype(np.int64)
elif agg_op in ('mean', 'std'):
output_col_dtype = np.dtype(np.float64)
else:
output_col_dtype = self[input_col_name].dtype
dtype_dict[output_col_name] = output_col_dtype
# save output
agg_ops.append((input_col_name, output_col_name, agg_op))
# create aggregation table
ct_agg = bcolz.ctable(
np.zeros(expectedlen, [('tmp_col_bquery__', np.bool)]),
expectedlen=expectedlen,
rootdir=rootdir)
return ct_agg, dtype_dict, agg_ops
def where_terms_factorization_check(self, term_list):
"""
check for where terms if they are applicable
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError:
"""
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
valid = True
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
if not os.path.exists(col_values_rootdir):
# no factorization available
break
col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
col_values = set(col_carray)
if filter_operator in ['in', 'not in', 'nin']:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
filter_value = filter_value[0]
if filter_operator == 'in':
filter_operator = '=='
else:
filter_operator = '!='
else:
filter_value = set(filter_value)
if filter_operator in ['==', 'eq']:
valid = filter_value in col_values
elif filter_operator in ['!=', 'neq']:
valid = any(val for val in col_values if val != filter_value)
elif filter_operator in ['in']:
valid = any(val for val in filter_value if val in col_values)
elif filter_operator in ['nin', 'not in']:
valid = any(val for val in col_values if val not in filter_value)
elif filter_operator in ['>']:
valid = any(val for val in col_values if val > filter_value)
elif filter_operator in ['>=']:
valid = any(val for val in col_values if val >= filter_value)
elif filter_operator in ['<']:
valid = any(val for val in col_values if val < filter_value)
elif filter_operator in ['<=']:
valid = any(val for val in col_values if val >= filter_value)
else:
raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
# if one of the filters is blocking, we can stop
if not valid:
break
return valid
def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
_max_len_subgroup=1000):
"""
Expands the filter using a specified column
Parameters
----------
basket_col
bool_arr
_max_len_subgroup
Returns
-------
"""
assert basket_col is not None
if bool_arr is None:
return None
if self.auto_cache and bool_arr.rootdir is not None:
rootdir = self.create_tmp_rootdir()
else:
rootdir = None
return \
ctable_ext.is_in_ordered_subgroups(
self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
_max_len_subgroup=_max_len_subgroup)
|
visualfabriq/bquery | bquery/ctable.py | ctable.where_terms_factorization_check | python | def where_terms_factorization_check(self, term_list):
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
valid = True
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
if not os.path.exists(col_values_rootdir):
# no factorization available
break
col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
col_values = set(col_carray)
if filter_operator in ['in', 'not in', 'nin']:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
filter_value = filter_value[0]
if filter_operator == 'in':
filter_operator = '=='
else:
filter_operator = '!='
else:
filter_value = set(filter_value)
if filter_operator in ['==', 'eq']:
valid = filter_value in col_values
elif filter_operator in ['!=', 'neq']:
valid = any(val for val in col_values if val != filter_value)
elif filter_operator in ['in']:
valid = any(val for val in filter_value if val in col_values)
elif filter_operator in ['nin', 'not in']:
valid = any(val for val in col_values if val not in filter_value)
elif filter_operator in ['>']:
valid = any(val for val in col_values if val > filter_value)
elif filter_operator in ['>=']:
valid = any(val for val in col_values if val >= filter_value)
elif filter_operator in ['<']:
valid = any(val for val in col_values if val < filter_value)
elif filter_operator in ['<=']:
valid = any(val for val in col_values if val >= filter_value)
else:
raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
# if one of the filters is blocking, we can stop
if not valid:
break
return valid | check for where terms if they are applicable
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError: | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L742-L819 | null | class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
super(ctable, self).__init__(*args, **kwargs)
# check autocaching
if self.rootdir and kwargs.get('auto_cache') is True:
# explicit auto_cache
self.auto_cache = True
elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
# implicit auto_cache
self.auto_cache = True
else:
self.auto_cache = False
self.auto_cache = True # debug
self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
group_name = '_'.join(sorted(col_list))
return group_name
def cache_valid(self, col):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_org_file_check = self[col].rootdir + '/__attrs__'
col_values_file_check = self[col].rootdir + '.values/__attrs__'
cache_valid = os.path.exists(col_org_file_check) and os.path.exists(col_values_file_check)
return cache_valid
def group_cache_valid(self, col_list):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_values_file_check = os.path.join(self.rootdir, self.create_group_base_name(col_list)) + \
'.values/__attrs__'
exists_group_index = os.path.exists(col_values_file_check)
missing_col_check = [1 for col in col_list if not os.path.exists(self[col].rootdir + '/__attrs__')]
cache_valid = (exists_group_index and not missing_col_check)
return cache_valid
def cache_factor(self, col_list, refresh=False):
    """Create (or refresh) on-disk factorization caches for the given columns.

    For each column two sibling carrays are written next to the column's
    rootdir: '<col>.factor' (one label per row, as long as the table) and
    '<col>.values' (the unique values, one entry per distinct value).

    Existing todos here are: these should be hidden helper carrays,
    not normal columns that you would normally see as a user.

    :param col_list: column name or list of column names to cache
    :param refresh: when True, wipe all existing factor/values caches first
    :return: None
    :raise TypeError: when the table is in-memory (no rootdir)
    """
    if not self.rootdir:
        raise TypeError('Only out-of-core ctables can have '
                        'factorization caching at the moment')
    if not isinstance(col_list, list):
        col_list = [col_list]
    if refresh:
        # remove every existing cache directory of this table
        kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
        for kill_dir in kill_list:
            rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
    for col in col_list:
        # create cache if needed
        if refresh or not self.cache_valid(col):
            # todo: also add locking mechanism here
            # build in temp dirs first, then move into place to avoid
            # leaving a half-written cache behind
            col_rootdir = self[col].rootdir
            col_factor_rootdir = col_rootdir + '.factor'
            col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
            col_values_rootdir = col_rootdir + '.values'
            col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
            # create factor
            carray_factor = \
                bcolz.carray([], dtype='int64', expectedlen=self.size,
                             rootdir=col_factor_rootdir_tmp, mode='w')
            _, values = \
                ctable_ext.factorize(self[col], labels=carray_factor)
            carray_factor.flush()
            rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
            shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
            # create values
            carray_values = \
                bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
                             rootdir=col_values_rootdir_tmp, mode='w')
            carray_values.flush()
            rm_file_or_dir(col_values_rootdir, ignore_errors=True)
            shutil.move(col_values_rootdir_tmp, col_values_rootdir)
def unique(self, col_or_col_list):
    """Return the unique values of a column, or a list of unique-value lists
    when a list of columns is given.

    :param col_or_col_list: a column name or a list of column names
    :return: list of unique values (single column) or list of such lists
    """
    if isinstance(col_or_col_list, list):
        col_is_list = True
        col_list = col_or_col_list
    else:
        col_is_list = False
        col_list = [col_or_col_list]
    output = []
    for col in col_list:
        if self.auto_cache or self.cache_valid(col):
            # create factorization cache
            if not self.cache_valid(col):
                self.cache_factor([col])
            # retrieve values from existing disk-based factorization
            col_values_rootdir = self[col].rootdir + '.values'
            carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
            values = list(carray_values)
        else:
            # factorize on-the-fly
            _, values = ctable_ext.factorize(self[col])
            # fix: materialize as a list; on Python 3 dict.values() is a lazy
            # view, while the cached branch (and the documented contract)
            # return a concrete list
            values = list(values.values())
        output.append(values)
    if not col_is_list:
        output = output[0]
    return output
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
                     carray_factor, groupby_cols, agg_ops,
                     dtype_dict, bool_arr=None):
    '''Perform aggregation and place the result in the given ctable.

    Args:
        ct_agg (ctable): the table to hold the aggregation
        nr_groups (int): the number of groups (number of rows in output table)
        skip_key (int): index of the output row to remove from results (used for filtering)
        carray_factor: for each row in the table a reference to the unique group index
        groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
        agg_ops (list): list of tuples of the form: (input_col, output_col, agg_op)
            input_col (string): name of the column to act on
            output_col (string): name of the output column
            agg_op (string): aggregation operation to perform
        dtype_dict (dict): maps output column name -> numpy dtype
        bool_arr: a boolean array containing the filter; when set, the row at
            skip_key (the filtered-out bucket) is dropped from every column
    '''
    # this creates the groupby columns
    for col in groupby_cols:
        result_array = ctable_ext.groupby_value(self[col], carray_factor,
                                                nr_groups, skip_key)
        if bool_arr is not None:
            # drop the bucket that collected the filtered-out combinations
            result_array = np.delete(result_array, skip_key)
        ct_agg.addcol(result_array, name=col)
        del result_array
    # this creates the aggregation columns
    for input_col_name, output_col_name, agg_op in agg_ops:
        input_col = self[input_col_name]
        output_col_dtype = dtype_dict[output_col_name]
        # reusable chunk buffer plus one output slot per group
        input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
        output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
        if agg_op == 'sum':
            ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
                                     skip_key, input_buffer, output_buffer)
        elif agg_op == 'mean':
            ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
                                      skip_key, input_buffer, output_buffer)
        elif agg_op == 'std':
            ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
                                     skip_key, input_buffer, output_buffer)
        elif agg_op == 'count':
            ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
                                       skip_key, input_buffer, output_buffer)
        elif agg_op == 'count_distinct':
            ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
                                                skip_key, input_buffer, output_buffer)
        elif agg_op == 'sorted_count_distinct':
            ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
                                                       skip_key, input_buffer, output_buffer)
        else:
            raise KeyError('Unknown aggregation operation ' + str(agg_op))
        if bool_arr is not None:
            output_buffer = np.delete(output_buffer, skip_key)
        ct_agg.addcol(output_buffer, name=output_col_name)
        del output_buffer
    # remove the placeholder column that pre-sized the output table
    ct_agg.delcol('tmp_col_bquery__')
def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
    """Aggregate the ctable.

    groupby_cols: a list of columns to groupby over
    agg_list: the aggregation operations, which can be:
        - a list of column names (output has same name and sum is performed)
          ['m1', 'm2', ...]
        - a list of lists, each list contains input column name and operation
          [['m1', 'sum'], ['m2', 'mean'], ...]
        - a list of lists, each list contains input column name, operation and
          output column name
          [['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]

    Currently supported aggregation operations are:
        - 'sum'
        - 'count'
        - 'count_distinct'
        - 'sorted_count_distinct', data should have been previously presorted
        - 'mean', arithmetic mean (average)
        - 'std', standard deviation
    (NOTE: 'count_na' is not implemented — create_agg_ctable and
    aggregate_groups reject it.)

    bool_arr: optional boolean filter over the input rows
    rootdir: the aggregation ctable rootdir
    """
    carray_factor, nr_groups, skip_key = \
        self.make_group_index(groupby_cols, bool_arr)
    # check if the bool_arr actually filters; if every row passes, drop it
    if bool_arr is not None and np.all(bool_arr):
        bool_arr = None
    if bool_arr is None:
        expectedlen = nr_groups
    else:
        # one bucket (skip_key) collects the filtered-out rows and is dropped
        expectedlen = nr_groups - 1
    ct_agg, dtype_dict, agg_ops = \
        self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
    # perform aggregation
    self.aggregate_groups(ct_agg, nr_groups, skip_key,
                          carray_factor, groupby_cols,
                          agg_ops, dtype_dict,
                          bool_arr=bool_arr)
    # clean up everything that was used
    self.clean_tmp_rootdir()
    return ct_agg
# groupby helper functions
def factorize_groupby_cols(self, groupby_cols):
    """Factorize all columns that are used in the groupby.

    Uses cached factor/values carrays when available; when auto_cache is
    enabled, missing caches are created first. Otherwise factorization is
    done in memory.

    :param groupby_cols: list of column names to factorize
    :return: (factor_list, values_list), one carray per column in each
    """
    # first check if the factorized arrays already exist
    # unless we need to refresh the cache
    factor_list = []
    values_list = []
    # factorize the groupby columns
    for col in groupby_cols:
        if self.auto_cache or self.cache_valid(col):
            # create factorization cache if needed
            if not self.cache_valid(col):
                self.cache_factor([col])
            col_rootdir = self[col].rootdir
            col_factor_rootdir = col_rootdir + '.factor'
            col_values_rootdir = col_rootdir + '.values'
            col_carray_factor = \
                bcolz.carray(rootdir=col_factor_rootdir, mode='r')
            col_carray_values = \
                bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # in-memory factorization (no cache available)
            col_carray_factor, values = ctable_ext.factorize(self[col])
            col_carray_values = \
                bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype))
        factor_list.append(col_carray_factor)
        values_list.append(col_carray_values)
    return factor_list, values_list
@staticmethod
def _int_array_hash(input_list):
    """Combine multiple equal-length integer arrays into one hash array.

    Applies a CPython-tuple-style hash mixing scheme element-wise.
    Not used at the moment.

    :param input_list: list of equal-length integer arrays/carrays
    :return: bcolz.carray with one hash value per row
    """
    list_len = len(input_list)
    arr_len = len(input_list[0])
    # fix: np.long was removed in NumPy 1.24; use the explicit 64-bit type
    mult_arr = np.full(arr_len, 1000003, dtype=np.int64)
    value_arr = np.full(arr_len, 0x345678, dtype=np.int64)
    for i, current_arr in enumerate(input_list):
        index = list_len - i - 1
        value_arr ^= current_arr
        value_arr *= mult_arr
        mult_arr += (82520 + index + index)
    value_arr += 97531
    result_carray = bcolz.carray(value_arr)
    del value_arr
    return result_carray
def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
    """Create a unique, factorized column out of several individual columns.

    The per-column factors are combined into a single group index, which is
    then factorized itself. With `cache=True` (disk-based tables) the result
    is moved into the table's rootdir, guarded by a best-effort lock file so
    concurrent processes do not clobber each other.

    :param factor_list: list of factor carrays, one per groupby column
    :param groupby_cols: the groupby column names (used for the cache name)
    :param cache: persist the combined factor/values next to the table
    :return: (carray_factor, carray_values)
    """
    if not self.rootdir:
        # in-memory scenario
        input_rootdir = None
        col_rootdir = None
        col_factor_rootdir = None
        col_values_rootdir = None
        col_factor_rootdir_tmp = None
        col_values_rootdir_tmp = None
    else:
        # temporary build locations
        input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
        col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
        col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
    # create combination of groupby columns
    group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
    factor_table = bcolz.ctable(factor_list, names=groupby_cols)
    ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
    ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)
    # now factorize the results
    carray_factor = \
        bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
    carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
    carray_factor.flush()
    carray_values = \
        bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
    carray_values.flush()
    del group_array
    if cache:
        # clean up the temporary combination input
        rm_file_or_dir(input_rootdir, ignore_errors=True)
    if cache:
        # official end destination
        col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
        col_factor_rootdir = col_rootdir + '.factor'
        col_values_rootdir = col_rootdir + '.values'
        lock_file = col_rootdir + '.lock'
        # best-effort advisory lock via a shared append file; only works for linux
        if not os.path.exists(lock_file):
            uid = str(uuid.uuid4())
            try:
                with open(lock_file, 'a+') as fn:
                    fn.write(uid + '\n')
                with open(lock_file, 'r') as fn:
                    temp = fn.read().splitlines()
                # we hold the lock only when our uid was written first
                if temp[0] == uid:
                    lock = True
                else:
                    lock = False
                del temp
            except Exception:
                # fix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; any failure means "no lock"
                lock = False
        else:
            lock = False
        if lock:
            rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
            shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')
            rm_file_or_dir(col_values_rootdir, ignore_errors=False)
            shutil.move(col_values_rootdir_tmp, col_values_rootdir)
            carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # another process has a lock, we will work with our current files and clean up later
            # NOTE(review): this registers the *official* cache dirs for
            # cleanup rather than our `*_tmp` dirs — looks like it could delete
            # another process's cache; confirm intent before changing.
            self._dir_clean_list.append(col_factor_rootdir)
            self._dir_clean_list.append(col_values_rootdir)
    return carray_factor, carray_values
def make_group_index(self, groupby_cols, bool_arr):
    '''Create unique groups for the groupby loop.

    Args:
        groupby_cols: the list of columns to group over
        bool_arr: optional boolean filter array (or None)

    Returns:
        carray: (carray_factor) per-row group index
        int: (nr_groups) the number of resulting groups
        int: (skip_key) index of the bucket holding filtered-out rows; set to
            nr_groups (i.e. past the end) when nothing must be skipped
    '''
    factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
    # create unique groups for groupby loop
    if len(factor_list) == 0:
        # no columns to groupby over, so directly aggregate the measure
        # columns to 1 total
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
        carray_values = ['Total']
    elif len(factor_list) == 1:
        # single column groupby, the groupby output column
        # here is 1:1 to the values
        carray_factor = factor_list[0]
        carray_values = values_list[0]
    else:
        # multi column groupby
        # first combine the factorized columns to single values
        if self.group_cache_valid(col_list=groupby_cols):
            # there is a group cache that we can use
            col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
            col_factor_rootdir = col_rootdir + '.factor'
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
            col_values_rootdir = col_rootdir + '.values'
            carray_values = bcolz.carray(rootdir=col_values_rootdir)
        else:
            # create a brand new groupby col combination
            carray_factor, carray_values = \
                self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
    nr_groups = len(carray_values)
    skip_key = None
    if bool_arr is not None:
        # make all non relevant combinations -1
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.eval(
            '(factor + 1) * bool - 1',
            user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
        # now check how many unique values there are left
        tmp_rootdir = self.create_tmp_rootdir()
        labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
        carray_factor, values = ctable_ext.factorize(carray_factor, labels)
        # values might contain one value too much (-1) (no direct lookup
        # possible because values is a reversed dict)
        filter_check = \
            [key for key, value in values.items() if value == -1]
        if filter_check:
            skip_key = filter_check[0]
        # the new nr of groups depends on the outcome after filtering
        nr_groups = len(values)
    # using nr_groups as a total length might be one off due to the skip_key
    # (skipping a row in aggregation), but that is okay normally
    if skip_key is None:
        # if we shouldn't skip a row, set it at the first row after the total number of groups
        skip_key = nr_groups
    return carray_factor, nr_groups, skip_key
def create_tmp_rootdir(self):
    """Create a temporary rootdir and register it for later cleanup.

    Returns None for in-memory tables (no rootdir), so callers can pass the
    result straight to bcolz constructors.
    """
    if not self.rootdir:
        return None
    tmp_rootdir = tempfile.mkdtemp(prefix='bcolz-')
    self._dir_clean_list.append(tmp_rootdir)
    return tmp_rootdir
def clean_tmp_rootdir(self):
    """Remove every registered temporary rootdir and clear the registry."""
    # iterate over a snapshot so removal from the registry is safe
    pending = list(self._dir_clean_list)
    for tmp_rootdir in pending:
        rm_file_or_dir(tmp_rootdir)
        self._dir_clean_list.remove(tmp_rootdir)
def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
    '''Create a container for the output table, a dictionary describing its
    columns and a list of tuples describing aggregation operations to perform.

    Args:
        groupby_cols (list): a list of columns to groupby over
        agg_list (list): the aggregation operations (see groupby for more info)
        expectedlen (int): expected length of output table
        rootdir (string): the directory to write the table to

    Returns:
        ctable: A table in the correct format for containing the output of
            the specified aggregation operations.
        dict: (dtype_dict) dictionary describing columns to create
        list: (agg_ops) list of tuples of the form:
            (input_col_name, output_col_name, agg_op)

    Raises:
        NotImplementedError: on an unsupported aggregation operation
    '''
    dtype_dict = {}
    # include all the groupby columns
    for col in groupby_cols:
        dtype_dict[col] = self[col].dtype
    agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
    agg_ops = []
    for agg_info in agg_list:
        if not isinstance(agg_info, list):
            # example: ['m1', 'm2', ...]
            # default operation (sum) and default output column name (same as input)
            output_col_name = agg_info
            input_col_name = agg_info
            agg_op = 'sum'
        else:
            input_col_name = agg_info[0]
            agg_op = agg_info[1]
            if len(agg_info) == 2:
                # example: [['m1', 'sum'], ['m2', 'mean'], ...]
                # default output column name
                output_col_name = input_col_name
            else:
                # example: [['m1', 'sum', 'mnew1'], ['m1', 'mean', 'mnew2'], ...]
                # fully specified
                output_col_name = agg_info[2]
        if agg_op not in agg_ops_list:
            raise NotImplementedError(
                'Unknown Aggregation Type: ' + str(agg_op))
        # choose output column dtype based on aggregation operation and
        # input column dtype
        # TODO: check if the aggregation columns is numeric
        # NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
        # separate operation
        if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
            output_col_dtype = np.dtype(np.int64)
        elif agg_op in ('mean', 'std'):
            output_col_dtype = np.dtype(np.float64)
        else:
            output_col_dtype = self[input_col_name].dtype
        dtype_dict[output_col_name] = output_col_dtype
        # save output
        agg_ops.append((input_col_name, output_col_name, agg_op))
    # create aggregation table, pre-sized via a placeholder column that is
    # dropped again at the end of aggregate_groups
    # fix: np.bool was removed in NumPy 1.24; the builtin bool is equivalent
    ct_agg = bcolz.ctable(
        np.zeros(expectedlen, [('tmp_col_bquery__', bool)]),
        expectedlen=expectedlen,
        rootdir=rootdir)
    return ct_agg, dtype_dict, agg_ops
def where_terms(self, term_list, cache=False):
"""
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError:
"""
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
col_list = []
op_list = []
value_list = []
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
if filter_operator in ['==', 'eq']:
op_id = 1
elif filter_operator in ['!=', 'neq']:
op_id = 2
elif filter_operator in ['in']:
op_id = 3
elif filter_operator in ['nin', 'not in']:
op_id = 4
elif filter_operator in ['>']:
op_id = 5
elif filter_operator in ['>=']:
op_id = 6
elif filter_operator in ['<']:
op_id = 7
elif filter_operator in ['<=']:
op_id = 8
else:
raise KeyError(unicode(filter_operator) + ' is not an accepted operator for filtering')
if op_id in [3, 4]:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
if op_id == 3:
op_id = 1
else:
op_id = 2
filter_value = filter_value[0]
else:
filter_value = set(filter_value)
# prepare input for filter creation
col_list.append(filter_col)
op_list.append(op_id)
value_list.append(filter_value)
# rootdir
if cache:
# nb: this directory is not destroyed until the end of the groupby
rootdir = self.create_tmp_rootdir()
else:
rootdir = None
# create boolean array and fill it
boolarr = bcolz.carray(np.ones(0, dtype=np.bool), expectedlen=self.len, rootdir=rootdir, mode='w')
ctable_iter = self[col_list].iter(out_flavor='tuple')
ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)
return boolarr
def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
                            _max_len_subgroup=1000):
    """Expand the filter `bool_arr` over whole `basket_col` subgroups.

    :param basket_col: column whose (ordered) values delimit the subgroups
    :param bool_arr: boolean filter to expand; None short-circuits to None
    :param _max_len_subgroup: internal subgroup size limit for the extension
    :return: expanded boolean carray, or None when no filter was given
    """
    assert basket_col is not None
    if bool_arr is None:
        return None
    # persist the expanded filter on disk when the input filter is disk-backed
    rootdir = None
    if self.auto_cache and bool_arr.rootdir is not None:
        rootdir = self.create_tmp_rootdir()
    return ctable_ext.is_in_ordered_subgroups(
        self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
        _max_len_subgroup=_max_len_subgroup)
|
visualfabriq/bquery | bquery/ctable.py | ctable.is_in_ordered_subgroups | python | def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
_max_len_subgroup=1000):
assert basket_col is not None
if bool_arr is None:
return None
if self.auto_cache and bool_arr.rootdir is not None:
rootdir = self.create_tmp_rootdir()
else:
rootdir = None
return \
ctable_ext.is_in_ordered_subgroups(
self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
_max_len_subgroup=_max_len_subgroup) | Expands the filter using a specified column
Parameters
----------
basket_col
bool_arr
_max_len_subgroup
Returns
------- | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L821-L849 | null | class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
    """Initialize the ctable and decide whether factorization caching is enabled.

    Caching (`self.auto_cache`) is switched on only for disk-based tables
    (those with a rootdir), and then either explicitly (`auto_cache=True`
    passed) or implicitly (`auto_cache` unset and the table not opened
    read-only).
    """
    super(ctable, self).__init__(*args, **kwargs)
    # check autocaching
    if self.rootdir and kwargs.get('auto_cache') is True:
        # explicit auto_cache
        self.auto_cache = True
    elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
        # implicit auto_cache
        self.auto_cache = True
    else:
        self.auto_cache = False
    # NOTE(fix): removed leftover `self.auto_cache = True  # debug` line that
    # unconditionally overrode the decision above and forced caching even for
    # in-memory or read-only tables.
    # temporary rootdirs created during a groupby, removed by clean_tmp_rootdir()
    self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
    """Return the canonical cache name for a set of columns: sorted, '_'-joined."""
    return '_'.join(sorted(col_list))
def cache_valid(self, col):
    """Return True when a disk-based factorization cache exists for `col`.

    NOTE: only the *presence* of the column's attrs file and its '.values'
    cache is checked; no staleness (modification-time) comparison against
    the source column is performed.

    :param col: column name
    :return: bool
    """
    cache_valid = False
    if self.rootdir:
        # both the source column and its '.values' cache must exist on disk
        col_org_file_check = self[col].rootdir + '/__attrs__'
        col_values_file_check = self[col].rootdir + '.values/__attrs__'
        cache_valid = os.path.exists(col_org_file_check) and os.path.exists(col_values_file_check)
    return cache_valid
def group_cache_valid(self, col_list):
    """Check whether a combined group-index cache exists on disk for `col_list`.

    True only when the group's '.values' cache is present AND every member
    column is still present on disk. No staleness check is performed.

    :param col_list: list of groupby column names
    :return: bool
    """
    if not self.rootdir:
        return False
    group_base = self.create_group_base_name(col_list)
    col_values_file_check = os.path.join(self.rootdir, group_base) + '.values/__attrs__'
    if not os.path.exists(col_values_file_check):
        return False
    # every groupby column must still exist on disk
    return all(os.path.exists(self[col].rootdir + '/__attrs__') for col in col_list)
def cache_factor(self, col_list, refresh=False):
    """Create (or refresh) on-disk factorization caches for the given columns.

    For each column two sibling carrays are written next to the column's
    rootdir: '<col>.factor' (one label per row, as long as the table) and
    '<col>.values' (the unique values, one entry per distinct value).

    Existing todos here are: these should be hidden helper carrays,
    not normal columns that you would normally see as a user.

    :param col_list: column name or list of column names to cache
    :param refresh: when True, wipe all existing factor/values caches first
    :return: None
    :raise TypeError: when the table is in-memory (no rootdir)
    """
    if not self.rootdir:
        raise TypeError('Only out-of-core ctables can have '
                        'factorization caching at the moment')
    if not isinstance(col_list, list):
        col_list = [col_list]
    if refresh:
        # remove every existing cache directory of this table
        kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
        for kill_dir in kill_list:
            rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
    for col in col_list:
        # create cache if needed
        if refresh or not self.cache_valid(col):
            # todo: also add locking mechanism here
            # build in temp dirs first, then move into place to avoid
            # leaving a half-written cache behind
            col_rootdir = self[col].rootdir
            col_factor_rootdir = col_rootdir + '.factor'
            col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
            col_values_rootdir = col_rootdir + '.values'
            col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
            # create factor
            carray_factor = \
                bcolz.carray([], dtype='int64', expectedlen=self.size,
                             rootdir=col_factor_rootdir_tmp, mode='w')
            _, values = \
                ctable_ext.factorize(self[col], labels=carray_factor)
            carray_factor.flush()
            rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
            shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
            # create values
            carray_values = \
                bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
                             rootdir=col_values_rootdir_tmp, mode='w')
            carray_values.flush()
            rm_file_or_dir(col_values_rootdir, ignore_errors=True)
            shutil.move(col_values_rootdir_tmp, col_values_rootdir)
def unique(self, col_or_col_list):
    """Return the unique values of a column, or a list of unique-value lists
    when a list of columns is given.

    :param col_or_col_list: a column name or a list of column names
    :return: list of unique values (single column) or list of such lists
    """
    if isinstance(col_or_col_list, list):
        col_is_list = True
        col_list = col_or_col_list
    else:
        col_is_list = False
        col_list = [col_or_col_list]
    output = []
    for col in col_list:
        if self.auto_cache or self.cache_valid(col):
            # create factorization cache
            if not self.cache_valid(col):
                self.cache_factor([col])
            # retrieve values from existing disk-based factorization
            col_values_rootdir = self[col].rootdir + '.values'
            carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
            values = list(carray_values)
        else:
            # factorize on-the-fly
            _, values = ctable_ext.factorize(self[col])
            # fix: materialize as a list; on Python 3 dict.values() is a lazy
            # view, while the cached branch (and the documented contract)
            # return a concrete list
            values = list(values.values())
        output.append(values)
    if not col_is_list:
        output = output[0]
    return output
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
                     carray_factor, groupby_cols, agg_ops,
                     dtype_dict, bool_arr=None):
    '''Perform aggregation and place the result in the given ctable.

    Args:
        ct_agg (ctable): the table to hold the aggregation
        nr_groups (int): the number of groups (number of rows in output table)
        skip_key (int): index of the output row to remove from results (used for filtering)
        carray_factor: for each row in the table a reference to the unique group index
        groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
        agg_ops (list): list of tuples of the form: (input_col, output_col, agg_op)
            input_col (string): name of the column to act on
            output_col (string): name of the output column
            agg_op (string): aggregation operation to perform
        dtype_dict (dict): maps output column name -> numpy dtype
        bool_arr: a boolean array containing the filter; when set, the row at
            skip_key (the filtered-out bucket) is dropped from every column
    '''
    # this creates the groupby columns
    for col in groupby_cols:
        result_array = ctable_ext.groupby_value(self[col], carray_factor,
                                                nr_groups, skip_key)
        if bool_arr is not None:
            # drop the bucket that collected the filtered-out combinations
            result_array = np.delete(result_array, skip_key)
        ct_agg.addcol(result_array, name=col)
        del result_array
    # this creates the aggregation columns
    for input_col_name, output_col_name, agg_op in agg_ops:
        input_col = self[input_col_name]
        output_col_dtype = dtype_dict[output_col_name]
        # reusable chunk buffer plus one output slot per group
        input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
        output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
        if agg_op == 'sum':
            ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
                                     skip_key, input_buffer, output_buffer)
        elif agg_op == 'mean':
            ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
                                      skip_key, input_buffer, output_buffer)
        elif agg_op == 'std':
            ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
                                     skip_key, input_buffer, output_buffer)
        elif agg_op == 'count':
            ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
                                       skip_key, input_buffer, output_buffer)
        elif agg_op == 'count_distinct':
            ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
                                                skip_key, input_buffer, output_buffer)
        elif agg_op == 'sorted_count_distinct':
            ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
                                                       skip_key, input_buffer, output_buffer)
        else:
            raise KeyError('Unknown aggregation operation ' + str(agg_op))
        if bool_arr is not None:
            output_buffer = np.delete(output_buffer, skip_key)
        ct_agg.addcol(output_buffer, name=output_col_name)
        del output_buffer
    # remove the placeholder column that pre-sized the output table
    ct_agg.delcol('tmp_col_bquery__')
def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
    """Aggregate the ctable.

    groupby_cols: a list of columns to groupby over
    agg_list: the aggregation operations, which can be:
        - a list of column names (output has same name and sum is performed)
          ['m1', 'm2', ...]
        - a list of lists, each list contains input column name and operation
          [['m1', 'sum'], ['m2', 'mean'], ...]
        - a list of lists, each list contains input column name, operation and
          output column name
          [['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]

    Currently supported aggregation operations are:
        - 'sum'
        - 'count'
        - 'count_distinct'
        - 'sorted_count_distinct', data should have been previously presorted
        - 'mean', arithmetic mean (average)
        - 'std', standard deviation
    (NOTE: 'count_na' is not implemented — create_agg_ctable and
    aggregate_groups reject it.)

    bool_arr: optional boolean filter over the input rows
    rootdir: the aggregation ctable rootdir
    """
    carray_factor, nr_groups, skip_key = \
        self.make_group_index(groupby_cols, bool_arr)
    # check if the bool_arr actually filters; if every row passes, drop it
    if bool_arr is not None and np.all(bool_arr):
        bool_arr = None
    if bool_arr is None:
        expectedlen = nr_groups
    else:
        # one bucket (skip_key) collects the filtered-out rows and is dropped
        expectedlen = nr_groups - 1
    ct_agg, dtype_dict, agg_ops = \
        self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
    # perform aggregation
    self.aggregate_groups(ct_agg, nr_groups, skip_key,
                          carray_factor, groupby_cols,
                          agg_ops, dtype_dict,
                          bool_arr=bool_arr)
    # clean up everything that was used
    self.clean_tmp_rootdir()
    return ct_agg
# groupby helper functions
def factorize_groupby_cols(self, groupby_cols):
    """Factorize all columns that are used in the groupby.

    Uses cached factor/values carrays when available; when auto_cache is
    enabled, missing caches are created first. Otherwise factorization is
    done in memory.

    :param groupby_cols: list of column names to factorize
    :return: (factor_list, values_list), one carray per column in each
    """
    # first check if the factorized arrays already exist
    # unless we need to refresh the cache
    factor_list = []
    values_list = []
    # factorize the groupby columns
    for col in groupby_cols:
        if self.auto_cache or self.cache_valid(col):
            # create factorization cache if needed
            if not self.cache_valid(col):
                self.cache_factor([col])
            col_rootdir = self[col].rootdir
            col_factor_rootdir = col_rootdir + '.factor'
            col_values_rootdir = col_rootdir + '.values'
            col_carray_factor = \
                bcolz.carray(rootdir=col_factor_rootdir, mode='r')
            col_carray_values = \
                bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # in-memory factorization (no cache available)
            col_carray_factor, values = ctable_ext.factorize(self[col])
            col_carray_values = \
                bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype))
        factor_list.append(col_carray_factor)
        values_list.append(col_carray_values)
    return factor_list, values_list
@staticmethod
def _int_array_hash(input_list):
    """Combine multiple equal-length integer arrays into one hash array.

    Applies a CPython-tuple-style hash mixing scheme element-wise.
    Not used at the moment.

    :param input_list: list of equal-length integer arrays/carrays
    :return: bcolz.carray with one hash value per row
    """
    list_len = len(input_list)
    arr_len = len(input_list[0])
    # fix: np.long was removed in NumPy 1.24; use the explicit 64-bit type
    mult_arr = np.full(arr_len, 1000003, dtype=np.int64)
    value_arr = np.full(arr_len, 0x345678, dtype=np.int64)
    for i, current_arr in enumerate(input_list):
        index = list_len - i - 1
        value_arr ^= current_arr
        value_arr *= mult_arr
        mult_arr += (82520 + index + index)
    value_arr += 97531
    result_carray = bcolz.carray(value_arr)
    del value_arr
    return result_carray
def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
    """Create a unique, factorized column out of several individual columns.

    The per-column factors are combined into a single group index, which is
    then factorized itself. With `cache=True` (disk-based tables) the result
    is moved into the table's rootdir, guarded by a best-effort lock file so
    concurrent processes do not clobber each other.

    :param factor_list: list of factor carrays, one per groupby column
    :param groupby_cols: the groupby column names (used for the cache name)
    :param cache: persist the combined factor/values next to the table
    :return: (carray_factor, carray_values)
    """
    if not self.rootdir:
        # in-memory scenario
        input_rootdir = None
        col_rootdir = None
        col_factor_rootdir = None
        col_values_rootdir = None
        col_factor_rootdir_tmp = None
        col_values_rootdir_tmp = None
    else:
        # temporary build locations
        input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
        col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
        col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
    # create combination of groupby columns
    group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
    factor_table = bcolz.ctable(factor_list, names=groupby_cols)
    ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
    ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)
    # now factorize the results
    carray_factor = \
        bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
    carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
    carray_factor.flush()
    carray_values = \
        bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
    carray_values.flush()
    del group_array
    if cache:
        # clean up the temporary combination input
        rm_file_or_dir(input_rootdir, ignore_errors=True)
    if cache:
        # official end destination
        col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
        col_factor_rootdir = col_rootdir + '.factor'
        col_values_rootdir = col_rootdir + '.values'
        lock_file = col_rootdir + '.lock'
        # best-effort advisory lock via a shared append file; only works for linux
        if not os.path.exists(lock_file):
            uid = str(uuid.uuid4())
            try:
                with open(lock_file, 'a+') as fn:
                    fn.write(uid + '\n')
                with open(lock_file, 'r') as fn:
                    temp = fn.read().splitlines()
                # we hold the lock only when our uid was written first
                if temp[0] == uid:
                    lock = True
                else:
                    lock = False
                del temp
            except Exception:
                # fix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; any failure means "no lock"
                lock = False
        else:
            lock = False
        if lock:
            rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
            shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')
            rm_file_or_dir(col_values_rootdir, ignore_errors=False)
            shutil.move(col_values_rootdir_tmp, col_values_rootdir)
            carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # another process has a lock, we will work with our current files and clean up later
            # NOTE(review): this registers the *official* cache dirs for
            # cleanup rather than our `*_tmp` dirs — looks like it could delete
            # another process's cache; confirm intent before changing.
            self._dir_clean_list.append(col_factor_rootdir)
            self._dir_clean_list.append(col_values_rootdir)
    return carray_factor, carray_values
def make_group_index(self, groupby_cols, bool_arr):
    '''Create the unique-group factor column used by the groupby loop.

    Args:
        groupby_cols (list): column names to group over
        bool_arr: optional boolean carray filter; rows where it is False
            are excluded from the resulting groups (may be None)

    Returns:
        carray: (carray_factor) per-row group index
        int: (nr_groups) the number of resulting groups
        int: (skip_key) group key whose rows must be skipped during
            aggregation (set to nr_groups when nothing is skipped)
    '''
    factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
    # create unique groups for groupby loop
    if len(factor_list) == 0:
        # no columns to groupby over, so directly aggregate the measure
        # columns to 1 total
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
        carray_values = ['Total']
    elif len(factor_list) == 1:
        # single column groupby, the groupby output column
        # here is 1:1 to the values
        carray_factor = factor_list[0]
        carray_values = values_list[0]
    else:
        # multi column groupby
        # first combine the factorized columns to single values
        if self.group_cache_valid(col_list=groupby_cols):
            # there is a group cache that we can use
            col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
            col_factor_rootdir = col_rootdir + '.factor'
            carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
            col_values_rootdir = col_rootdir + '.values'
            carray_values = bcolz.carray(rootdir=col_values_rootdir)
        else:
            # create a brand new groupby col combination
            carray_factor, carray_values = \
                self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
    nr_groups = len(carray_values)
    skip_key = None
    if bool_arr is not None:
        # make all non relevant combinations -1
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.eval(
            '(factor + 1) * bool - 1',
            user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
        # now check how many unique values there are left
        tmp_rootdir = self.create_tmp_rootdir()
        labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
        carray_factor, values = ctable_ext.factorize(carray_factor, labels)
        # values might contain one value too much (-1) (no direct lookup
        # possible because values is a reversed dict)
        filter_check = \
            [key for key, value in values.items() if value == -1]
        if filter_check:
            skip_key = filter_check[0]
        # the new nr of groups depends on the outcome after filtering
        nr_groups = len(values)
    # using nr_groups as a total length might be one off due to the skip_key
    # (skipping a row in aggregation), but that is okay normally
    if skip_key is None:
        # if we shouldn't skip a row, set it at the first row after the total number of groups
        skip_key = nr_groups
    return carray_factor, nr_groups, skip_key
def create_tmp_rootdir(self):
    """Allocate a scratch rootdir that is registered for later cleanup.

    Returns the new directory path, or None when the table itself has no
    rootdir (in-memory table: no on-disk scratch space is needed).
    """
    if not self.rootdir:
        return None
    scratch_dir = tempfile.mkdtemp(prefix='bcolz-')
    # remember it so clean_tmp_rootdir() can destroy it later
    self._dir_clean_list.append(scratch_dir)
    return scratch_dir
def clean_tmp_rootdir(self):
    """Destroy every registered temporary rootdir and forget it."""
    # iterate over a snapshot so the underlying list can be mutated safely
    for scratch_dir in self._dir_clean_list[:]:
        rm_file_or_dir(scratch_dir)
        self._dir_clean_list.remove(scratch_dir)
def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
    '''Create a container for the output table, a dictionary describing its
    columns and a list of tuples describing aggregation
    operations to perform.

    Args:
        groupby_cols (list): a list of columns to groupby over
        agg_list (list): the aggregation operations (see groupby for more info)
        expectedlen (int): expected length of output table
        rootdir (string): the directory to write the table to

    Returns:
        ctable: A table in the correct format for containing the output of
            the specified aggregation operations.
        dict: (dtype_dict) dictionary describing columns to create
        list: (agg_ops) list of tuples of the form:
            (input_col_name, output_col_name, agg_op)
            input_col_name (string): name of the column to act on
            output_col_name (string): name of the column to output to
            agg_op (string): aggregation operation to perform

    Raises:
        NotImplementedError: if an aggregation operation is not recognised
    '''
    dtype_dict = {}
    # include all the groupby columns
    for col in groupby_cols:
        dtype_dict[col] = self[col].dtype
    agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
    agg_ops = []
    for agg_info in agg_list:
        if not isinstance(agg_info, list):
            # example: ['m1', 'm2', ...]
            # default operation (sum) and default output column name (same as input)
            output_col_name = agg_info
            input_col_name = agg_info
            agg_op = 'sum'
        else:
            input_col_name = agg_info[0]
            agg_op = agg_info[1]
            if len(agg_info) == 2:
                # example: [['m1', 'sum'], ['m2', 'mean'], ...]
                # default output column name
                output_col_name = input_col_name
            else:
                # example: [['m1', 'sum', 'mnew1'], ['m1', 'mean', 'mnew2'], ...]
                # fully specified
                output_col_name = agg_info[2]
        if agg_op not in agg_ops_list:
            raise NotImplementedError(
                'Unknown Aggregation Type: ' + str(agg_op))
        # choose output column dtype based on aggregation operation and
        # input column dtype
        # TODO: check if the aggregation columns is numeric
        # NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
        # separate operation
        if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
            output_col_dtype = np.dtype(np.int64)
        elif agg_op in ('mean', 'std'):
            output_col_dtype = np.dtype(np.float64)
        else:
            output_col_dtype = self[input_col_name].dtype
        dtype_dict[output_col_name] = output_col_dtype
        # save output
        agg_ops.append((input_col_name, output_col_name, agg_op))
    # create aggregation table
    # np.bool_ instead of np.bool: the bare np.bool alias was deprecated in
    # NumPy 1.20 and removed in 1.24, which made this call raise AttributeError
    ct_agg = bcolz.ctable(
        np.zeros(expectedlen, [('tmp_col_bquery__', np.bool_)]),
        expectedlen=expectedlen,
        rootdir=rootdir)
    return ct_agg, dtype_dict, agg_ops
def where_terms(self, term_list, cache=False):
    """
    Create a boolean array that is True where `term_list` matches.

    A terms list has a [(col, operator, value), ..] construction.
    Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    :param term_list: list/set/tuple of (column, operator, value) terms;
        supported operators: ==/eq, !=/neq, in, nin/not in, >, >=, <, <=
    :param cache: if True, back the boolean array with a temporary on-disk
        rootdir (cleaned up at the end of the groupby)
    :return: boolean carray with one entry per row
    :raise ValueError: if term_list is not a list/set/tuple, or an in/nin
        value is not a non-empty list/set/tuple
    :raise KeyError: if a column is unknown or an operator is not supported
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")
    col_list = []
    op_list = []
    value_list = []
    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]
        # check values
        # NOTE(review): `unicode` makes this Python-2 only
        if filter_col not in self.cols:
            raise KeyError(unicode(filter_col) + ' not in table')
        # encode the operator as the integer id understood by the
        # ctable_ext C extension (apply_where_terms)
        if filter_operator in ['==', 'eq']:
            op_id = 1
        elif filter_operator in ['!=', 'neq']:
            op_id = 2
        elif filter_operator in ['in']:
            op_id = 3
        elif filter_operator in ['nin', 'not in']:
            op_id = 4
        elif filter_operator in ['>']:
            op_id = 5
        elif filter_operator in ['>=']:
            op_id = 6
        elif filter_operator in ['<']:
            op_id = 7
        elif filter_operator in ['<=']:
            op_id = 8
        else:
            raise KeyError(unicode(filter_operator) + ' is not an accepted operator for filtering')
        if op_id in [3, 4]:
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")
            # optimize lists of 1 value: turn 'in'/'nin' into '=='/'!='
            if len(filter_value) == 1:
                if op_id == 3:
                    op_id = 1
                else:
                    op_id = 2
                filter_value = filter_value[0]
            else:
                # sets give O(1) membership tests in the extension
                filter_value = set(filter_value)
        # prepare input for filter creation
        col_list.append(filter_col)
        op_list.append(op_id)
        value_list.append(filter_value)
    # rootdir
    if cache:
        # nb: this directory is not destroyed until the end of the groupby
        rootdir = self.create_tmp_rootdir()
    else:
        rootdir = None
    # create boolean array and fill it
    # NOTE(review): np.bool was removed in NumPy 1.24; modern NumPy needs np.bool_
    boolarr = bcolz.carray(np.ones(0, dtype=np.bool), expectedlen=self.len, rootdir=rootdir, mode='w')
    ctable_iter = self[col_list].iter(out_flavor='tuple')
    ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)
    return boolarr
def where_terms_factorization_check(self, term_list):
    """
    Check, using the per-column factorization caches, whether a where-terms
    filter can possibly match any row.

    A terms list has a [(col, operator, value), ..] construction.
    Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

    :param term_list: list/set/tuple of (column, operator, value) terms
    :return: False when the cached unique values prove the filter matches
        nothing; True otherwise (including when no cache is available)
    :raise ValueError: if term_list is not a list/set/tuple, or an in/nin
        value is not a non-empty list/set/tuple
    :raise KeyError: if a column is unknown or an operator is not supported
    """
    if type(term_list) not in [list, set, tuple]:
        raise ValueError("Only term lists are supported")
    valid = True
    for term in term_list:
        # get terms
        filter_col = term[0]
        filter_operator = term[1].lower().strip(' ')
        filter_value = term[2]
        # check values
        # NOTE(review): `unicode` makes this Python-2 only
        if filter_col not in self.cols:
            raise KeyError(unicode(filter_col) + ' not in table')
        col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
        if not os.path.exists(col_values_rootdir):
            # no factorization available for this column; we cannot prove the
            # filter blocks anything, so stop checking and report applicable
            break
        col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
        col_values = set(col_carray)
        if filter_operator in ['in', 'not in', 'nin']:
            if type(filter_value) not in [list, set, tuple]:
                raise ValueError("In selections need lists, sets or tuples")
            if len(filter_value) < 1:
                raise ValueError("A value list needs to have values")
            # optimize lists of 1 value: turn 'in'/'nin' into '=='/'!='
            if len(filter_value) == 1:
                filter_value = filter_value[0]
                if filter_operator == 'in':
                    filter_operator = '=='
                else:
                    filter_operator = '!='
            else:
                filter_value = set(filter_value)
        # bug fix: the original wrote e.g. `any(val for val in col_values if
        # val != filter_value)`, which tests the truthiness of `val` itself,
        # so falsy values (0, '', ...) were silently ignored; the predicate
        # must be the expression passed to any()
        if filter_operator in ['==', 'eq']:
            valid = filter_value in col_values
        elif filter_operator in ['!=', 'neq']:
            valid = any(val != filter_value for val in col_values)
        elif filter_operator in ['in']:
            valid = any(val in col_values for val in filter_value)
        elif filter_operator in ['nin', 'not in']:
            valid = any(val not in filter_value for val in col_values)
        elif filter_operator in ['>']:
            valid = any(val > filter_value for val in col_values)
        elif filter_operator in ['>=']:
            valid = any(val >= filter_value for val in col_values)
        elif filter_operator in ['<']:
            valid = any(val < filter_value for val in col_values)
        elif filter_operator in ['<=']:
            # bug fix: the original compared with `>=` here (copy-paste from
            # the '>=' branch), inverting the check for '<=' filters
            valid = any(val <= filter_value for val in col_values)
        else:
            raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
        # if one of the filters is blocking, we can stop
        if not valid:
            break
    return valid
|
visualfabriq/bquery | bquery/toplevel.py | open | python | def open(rootdir, mode='a'):
# ----------------------------------------------------------------------
# https://github.com/Blosc/bcolz/blob/master/bcolz/toplevel.py#L104-L132
# ----------------------------------------------------------------------
# First try with a carray
rootsfile = os.path.join(rootdir, ROOTDIRS)
if os.path.exists(rootsfile):
return bquery.ctable(rootdir=rootdir, mode=mode)
else:
return bquery.carray(rootdir=rootdir, mode=mode) | open(rootdir, mode='a')
Open a disk-based carray/ctable.
This function could be used to open bcolz objects as bquery objects to
perform queries on them.
Parameters
----------
rootdir : pathname (string)
The directory hosting the carray/ctable object.
mode : the open mode (string)
Specifies the mode in which the object is opened. The supported
values are:
* 'r' for read-only
* 'w' for emptying the previous underlying data
* 'a' for allowing read/write on top of existing data
Returns
-------
out : a carray/ctable object or IOError (if not objects are found) | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/toplevel.py#L8-L41 | null | import os
from bcolz.ctable import ROOTDIRS
import bquery
|
visualfabriq/bquery | bquery/benchmarks/bench_pos.py | ctime | python | def ctime(message=None):
"Counts the time spent in some context"
t = time.time()
yield
if message:
print message + ":\t",
print round(time.time() - t, 4), "sec" | Counts the time spent in some context | train | https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/benchmarks/bench_pos.py#L14-L20 | null | from contextlib import contextmanager
import tempfile
import os
import random
import shutil
import time
import bcolz as bz
import bquery as bq
@contextmanager
@contextmanager
def on_disk_data_cleaner(generator):
rootdir = tempfile.mkdtemp(prefix='bcolz-')
os.rmdir(rootdir) # folder should be empty
ct = bz.fromiter(generator, dtype='i4,i4', count=N, rootdir=rootdir)
ct = bq.open(rootdir)
# print ct
ct.flush()
ct = bq.open(rootdir)
yield ct
shutil.rmtree(rootdir)
def gen(N):
x = 0
for i in range(N):
if random.randint(0, 1):
x += 1
yield x, random.randint(0, 20)
if __name__ == '__main__':
N = int(1e5)
g = gen(N)
with on_disk_data_cleaner(g) as ct:
f1 = ct['f1']
barr = bz.eval("f1 == 1") # filter
with ctime('is_in_ordered_subgroups'):
result = ct.is_in_ordered_subgroups(basket_col='f0', bool_arr=barr)
|
openregister/openregister-python | openregister/entry.py | Entry.timestamp | python | def timestamp(self, timestamp):
if timestamp is None:
self._timestamp = datetime.utcnow()
elif isinstance(timestamp, datetime):
self._timestamp = timestamp
else:
self._timestamp = datetime.strptime(timestamp, fmt) | Entry timestamp as datetime. | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/entry.py#L27-L34 | null | class Entry(object):
"""An Entry, an ordered instance of an item in a register."""
fields = ['entry-number', 'item-hash', 'timestamp']
def __init__(self, entry_number=None, item_hash=None, timestamp=None):
if not (entry_number is None
or isinstance(entry_number, numbers.Integral)):
raise ValueError('entry_number')
self.entry_number = entry_number
self.item_hash = item_hash
self.timestamp = timestamp
@property
def timestamp(self):
return self._timestamp
@timestamp.setter
@property
def primitive(self):
"""Entry as Python primitive."""
primitive = {}
if self.entry_number is not None:
primitive['entry-number'] = self.entry_number
if self.item_hash is not None:
primitive['item-hash'] = self.item_hash
primitive['timestamp'] = self.timestamp.strftime(fmt)
return primitive
@primitive.setter
def primitive(self, primitive):
"""Entry from Python primitive."""
self.entry_number = primitive['entry-number']
self.item_hash = primitive['item-hash']
self.timestamp = primitive['timestamp']
|
openregister/openregister-python | openregister/entry.py | Entry.primitive | python | def primitive(self):
primitive = {}
if self.entry_number is not None:
primitive['entry-number'] = self.entry_number
if self.item_hash is not None:
primitive['item-hash'] = self.item_hash
primitive['timestamp'] = self.timestamp.strftime(fmt)
return primitive | Entry as Python primitive. | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/entry.py#L37-L47 | null | class Entry(object):
"""An Entry, an ordered instance of an item in a register."""
fields = ['entry-number', 'item-hash', 'timestamp']
def __init__(self, entry_number=None, item_hash=None, timestamp=None):
if not (entry_number is None
or isinstance(entry_number, numbers.Integral)):
raise ValueError('entry_number')
self.entry_number = entry_number
self.item_hash = item_hash
self.timestamp = timestamp
@property
def timestamp(self):
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""Entry timestamp as datetime."""
if timestamp is None:
self._timestamp = datetime.utcnow()
elif isinstance(timestamp, datetime):
self._timestamp = timestamp
else:
self._timestamp = datetime.strptime(timestamp, fmt)
@property
@primitive.setter
def primitive(self, primitive):
"""Entry from Python primitive."""
self.entry_number = primitive['entry-number']
self.item_hash = primitive['item-hash']
self.timestamp = primitive['timestamp']
|
openregister/openregister-python | openregister/entry.py | Entry.primitive | python | def primitive(self, primitive):
self.entry_number = primitive['entry-number']
self.item_hash = primitive['item-hash']
self.timestamp = primitive['timestamp'] | Entry from Python primitive. | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/entry.py#L50-L54 | null | class Entry(object):
"""An Entry, an ordered instance of an item in a register."""
fields = ['entry-number', 'item-hash', 'timestamp']
def __init__(self, entry_number=None, item_hash=None, timestamp=None):
if not (entry_number is None
or isinstance(entry_number, numbers.Integral)):
raise ValueError('entry_number')
self.entry_number = entry_number
self.item_hash = item_hash
self.timestamp = timestamp
@property
def timestamp(self):
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""Entry timestamp as datetime."""
if timestamp is None:
self._timestamp = datetime.utcnow()
elif isinstance(timestamp, datetime):
self._timestamp = timestamp
else:
self._timestamp = datetime.strptime(timestamp, fmt)
@property
def primitive(self):
"""Entry as Python primitive."""
primitive = {}
if self.entry_number is not None:
primitive['entry-number'] = self.entry_number
if self.item_hash is not None:
primitive['item-hash'] = self.item_hash
primitive['timestamp'] = self.timestamp.strftime(fmt)
return primitive
@primitive.setter
|
openregister/openregister-python | openregister/client.py | Client.config | python | def config(self, name, suffix):
"Return config variable value, defaulting to environment"
var = '%s_%s' % (name, suffix)
var = var.upper().replace('-', '_')
if var in self._config:
return self._config[var]
return os.environ[var] | Return config variable value, defaulting to environment | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/client.py#L16-L22 | null | class Client(object):
"""
Access register items from an openregister server.
"""
def __init__(self, logger=None, config={}):
self.logger = logger
self._config = config
def get(self, url, params=None):
response = requests.get(url, params=params)
if self.logger:
self.logger.info("GET: %s [%s] %s" % (
response.url, response.status_code, response.text))
return response
def item(self, register, value):
response = self.get('%s/%s/%s.json' % (
self.config(register, 'register'),
register,
value))
json = response.json()
item = Item()
item.primitive = json['entry']
return item
def index(self, index, field, value):
"Search for records matching a value in an index service"
params = {
"q": value,
# search index has '_' instead of '-' in field names ..
"q.options": "{fields:['%s']}" % (field.replace('-', '_'))
}
response = self.get(self.config(index, 'search_url'), params=params)
results = [hit['fields'] for hit in response.json()['hits']['hit']]
for result in results:
for key in result:
result[key.replace('_', '-')] = result.pop(key)
return results
|
openregister/openregister-python | openregister/client.py | Client.index | python | def index(self, index, field, value):
"Search for records matching a value in an index service"
params = {
"q": value,
# search index has '_' instead of '-' in field names ..
"q.options": "{fields:['%s']}" % (field.replace('-', '_'))
}
response = self.get(self.config(index, 'search_url'), params=params)
results = [hit['fields'] for hit in response.json()['hits']['hit']]
for result in results:
for key in result:
result[key.replace('_', '-')] = result.pop(key)
return results | Search for records matching a value in an index service | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/client.py#L41-L56 | [
"def config(self, name, suffix):\n \"Return config variable value, defaulting to environment\"\n var = '%s_%s' % (name, suffix)\n var = var.upper().replace('-', '_')\n if var in self._config:\n return self._config[var]\n return os.environ[var]\n",
"def get(self, url, params=None):\n response = requests.get(url, params=params)\n if self.logger:\n self.logger.info(\"GET: %s [%s] %s\" % (\n response.url, response.status_code, response.text))\n return response\n"
] | class Client(object):
"""
Access register items from an openregister server.
"""
def __init__(self, logger=None, config={}):
self.logger = logger
self._config = config
def config(self, name, suffix):
"Return config variable value, defaulting to environment"
var = '%s_%s' % (name, suffix)
var = var.upper().replace('-', '_')
if var in self._config:
return self._config[var]
return os.environ[var]
def get(self, url, params=None):
response = requests.get(url, params=params)
if self.logger:
self.logger.info("GET: %s [%s] %s" % (
response.url, response.status_code, response.text))
return response
def item(self, register, value):
response = self.get('%s/%s/%s.json' % (
self.config(register, 'register'),
register,
value))
json = response.json()
item = Item()
item.primitive = json['entry']
return item
|
openregister/openregister-python | openregister/item.py | Item.primitive | python | def primitive(self):
dict = {}
for key, value in self.__dict__.items():
if not key.startswith('_'):
dict[key] = copy(value)
for key in dict:
if isinstance(dict[key], (set)):
dict[key] = sorted(list(dict[key]))
return dict | Python primitive representation. | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/item.py#L49-L60 | null | class Item(object):
"""An Item, a content addressable set of attributes."""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __getitem__(self, key, default=None):
try:
return self.__dict__[key]
except KeyError:
return default
def __setitem__(self, key, value):
if not value:
if key in self.__dict__:
self.__dict__.__delitem__(key)
return None
self.__dict__[key] = value
return value
def get(self, key, default=None):
return self.__getitem__(key, default)
def set(self, key, value):
return self.__setitem__(key, value)
@property
def hash(self):
"""The git hash-object value of for the Item."""
return git_hash(self.json.encode("utf-8"))
@property
def hashkey(self):
"""The hash value as a RFC 3548 Base 32 encoded string."""
return base32_encode(self.hash)
@property
def keys(self):
return sorted(list(self.primitive.keys()))
@property
def values(self):
return (self.__dict__[key] for key in self.keys)
@property
@primitive.setter
def primitive(self, dictionary):
"""Item from Python primitive."""
self.__dict__ = {k: v for k, v in dictionary.items() if v}
|
openregister/openregister-python | openregister/item.py | Item.primitive | python | def primitive(self, dictionary):
self.__dict__ = {k: v for k, v in dictionary.items() if v} | Item from Python primitive. | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/item.py#L63-L65 | null | class Item(object):
"""An Item, a content addressable set of attributes."""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __getitem__(self, key, default=None):
try:
return self.__dict__[key]
except KeyError:
return default
def __setitem__(self, key, value):
if not value:
if key in self.__dict__:
self.__dict__.__delitem__(key)
return None
self.__dict__[key] = value
return value
def get(self, key, default=None):
return self.__getitem__(key, default)
def set(self, key, value):
return self.__setitem__(key, value)
@property
def hash(self):
"""The git hash-object value of for the Item."""
return git_hash(self.json.encode("utf-8"))
@property
def hashkey(self):
"""The hash value as a RFC 3548 Base 32 encoded string."""
return base32_encode(self.hash)
@property
def keys(self):
return sorted(list(self.primitive.keys()))
@property
def values(self):
return (self.__dict__[key] for key in self.keys)
@property
def primitive(self):
"""Python primitive representation."""
dict = {}
for key, value in self.__dict__.items():
if not key.startswith('_'):
dict[key] = copy(value)
for key in dict:
if isinstance(dict[key], (set)):
dict[key] = sorted(list(dict[key]))
return dict
@primitive.setter
|
openregister/openregister-python | openregister/record.py | Record.primitive | python | def primitive(self):
primitive = copy(self.item.primitive)
primitive.update(self.entry.primitive)
return primitive | Record as Python primitive. | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/record.py#L19-L23 | null | class Record(object):
"""
A Record, the tuple of an entry and it's item
Records are useful for representing the latest entry for a
field value.
Records are serialised as the merged entry and item
"""
def __init__(self, entry=None, item=None):
self.entry = entry
self.item = item
@property
@primitive.setter
def primitive(self, primitive):
"""Record from Python primitive."""
self.entry = Entry()
self.entry.primitive = primitive
primitive = copy(primitive)
for field in self.entry.fields:
del primitive[field]
self.item = Item()
self.item.primitive = primitive
|
openregister/openregister-python | openregister/record.py | Record.primitive | python | def primitive(self, primitive):
self.entry = Entry()
self.entry.primitive = primitive
primitive = copy(primitive)
for field in self.entry.fields:
del primitive[field]
self.item = Item()
self.item.primitive = primitive | Record from Python primitive. | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/record.py#L26-L36 | null | class Record(object):
"""
A Record, the tuple of an entry and it's item
Records are useful for representing the latest entry for a
field value.
Records are serialised as the merged entry and item
"""
def __init__(self, entry=None, item=None):
self.entry = entry
self.item = item
@property
def primitive(self):
"""Record as Python primitive."""
primitive = copy(self.item.primitive)
primitive.update(self.entry.primitive)
return primitive
@primitive.setter
|
openregister/openregister-python | openregister/representations/tsv.py | load | python | def load(self, text, fieldnames=None):
lines = text.split('\n')
fieldnames = load_line(lines[0])
values = load_line(lines[1])
self.__dict__ = dict(zip(fieldnames, values)) | Item from TSV representation. | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/representations/tsv.py#L43-L48 | [
"def load_line(line):\n return [unescape(s) for s in line.rstrip('\\n').split('\\t')]\n"
] | from ..item import Item
from ..writer import Writer
content_type = 'text/tab-separated-values; charset=utf-8'
escaped_chars = [('\t', '\\t'), ('\n', '\\n'), ('\r', '\\r'), ('', '\\')]
def BadBackslash():
pass
def escape(value):
for a, b in escaped_chars:
if a:
value = value.replace(a, b)
return value
def unescape(value):
if value[-1:] == '\\':
raise BadBackslash
for a, b in escaped_chars:
value = value.replace(b, a)
return value
def encode(value):
if isinstance(value, str):
return value
return ';'.join(value)
def decode(value):
return value.split(';')
def load_line(line):
return [unescape(s) for s in line.rstrip('\n').split('\t')]
def reader(stream, fieldnames=None):
"""Read Items from a stream containing TSV."""
if not fieldnames:
fieldnames = load_line(stream.readline())
for line in stream:
values = load_line(line)
item = Item()
item.__dict__ = dict(zip(fieldnames, values))
yield item
def dump_line(values):
return ('\t'.join(escape(encode(value)) for value in values)) + '\n'
def dump(self):
"""TSV representation."""
dict = self.primitive
if not dict:
return ''
return dump_line(self.keys) + dump_line(self.values)
class Writer(Writer):
def __init__(self, stream, fieldnames):
self.stream = stream
self.fieldnames = fieldnames
self.stream.write(dump_line(self.fieldnames))
def write(self, item):
values = [item.get(key, '') for key in self.fieldnames]
self.stream.write(dump_line(values))
Item.tsv = property(dump, load)
|
openregister/openregister-python | openregister/representations/tsv.py | reader | python | def reader(stream, fieldnames=None):
if not fieldnames:
fieldnames = load_line(stream.readline())
for line in stream:
values = load_line(line)
item = Item()
item.__dict__ = dict(zip(fieldnames, values))
yield item | Read Items from a stream containing TSV. | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/representations/tsv.py#L51-L59 | [
"def load_line(line):\n return [unescape(s) for s in line.rstrip('\\n').split('\\t')]\n"
] | from ..item import Item
from ..writer import Writer
content_type = 'text/tab-separated-values; charset=utf-8'
escaped_chars = [('\t', '\\t'), ('\n', '\\n'), ('\r', '\\r'), ('', '\\')]
def BadBackslash():
pass
def escape(value):
for a, b in escaped_chars:
if a:
value = value.replace(a, b)
return value
def unescape(value):
if value[-1:] == '\\':
raise BadBackslash
for a, b in escaped_chars:
value = value.replace(b, a)
return value
def encode(value):
if isinstance(value, str):
return value
return ';'.join(value)
def decode(value):
return value.split(';')
def load_line(line):
return [unescape(s) for s in line.rstrip('\n').split('\t')]
def load(self, text, fieldnames=None):
"""Item from TSV representation."""
lines = text.split('\n')
fieldnames = load_line(lines[0])
values = load_line(lines[1])
self.__dict__ = dict(zip(fieldnames, values))
def dump_line(values):
return ('\t'.join(escape(encode(value)) for value in values)) + '\n'
def dump(self):
"""TSV representation."""
dict = self.primitive
if not dict:
return ''
return dump_line(self.keys) + dump_line(self.values)
class Writer(Writer):
def __init__(self, stream, fieldnames):
self.stream = stream
self.fieldnames = fieldnames
self.stream.write(dump_line(self.fieldnames))
def write(self, item):
values = [item.get(key, '') for key in self.fieldnames]
self.stream.write(dump_line(values))
Item.tsv = property(dump, load)
|
openregister/openregister-python | openregister/representations/tsv.py | dump | python | def dump(self):
dict = self.primitive
if not dict:
return ''
return dump_line(self.keys) + dump_line(self.values) | TSV representation. | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/representations/tsv.py#L66-L71 | [
"def dump_line(values):\n return ('\\t'.join(escape(encode(value)) for value in values)) + '\\n'\n"
] | from ..item import Item
from ..writer import Writer
content_type = 'text/tab-separated-values; charset=utf-8'
escaped_chars = [('\t', '\\t'), ('\n', '\\n'), ('\r', '\\r'), ('', '\\')]
def BadBackslash():
pass
def escape(value):
for a, b in escaped_chars:
if a:
value = value.replace(a, b)
return value
def unescape(value):
if value[-1:] == '\\':
raise BadBackslash
for a, b in escaped_chars:
value = value.replace(b, a)
return value
def encode(value):
if isinstance(value, str):
return value
return ';'.join(value)
def decode(value):
return value.split(';')
def load_line(line):
return [unescape(s) for s in line.rstrip('\n').split('\t')]
def load(self, text, fieldnames=None):
"""Item from TSV representation."""
lines = text.split('\n')
fieldnames = load_line(lines[0])
values = load_line(lines[1])
self.__dict__ = dict(zip(fieldnames, values))
def reader(stream, fieldnames=None):
"""Read Items from a stream containing TSV."""
if not fieldnames:
fieldnames = load_line(stream.readline())
for line in stream:
values = load_line(line)
item = Item()
item.__dict__ = dict(zip(fieldnames, values))
yield item
def dump_line(values):
return ('\t'.join(escape(encode(value)) for value in values)) + '\n'
class Writer(Writer):
def __init__(self, stream, fieldnames):
self.stream = stream
self.fieldnames = fieldnames
self.stream.write(dump_line(self.fieldnames))
def write(self, item):
values = [item.get(key, '') for key in self.fieldnames]
self.stream.write(dump_line(values))
Item.tsv = property(dump, load)
|
openregister/openregister-python | openregister/representations/csv.py | load | python | def load(self, text,
lineterminator='\r\n',
quotechar='"',
delimiter=",",
escapechar=escapechar,
quoting=csv.QUOTE_MINIMAL):
f = io.StringIO(text)
if not quotechar:
quoting = csv.QUOTE_NONE
reader = csv.DictReader(
f,
delimiter=delimiter,
quotechar=quotechar,
quoting=quoting,
lineterminator=lineterminator)
if reader.fieldnames:
reader.fieldnames = [field.strip() for field in reader.fieldnames]
try:
self.primitive = next(reader)
except StopIteration:
self.primitive = {} | Item from CSV representation. | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/representations/csv.py#L14-L40 | null | import io
import csv
from ..item import Item
from ..writer import Writer
content_type = 'text/csv; charset=utf-8'
escapechar = '\\'
lineterminator = '\r\n'
quotechar = '"'
delimiter = ","
class Writer(Writer):
"""Write CSV of items."""
def __init__(self, stream, fieldnames,
delimiter=delimiter,
lineterminator=lineterminator,
quotechar=quotechar,
escapechar=escapechar,
quoting=csv.QUOTE_ALL):
if not quotechar:
quoting = csv.QUOTE_NONE
self.writer = csv.DictWriter(
stream,
fieldnames=fieldnames,
delimiter=delimiter,
lineterminator=lineterminator,
escapechar=escapechar,
quotechar=quotechar,
quoting=quoting)
self.writer.writeheader()
def write(self, item):
self.writer.writerow(item.primitive)
def dump(self, **kwargs):
"""CSV representation of a item."""
f = io.StringIO()
w = Writer(f, self.keys, **kwargs)
w.write(self)
text = f.getvalue().lstrip()
f.close()
return text
Item.csv = property(dump, load)
|
openregister/openregister-python | openregister/representations/csv.py | dump | python | def dump(self, **kwargs):
f = io.StringIO()
w = Writer(f, self.keys, **kwargs)
w.write(self)
text = f.getvalue().lstrip()
f.close()
return text | CSV representation of a item. | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/representations/csv.py#L70-L79 | [
"def write(self, item):\n self.writer.writerow(item.primitive)\n"
] | import io
import csv
from ..item import Item
from ..writer import Writer
content_type = 'text/csv; charset=utf-8'
escapechar = '\\'
lineterminator = '\r\n'
quotechar = '"'
delimiter = ","
def load(self, text,
lineterminator='\r\n',
quotechar='"',
delimiter=",",
escapechar=escapechar,
quoting=csv.QUOTE_MINIMAL):
"""Item from CSV representation."""
f = io.StringIO(text)
if not quotechar:
quoting = csv.QUOTE_NONE
reader = csv.DictReader(
f,
delimiter=delimiter,
quotechar=quotechar,
quoting=quoting,
lineterminator=lineterminator)
if reader.fieldnames:
reader.fieldnames = [field.strip() for field in reader.fieldnames]
try:
self.primitive = next(reader)
except StopIteration:
self.primitive = {}
class Writer(Writer):
"""Write CSV of items."""
def __init__(self, stream, fieldnames,
delimiter=delimiter,
lineterminator=lineterminator,
quotechar=quotechar,
escapechar=escapechar,
quoting=csv.QUOTE_ALL):
if not quotechar:
quoting = csv.QUOTE_NONE
self.writer = csv.DictWriter(
stream,
fieldnames=fieldnames,
delimiter=delimiter,
lineterminator=lineterminator,
escapechar=escapechar,
quotechar=quotechar,
quoting=quoting)
self.writer.writeheader()
def write(self, item):
self.writer.writerow(item.primitive)
Item.csv = property(dump, load)
|
openregister/openregister-python | openregister/datatypes/digest.py | git_hash | python | def git_hash(blob):
head = str("blob " + str(len(blob)) + "\0").encode("utf-8")
return sha1(head + blob).hexdigest() | Return git-hash compatible SHA-1 hexdigits for a blob of data. | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/datatypes/digest.py#L5-L8 | null | from hashlib import sha1
from base64 import b32encode
def base32_encode(hexdigest):
"""Return SHA-1 hexdigits as lower-case RFC 3548 base 32 encoding."""
return b32encode(bytes.fromhex(hexdigest)).decode('utf-8').lower()
|
openregister/openregister-python | openregister/representations/json.py | dump | python | def dump(self):
return json.dumps(
self.primitive,
sort_keys=True,
ensure_ascii=False,
separators=(',', ':')) | Item as a JSON representation. | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/representations/json.py#L17-L23 | null | from ..item import Item
from ..writer import Writer
import json
import re
START = re.compile('[ \t\n\r\[]*', re.VERBOSE | re.MULTILINE | re.DOTALL)
END = re.compile('[ \t\n\r,\]]*', re.VERBOSE | re.MULTILINE | re.DOTALL)
content_type = 'application/json'
def load(self, text):
"""Item from a JSON representation."""
self.__dict__ = json.loads(text)
def reader(stream):
"""Read Items from a stream containing a JSON array."""
string = stream.read()
decoder = json.JSONDecoder().raw_decode
index = START.match(string, 0).end()
while index < len(string):
obj, end = decoder(string, index)
item = Item()
item.primitive = obj
yield item
index = END.match(string, end).end()
class Writer(Writer):
"""Write Items to a stream as a JSON array."""
def __init__(self, stream, start="[", sep=",", eol="", end="]"):
self.stream = stream
self.sep = sep
self.eol = eol
self.end = end
self.stream.write(start)
self.sol = ""
def write(self, item):
self.stream.write(self.sol + dump(item) + self.eol)
self.sol = self.sep
def close(self):
self.stream.write(self.end)
Item.json = property(dump, load)
|
openregister/openregister-python | openregister/representations/json.py | reader | python | def reader(stream):
string = stream.read()
decoder = json.JSONDecoder().raw_decode
index = START.match(string, 0).end()
while index < len(string):
obj, end = decoder(string, index)
item = Item()
item.primitive = obj
yield item
index = END.match(string, end).end() | Read Items from a stream containing a JSON array. | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/representations/json.py#L26-L37 | null | from ..item import Item
from ..writer import Writer
import json
import re
START = re.compile('[ \t\n\r\[]*', re.VERBOSE | re.MULTILINE | re.DOTALL)
END = re.compile('[ \t\n\r,\]]*', re.VERBOSE | re.MULTILINE | re.DOTALL)
content_type = 'application/json'
def load(self, text):
"""Item from a JSON representation."""
self.__dict__ = json.loads(text)
def dump(self):
"""Item as a JSON representation."""
return json.dumps(
self.primitive,
sort_keys=True,
ensure_ascii=False,
separators=(',', ':'))
class Writer(Writer):
"""Write Items to a stream as a JSON array."""
def __init__(self, stream, start="[", sep=",", eol="", end="]"):
self.stream = stream
self.sep = sep
self.eol = eol
self.end = end
self.stream.write(start)
self.sol = ""
def write(self, item):
self.stream.write(self.sol + dump(item) + self.eol)
self.sol = self.sep
def close(self):
self.stream.write(self.end)
Item.json = property(dump, load)
|
openregister/openregister-python | openregister/representations/jsonl.py | reader | python | def reader(stream):
for line in stream:
item = Item()
item.json = line
yield item | Read Items from a stream containing lines of JSON. | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/representations/jsonl.py#L8-L13 | null | from ..item import Item
from .json import load, dump, Writer
content_type = 'application/json-l'
class Writer(Writer):
"""Write items to a JSON log stream"""
def __init__(self, stream, start="", sep="", eol="\n", end=""):
super().__init__(stream, start, sep, eol, end)
Item.jsonl = property(dump, load)
|
openregister/openregister-python | openregister/store.py | Store.meta | python | def meta(self, total, page=1, page_size=None):
if page_size is None or page_size < 0:
page_size = self.page_size
meta = {}
meta['total'] = total
meta['page_size'] = page_size
meta['pages'] = math.ceil(meta['total']/page_size)
meta['page'] = page
meta['skip'] = page_size * (page-1)
return meta | Calculate statistics for a collection
return: meta | train | https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/store.py#L12-L26 | null | class Store(object):
page_size = 10000
"""Interface for storage of Items."""
def __init__(self):
pass
def put(self, item):
"""
Store item
returns: item
"""
raise NotImplementedError
def add(self, item, timestamp=None):
"""
Add item as a new entry
returns: entry
"""
raise NotImplementedError
def item(self, item_hash):
"""
Retrieve item
returns: item
"""
raise NotImplementedError
def items(self, item_hash=None, page=1, page_size=None):
"""
Retrieve collection of items
item_hash: starting at this item
returns: item[]
"""
raise NotImplementedError
def entry(self, entry_number):
"""
Retrieve an entry
returns: entry
"""
raise NotImplementedError
def entries(self, item_hash=None, page=1, page_size=None):
"""
Retrieve collection of entries
item_hash: return only entries with this item_hash
returns: entry[]
"""
raise NotImplementedError
def record(self, value):
"""
Retrieve a record by key-field value
returns: record
"""
raise NotImplementedError
def records(self, field=None, value=None, page=1, page_size=None):
"""
Retrieve a records by field value
returns: record[]
"""
raise NotImplementedError
def register(self, register_name):
"""
Retrieve register info
returns: register
"""
raise NotImplementedError
|
ARMmbed/autoversion | src/auto_version/semver.py | get_current_semver | python | def get_current_semver(data):
# get the not-none values from data
known = {
key: data.get(alias)
for key, alias in config._forward_aliases.items()
if data.get(alias) is not None
}
# prefer the strict field, if available
potentials = [
known.pop(Constants.VERSION_STRICT_FIELD, None),
known.pop(Constants.VERSION_FIELD, None),
]
from_components = [known.get(k) for k in SemVerSigFig._fields if k in known]
if len(from_components) == 3:
potentials.append(".".join(from_components))
versions = set()
for potential in potentials:
if not potential:
continue
match = re_semver.match(potential)
if match:
parts = match.groupdict()
parts.pop("tail")
versions.add(SemVer(**parts))
if len(versions) > 1:
raise ValueError("conflicting versions within project: %s" % versions)
if not versions:
_LOG.debug("key pairs found: \n%r", known)
raise ValueError("could not find existing semver")
return versions.pop() | Given a dictionary of all version data available, determine the current version | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/semver.py#L15-L50 | null | """Functions for manipulating SemVer objects (Major.Minor.Patch)"""
import logging
import re
from auto_version.config import AutoVersionConfig as config
from auto_version.config import Constants
from auto_version.definitions import SemVer
from auto_version.definitions import SemVerSigFig
_LOG = logging.getLogger(__file__)
re_semver = re.compile(r"""(?P<major>\d+).(?P<minor>\d+).(?P<patch>\d+)(?P<tail>.*)""")
def make_new_semver(current_semver, all_triggers, **overrides):
"""Defines how to increment semver based on which significant figure is triggered"""
new_semver = {}
bumped = False
for sig_fig in SemVerSigFig: # iterate sig figs in order of significance
value = getattr(current_semver, sig_fig)
override = overrides.get(sig_fig)
if override is not None:
new_semver[sig_fig] = override
if int(override) > int(value):
bumped = True
elif bumped:
new_semver[sig_fig] = "0"
elif sig_fig in all_triggers:
new_semver[sig_fig] = str(int(value) + 1)
bumped = True
else:
new_semver[sig_fig] = value
return SemVer(**new_semver)
|
ARMmbed/autoversion | src/auto_version/semver.py | make_new_semver | python | def make_new_semver(current_semver, all_triggers, **overrides):
new_semver = {}
bumped = False
for sig_fig in SemVerSigFig: # iterate sig figs in order of significance
value = getattr(current_semver, sig_fig)
override = overrides.get(sig_fig)
if override is not None:
new_semver[sig_fig] = override
if int(override) > int(value):
bumped = True
elif bumped:
new_semver[sig_fig] = "0"
elif sig_fig in all_triggers:
new_semver[sig_fig] = str(int(value) + 1)
bumped = True
else:
new_semver[sig_fig] = value
return SemVer(**new_semver) | Defines how to increment semver based on which significant figure is triggered | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/semver.py#L53-L71 | null | """Functions for manipulating SemVer objects (Major.Minor.Patch)"""
import logging
import re
from auto_version.config import AutoVersionConfig as config
from auto_version.config import Constants
from auto_version.definitions import SemVer
from auto_version.definitions import SemVerSigFig
_LOG = logging.getLogger(__file__)
re_semver = re.compile(r"""(?P<major>\d+).(?P<minor>\d+).(?P<patch>\d+)(?P<tail>.*)""")
def get_current_semver(data):
"""Given a dictionary of all version data available, determine the current version"""
# get the not-none values from data
known = {
key: data.get(alias)
for key, alias in config._forward_aliases.items()
if data.get(alias) is not None
}
# prefer the strict field, if available
potentials = [
known.pop(Constants.VERSION_STRICT_FIELD, None),
known.pop(Constants.VERSION_FIELD, None),
]
from_components = [known.get(k) for k in SemVerSigFig._fields if k in known]
if len(from_components) == 3:
potentials.append(".".join(from_components))
versions = set()
for potential in potentials:
if not potential:
continue
match = re_semver.match(potential)
if match:
parts = match.groupdict()
parts.pop("tail")
versions.add(SemVer(**parts))
if len(versions) > 1:
raise ValueError("conflicting versions within project: %s" % versions)
if not versions:
_LOG.debug("key pairs found: \n%r", known)
raise ValueError("could not find existing semver")
return versions.pop()
|
ARMmbed/autoversion | scripts/tag_and_release.py | main | python | def main():
# see:
# https://packaging.python.org/tutorials/distributing-packages/#uploading-your-project-to-pypi
twine_repo = os.getenv('TWINE_REPOSITORY_URL') or os.getenv('TWINE_REPOSITORY')
print('tagging and releasing to %s as %s' % (
twine_repo,
os.getenv('TWINE_USERNAME')
))
if not twine_repo:
raise Exception('cannot release to implicit pypi repository. explicitly set the repo/url.')
version = subprocess.check_output(['pipenv', 'run', 'python', 'setup.py', '--version']).decode().strip()
if 'dev' in version:
raise Exception('cannot release unversioned project: %s' % version)
print('Preparing environment')
subprocess.check_call(['git', 'config', '--global', 'user.name', 'monty-bot'])
subprocess.check_call(['git', 'config', '--global', 'user.email', 'monty-bot@arm.com'])
url = subprocess.check_output(['git', 'remote', 'get-url', 'origin'])
branch_name = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
new_url = git_url_ssh_to_https(url.decode())
subprocess.check_call(['git', 'remote', 'set-url', 'origin', new_url])
branch_spec = 'origin/%s' % branch_name.decode('utf-8').strip()
subprocess.check_call(['git', 'branch', '--set-upstream-to', branch_spec])
print('Committing the changelog & version')
subprocess.check_call(['git', 'add', 'src/auto_version/__version__.py'])
subprocess.check_call(['git', 'add', 'CHANGELOG.md'])
subprocess.check_call(['git', 'add', 'docs/news/*'])
message = ':checkered_flag: :newspaper: Releasing version %s\n[skip ci]' % version
subprocess.check_call(['git', 'commit', '-m', message])
print('Tagging the project')
subprocess.check_call(['git', 'tag', '-a', version, '-m', 'Release %s' % version])
print('Pushing changes back to GitHub')
subprocess.check_call(['git', 'push', '--follow-tags'])
print('Marking this commit as latest')
subprocess.check_call(['git', 'tag', '-f', 'latest'])
subprocess.check_call(['git', 'push', '-f', '--tags'])
print('Generating a release package')
subprocess.check_call(
['pipenv', 'run', 'python', 'setup.py', 'clean', '--all', 'bdist_wheel', '--dist-dir', 'release-dist'])
print('Uploading to PyPI')
subprocess.check_call(['pipenv', 'run', 'python', '-m', 'twine', 'upload', 'release-dist/*'])
print('Done.') | Tags the current repository
and commits changes to news files | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/scripts/tag_and_release.py#L39-L86 | null | # --------------------------------------------------------------------------
# Autoversion
# (C) COPYRIGHT 2018 Arm Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""Part of the CI process"""
import os
import subprocess
def git_url_ssh_to_https(url):
"""Convert a git url
url will look like
https://github.com/ARMmbed/autoversion.git
or
git@github.com:ARMmbed/autoversion.git
we want:
https://${GITHUB_TOKEN}@github.com/ARMmbed/autoversion.git
"""
path = url.split('github.com', 1)[1][1:].strip()
new = 'https://{GITHUB_TOKEN}@github.com/%s' % path
print('rewriting git url to: %s' % new)
return new.format(GITHUB_TOKEN=os.getenv('GITHUB_TOKEN'))
if __name__ == '__main__':
main()
|
ARMmbed/autoversion | src/auto_version/config.py | get_or_create_config | python | def get_or_create_config(path, config):
if os.path.isfile(path):
with open(path) as fh:
_LOG.debug("loading config from %s", os.path.abspath(path))
config._inflate(toml.load(fh))
else:
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
with open(path, "w") as fh:
toml.dump(config._deflate(), fh) | Using TOML format, load config from given path, or write out example based on defaults | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/config.py#L81-L93 | [
"def _deflate(cls):\n \"\"\"Prepare for serialisation - returns a dictionary\"\"\"\n data = {k: v for k, v in vars(cls).items() if not k.startswith(\"_\")}\n return {Constants.CONFIG_KEY: data}\n",
"def _inflate(cls, data):\n \"\"\"Update config by deserialising input dictionary\"\"\"\n for k, v in data[Constants.CONFIG_KEY].items():\n setattr(cls, k, v)\n return cls._deflate()\n"
] | """Configuration system for the auto_version tool"""
import logging
import os
import toml
from auto_version.definitions import SemVerSigFig
_LOG = logging.getLogger(__name__)
class Constants(object):
"""Internal - reused strings"""
# regex groups
KEY_GROUP = "KEY"
VALUE_GROUP = "VALUE"
# internal field keys
VERSION_FIELD = "VERSION_KEY"
VERSION_STRICT_FIELD = "VERSION_KEY_STRICT"
VERSION_LOCK_FIELD = "VERSION_LOCK"
RELEASE_FIELD = "RELEASE_FIELD"
COMMIT_COUNT_FIELD = "COMMIT_COUNT"
COMMIT_FIELD = "COMMIT"
# as used in toml file
CONFIG_KEY = "AutoVersionConfig"
class AutoVersionConfig(object):
"""Configuration - can be overridden using a toml config file"""
CONFIG_NAME = "DEFAULT"
RELEASED_VALUE = True
VERSION_LOCK_VALUE = True
VERSION_UNLOCK_VALUE = False
key_aliases = {
"__version__": Constants.VERSION_FIELD,
"__strict_version__": Constants.VERSION_STRICT_FIELD,
"PRODUCTION": Constants.RELEASE_FIELD,
"MAJOR": SemVerSigFig.major,
"MINOR": SemVerSigFig.minor,
"PATCH": SemVerSigFig.patch,
"VERSION_LOCK": Constants.VERSION_LOCK_FIELD,
Constants.COMMIT_COUNT_FIELD: Constants.COMMIT_COUNT_FIELD,
Constants.COMMIT_FIELD: Constants.COMMIT_FIELD,
}
_forward_aliases = {} # autopopulated later - reverse mapping of the above
targets = [
os.path.join("src", "_version.py"),
]
regexers = {
".json": r"""^\s*[\"]?(?P<KEY>[\w:]+)[\"]?\s*:[\t ]*[\"']?(?P<VALUE>((\\\")?[^\r\n\t\f\v\",](\\\")?)+)[\"']?,?""", # noqa
".py": r"""^\s*['\"]?(?P<KEY>\w+)['\"]?\s*[=:]\s*['\"]?(?P<VALUE>[^\r\n\t\f\v\"']+)['\"]?,?""", # noqa
".cs": r"""^(\w*\s+)*(?P<KEY>\w+)\s?[=:]\s*['\"]?(?P<VALUE>[^\r\n\t\f\v\"']+)['\"].*""", # noqa
".csproj": r"""^<(?P<KEY>\w+)>(?P<VALUE>\S+)<\/\w+>""", # noqa
".properties": r"""^\s*(?P<KEY>\w+)\s*=[\t ]*(?P<VALUE>[^\r\n\t\f\v\"']+)?""", # noqa
}
trigger_patterns = {
SemVerSigFig.major: os.path.join("docs", "news", "*.major"),
SemVerSigFig.minor: os.path.join("docs", "news", "*.feature"),
SemVerSigFig.patch: os.path.join("docs", "news", "*.bugfix"),
}
DEVMODE_TEMPLATE = "{version}.dev{count}"
@classmethod
def _deflate(cls):
"""Prepare for serialisation - returns a dictionary"""
data = {k: v for k, v in vars(cls).items() if not k.startswith("_")}
return {Constants.CONFIG_KEY: data}
@classmethod
def _inflate(cls, data):
"""Update config by deserialising input dictionary"""
for k, v in data[Constants.CONFIG_KEY].items():
setattr(cls, k, v)
return cls._deflate()
|
ARMmbed/autoversion | src/auto_version/config.py | AutoVersionConfig._deflate | python | def _deflate(cls):
data = {k: v for k, v in vars(cls).items() if not k.startswith("_")}
return {Constants.CONFIG_KEY: data} | Prepare for serialisation - returns a dictionary | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/config.py#L68-L71 | null | class AutoVersionConfig(object):
"""Configuration - can be overridden using a toml config file"""
CONFIG_NAME = "DEFAULT"
RELEASED_VALUE = True
VERSION_LOCK_VALUE = True
VERSION_UNLOCK_VALUE = False
key_aliases = {
"__version__": Constants.VERSION_FIELD,
"__strict_version__": Constants.VERSION_STRICT_FIELD,
"PRODUCTION": Constants.RELEASE_FIELD,
"MAJOR": SemVerSigFig.major,
"MINOR": SemVerSigFig.minor,
"PATCH": SemVerSigFig.patch,
"VERSION_LOCK": Constants.VERSION_LOCK_FIELD,
Constants.COMMIT_COUNT_FIELD: Constants.COMMIT_COUNT_FIELD,
Constants.COMMIT_FIELD: Constants.COMMIT_FIELD,
}
_forward_aliases = {} # autopopulated later - reverse mapping of the above
targets = [
os.path.join("src", "_version.py"),
]
regexers = {
".json": r"""^\s*[\"]?(?P<KEY>[\w:]+)[\"]?\s*:[\t ]*[\"']?(?P<VALUE>((\\\")?[^\r\n\t\f\v\",](\\\")?)+)[\"']?,?""", # noqa
".py": r"""^\s*['\"]?(?P<KEY>\w+)['\"]?\s*[=:]\s*['\"]?(?P<VALUE>[^\r\n\t\f\v\"']+)['\"]?,?""", # noqa
".cs": r"""^(\w*\s+)*(?P<KEY>\w+)\s?[=:]\s*['\"]?(?P<VALUE>[^\r\n\t\f\v\"']+)['\"].*""", # noqa
".csproj": r"""^<(?P<KEY>\w+)>(?P<VALUE>\S+)<\/\w+>""", # noqa
".properties": r"""^\s*(?P<KEY>\w+)\s*=[\t ]*(?P<VALUE>[^\r\n\t\f\v\"']+)?""", # noqa
}
trigger_patterns = {
SemVerSigFig.major: os.path.join("docs", "news", "*.major"),
SemVerSigFig.minor: os.path.join("docs", "news", "*.feature"),
SemVerSigFig.patch: os.path.join("docs", "news", "*.bugfix"),
}
DEVMODE_TEMPLATE = "{version}.dev{count}"
@classmethod
@classmethod
def _inflate(cls, data):
"""Update config by deserialising input dictionary"""
for k, v in data[Constants.CONFIG_KEY].items():
setattr(cls, k, v)
return cls._deflate()
|
ARMmbed/autoversion | src/auto_version/config.py | AutoVersionConfig._inflate | python | def _inflate(cls, data):
for k, v in data[Constants.CONFIG_KEY].items():
setattr(cls, k, v)
return cls._deflate() | Update config by deserialising input dictionary | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/config.py#L74-L78 | null | class AutoVersionConfig(object):
"""Configuration - can be overridden using a toml config file"""
CONFIG_NAME = "DEFAULT"
RELEASED_VALUE = True
VERSION_LOCK_VALUE = True
VERSION_UNLOCK_VALUE = False
key_aliases = {
"__version__": Constants.VERSION_FIELD,
"__strict_version__": Constants.VERSION_STRICT_FIELD,
"PRODUCTION": Constants.RELEASE_FIELD,
"MAJOR": SemVerSigFig.major,
"MINOR": SemVerSigFig.minor,
"PATCH": SemVerSigFig.patch,
"VERSION_LOCK": Constants.VERSION_LOCK_FIELD,
Constants.COMMIT_COUNT_FIELD: Constants.COMMIT_COUNT_FIELD,
Constants.COMMIT_FIELD: Constants.COMMIT_FIELD,
}
_forward_aliases = {} # autopopulated later - reverse mapping of the above
targets = [
os.path.join("src", "_version.py"),
]
regexers = {
".json": r"""^\s*[\"]?(?P<KEY>[\w:]+)[\"]?\s*:[\t ]*[\"']?(?P<VALUE>((\\\")?[^\r\n\t\f\v\",](\\\")?)+)[\"']?,?""", # noqa
".py": r"""^\s*['\"]?(?P<KEY>\w+)['\"]?\s*[=:]\s*['\"]?(?P<VALUE>[^\r\n\t\f\v\"']+)['\"]?,?""", # noqa
".cs": r"""^(\w*\s+)*(?P<KEY>\w+)\s?[=:]\s*['\"]?(?P<VALUE>[^\r\n\t\f\v\"']+)['\"].*""", # noqa
".csproj": r"""^<(?P<KEY>\w+)>(?P<VALUE>\S+)<\/\w+>""", # noqa
".properties": r"""^\s*(?P<KEY>\w+)\s*=[\t ]*(?P<VALUE>[^\r\n\t\f\v\"']+)?""", # noqa
}
trigger_patterns = {
SemVerSigFig.major: os.path.join("docs", "news", "*.major"),
SemVerSigFig.minor: os.path.join("docs", "news", "*.feature"),
SemVerSigFig.patch: os.path.join("docs", "news", "*.bugfix"),
}
DEVMODE_TEMPLATE = "{version}.dev{count}"
@classmethod
def _deflate(cls):
"""Prepare for serialisation - returns a dictionary"""
data = {k: v for k, v in vars(cls).items() if not k.startswith("_")}
return {Constants.CONFIG_KEY: data}
@classmethod
|
ARMmbed/autoversion | src/auto_version/cli.py | get_cli | python | def get_cli():
parser = argparse.ArgumentParser(
prog="auto_version",
description="auto version v%s: a tool to control version numbers" % __version__,
)
parser.add_argument(
"--target",
action="append",
default=[],
help="Files containing version info. "
"Assumes unique variable names between files. (default: %s)."
% (config.targets,),
)
parser.add_argument(
"--bump",
choices=SemVerSigFig,
help="Bumps the specified part of SemVer string. "
"Use this locally to correctly modify the version file.",
)
parser.add_argument(
"--news",
"--file-triggers",
action="store_true",
dest="file_triggers",
help="Detects need to bump based on presence of files (as specified in config).",
)
parser.add_argument(
"--set",
help="Set the SemVer string. Use this locally to set the project version explicitly.",
)
parser.add_argument(
"--set-patch-count",
action="store_true",
help="Sets the patch number to the commit count.",
)
parser.add_argument(
"--lock",
action="store_true",
help="Locks the SemVer string. "
"Lock will remain for another call to autoversion before being cleared.",
)
parser.add_argument(
"--release",
action="store_true",
default=False,
help="Marks as a release build, which flags the build as released.",
)
parser.add_argument(
"--version",
action="store_true",
default=False,
help="Prints the version of auto_version itself (self-version).",
)
parser.add_argument("--config", help="Configuration file path.")
parser.add_argument(
"-v",
"--verbosity",
action="count",
default=0,
help="increase output verbosity. " "can be specified multiple times",
)
return parser.parse_known_args() | Load cli options | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/cli.py#L9-L71 | null | """Load cli options"""
import argparse
from auto_version.config import AutoVersionConfig as config
from auto_version.definitions import SemVerSigFig
from auto_version import __version__
|
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | replace_lines | python | def replace_lines(regexer, handler, lines):
result = []
for line in lines:
content = line.strip()
replaced = regexer.sub(handler, content)
result.append(line.replace(content, replaced, 1))
return result | Uses replacement handler to perform replacements on lines of text
First we strip off all whitespace
We run the replacement on a clean 'content' string
Finally we replace the original content with the replaced version
This ensures that we retain the correct whitespace from the original line | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L37-L50 | null | """Generates DVCS version information
see also:
https://git-scm.com/docs/git-shortlog
https://www.python.org/dev/peps/pep-0440/
https://pypi.python.org/pypi/semver
https://pypi.python.org/pypi/bumpversion
https://github.com/warner/python-versioneer
https://pypi.org/project/autoversion/
https://pypi.org/project/auto-version/
https://github.com/javrasya/version-manager
"""
import ast
import glob
import logging
import os
import pprint
import re
import shlex
import subprocess
import warnings
from auto_version.cli import get_cli
from auto_version.config import AutoVersionConfig as config
from auto_version.config import Constants
from auto_version.config import get_or_create_config
import auto_version.definitions
from auto_version.replacement_handler import ReplacementHandler
from auto_version import semver
from auto_version import __version__
_LOG = logging.getLogger(__file__)
def write_targets(targets, **params):
"""Writes version info into version file"""
handler = ReplacementHandler(**params)
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
lines = fh.readlines()
lines = replace_lines(regexer, handler, lines)
with open(target, "w") as fh:
fh.writelines(lines)
if handler.missing:
raise Exception(
"Failed to complete all expected replacements: %r" % handler.missing
)
def regexer_for_targets(targets):
"""Pairs up target files with their correct regex"""
for target in targets:
path, file_ext = os.path.splitext(target)
regexer = config.regexers[file_ext]
yield target, regexer
def extract_keypairs(lines, regexer):
"""Given some lines of text, extract key-value pairs from them"""
updates = {}
for line in lines:
# for consistency we must match the replacer and strip whitespace / newlines
match = regexer.match(line.strip())
if not match:
continue
k_v = match.groupdict()
updates[k_v[Constants.KEY_GROUP]] = k_v[Constants.VALUE_GROUP]
return updates
def read_targets(targets):
"""Reads generic key-value pairs from input files"""
results = {}
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
results.update(extract_keypairs(fh.readlines(), regexer))
return results
def detect_file_triggers(trigger_patterns):
"""The existence of files matching configured globs will trigger a version bump"""
triggers = set()
for trigger, pattern in trigger_patterns.items():
matches = glob.glob(pattern)
if matches:
_LOG.debug("trigger: %s bump from %r\n\t%s", trigger, pattern, matches)
triggers.add(trigger)
else:
_LOG.debug("trigger: no match on %r", pattern)
return triggers
def get_all_triggers(bump, file_triggers):
"""Aggregated set of significant figures to bump"""
triggers = set()
if file_triggers:
triggers = triggers.union(detect_file_triggers(config.trigger_patterns))
if bump:
_LOG.debug("trigger: %s bump requested", bump)
triggers.add(bump)
return triggers
def get_lock_behaviour(triggers, all_data, lock):
"""Binary state lock protects from version increments if set"""
updates = {}
lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD)
# if we are explicitly setting or locking the version, then set the lock field True anyway
if lock:
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE
elif (
triggers
and lock_key
and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE)
):
triggers.clear()
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE
return updates
def get_final_version_string(release_mode, semver, commit_count=0):
"""Generates update dictionary entries for the version string"""
version_string = ".".join(semver)
maybe_dev_version_string = version_string
updates = {}
if release_mode:
# in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True
updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
else:
# in dev mode, we have a dev marker e.g. `1.2.3.dev678`
maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(
version=version_string, count=commit_count
)
# make available all components of the semantic version including the full string
updates[Constants.VERSION_FIELD] = maybe_dev_version_string
updates[Constants.VERSION_STRICT_FIELD] = version_string
return updates
def get_dvcs_info():
"""Gets current repository info from git"""
cmd = "git rev-list --count HEAD"
commit_count = str(
int(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
)
cmd = "git rev-parse HEAD"
commit = str(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
return {Constants.COMMIT_FIELD: commit, Constants.COMMIT_COUNT_FIELD: commit_count}
def main(
set_to=None,
set_patch_count=None,
release=None,
bump=None,
lock=None,
file_triggers=None,
config_path=None,
**extra_updates
):
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
:param set_to: explicitly set semver to this version string
:param set_patch_count: sets the patch number to the commit count
:param release: marks with a production flag
just sets a single flag as per config
:param bump: string indicating major/minor/patch
more significant bumps will zero the less significant ones
:param lock: locks the version string for the next call to autoversion
lock only removed if a version bump would have occurred
:param file_triggers: whether to enable bumping based on file triggers
bumping occurs once if any file(s) exist that match the config
:param config_path: path to config file
:param extra_updates:
:return:
"""
updates = {}
if config_path:
get_or_create_config(config_path, config)
for k, v in config.regexers.items():
config.regexers[k] = re.compile(v)
# a forward-mapping of the configured aliases
# giving <our config param> : <the configured value>
# if a value occurs multiple times, we take the last set value
for k, v in config.key_aliases.items():
config._forward_aliases[v] = k
all_data = read_targets(config.targets)
current_semver = semver.get_current_semver(all_data)
triggers = get_all_triggers(bump, file_triggers)
updates.update(get_lock_behaviour(triggers, all_data, lock))
updates.update(get_dvcs_info())
if set_to:
_LOG.debug("setting version directly: %s", set_to)
new_semver = auto_version.definitions.SemVer(*set_to.split("."))
if not lock:
warnings.warn(
"After setting version manually, does it need locking for a CI flow?",
UserWarning,
)
elif set_patch_count:
_LOG.debug(
"auto-incrementing version, using commit count for patch: %s",
updates[Constants.COMMIT_COUNT_FIELD],
)
new_semver = semver.make_new_semver(
current_semver, triggers, patch=updates[Constants.COMMIT_COUNT_FIELD]
)
else:
_LOG.debug("auto-incrementing version")
new_semver = semver.make_new_semver(current_semver, triggers)
updates.update(
get_final_version_string(
release_mode=release,
semver=new_semver,
commit_count=updates.get(Constants.COMMIT_COUNT_FIELD, 0),
)
)
for part in semver.SemVerSigFig:
updates[part] = getattr(new_semver, part)
# only rewrite a field that the user has specified in the configuration
native_updates = {
native: updates[key]
for native, key in config.key_aliases.items()
if key in updates
}
# finally, add in commandline overrides
native_updates.update(extra_updates)
write_targets(config.targets, **native_updates)
return current_semver, new_semver, native_updates
def parse_other_args(others):
# pull extra kwargs from commandline, e.g. TESTRUNNER_VERSION
updates = {}
for kwargs in others:
try:
k, v = kwargs.split("=")
_LOG.debug("parsing extra replacement from command line: %r = %r", k, v)
updates[k.strip()] = ast.literal_eval(v.strip())
except Exception:
_LOG.exception(
"Failed to unpack additional parameter pair: %r (ignored)", kwargs
)
return updates
def main_from_cli():
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
"""
args, others = get_cli()
if args.version:
print(__version__)
exit(0)
log_level = logging.WARNING - 10 * args.verbosity
logging.basicConfig(level=log_level, format="%(module)s %(levelname)8s %(message)s")
command_line_updates = parse_other_args(others)
old, new, updates = main(
set_to=args.set,
set_patch_count=args.set_patch_count,
lock=args.lock,
release=args.release,
bump=args.bump,
file_triggers=args.file_triggers,
config_path=args.config,
**command_line_updates
)
_LOG.info("previously: %s", old)
_LOG.info("currently: %s", new)
_LOG.debug("updates:\n%s", pprint.pformat(updates))
print(
updates.get(config._forward_aliases.get(Constants.VERSION_FIELD))
or updates.get(config._forward_aliases.get(Constants.VERSION_STRICT_FIELD))
)
__name__ == "__main__" and main_from_cli()
|
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | write_targets | python | def write_targets(targets, **params):
handler = ReplacementHandler(**params)
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
lines = fh.readlines()
lines = replace_lines(regexer, handler, lines)
with open(target, "w") as fh:
fh.writelines(lines)
if handler.missing:
raise Exception(
"Failed to complete all expected replacements: %r" % handler.missing
) | Writes version info into version file | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L53-L65 | [
"def replace_lines(regexer, handler, lines):\n \"\"\"Uses replacement handler to perform replacements on lines of text\n\n First we strip off all whitespace\n We run the replacement on a clean 'content' string\n Finally we replace the original content with the replaced version\n This ensures that we retain the correct whitespace from the original line\n \"\"\"\n result = []\n for line in lines:\n content = line.strip()\n replaced = regexer.sub(handler, content)\n result.append(line.replace(content, replaced, 1))\n return result\n",
"def regexer_for_targets(targets):\n \"\"\"Pairs up target files with their correct regex\"\"\"\n for target in targets:\n path, file_ext = os.path.splitext(target)\n regexer = config.regexers[file_ext]\n yield target, regexer\n"
] | """Generates DVCS version information
see also:
https://git-scm.com/docs/git-shortlog
https://www.python.org/dev/peps/pep-0440/
https://pypi.python.org/pypi/semver
https://pypi.python.org/pypi/bumpversion
https://github.com/warner/python-versioneer
https://pypi.org/project/autoversion/
https://pypi.org/project/auto-version/
https://github.com/javrasya/version-manager
"""
import ast
import glob
import logging
import os
import pprint
import re
import shlex
import subprocess
import warnings
from auto_version.cli import get_cli
from auto_version.config import AutoVersionConfig as config
from auto_version.config import Constants
from auto_version.config import get_or_create_config
import auto_version.definitions
from auto_version.replacement_handler import ReplacementHandler
from auto_version import semver
from auto_version import __version__
_LOG = logging.getLogger(__file__)
def replace_lines(regexer, handler, lines):
"""Uses replacement handler to perform replacements on lines of text
First we strip off all whitespace
We run the replacement on a clean 'content' string
Finally we replace the original content with the replaced version
This ensures that we retain the correct whitespace from the original line
"""
result = []
for line in lines:
content = line.strip()
replaced = regexer.sub(handler, content)
result.append(line.replace(content, replaced, 1))
return result
def regexer_for_targets(targets):
"""Pairs up target files with their correct regex"""
for target in targets:
path, file_ext = os.path.splitext(target)
regexer = config.regexers[file_ext]
yield target, regexer
def extract_keypairs(lines, regexer):
"""Given some lines of text, extract key-value pairs from them"""
updates = {}
for line in lines:
# for consistency we must match the replacer and strip whitespace / newlines
match = regexer.match(line.strip())
if not match:
continue
k_v = match.groupdict()
updates[k_v[Constants.KEY_GROUP]] = k_v[Constants.VALUE_GROUP]
return updates
def read_targets(targets):
"""Reads generic key-value pairs from input files"""
results = {}
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
results.update(extract_keypairs(fh.readlines(), regexer))
return results
def detect_file_triggers(trigger_patterns):
"""The existence of files matching configured globs will trigger a version bump"""
triggers = set()
for trigger, pattern in trigger_patterns.items():
matches = glob.glob(pattern)
if matches:
_LOG.debug("trigger: %s bump from %r\n\t%s", trigger, pattern, matches)
triggers.add(trigger)
else:
_LOG.debug("trigger: no match on %r", pattern)
return triggers
def get_all_triggers(bump, file_triggers):
"""Aggregated set of significant figures to bump"""
triggers = set()
if file_triggers:
triggers = triggers.union(detect_file_triggers(config.trigger_patterns))
if bump:
_LOG.debug("trigger: %s bump requested", bump)
triggers.add(bump)
return triggers
def get_lock_behaviour(triggers, all_data, lock):
"""Binary state lock protects from version increments if set"""
updates = {}
lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD)
# if we are explicitly setting or locking the version, then set the lock field True anyway
if lock:
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE
elif (
triggers
and lock_key
and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE)
):
triggers.clear()
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE
return updates
def get_final_version_string(release_mode, semver, commit_count=0):
"""Generates update dictionary entries for the version string"""
version_string = ".".join(semver)
maybe_dev_version_string = version_string
updates = {}
if release_mode:
# in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True
updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
else:
# in dev mode, we have a dev marker e.g. `1.2.3.dev678`
maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(
version=version_string, count=commit_count
)
# make available all components of the semantic version including the full string
updates[Constants.VERSION_FIELD] = maybe_dev_version_string
updates[Constants.VERSION_STRICT_FIELD] = version_string
return updates
def get_dvcs_info():
"""Gets current repository info from git"""
cmd = "git rev-list --count HEAD"
commit_count = str(
int(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
)
cmd = "git rev-parse HEAD"
commit = str(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
return {Constants.COMMIT_FIELD: commit, Constants.COMMIT_COUNT_FIELD: commit_count}
def main(
set_to=None,
set_patch_count=None,
release=None,
bump=None,
lock=None,
file_triggers=None,
config_path=None,
**extra_updates
):
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
:param set_to: explicitly set semver to this version string
:param set_patch_count: sets the patch number to the commit count
:param release: marks with a production flag
just sets a single flag as per config
:param bump: string indicating major/minor/patch
more significant bumps will zero the less significant ones
:param lock: locks the version string for the next call to autoversion
lock only removed if a version bump would have occurred
:param file_triggers: whether to enable bumping based on file triggers
bumping occurs once if any file(s) exist that match the config
:param config_path: path to config file
:param extra_updates:
:return:
"""
updates = {}
if config_path:
get_or_create_config(config_path, config)
for k, v in config.regexers.items():
config.regexers[k] = re.compile(v)
# a forward-mapping of the configured aliases
# giving <our config param> : <the configured value>
# if a value occurs multiple times, we take the last set value
for k, v in config.key_aliases.items():
config._forward_aliases[v] = k
all_data = read_targets(config.targets)
current_semver = semver.get_current_semver(all_data)
triggers = get_all_triggers(bump, file_triggers)
updates.update(get_lock_behaviour(triggers, all_data, lock))
updates.update(get_dvcs_info())
if set_to:
_LOG.debug("setting version directly: %s", set_to)
new_semver = auto_version.definitions.SemVer(*set_to.split("."))
if not lock:
warnings.warn(
"After setting version manually, does it need locking for a CI flow?",
UserWarning,
)
elif set_patch_count:
_LOG.debug(
"auto-incrementing version, using commit count for patch: %s",
updates[Constants.COMMIT_COUNT_FIELD],
)
new_semver = semver.make_new_semver(
current_semver, triggers, patch=updates[Constants.COMMIT_COUNT_FIELD]
)
else:
_LOG.debug("auto-incrementing version")
new_semver = semver.make_new_semver(current_semver, triggers)
updates.update(
get_final_version_string(
release_mode=release,
semver=new_semver,
commit_count=updates.get(Constants.COMMIT_COUNT_FIELD, 0),
)
)
for part in semver.SemVerSigFig:
updates[part] = getattr(new_semver, part)
# only rewrite a field that the user has specified in the configuration
native_updates = {
native: updates[key]
for native, key in config.key_aliases.items()
if key in updates
}
# finally, add in commandline overrides
native_updates.update(extra_updates)
write_targets(config.targets, **native_updates)
return current_semver, new_semver, native_updates
def parse_other_args(others):
# pull extra kwargs from commandline, e.g. TESTRUNNER_VERSION
updates = {}
for kwargs in others:
try:
k, v = kwargs.split("=")
_LOG.debug("parsing extra replacement from command line: %r = %r", k, v)
updates[k.strip()] = ast.literal_eval(v.strip())
except Exception:
_LOG.exception(
"Failed to unpack additional parameter pair: %r (ignored)", kwargs
)
return updates
def main_from_cli():
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
"""
args, others = get_cli()
if args.version:
print(__version__)
exit(0)
log_level = logging.WARNING - 10 * args.verbosity
logging.basicConfig(level=log_level, format="%(module)s %(levelname)8s %(message)s")
command_line_updates = parse_other_args(others)
old, new, updates = main(
set_to=args.set,
set_patch_count=args.set_patch_count,
lock=args.lock,
release=args.release,
bump=args.bump,
file_triggers=args.file_triggers,
config_path=args.config,
**command_line_updates
)
_LOG.info("previously: %s", old)
_LOG.info("currently: %s", new)
_LOG.debug("updates:\n%s", pprint.pformat(updates))
print(
updates.get(config._forward_aliases.get(Constants.VERSION_FIELD))
or updates.get(config._forward_aliases.get(Constants.VERSION_STRICT_FIELD))
)
__name__ == "__main__" and main_from_cli()
|
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | regexer_for_targets | python | def regexer_for_targets(targets):
for target in targets:
path, file_ext = os.path.splitext(target)
regexer = config.regexers[file_ext]
yield target, regexer | Pairs up target files with their correct regex | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L68-L73 | null | """Generates DVCS version information
see also:
https://git-scm.com/docs/git-shortlog
https://www.python.org/dev/peps/pep-0440/
https://pypi.python.org/pypi/semver
https://pypi.python.org/pypi/bumpversion
https://github.com/warner/python-versioneer
https://pypi.org/project/autoversion/
https://pypi.org/project/auto-version/
https://github.com/javrasya/version-manager
"""
import ast
import glob
import logging
import os
import pprint
import re
import shlex
import subprocess
import warnings
from auto_version.cli import get_cli
from auto_version.config import AutoVersionConfig as config
from auto_version.config import Constants
from auto_version.config import get_or_create_config
import auto_version.definitions
from auto_version.replacement_handler import ReplacementHandler
from auto_version import semver
from auto_version import __version__
_LOG = logging.getLogger(__file__)
def replace_lines(regexer, handler, lines):
    """Apply the replacement handler to each line of text.

    Each line is stripped of surrounding whitespace before the regex
    substitution runs, then the replaced text is spliced back into the
    original line so leading/trailing whitespace is preserved.

    :param regexer: compiled regex whose matches are fed to ``handler``
    :param handler: substitution callable (or string) passed to ``regexer.sub``
    :param lines: iterable of raw text lines
    :return: list of lines with substitutions applied
    """

    def _rewrite(raw):
        stripped = raw.strip()
        substituted = regexer.sub(handler, stripped)
        # splice the substituted text back in, keeping original whitespace
        return raw.replace(stripped, substituted, 1)

    return [_rewrite(line) for line in lines]
def write_targets(targets, **params):
    """Writes version info into version file

    Rewrites every target file in place, substituting the values in
    ``params`` wherever the file's configured regex matches.

    :param targets: iterable of file paths to rewrite
    :param params: native key -> new value replacements to apply
    :raises Exception: if any requested replacement was never performed
        (NOTE(review): files rewritten before this point are not rolled
        back, so a failure can leave targets partially updated)
    """
    # the handler tracks which params it has used; leftovers mean a miss
    handler = ReplacementHandler(**params)
    for target, regexer in regexer_for_targets(targets):
        with open(target) as fh:
            lines = fh.readlines()
        lines = replace_lines(regexer, handler, lines)
        # write back in place over the original file
        with open(target, "w") as fh:
            fh.writelines(lines)
    if handler.missing:
        raise Exception(
            "Failed to complete all expected replacements: %r" % handler.missing
        )
def extract_keypairs(lines, regexer):
    """Extract key-value pairs from lines of text.

    Lines are stripped before matching so behaviour mirrors the
    replacement side, which also operates on stripped content.

    :param lines: iterable of raw text lines
    :param regexer: compiled regex with key/value named groups
    :return: dict of extracted key -> value strings
    """
    found = {}
    for raw in lines:
        # strip whitespace/newlines for consistency with the replacer
        matched = regexer.match(raw.strip())
        if matched:
            groups = matched.groupdict()
            found[groups[Constants.KEY_GROUP]] = groups[Constants.VALUE_GROUP]
    return found
def read_targets(targets):
    """Read generic key-value pairs from the input files.

    :param targets: iterable of file paths to scan
    :return: merged dict of all key -> value pairs found
        (later files win on duplicate keys)
    """
    collected = {}
    for path, regexer in regexer_for_targets(targets):
        with open(path) as handle:
            collected.update(extract_keypairs(handle.readlines(), regexer))
    return collected
def detect_file_triggers(trigger_patterns):
    """Detect version bumps requested via the filesystem.

    The existence of files matching the configured globs will trigger
    a bump of the corresponding significance.

    :param trigger_patterns: mapping of trigger name -> glob pattern
    :return: set of trigger names whose pattern matched at least one file
    """
    triggers = set()
    for trigger, pattern in trigger_patterns.items():
        matches = glob.glob(pattern)
        if not matches:
            _LOG.debug("trigger: no match on %r", pattern)
            continue
        _LOG.debug("trigger: %s bump from %r\n\t%s", trigger, pattern, matches)
        triggers.add(trigger)
    return triggers
def get_all_triggers(bump, file_triggers):
    """Aggregate the set of significant figures to bump.

    :param bump: explicit bump requested on the command line, if any
    :param file_triggers: whether to scan for file-based triggers
    :return: set of trigger names (e.g. major/minor/patch)
    """
    triggers = set()
    if file_triggers:
        triggers |= detect_file_triggers(config.trigger_patterns)
    if bump:
        _LOG.debug("trigger: %s bump requested", bump)
        triggers.add(bump)
    return triggers
def get_lock_behaviour(triggers, all_data, lock):
    """Binary state lock that protects from version increments if set.

    :param triggers: mutable set of pending bump triggers
        (cleared in place when a lock swallows the bump)
    :param all_data: key-value pairs previously read from the targets
    :param lock: whether an explicit lock was requested
    :return: dict of lock-field updates to apply
    """
    updates = {}
    lock_alias = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD)
    if lock:
        # explicitly setting or locking the version always sets the flag
        updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE
        return updates
    previously_locked = bool(lock_alias) and str(all_data.get(lock_alias)) == str(
        config.VERSION_LOCK_VALUE
    )
    if triggers and previously_locked:
        # a bump would have happened: swallow it and release the lock instead
        triggers.clear()
        updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE
    return updates
def get_final_version_string(release_mode, semver, commit_count=0):
    """Generate update dictionary entries for the version string.

    :param release_mode: when truthy, mark the build as released
    :param semver: iterable of semver components as strings
    :param commit_count: dev-build counter appended in non-release mode
    :return: dict with version fields (and the release flag in release mode)
    """
    strict_version = ".".join(semver)
    updates = {}
    if release_mode:
        # production: plain `1.2.3` plus a released flag, e.g. PRODUCTION=True
        updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
        full_version = strict_version
    else:
        # dev mode: append a dev marker, e.g. `1.2.3.dev678`
        full_version = config.DEVMODE_TEMPLATE.format(
            version=strict_version, count=commit_count
        )
    # expose both the (possibly dev-suffixed) version and the strict form
    updates[Constants.VERSION_FIELD] = full_version
    updates[Constants.VERSION_STRICT_FIELD] = strict_version
    return updates
def get_dvcs_info():
    """Get current repository info from git.

    :return: dict with the HEAD commit hash and the commit count
        (both as strings)
    """

    def _git(command):
        # run a git command and return its stripped stdout
        return subprocess.check_output(shlex.split(command)).decode("utf8").strip()

    # round-trip through int() validates the count is numeric
    commit_count = str(int(_git("git rev-list --count HEAD")))
    commit = str(_git("git rev-parse HEAD"))
    return {Constants.COMMIT_FIELD: commit, Constants.COMMIT_COUNT_FIELD: commit_count}
def main(
    set_to=None,
    set_patch_count=None,
    release=None,
    bump=None,
    lock=None,
    file_triggers=None,
    config_path=None,
    **extra_updates
):
    """Main workflow.

    Load config from cli and file
    Detect "bump triggers" - things that cause a version increment
    Find the current version
    Create a new version
    Write out new version and any other requested variables

    :param set_to: explicitly set semver to this version string
    :param set_patch_count: sets the patch number to the commit count
    :param release: marks with a production flag
        just sets a single flag as per config
    :param bump: string indicating major/minor/patch
        more significant bumps will zero the less significant ones
    :param lock: locks the version string for the next call to autoversion
        lock only removed if a version bump would have occurred
    :param file_triggers: whether to enable bumping based on file triggers
        bumping occurs once if any file(s) exist that match the config
    :param config_path: path to config file
    :param extra_updates: additional native key -> value replacements
        written verbatim into the targets
    :return: tuple of (previous semver, new semver, native updates written)
    """
    updates = {}
    if config_path:
        get_or_create_config(config_path, config)
    # compile the per-file-extension regexes declared (as strings) in config
    for k, v in config.regexers.items():
        config.regexers[k] = re.compile(v)
    # a forward-mapping of the configured aliases
    # giving <our config param> : <the configured value>
    # if a value occurs multiple times, we take the last set value
    for k, v in config.key_aliases.items():
        config._forward_aliases[v] = k
    all_data = read_targets(config.targets)
    current_semver = semver.get_current_semver(all_data)
    triggers = get_all_triggers(bump, file_triggers)
    # lock handling may clear `triggers` in place before the bump below
    updates.update(get_lock_behaviour(triggers, all_data, lock))
    updates.update(get_dvcs_info())
    if set_to:
        _LOG.debug("setting version directly: %s", set_to)
        new_semver = auto_version.definitions.SemVer(*set_to.split("."))
        if not lock:
            warnings.warn(
                "After setting version manually, does it need locking for a CI flow?",
                UserWarning,
            )
    elif set_patch_count:
        _LOG.debug(
            "auto-incrementing version, using commit count for patch: %s",
            updates[Constants.COMMIT_COUNT_FIELD],
        )
        new_semver = semver.make_new_semver(
            current_semver, triggers, patch=updates[Constants.COMMIT_COUNT_FIELD]
        )
    else:
        _LOG.debug("auto-incrementing version")
        new_semver = semver.make_new_semver(current_semver, triggers)
    updates.update(
        get_final_version_string(
            release_mode=release,
            semver=new_semver,
            commit_count=updates.get(Constants.COMMIT_COUNT_FIELD, 0),
        )
    )
    # expose each individual semver component as its own update field
    for part in semver.SemVerSigFig:
        updates[part] = getattr(new_semver, part)
    # only rewrite a field that the user has specified in the configuration
    native_updates = {
        native: updates[key]
        for native, key in config.key_aliases.items()
        if key in updates
    }
    # finally, add in commandline overrides
    native_updates.update(extra_updates)
    write_targets(config.targets, **native_updates)
    return current_semver, new_semver, native_updates
def parse_other_args(others):
    """Pull extra replacement key=value pairs from the commandline.

    e.g. ``TESTRUNNER_VERSION="1.2.3"`` yields ``{"TESTRUNNER_VERSION": "1.2.3"}``.
    Values are parsed with :func:`ast.literal_eval`; pairs that fail to
    split or parse are logged and skipped.

    :param others: iterable of raw ``key=value`` strings
    :return: dict of parsed key -> value replacements
    """
    updates = {}
    for pair in others:
        try:
            # split on the first '=' only, so quoted values may themselves
            # contain '=' (a plain split("=") raised and dropped such pairs)
            k, v = pair.split("=", 1)
            _LOG.debug("parsing extra replacement from command line: %r = %r", k, v)
            updates[k.strip()] = ast.literal_eval(v.strip())
        except Exception:
            _LOG.exception(
                "Failed to unpack additional parameter pair: %r (ignored)", pair
            )
    return updates
def main_from_cli():
    """Main workflow.

    Load config from cli and file
    Detect "bump triggers" - things that cause a version increment
    Find the current version
    Create a new version
    Write out new version and any other requested variables
    """
    args, others = get_cli()
    if args.version:
        # --version: print this tool's own version and stop
        print(__version__)
        exit(0)
    # each -v on the command line steps the log level down from WARNING
    log_level = logging.WARNING - 10 * args.verbosity
    logging.basicConfig(level=log_level, format="%(module)s %(levelname)8s %(message)s")
    command_line_updates = parse_other_args(others)
    old, new, updates = main(
        set_to=args.set,
        set_patch_count=args.set_patch_count,
        lock=args.lock,
        release=args.release,
        bump=args.bump,
        file_triggers=args.file_triggers,
        config_path=args.config,
        **command_line_updates
    )
    _LOG.info("previously: %s", old)
    _LOG.info("currently: %s", new)
    _LOG.debug("updates:\n%s", pprint.pformat(updates))
    # emit the new version string (dev-suffixed form preferred) for shell capture
    print(
        updates.get(config._forward_aliases.get(Constants.VERSION_FIELD))
        or updates.get(config._forward_aliases.get(Constants.VERSION_STRICT_FIELD))
    )
__name__ == "__main__" and main_from_cli()
|
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | extract_keypairs | python | def extract_keypairs(lines, regexer):
updates = {}
for line in lines:
# for consistency we must match the replacer and strip whitespace / newlines
match = regexer.match(line.strip())
if not match:
continue
k_v = match.groupdict()
updates[k_v[Constants.KEY_GROUP]] = k_v[Constants.VALUE_GROUP]
return updates | Given some lines of text, extract key-value pairs from them | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L76-L86 | null | """Generates DVCS version information
see also:
https://git-scm.com/docs/git-shortlog
https://www.python.org/dev/peps/pep-0440/
https://pypi.python.org/pypi/semver
https://pypi.python.org/pypi/bumpversion
https://github.com/warner/python-versioneer
https://pypi.org/project/autoversion/
https://pypi.org/project/auto-version/
https://github.com/javrasya/version-manager
"""
import ast
import glob
import logging
import os
import pprint
import re
import shlex
import subprocess
import warnings
from auto_version.cli import get_cli
from auto_version.config import AutoVersionConfig as config
from auto_version.config import Constants
from auto_version.config import get_or_create_config
import auto_version.definitions
from auto_version.replacement_handler import ReplacementHandler
from auto_version import semver
from auto_version import __version__
_LOG = logging.getLogger(__file__)
def replace_lines(regexer, handler, lines):
"""Uses replacement handler to perform replacements on lines of text
First we strip off all whitespace
We run the replacement on a clean 'content' string
Finally we replace the original content with the replaced version
This ensures that we retain the correct whitespace from the original line
"""
result = []
for line in lines:
content = line.strip()
replaced = regexer.sub(handler, content)
result.append(line.replace(content, replaced, 1))
return result
def write_targets(targets, **params):
"""Writes version info into version file"""
handler = ReplacementHandler(**params)
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
lines = fh.readlines()
lines = replace_lines(regexer, handler, lines)
with open(target, "w") as fh:
fh.writelines(lines)
if handler.missing:
raise Exception(
"Failed to complete all expected replacements: %r" % handler.missing
)
def regexer_for_targets(targets):
    """Pair each target file with the regex configured for its extension.

    :param targets: iterable of file paths
    :return: generator of (target path, compiled regex) pairs
    """
    for target in targets:
        # the file extension selects which configured regex applies
        extension = os.path.splitext(target)[1]
        yield target, config.regexers[extension]
def read_targets(targets):
"""Reads generic key-value pairs from input files"""
results = {}
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
results.update(extract_keypairs(fh.readlines(), regexer))
return results
def detect_file_triggers(trigger_patterns):
"""The existence of files matching configured globs will trigger a version bump"""
triggers = set()
for trigger, pattern in trigger_patterns.items():
matches = glob.glob(pattern)
if matches:
_LOG.debug("trigger: %s bump from %r\n\t%s", trigger, pattern, matches)
triggers.add(trigger)
else:
_LOG.debug("trigger: no match on %r", pattern)
return triggers
def get_all_triggers(bump, file_triggers):
"""Aggregated set of significant figures to bump"""
triggers = set()
if file_triggers:
triggers = triggers.union(detect_file_triggers(config.trigger_patterns))
if bump:
_LOG.debug("trigger: %s bump requested", bump)
triggers.add(bump)
return triggers
def get_lock_behaviour(triggers, all_data, lock):
"""Binary state lock protects from version increments if set"""
updates = {}
lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD)
# if we are explicitly setting or locking the version, then set the lock field True anyway
if lock:
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE
elif (
triggers
and lock_key
and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE)
):
triggers.clear()
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE
return updates
def get_final_version_string(release_mode, semver, commit_count=0):
"""Generates update dictionary entries for the version string"""
version_string = ".".join(semver)
maybe_dev_version_string = version_string
updates = {}
if release_mode:
# in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True
updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
else:
# in dev mode, we have a dev marker e.g. `1.2.3.dev678`
maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(
version=version_string, count=commit_count
)
# make available all components of the semantic version including the full string
updates[Constants.VERSION_FIELD] = maybe_dev_version_string
updates[Constants.VERSION_STRICT_FIELD] = version_string
return updates
def get_dvcs_info():
"""Gets current repository info from git"""
cmd = "git rev-list --count HEAD"
commit_count = str(
int(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
)
cmd = "git rev-parse HEAD"
commit = str(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
return {Constants.COMMIT_FIELD: commit, Constants.COMMIT_COUNT_FIELD: commit_count}
def main(
set_to=None,
set_patch_count=None,
release=None,
bump=None,
lock=None,
file_triggers=None,
config_path=None,
**extra_updates
):
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
:param set_to: explicitly set semver to this version string
:param set_patch_count: sets the patch number to the commit count
:param release: marks with a production flag
just sets a single flag as per config
:param bump: string indicating major/minor/patch
more significant bumps will zero the less significant ones
:param lock: locks the version string for the next call to autoversion
lock only removed if a version bump would have occurred
:param file_triggers: whether to enable bumping based on file triggers
bumping occurs once if any file(s) exist that match the config
:param config_path: path to config file
:param extra_updates:
:return:
"""
updates = {}
if config_path:
get_or_create_config(config_path, config)
for k, v in config.regexers.items():
config.regexers[k] = re.compile(v)
# a forward-mapping of the configured aliases
# giving <our config param> : <the configured value>
# if a value occurs multiple times, we take the last set value
for k, v in config.key_aliases.items():
config._forward_aliases[v] = k
all_data = read_targets(config.targets)
current_semver = semver.get_current_semver(all_data)
triggers = get_all_triggers(bump, file_triggers)
updates.update(get_lock_behaviour(triggers, all_data, lock))
updates.update(get_dvcs_info())
if set_to:
_LOG.debug("setting version directly: %s", set_to)
new_semver = auto_version.definitions.SemVer(*set_to.split("."))
if not lock:
warnings.warn(
"After setting version manually, does it need locking for a CI flow?",
UserWarning,
)
elif set_patch_count:
_LOG.debug(
"auto-incrementing version, using commit count for patch: %s",
updates[Constants.COMMIT_COUNT_FIELD],
)
new_semver = semver.make_new_semver(
current_semver, triggers, patch=updates[Constants.COMMIT_COUNT_FIELD]
)
else:
_LOG.debug("auto-incrementing version")
new_semver = semver.make_new_semver(current_semver, triggers)
updates.update(
get_final_version_string(
release_mode=release,
semver=new_semver,
commit_count=updates.get(Constants.COMMIT_COUNT_FIELD, 0),
)
)
for part in semver.SemVerSigFig:
updates[part] = getattr(new_semver, part)
# only rewrite a field that the user has specified in the configuration
native_updates = {
native: updates[key]
for native, key in config.key_aliases.items()
if key in updates
}
# finally, add in commandline overrides
native_updates.update(extra_updates)
write_targets(config.targets, **native_updates)
return current_semver, new_semver, native_updates
def parse_other_args(others):
# pull extra kwargs from commandline, e.g. TESTRUNNER_VERSION
updates = {}
for kwargs in others:
try:
k, v = kwargs.split("=")
_LOG.debug("parsing extra replacement from command line: %r = %r", k, v)
updates[k.strip()] = ast.literal_eval(v.strip())
except Exception:
_LOG.exception(
"Failed to unpack additional parameter pair: %r (ignored)", kwargs
)
return updates
def main_from_cli():
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
"""
args, others = get_cli()
if args.version:
print(__version__)
exit(0)
log_level = logging.WARNING - 10 * args.verbosity
logging.basicConfig(level=log_level, format="%(module)s %(levelname)8s %(message)s")
command_line_updates = parse_other_args(others)
old, new, updates = main(
set_to=args.set,
set_patch_count=args.set_patch_count,
lock=args.lock,
release=args.release,
bump=args.bump,
file_triggers=args.file_triggers,
config_path=args.config,
**command_line_updates
)
_LOG.info("previously: %s", old)
_LOG.info("currently: %s", new)
_LOG.debug("updates:\n%s", pprint.pformat(updates))
print(
updates.get(config._forward_aliases.get(Constants.VERSION_FIELD))
or updates.get(config._forward_aliases.get(Constants.VERSION_STRICT_FIELD))
)
__name__ == "__main__" and main_from_cli()
|
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | read_targets | python | def read_targets(targets):
results = {}
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
results.update(extract_keypairs(fh.readlines(), regexer))
return results | Reads generic key-value pairs from input files | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L89-L95 | [
"def regexer_for_targets(targets):\n \"\"\"Pairs up target files with their correct regex\"\"\"\n for target in targets:\n path, file_ext = os.path.splitext(target)\n regexer = config.regexers[file_ext]\n yield target, regexer\n",
"def extract_keypairs(lines, regexer):\n \"\"\"Given some lines of text, extract key-value pairs from them\"\"\"\n updates = {}\n for line in lines:\n # for consistency we must match the replacer and strip whitespace / newlines\n match = regexer.match(line.strip())\n if not match:\n continue\n k_v = match.groupdict()\n updates[k_v[Constants.KEY_GROUP]] = k_v[Constants.VALUE_GROUP]\n return updates\n"
] | """Generates DVCS version information
see also:
https://git-scm.com/docs/git-shortlog
https://www.python.org/dev/peps/pep-0440/
https://pypi.python.org/pypi/semver
https://pypi.python.org/pypi/bumpversion
https://github.com/warner/python-versioneer
https://pypi.org/project/autoversion/
https://pypi.org/project/auto-version/
https://github.com/javrasya/version-manager
"""
import ast
import glob
import logging
import os
import pprint
import re
import shlex
import subprocess
import warnings
from auto_version.cli import get_cli
from auto_version.config import AutoVersionConfig as config
from auto_version.config import Constants
from auto_version.config import get_or_create_config
import auto_version.definitions
from auto_version.replacement_handler import ReplacementHandler
from auto_version import semver
from auto_version import __version__
_LOG = logging.getLogger(__file__)
def replace_lines(regexer, handler, lines):
    """Apply *handler* substitutions to each line while preserving whitespace.

    Each line is stripped before matching so the regex works on clean
    content; the substituted content is then spliced back into the original
    line, retaining the line's leading/trailing whitespace and newline.
    """
    def _rewrite(original):
        stripped = original.strip()
        return original.replace(stripped, regexer.sub(handler, stripped), 1)

    return [_rewrite(line) for line in lines]
def write_targets(targets, **params):
    """Rewrite each target file in place, substituting configured key-value pairs.

    Raises an Exception if any requested replacement never matched anywhere.
    """
    replacer = ReplacementHandler(**params)
    for path, pattern in regexer_for_targets(targets):
        with open(path) as source:
            original = source.readlines()
        rewritten = replace_lines(pattern, replacer, original)
        with open(path, "w") as sink:
            sink.writelines(rewritten)
    if replacer.missing:
        raise Exception(
            "Failed to complete all expected replacements: %r" % replacer.missing
        )
def regexer_for_targets(targets):
    """Yield ``(target, regexer)`` pairs, matching each file to the regex
    configured for its file extension.

    Raises:
        KeyError: if a target's extension has no configured regexer.
    """
    for target in targets:
        # Only the extension selects the regexer; the path root is irrelevant
        # (the original bound it to an unused local).
        _, file_ext = os.path.splitext(target)
        yield target, config.regexers[file_ext]
def extract_keypairs(lines, regexer):
    """Scan *lines* with *regexer* and collect the key -> value matches found."""
    found = {}
    for raw in lines:
        # Strip whitespace/newlines so matching mirrors the replacement pass.
        hit = regexer.match(raw.strip())
        if hit:
            groups = hit.groupdict()
            found[groups[Constants.KEY_GROUP]] = groups[Constants.VALUE_GROUP]
    return found
def detect_file_triggers(trigger_patterns):
    """Return the set of triggers whose configured file glob matches something.

    The mere existence of files matching a pattern is taken as a request to
    bump the corresponding significant figure.
    """
    detected = set()
    for name, glob_pattern in trigger_patterns.items():
        hits = glob.glob(glob_pattern)
        if not hits:
            _LOG.debug("trigger: no match on %r", glob_pattern)
            continue
        _LOG.debug("trigger: %s bump from %r\n\t%s", name, glob_pattern, hits)
        detected.add(name)
    return detected
def get_all_triggers(bump, file_triggers):
    """Aggregate the significant figures to bump from every trigger source."""
    triggers = set()
    if file_triggers:
        # File-based triggers come from the configured glob patterns.
        triggers |= detect_file_triggers(config.trigger_patterns)
    if bump:
        _LOG.debug("trigger: %s bump requested", bump)
        triggers.add(bump)
    return triggers
def get_lock_behaviour(triggers, all_data, lock):
    """Binary state lock protects from version increments if set.

    :param triggers: set of pending bump triggers; CLEARED IN PLACE when an
        engaged lock suppresses the bump
    :param all_data: key-value pairs previously read from the target files
    :param lock: if truthy, (re)engage the lock for the next run
    :return: dict of fields to write back (at most the lock field)
    """
    updates = {}
    # The lock field may be aliased in config; resolve the user's native key.
    lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD)
    # if we are explicitly setting or locking the version, then set the lock field True anyway
    if lock:
        updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE
    elif (
        triggers
        and lock_key
        and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE)
    ):
        # str() on both sides: values read from files arrive as text, so the
        # comparison with the configured lock value must be textual too.
        triggers.clear()
        updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE
    return updates
def get_final_version_string(release_mode, semver, commit_count=0):
    """Generates update dictionary entries for the version string.

    NOTE(review): the ``semver`` parameter shadows the imported ``semver``
    module inside this function; renaming it would break keyword callers.

    :param release_mode: truthy for a production release (no dev suffix)
    :param semver: iterable of version components; assumed to already be
        strings (``join`` would raise otherwise) — confirm at call sites
    :param commit_count: count interpolated into the dev-mode suffix
    :return: dict of version-string fields to write back
    """
    version_string = ".".join(semver)
    maybe_dev_version_string = version_string
    updates = {}
    if release_mode:
        # in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True
        updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
    else:
        # in dev mode, we have a dev marker e.g. `1.2.3.dev678`
        maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(
            version=version_string, count=commit_count
        )
    # make available all components of the semantic version including the full string
    updates[Constants.VERSION_FIELD] = maybe_dev_version_string
    updates[Constants.VERSION_STRICT_FIELD] = version_string
    return updates
def get_dvcs_info():
    """Collect the current commit hash and total commit count from git."""
    def _git(command):
        # Run a git command and return its stripped stdout as text.
        return subprocess.check_output(shlex.split(command)).decode("utf8").strip()

    # The int() round-trip validates that the count is numeric before re-stringifying.
    commit_count = str(int(_git("git rev-list --count HEAD")))
    commit = str(_git("git rev-parse HEAD"))
    return {Constants.COMMIT_FIELD: commit, Constants.COMMIT_COUNT_FIELD: commit_count}
def main(
    set_to=None,
    set_patch_count=None,
    release=None,
    bump=None,
    lock=None,
    file_triggers=None,
    config_path=None,
    **extra_updates
):
    """Main workflow.

    Load config from cli and file
    Detect "bump triggers" - things that cause a version increment
    Find the current version
    Create a new version
    Write out new version and any other requested variables

    :param set_to: explicitly set semver to this version string
    :param set_patch_count: sets the patch number to the commit count
    :param release: marks with a production flag
        just sets a single flag as per config
    :param bump: string indicating major/minor/patch
        more significant bumps will zero the less significant ones
    :param lock: locks the version string for the next call to autoversion
        lock only removed if a version bump would have occurred
    :param file_triggers: whether to enable bumping based on file triggers
        bumping occurs once if any file(s) exist that match the config
    :param config_path: path to config file
    :param extra_updates: additional key-value replacements written verbatim
    :return: tuple of (previous semver, new semver, dict of written updates)
    """
    updates = {}
    # Phase 1: load configuration (mutates the module-level `config` object).
    if config_path:
        get_or_create_config(config_path, config)
    # Compile the per-extension regex strings in place for the later passes.
    for k, v in config.regexers.items():
        config.regexers[k] = re.compile(v)
    # a forward-mapping of the configured aliases
    # giving <our config param> : <the configured value>
    # if a value occurs multiple times, we take the last set value
    for k, v in config.key_aliases.items():
        config._forward_aliases[v] = k
    # Phase 2: read current state from the target files and the repository.
    all_data = read_targets(config.targets)
    current_semver = semver.get_current_semver(all_data)
    triggers = get_all_triggers(bump, file_triggers)
    # NOTE: get_lock_behaviour may clear `triggers` in place when a lock is engaged.
    updates.update(get_lock_behaviour(triggers, all_data, lock))
    updates.update(get_dvcs_info())
    # Phase 3: compute the new semantic version.
    if set_to:
        _LOG.debug("setting version directly: %s", set_to)
        new_semver = auto_version.definitions.SemVer(*set_to.split("."))
        if not lock:
            warnings.warn(
                "After setting version manually, does it need locking for a CI flow?",
                UserWarning,
            )
    elif set_patch_count:
        _LOG.debug(
            "auto-incrementing version, using commit count for patch: %s",
            updates[Constants.COMMIT_COUNT_FIELD],
        )
        new_semver = semver.make_new_semver(
            current_semver, triggers, patch=updates[Constants.COMMIT_COUNT_FIELD]
        )
    else:
        _LOG.debug("auto-incrementing version")
        new_semver = semver.make_new_semver(current_semver, triggers)
    updates.update(
        get_final_version_string(
            release_mode=release,
            semver=new_semver,
            commit_count=updates.get(Constants.COMMIT_COUNT_FIELD, 0),
        )
    )
    # Expose each semver component (e.g. major/minor/patch) as its own update key.
    for part in semver.SemVerSigFig:
        updates[part] = getattr(new_semver, part)
    # only rewrite a field that the user has specified in the configuration
    native_updates = {
        native: updates[key]
        for native, key in config.key_aliases.items()
        if key in updates
    }
    # finally, add in commandline overrides
    native_updates.update(extra_updates)
    # Phase 4: write everything back into the configured target files.
    write_targets(config.targets, **native_updates)
    return current_semver, new_semver, native_updates
def parse_other_args(others):
    """Parse extra ``KEY=value`` replacement pairs from the command line.

    Values are interpreted with :func:`ast.literal_eval`; pairs that fail to
    parse are logged and skipped rather than aborting the run.

    :param others: iterable of raw ``KEY=value`` strings (e.g. TESTRUNNER_VERSION="1.0")
    :return: dict mapping stripped keys to their evaluated values
    """
    updates = {}
    for pair in others:
        try:
            # Split on the first "=" only, so quoted values may themselves
            # contain "=" (the original unbounded split rejected such pairs).
            k, v = pair.split("=", 1)
            _LOG.debug("parsing extra replacement from command line: %r = %r", k, v)
            updates[k.strip()] = ast.literal_eval(v.strip())
        except Exception:
            # Deliberate best-effort: a malformed pair is reported and ignored.
            _LOG.exception(
                "Failed to unpack additional parameter pair: %r (ignored)", pair
            )
    return updates
def main_from_cli():
    """Command-line entry point.

    Parses CLI arguments, configures logging, runs :func:`main`, and prints
    the resulting version string (the full, possibly dev-suffixed form if
    configured, otherwise the strict form).
    """
    args, others = get_cli()
    if args.version:
        # `--version` short-circuits: print this tool's own version and exit.
        print(__version__)
        exit(0)
    # Each -v on the command line lowers the threshold by one logging band.
    log_level = logging.WARNING - 10 * args.verbosity
    logging.basicConfig(level=log_level, format="%(module)s %(levelname)8s %(message)s")
    command_line_updates = parse_other_args(others)
    old, new, updates = main(
        set_to=args.set,
        set_patch_count=args.set_patch_count,
        lock=args.lock,
        release=args.release,
        bump=args.bump,
        file_triggers=args.file_triggers,
        config_path=args.config,
        **command_line_updates
    )
    _LOG.info("previously: %s", old)
    _LOG.info("currently: %s", new)
    _LOG.debug("updates:\n%s", pprint.pformat(updates))
    # Print whichever version field was written, under the user's aliased key.
    print(
        updates.get(config._forward_aliases.get(Constants.VERSION_FIELD))
        or updates.get(config._forward_aliases.get(Constants.VERSION_STRICT_FIELD))
    )
__name__ == "__main__" and main_from_cli()
|
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | detect_file_triggers | python | def detect_file_triggers(trigger_patterns):
triggers = set()
for trigger, pattern in trigger_patterns.items():
matches = glob.glob(pattern)
if matches:
_LOG.debug("trigger: %s bump from %r\n\t%s", trigger, pattern, matches)
triggers.add(trigger)
else:
_LOG.debug("trigger: no match on %r", pattern)
return triggers | The existence of files matching configured globs will trigger a version bump | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L98-L108 | null | """Generates DVCS version information
see also:
https://git-scm.com/docs/git-shortlog
https://www.python.org/dev/peps/pep-0440/
https://pypi.python.org/pypi/semver
https://pypi.python.org/pypi/bumpversion
https://github.com/warner/python-versioneer
https://pypi.org/project/autoversion/
https://pypi.org/project/auto-version/
https://github.com/javrasya/version-manager
"""
import ast
import glob
import logging
import os
import pprint
import re
import shlex
import subprocess
import warnings
from auto_version.cli import get_cli
from auto_version.config import AutoVersionConfig as config
from auto_version.config import Constants
from auto_version.config import get_or_create_config
import auto_version.definitions
from auto_version.replacement_handler import ReplacementHandler
from auto_version import semver
from auto_version import __version__
_LOG = logging.getLogger(__file__)
def replace_lines(regexer, handler, lines):
"""Uses replacement handler to perform replacements on lines of text
First we strip off all whitespace
We run the replacement on a clean 'content' string
Finally we replace the original content with the replaced version
This ensures that we retain the correct whitespace from the original line
"""
result = []
for line in lines:
content = line.strip()
replaced = regexer.sub(handler, content)
result.append(line.replace(content, replaced, 1))
return result
def write_targets(targets, **params):
"""Writes version info into version file"""
handler = ReplacementHandler(**params)
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
lines = fh.readlines()
lines = replace_lines(regexer, handler, lines)
with open(target, "w") as fh:
fh.writelines(lines)
if handler.missing:
raise Exception(
"Failed to complete all expected replacements: %r" % handler.missing
)
def regexer_for_targets(targets):
"""Pairs up target files with their correct regex"""
for target in targets:
path, file_ext = os.path.splitext(target)
regexer = config.regexers[file_ext]
yield target, regexer
def extract_keypairs(lines, regexer):
"""Given some lines of text, extract key-value pairs from them"""
updates = {}
for line in lines:
# for consistency we must match the replacer and strip whitespace / newlines
match = regexer.match(line.strip())
if not match:
continue
k_v = match.groupdict()
updates[k_v[Constants.KEY_GROUP]] = k_v[Constants.VALUE_GROUP]
return updates
def read_targets(targets):
    """Collect generic key-value pairs from every configured input file."""
    collected = {}
    for path, pattern in regexer_for_targets(targets):
        with open(path) as handle:
            collected.update(extract_keypairs(handle.readlines(), pattern))
    return collected
def get_all_triggers(bump, file_triggers):
"""Aggregated set of significant figures to bump"""
triggers = set()
if file_triggers:
triggers = triggers.union(detect_file_triggers(config.trigger_patterns))
if bump:
_LOG.debug("trigger: %s bump requested", bump)
triggers.add(bump)
return triggers
def get_lock_behaviour(triggers, all_data, lock):
"""Binary state lock protects from version increments if set"""
updates = {}
lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD)
# if we are explicitly setting or locking the version, then set the lock field True anyway
if lock:
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE
elif (
triggers
and lock_key
and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE)
):
triggers.clear()
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE
return updates
def get_final_version_string(release_mode, semver, commit_count=0):
"""Generates update dictionary entries for the version string"""
version_string = ".".join(semver)
maybe_dev_version_string = version_string
updates = {}
if release_mode:
# in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True
updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
else:
# in dev mode, we have a dev marker e.g. `1.2.3.dev678`
maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(
version=version_string, count=commit_count
)
# make available all components of the semantic version including the full string
updates[Constants.VERSION_FIELD] = maybe_dev_version_string
updates[Constants.VERSION_STRICT_FIELD] = version_string
return updates
def get_dvcs_info():
"""Gets current repository info from git"""
cmd = "git rev-list --count HEAD"
commit_count = str(
int(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
)
cmd = "git rev-parse HEAD"
commit = str(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
return {Constants.COMMIT_FIELD: commit, Constants.COMMIT_COUNT_FIELD: commit_count}
def main(
set_to=None,
set_patch_count=None,
release=None,
bump=None,
lock=None,
file_triggers=None,
config_path=None,
**extra_updates
):
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
:param set_to: explicitly set semver to this version string
:param set_patch_count: sets the patch number to the commit count
:param release: marks with a production flag
just sets a single flag as per config
:param bump: string indicating major/minor/patch
more significant bumps will zero the less significant ones
:param lock: locks the version string for the next call to autoversion
lock only removed if a version bump would have occurred
:param file_triggers: whether to enable bumping based on file triggers
bumping occurs once if any file(s) exist that match the config
:param config_path: path to config file
:param extra_updates:
:return:
"""
updates = {}
if config_path:
get_or_create_config(config_path, config)
for k, v in config.regexers.items():
config.regexers[k] = re.compile(v)
# a forward-mapping of the configured aliases
# giving <our config param> : <the configured value>
# if a value occurs multiple times, we take the last set value
for k, v in config.key_aliases.items():
config._forward_aliases[v] = k
all_data = read_targets(config.targets)
current_semver = semver.get_current_semver(all_data)
triggers = get_all_triggers(bump, file_triggers)
updates.update(get_lock_behaviour(triggers, all_data, lock))
updates.update(get_dvcs_info())
if set_to:
_LOG.debug("setting version directly: %s", set_to)
new_semver = auto_version.definitions.SemVer(*set_to.split("."))
if not lock:
warnings.warn(
"After setting version manually, does it need locking for a CI flow?",
UserWarning,
)
elif set_patch_count:
_LOG.debug(
"auto-incrementing version, using commit count for patch: %s",
updates[Constants.COMMIT_COUNT_FIELD],
)
new_semver = semver.make_new_semver(
current_semver, triggers, patch=updates[Constants.COMMIT_COUNT_FIELD]
)
else:
_LOG.debug("auto-incrementing version")
new_semver = semver.make_new_semver(current_semver, triggers)
updates.update(
get_final_version_string(
release_mode=release,
semver=new_semver,
commit_count=updates.get(Constants.COMMIT_COUNT_FIELD, 0),
)
)
for part in semver.SemVerSigFig:
updates[part] = getattr(new_semver, part)
# only rewrite a field that the user has specified in the configuration
native_updates = {
native: updates[key]
for native, key in config.key_aliases.items()
if key in updates
}
# finally, add in commandline overrides
native_updates.update(extra_updates)
write_targets(config.targets, **native_updates)
return current_semver, new_semver, native_updates
def parse_other_args(others):
# pull extra kwargs from commandline, e.g. TESTRUNNER_VERSION
updates = {}
for kwargs in others:
try:
k, v = kwargs.split("=")
_LOG.debug("parsing extra replacement from command line: %r = %r", k, v)
updates[k.strip()] = ast.literal_eval(v.strip())
except Exception:
_LOG.exception(
"Failed to unpack additional parameter pair: %r (ignored)", kwargs
)
return updates
def main_from_cli():
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
"""
args, others = get_cli()
if args.version:
print(__version__)
exit(0)
log_level = logging.WARNING - 10 * args.verbosity
logging.basicConfig(level=log_level, format="%(module)s %(levelname)8s %(message)s")
command_line_updates = parse_other_args(others)
old, new, updates = main(
set_to=args.set,
set_patch_count=args.set_patch_count,
lock=args.lock,
release=args.release,
bump=args.bump,
file_triggers=args.file_triggers,
config_path=args.config,
**command_line_updates
)
_LOG.info("previously: %s", old)
_LOG.info("currently: %s", new)
_LOG.debug("updates:\n%s", pprint.pformat(updates))
print(
updates.get(config._forward_aliases.get(Constants.VERSION_FIELD))
or updates.get(config._forward_aliases.get(Constants.VERSION_STRICT_FIELD))
)
__name__ == "__main__" and main_from_cli()
|
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | get_all_triggers | python | def get_all_triggers(bump, file_triggers):
triggers = set()
if file_triggers:
triggers = triggers.union(detect_file_triggers(config.trigger_patterns))
if bump:
_LOG.debug("trigger: %s bump requested", bump)
triggers.add(bump)
return triggers | Aggregated set of significant figures to bump | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L111-L119 | [
"def detect_file_triggers(trigger_patterns):\n \"\"\"The existence of files matching configured globs will trigger a version bump\"\"\"\n triggers = set()\n for trigger, pattern in trigger_patterns.items():\n matches = glob.glob(pattern)\n if matches:\n _LOG.debug(\"trigger: %s bump from %r\\n\\t%s\", trigger, pattern, matches)\n triggers.add(trigger)\n else:\n _LOG.debug(\"trigger: no match on %r\", pattern)\n return triggers\n"
] | """Generates DVCS version information
see also:
https://git-scm.com/docs/git-shortlog
https://www.python.org/dev/peps/pep-0440/
https://pypi.python.org/pypi/semver
https://pypi.python.org/pypi/bumpversion
https://github.com/warner/python-versioneer
https://pypi.org/project/autoversion/
https://pypi.org/project/auto-version/
https://github.com/javrasya/version-manager
"""
import ast
import glob
import logging
import os
import pprint
import re
import shlex
import subprocess
import warnings
from auto_version.cli import get_cli
from auto_version.config import AutoVersionConfig as config
from auto_version.config import Constants
from auto_version.config import get_or_create_config
import auto_version.definitions
from auto_version.replacement_handler import ReplacementHandler
from auto_version import semver
from auto_version import __version__
_LOG = logging.getLogger(__file__)
def replace_lines(regexer, handler, lines):
"""Uses replacement handler to perform replacements on lines of text
First we strip off all whitespace
We run the replacement on a clean 'content' string
Finally we replace the original content with the replaced version
This ensures that we retain the correct whitespace from the original line
"""
result = []
for line in lines:
content = line.strip()
replaced = regexer.sub(handler, content)
result.append(line.replace(content, replaced, 1))
return result
def write_targets(targets, **params):
"""Writes version info into version file"""
handler = ReplacementHandler(**params)
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
lines = fh.readlines()
lines = replace_lines(regexer, handler, lines)
with open(target, "w") as fh:
fh.writelines(lines)
if handler.missing:
raise Exception(
"Failed to complete all expected replacements: %r" % handler.missing
)
def regexer_for_targets(targets):
"""Pairs up target files with their correct regex"""
for target in targets:
path, file_ext = os.path.splitext(target)
regexer = config.regexers[file_ext]
yield target, regexer
def extract_keypairs(lines, regexer):
"""Given some lines of text, extract key-value pairs from them"""
updates = {}
for line in lines:
# for consistency we must match the replacer and strip whitespace / newlines
match = regexer.match(line.strip())
if not match:
continue
k_v = match.groupdict()
updates[k_v[Constants.KEY_GROUP]] = k_v[Constants.VALUE_GROUP]
return updates
def read_targets(targets):
"""Reads generic key-value pairs from input files"""
results = {}
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
results.update(extract_keypairs(fh.readlines(), regexer))
return results
def detect_file_triggers(trigger_patterns):
"""The existence of files matching configured globs will trigger a version bump"""
triggers = set()
for trigger, pattern in trigger_patterns.items():
matches = glob.glob(pattern)
if matches:
_LOG.debug("trigger: %s bump from %r\n\t%s", trigger, pattern, matches)
triggers.add(trigger)
else:
_LOG.debug("trigger: no match on %r", pattern)
return triggers
def get_lock_behaviour(triggers, all_data, lock):
"""Binary state lock protects from version increments if set"""
updates = {}
lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD)
# if we are explicitly setting or locking the version, then set the lock field True anyway
if lock:
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE
elif (
triggers
and lock_key
and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE)
):
triggers.clear()
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE
return updates
def get_final_version_string(release_mode, semver, commit_count=0):
"""Generates update dictionary entries for the version string"""
version_string = ".".join(semver)
maybe_dev_version_string = version_string
updates = {}
if release_mode:
# in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True
updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
else:
# in dev mode, we have a dev marker e.g. `1.2.3.dev678`
maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(
version=version_string, count=commit_count
)
# make available all components of the semantic version including the full string
updates[Constants.VERSION_FIELD] = maybe_dev_version_string
updates[Constants.VERSION_STRICT_FIELD] = version_string
return updates
def get_dvcs_info():
"""Gets current repository info from git"""
cmd = "git rev-list --count HEAD"
commit_count = str(
int(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
)
cmd = "git rev-parse HEAD"
commit = str(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
return {Constants.COMMIT_FIELD: commit, Constants.COMMIT_COUNT_FIELD: commit_count}
def main(
set_to=None,
set_patch_count=None,
release=None,
bump=None,
lock=None,
file_triggers=None,
config_path=None,
**extra_updates
):
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
:param set_to: explicitly set semver to this version string
:param set_patch_count: sets the patch number to the commit count
:param release: marks with a production flag
just sets a single flag as per config
:param bump: string indicating major/minor/patch
more significant bumps will zero the less significant ones
:param lock: locks the version string for the next call to autoversion
lock only removed if a version bump would have occurred
:param file_triggers: whether to enable bumping based on file triggers
bumping occurs once if any file(s) exist that match the config
:param config_path: path to config file
:param extra_updates:
:return:
"""
updates = {}
if config_path:
get_or_create_config(config_path, config)
for k, v in config.regexers.items():
config.regexers[k] = re.compile(v)
# a forward-mapping of the configured aliases
# giving <our config param> : <the configured value>
# if a value occurs multiple times, we take the last set value
for k, v in config.key_aliases.items():
config._forward_aliases[v] = k
all_data = read_targets(config.targets)
current_semver = semver.get_current_semver(all_data)
triggers = get_all_triggers(bump, file_triggers)
updates.update(get_lock_behaviour(triggers, all_data, lock))
updates.update(get_dvcs_info())
if set_to:
_LOG.debug("setting version directly: %s", set_to)
new_semver = auto_version.definitions.SemVer(*set_to.split("."))
if not lock:
warnings.warn(
"After setting version manually, does it need locking for a CI flow?",
UserWarning,
)
elif set_patch_count:
_LOG.debug(
"auto-incrementing version, using commit count for patch: %s",
updates[Constants.COMMIT_COUNT_FIELD],
)
new_semver = semver.make_new_semver(
current_semver, triggers, patch=updates[Constants.COMMIT_COUNT_FIELD]
)
else:
_LOG.debug("auto-incrementing version")
new_semver = semver.make_new_semver(current_semver, triggers)
updates.update(
get_final_version_string(
release_mode=release,
semver=new_semver,
commit_count=updates.get(Constants.COMMIT_COUNT_FIELD, 0),
)
)
for part in semver.SemVerSigFig:
updates[part] = getattr(new_semver, part)
# only rewrite a field that the user has specified in the configuration
native_updates = {
native: updates[key]
for native, key in config.key_aliases.items()
if key in updates
}
# finally, add in commandline overrides
native_updates.update(extra_updates)
write_targets(config.targets, **native_updates)
return current_semver, new_semver, native_updates
def parse_other_args(others):
# pull extra kwargs from commandline, e.g. TESTRUNNER_VERSION
updates = {}
for kwargs in others:
try:
k, v = kwargs.split("=")
_LOG.debug("parsing extra replacement from command line: %r = %r", k, v)
updates[k.strip()] = ast.literal_eval(v.strip())
except Exception:
_LOG.exception(
"Failed to unpack additional parameter pair: %r (ignored)", kwargs
)
return updates
def main_from_cli():
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
"""
args, others = get_cli()
if args.version:
print(__version__)
exit(0)
log_level = logging.WARNING - 10 * args.verbosity
logging.basicConfig(level=log_level, format="%(module)s %(levelname)8s %(message)s")
command_line_updates = parse_other_args(others)
old, new, updates = main(
set_to=args.set,
set_patch_count=args.set_patch_count,
lock=args.lock,
release=args.release,
bump=args.bump,
file_triggers=args.file_triggers,
config_path=args.config,
**command_line_updates
)
_LOG.info("previously: %s", old)
_LOG.info("currently: %s", new)
_LOG.debug("updates:\n%s", pprint.pformat(updates))
print(
updates.get(config._forward_aliases.get(Constants.VERSION_FIELD))
or updates.get(config._forward_aliases.get(Constants.VERSION_STRICT_FIELD))
)
__name__ == "__main__" and main_from_cli()
|
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | get_lock_behaviour | python | def get_lock_behaviour(triggers, all_data, lock):
updates = {}
lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD)
# if we are explicitly setting or locking the version, then set the lock field True anyway
if lock:
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE
elif (
triggers
and lock_key
and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE)
):
triggers.clear()
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE
return updates | Binary state lock protects from version increments if set | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L122-L136 | null | """Generates DVCS version information
see also:
https://git-scm.com/docs/git-shortlog
https://www.python.org/dev/peps/pep-0440/
https://pypi.python.org/pypi/semver
https://pypi.python.org/pypi/bumpversion
https://github.com/warner/python-versioneer
https://pypi.org/project/autoversion/
https://pypi.org/project/auto-version/
https://github.com/javrasya/version-manager
"""
import ast
import glob
import logging
import os
import pprint
import re
import shlex
import subprocess
import warnings
from auto_version.cli import get_cli
from auto_version.config import AutoVersionConfig as config
from auto_version.config import Constants
from auto_version.config import get_or_create_config
import auto_version.definitions
from auto_version.replacement_handler import ReplacementHandler
from auto_version import semver
from auto_version import __version__
_LOG = logging.getLogger(__file__)
def replace_lines(regexer, handler, lines):
"""Uses replacement handler to perform replacements on lines of text
First we strip off all whitespace
We run the replacement on a clean 'content' string
Finally we replace the original content with the replaced version
This ensures that we retain the correct whitespace from the original line
"""
result = []
for line in lines:
content = line.strip()
replaced = regexer.sub(handler, content)
result.append(line.replace(content, replaced, 1))
return result
def write_targets(targets, **params):
"""Writes version info into version file"""
handler = ReplacementHandler(**params)
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
lines = fh.readlines()
lines = replace_lines(regexer, handler, lines)
with open(target, "w") as fh:
fh.writelines(lines)
if handler.missing:
raise Exception(
"Failed to complete all expected replacements: %r" % handler.missing
)
def regexer_for_targets(targets):
"""Pairs up target files with their correct regex"""
for target in targets:
path, file_ext = os.path.splitext(target)
regexer = config.regexers[file_ext]
yield target, regexer
def extract_keypairs(lines, regexer):
"""Given some lines of text, extract key-value pairs from them"""
updates = {}
for line in lines:
# for consistency we must match the replacer and strip whitespace / newlines
match = regexer.match(line.strip())
if not match:
continue
k_v = match.groupdict()
updates[k_v[Constants.KEY_GROUP]] = k_v[Constants.VALUE_GROUP]
return updates
def read_targets(targets):
"""Reads generic key-value pairs from input files"""
results = {}
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
results.update(extract_keypairs(fh.readlines(), regexer))
return results
def detect_file_triggers(trigger_patterns):
"""The existence of files matching configured globs will trigger a version bump"""
triggers = set()
for trigger, pattern in trigger_patterns.items():
matches = glob.glob(pattern)
if matches:
_LOG.debug("trigger: %s bump from %r\n\t%s", trigger, pattern, matches)
triggers.add(trigger)
else:
_LOG.debug("trigger: no match on %r", pattern)
return triggers
def get_all_triggers(bump, file_triggers):
"""Aggregated set of significant figures to bump"""
triggers = set()
if file_triggers:
triggers = triggers.union(detect_file_triggers(config.trigger_patterns))
if bump:
_LOG.debug("trigger: %s bump requested", bump)
triggers.add(bump)
return triggers
def get_final_version_string(release_mode, semver, commit_count=0):
"""Generates update dictionary entries for the version string"""
version_string = ".".join(semver)
maybe_dev_version_string = version_string
updates = {}
if release_mode:
# in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True
updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
else:
# in dev mode, we have a dev marker e.g. `1.2.3.dev678`
maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(
version=version_string, count=commit_count
)
# make available all components of the semantic version including the full string
updates[Constants.VERSION_FIELD] = maybe_dev_version_string
updates[Constants.VERSION_STRICT_FIELD] = version_string
return updates
def get_dvcs_info():
"""Gets current repository info from git"""
cmd = "git rev-list --count HEAD"
commit_count = str(
int(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
)
cmd = "git rev-parse HEAD"
commit = str(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
return {Constants.COMMIT_FIELD: commit, Constants.COMMIT_COUNT_FIELD: commit_count}
def main(
set_to=None,
set_patch_count=None,
release=None,
bump=None,
lock=None,
file_triggers=None,
config_path=None,
**extra_updates
):
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
:param set_to: explicitly set semver to this version string
:param set_patch_count: sets the patch number to the commit count
:param release: marks with a production flag
just sets a single flag as per config
:param bump: string indicating major/minor/patch
more significant bumps will zero the less significant ones
:param lock: locks the version string for the next call to autoversion
lock only removed if a version bump would have occurred
:param file_triggers: whether to enable bumping based on file triggers
bumping occurs once if any file(s) exist that match the config
:param config_path: path to config file
:param extra_updates:
:return:
"""
updates = {}
if config_path:
get_or_create_config(config_path, config)
for k, v in config.regexers.items():
config.regexers[k] = re.compile(v)
# a forward-mapping of the configured aliases
# giving <our config param> : <the configured value>
# if a value occurs multiple times, we take the last set value
for k, v in config.key_aliases.items():
config._forward_aliases[v] = k
all_data = read_targets(config.targets)
current_semver = semver.get_current_semver(all_data)
triggers = get_all_triggers(bump, file_triggers)
updates.update(get_lock_behaviour(triggers, all_data, lock))
updates.update(get_dvcs_info())
if set_to:
_LOG.debug("setting version directly: %s", set_to)
new_semver = auto_version.definitions.SemVer(*set_to.split("."))
if not lock:
warnings.warn(
"After setting version manually, does it need locking for a CI flow?",
UserWarning,
)
elif set_patch_count:
_LOG.debug(
"auto-incrementing version, using commit count for patch: %s",
updates[Constants.COMMIT_COUNT_FIELD],
)
new_semver = semver.make_new_semver(
current_semver, triggers, patch=updates[Constants.COMMIT_COUNT_FIELD]
)
else:
_LOG.debug("auto-incrementing version")
new_semver = semver.make_new_semver(current_semver, triggers)
updates.update(
get_final_version_string(
release_mode=release,
semver=new_semver,
commit_count=updates.get(Constants.COMMIT_COUNT_FIELD, 0),
)
)
for part in semver.SemVerSigFig:
updates[part] = getattr(new_semver, part)
# only rewrite a field that the user has specified in the configuration
native_updates = {
native: updates[key]
for native, key in config.key_aliases.items()
if key in updates
}
# finally, add in commandline overrides
native_updates.update(extra_updates)
write_targets(config.targets, **native_updates)
return current_semver, new_semver, native_updates
def parse_other_args(others):
# pull extra kwargs from commandline, e.g. TESTRUNNER_VERSION
updates = {}
for kwargs in others:
try:
k, v = kwargs.split("=")
_LOG.debug("parsing extra replacement from command line: %r = %r", k, v)
updates[k.strip()] = ast.literal_eval(v.strip())
except Exception:
_LOG.exception(
"Failed to unpack additional parameter pair: %r (ignored)", kwargs
)
return updates
def main_from_cli():
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
"""
args, others = get_cli()
if args.version:
print(__version__)
exit(0)
log_level = logging.WARNING - 10 * args.verbosity
logging.basicConfig(level=log_level, format="%(module)s %(levelname)8s %(message)s")
command_line_updates = parse_other_args(others)
old, new, updates = main(
set_to=args.set,
set_patch_count=args.set_patch_count,
lock=args.lock,
release=args.release,
bump=args.bump,
file_triggers=args.file_triggers,
config_path=args.config,
**command_line_updates
)
_LOG.info("previously: %s", old)
_LOG.info("currently: %s", new)
_LOG.debug("updates:\n%s", pprint.pformat(updates))
print(
updates.get(config._forward_aliases.get(Constants.VERSION_FIELD))
or updates.get(config._forward_aliases.get(Constants.VERSION_STRICT_FIELD))
)
__name__ == "__main__" and main_from_cli()
|
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | get_final_version_string | python | def get_final_version_string(release_mode, semver, commit_count=0):
version_string = ".".join(semver)
maybe_dev_version_string = version_string
updates = {}
if release_mode:
# in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True
updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
else:
# in dev mode, we have a dev marker e.g. `1.2.3.dev678`
maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(
version=version_string, count=commit_count
)
# make available all components of the semantic version including the full string
updates[Constants.VERSION_FIELD] = maybe_dev_version_string
updates[Constants.VERSION_STRICT_FIELD] = version_string
return updates | Generates update dictionary entries for the version string | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L139-L156 | null | """Generates DVCS version information
see also:
https://git-scm.com/docs/git-shortlog
https://www.python.org/dev/peps/pep-0440/
https://pypi.python.org/pypi/semver
https://pypi.python.org/pypi/bumpversion
https://github.com/warner/python-versioneer
https://pypi.org/project/autoversion/
https://pypi.org/project/auto-version/
https://github.com/javrasya/version-manager
"""
import ast
import glob
import logging
import os
import pprint
import re
import shlex
import subprocess
import warnings
from auto_version.cli import get_cli
from auto_version.config import AutoVersionConfig as config
from auto_version.config import Constants
from auto_version.config import get_or_create_config
import auto_version.definitions
from auto_version.replacement_handler import ReplacementHandler
from auto_version import semver
from auto_version import __version__
_LOG = logging.getLogger(__file__)
def replace_lines(regexer, handler, lines):
"""Uses replacement handler to perform replacements on lines of text
First we strip off all whitespace
We run the replacement on a clean 'content' string
Finally we replace the original content with the replaced version
This ensures that we retain the correct whitespace from the original line
"""
result = []
for line in lines:
content = line.strip()
replaced = regexer.sub(handler, content)
result.append(line.replace(content, replaced, 1))
return result
def write_targets(targets, **params):
"""Writes version info into version file"""
handler = ReplacementHandler(**params)
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
lines = fh.readlines()
lines = replace_lines(regexer, handler, lines)
with open(target, "w") as fh:
fh.writelines(lines)
if handler.missing:
raise Exception(
"Failed to complete all expected replacements: %r" % handler.missing
)
def regexer_for_targets(targets):
"""Pairs up target files with their correct regex"""
for target in targets:
path, file_ext = os.path.splitext(target)
regexer = config.regexers[file_ext]
yield target, regexer
def extract_keypairs(lines, regexer):
"""Given some lines of text, extract key-value pairs from them"""
updates = {}
for line in lines:
# for consistency we must match the replacer and strip whitespace / newlines
match = regexer.match(line.strip())
if not match:
continue
k_v = match.groupdict()
updates[k_v[Constants.KEY_GROUP]] = k_v[Constants.VALUE_GROUP]
return updates
def read_targets(targets):
"""Reads generic key-value pairs from input files"""
results = {}
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
results.update(extract_keypairs(fh.readlines(), regexer))
return results
def detect_file_triggers(trigger_patterns):
"""The existence of files matching configured globs will trigger a version bump"""
triggers = set()
for trigger, pattern in trigger_patterns.items():
matches = glob.glob(pattern)
if matches:
_LOG.debug("trigger: %s bump from %r\n\t%s", trigger, pattern, matches)
triggers.add(trigger)
else:
_LOG.debug("trigger: no match on %r", pattern)
return triggers
def get_all_triggers(bump, file_triggers):
"""Aggregated set of significant figures to bump"""
triggers = set()
if file_triggers:
triggers = triggers.union(detect_file_triggers(config.trigger_patterns))
if bump:
_LOG.debug("trigger: %s bump requested", bump)
triggers.add(bump)
return triggers
def get_lock_behaviour(triggers, all_data, lock):
"""Binary state lock protects from version increments if set"""
updates = {}
lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD)
# if we are explicitly setting or locking the version, then set the lock field True anyway
if lock:
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE
elif (
triggers
and lock_key
and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE)
):
triggers.clear()
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE
return updates
def get_dvcs_info():
"""Gets current repository info from git"""
cmd = "git rev-list --count HEAD"
commit_count = str(
int(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
)
cmd = "git rev-parse HEAD"
commit = str(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
return {Constants.COMMIT_FIELD: commit, Constants.COMMIT_COUNT_FIELD: commit_count}
def main(
set_to=None,
set_patch_count=None,
release=None,
bump=None,
lock=None,
file_triggers=None,
config_path=None,
**extra_updates
):
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
:param set_to: explicitly set semver to this version string
:param set_patch_count: sets the patch number to the commit count
:param release: marks with a production flag
just sets a single flag as per config
:param bump: string indicating major/minor/patch
more significant bumps will zero the less significant ones
:param lock: locks the version string for the next call to autoversion
lock only removed if a version bump would have occurred
:param file_triggers: whether to enable bumping based on file triggers
bumping occurs once if any file(s) exist that match the config
:param config_path: path to config file
:param extra_updates:
:return:
"""
updates = {}
if config_path:
get_or_create_config(config_path, config)
for k, v in config.regexers.items():
config.regexers[k] = re.compile(v)
# a forward-mapping of the configured aliases
# giving <our config param> : <the configured value>
# if a value occurs multiple times, we take the last set value
for k, v in config.key_aliases.items():
config._forward_aliases[v] = k
all_data = read_targets(config.targets)
current_semver = semver.get_current_semver(all_data)
triggers = get_all_triggers(bump, file_triggers)
updates.update(get_lock_behaviour(triggers, all_data, lock))
updates.update(get_dvcs_info())
if set_to:
_LOG.debug("setting version directly: %s", set_to)
new_semver = auto_version.definitions.SemVer(*set_to.split("."))
if not lock:
warnings.warn(
"After setting version manually, does it need locking for a CI flow?",
UserWarning,
)
elif set_patch_count:
_LOG.debug(
"auto-incrementing version, using commit count for patch: %s",
updates[Constants.COMMIT_COUNT_FIELD],
)
new_semver = semver.make_new_semver(
current_semver, triggers, patch=updates[Constants.COMMIT_COUNT_FIELD]
)
else:
_LOG.debug("auto-incrementing version")
new_semver = semver.make_new_semver(current_semver, triggers)
updates.update(
get_final_version_string(
release_mode=release,
semver=new_semver,
commit_count=updates.get(Constants.COMMIT_COUNT_FIELD, 0),
)
)
for part in semver.SemVerSigFig:
updates[part] = getattr(new_semver, part)
# only rewrite a field that the user has specified in the configuration
native_updates = {
native: updates[key]
for native, key in config.key_aliases.items()
if key in updates
}
# finally, add in commandline overrides
native_updates.update(extra_updates)
write_targets(config.targets, **native_updates)
return current_semver, new_semver, native_updates
def parse_other_args(others):
# pull extra kwargs from commandline, e.g. TESTRUNNER_VERSION
updates = {}
for kwargs in others:
try:
k, v = kwargs.split("=")
_LOG.debug("parsing extra replacement from command line: %r = %r", k, v)
updates[k.strip()] = ast.literal_eval(v.strip())
except Exception:
_LOG.exception(
"Failed to unpack additional parameter pair: %r (ignored)", kwargs
)
return updates
def main_from_cli():
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
"""
args, others = get_cli()
if args.version:
print(__version__)
exit(0)
log_level = logging.WARNING - 10 * args.verbosity
logging.basicConfig(level=log_level, format="%(module)s %(levelname)8s %(message)s")
command_line_updates = parse_other_args(others)
old, new, updates = main(
set_to=args.set,
set_patch_count=args.set_patch_count,
lock=args.lock,
release=args.release,
bump=args.bump,
file_triggers=args.file_triggers,
config_path=args.config,
**command_line_updates
)
_LOG.info("previously: %s", old)
_LOG.info("currently: %s", new)
_LOG.debug("updates:\n%s", pprint.pformat(updates))
print(
updates.get(config._forward_aliases.get(Constants.VERSION_FIELD))
or updates.get(config._forward_aliases.get(Constants.VERSION_STRICT_FIELD))
)
__name__ == "__main__" and main_from_cli()
|
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | get_dvcs_info | python | def get_dvcs_info():
cmd = "git rev-list --count HEAD"
commit_count = str(
int(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
)
cmd = "git rev-parse HEAD"
commit = str(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
return {Constants.COMMIT_FIELD: commit, Constants.COMMIT_COUNT_FIELD: commit_count} | Gets current repository info from git | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L159-L167 | null | """Generates DVCS version information
see also:
https://git-scm.com/docs/git-shortlog
https://www.python.org/dev/peps/pep-0440/
https://pypi.python.org/pypi/semver
https://pypi.python.org/pypi/bumpversion
https://github.com/warner/python-versioneer
https://pypi.org/project/autoversion/
https://pypi.org/project/auto-version/
https://github.com/javrasya/version-manager
"""
import ast
import glob
import logging
import os
import pprint
import re
import shlex
import subprocess
import warnings
from auto_version.cli import get_cli
from auto_version.config import AutoVersionConfig as config
from auto_version.config import Constants
from auto_version.config import get_or_create_config
import auto_version.definitions
from auto_version.replacement_handler import ReplacementHandler
from auto_version import semver
from auto_version import __version__
_LOG = logging.getLogger(__file__)
def replace_lines(regexer, handler, lines):
"""Uses replacement handler to perform replacements on lines of text
First we strip off all whitespace
We run the replacement on a clean 'content' string
Finally we replace the original content with the replaced version
This ensures that we retain the correct whitespace from the original line
"""
result = []
for line in lines:
content = line.strip()
replaced = regexer.sub(handler, content)
result.append(line.replace(content, replaced, 1))
return result
def write_targets(targets, **params):
"""Writes version info into version file"""
handler = ReplacementHandler(**params)
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
lines = fh.readlines()
lines = replace_lines(regexer, handler, lines)
with open(target, "w") as fh:
fh.writelines(lines)
if handler.missing:
raise Exception(
"Failed to complete all expected replacements: %r" % handler.missing
)
def regexer_for_targets(targets):
"""Pairs up target files with their correct regex"""
for target in targets:
path, file_ext = os.path.splitext(target)
regexer = config.regexers[file_ext]
yield target, regexer
def extract_keypairs(lines, regexer):
"""Given some lines of text, extract key-value pairs from them"""
updates = {}
for line in lines:
# for consistency we must match the replacer and strip whitespace / newlines
match = regexer.match(line.strip())
if not match:
continue
k_v = match.groupdict()
updates[k_v[Constants.KEY_GROUP]] = k_v[Constants.VALUE_GROUP]
return updates
def read_targets(targets):
"""Reads generic key-value pairs from input files"""
results = {}
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
results.update(extract_keypairs(fh.readlines(), regexer))
return results
def detect_file_triggers(trigger_patterns):
"""The existence of files matching configured globs will trigger a version bump"""
triggers = set()
for trigger, pattern in trigger_patterns.items():
matches = glob.glob(pattern)
if matches:
_LOG.debug("trigger: %s bump from %r\n\t%s", trigger, pattern, matches)
triggers.add(trigger)
else:
_LOG.debug("trigger: no match on %r", pattern)
return triggers
def get_all_triggers(bump, file_triggers):
"""Aggregated set of significant figures to bump"""
triggers = set()
if file_triggers:
triggers = triggers.union(detect_file_triggers(config.trigger_patterns))
if bump:
_LOG.debug("trigger: %s bump requested", bump)
triggers.add(bump)
return triggers
def get_lock_behaviour(triggers, all_data, lock):
"""Binary state lock protects from version increments if set"""
updates = {}
lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD)
# if we are explicitly setting or locking the version, then set the lock field True anyway
if lock:
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE
elif (
triggers
and lock_key
and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE)
):
triggers.clear()
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE
return updates
def get_final_version_string(release_mode, semver, commit_count=0):
"""Generates update dictionary entries for the version string"""
version_string = ".".join(semver)
maybe_dev_version_string = version_string
updates = {}
if release_mode:
# in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True
updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
else:
# in dev mode, we have a dev marker e.g. `1.2.3.dev678`
maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(
version=version_string, count=commit_count
)
# make available all components of the semantic version including the full string
updates[Constants.VERSION_FIELD] = maybe_dev_version_string
updates[Constants.VERSION_STRICT_FIELD] = version_string
return updates
def main(
set_to=None,
set_patch_count=None,
release=None,
bump=None,
lock=None,
file_triggers=None,
config_path=None,
**extra_updates
):
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
:param set_to: explicitly set semver to this version string
:param set_patch_count: sets the patch number to the commit count
:param release: marks with a production flag
just sets a single flag as per config
:param bump: string indicating major/minor/patch
more significant bumps will zero the less significant ones
:param lock: locks the version string for the next call to autoversion
lock only removed if a version bump would have occurred
:param file_triggers: whether to enable bumping based on file triggers
bumping occurs once if any file(s) exist that match the config
:param config_path: path to config file
:param extra_updates:
:return:
"""
updates = {}
if config_path:
get_or_create_config(config_path, config)
for k, v in config.regexers.items():
config.regexers[k] = re.compile(v)
# a forward-mapping of the configured aliases
# giving <our config param> : <the configured value>
# if a value occurs multiple times, we take the last set value
for k, v in config.key_aliases.items():
config._forward_aliases[v] = k
all_data = read_targets(config.targets)
current_semver = semver.get_current_semver(all_data)
triggers = get_all_triggers(bump, file_triggers)
updates.update(get_lock_behaviour(triggers, all_data, lock))
updates.update(get_dvcs_info())
if set_to:
_LOG.debug("setting version directly: %s", set_to)
new_semver = auto_version.definitions.SemVer(*set_to.split("."))
if not lock:
warnings.warn(
"After setting version manually, does it need locking for a CI flow?",
UserWarning,
)
elif set_patch_count:
_LOG.debug(
"auto-incrementing version, using commit count for patch: %s",
updates[Constants.COMMIT_COUNT_FIELD],
)
new_semver = semver.make_new_semver(
current_semver, triggers, patch=updates[Constants.COMMIT_COUNT_FIELD]
)
else:
_LOG.debug("auto-incrementing version")
new_semver = semver.make_new_semver(current_semver, triggers)
updates.update(
get_final_version_string(
release_mode=release,
semver=new_semver,
commit_count=updates.get(Constants.COMMIT_COUNT_FIELD, 0),
)
)
for part in semver.SemVerSigFig:
updates[part] = getattr(new_semver, part)
# only rewrite a field that the user has specified in the configuration
native_updates = {
native: updates[key]
for native, key in config.key_aliases.items()
if key in updates
}
# finally, add in commandline overrides
native_updates.update(extra_updates)
write_targets(config.targets, **native_updates)
return current_semver, new_semver, native_updates
def parse_other_args(others):
# pull extra kwargs from commandline, e.g. TESTRUNNER_VERSION
updates = {}
for kwargs in others:
try:
k, v = kwargs.split("=")
_LOG.debug("parsing extra replacement from command line: %r = %r", k, v)
updates[k.strip()] = ast.literal_eval(v.strip())
except Exception:
_LOG.exception(
"Failed to unpack additional parameter pair: %r (ignored)", kwargs
)
return updates
def main_from_cli():
"""Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
"""
args, others = get_cli()
if args.version:
print(__version__)
exit(0)
log_level = logging.WARNING - 10 * args.verbosity
logging.basicConfig(level=log_level, format="%(module)s %(levelname)8s %(message)s")
command_line_updates = parse_other_args(others)
old, new, updates = main(
set_to=args.set,
set_patch_count=args.set_patch_count,
lock=args.lock,
release=args.release,
bump=args.bump,
file_triggers=args.file_triggers,
config_path=args.config,
**command_line_updates
)
_LOG.info("previously: %s", old)
_LOG.info("currently: %s", new)
_LOG.debug("updates:\n%s", pprint.pformat(updates))
print(
updates.get(config._forward_aliases.get(Constants.VERSION_FIELD))
or updates.get(config._forward_aliases.get(Constants.VERSION_STRICT_FIELD))
)
__name__ == "__main__" and main_from_cli()
|
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | main | python | def main(
set_to=None,
set_patch_count=None,
release=None,
bump=None,
lock=None,
file_triggers=None,
config_path=None,
**extra_updates
):
updates = {}
if config_path:
get_or_create_config(config_path, config)
for k, v in config.regexers.items():
config.regexers[k] = re.compile(v)
# a forward-mapping of the configured aliases
# giving <our config param> : <the configured value>
# if a value occurs multiple times, we take the last set value
for k, v in config.key_aliases.items():
config._forward_aliases[v] = k
all_data = read_targets(config.targets)
current_semver = semver.get_current_semver(all_data)
triggers = get_all_triggers(bump, file_triggers)
updates.update(get_lock_behaviour(triggers, all_data, lock))
updates.update(get_dvcs_info())
if set_to:
_LOG.debug("setting version directly: %s", set_to)
new_semver = auto_version.definitions.SemVer(*set_to.split("."))
if not lock:
warnings.warn(
"After setting version manually, does it need locking for a CI flow?",
UserWarning,
)
elif set_patch_count:
_LOG.debug(
"auto-incrementing version, using commit count for patch: %s",
updates[Constants.COMMIT_COUNT_FIELD],
)
new_semver = semver.make_new_semver(
current_semver, triggers, patch=updates[Constants.COMMIT_COUNT_FIELD]
)
else:
_LOG.debug("auto-incrementing version")
new_semver = semver.make_new_semver(current_semver, triggers)
updates.update(
get_final_version_string(
release_mode=release,
semver=new_semver,
commit_count=updates.get(Constants.COMMIT_COUNT_FIELD, 0),
)
)
for part in semver.SemVerSigFig:
updates[part] = getattr(new_semver, part)
# only rewrite a field that the user has specified in the configuration
native_updates = {
native: updates[key]
for native, key in config.key_aliases.items()
if key in updates
}
# finally, add in commandline overrides
native_updates.update(extra_updates)
write_targets(config.targets, **native_updates)
return current_semver, new_semver, native_updates | Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables
:param set_to: explicitly set semver to this version string
:param set_patch_count: sets the patch number to the commit count
:param release: marks with a production flag
just sets a single flag as per config
:param bump: string indicating major/minor/patch
more significant bumps will zero the less significant ones
:param lock: locks the version string for the next call to autoversion
lock only removed if a version bump would have occurred
:param file_triggers: whether to enable bumping based on file triggers
bumping occurs once if any file(s) exist that match the config
:param config_path: path to config file
:param extra_updates:
:return: | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L170-L266 | [
"def get_or_create_config(path, config):\n \"\"\"Using TOML format, load config from given path, or write out example based on defaults\"\"\"\n if os.path.isfile(path):\n with open(path) as fh:\n _LOG.debug(\"loading config from %s\", os.path.abspath(path))\n config._inflate(toml.load(fh))\n else:\n try:\n os.makedirs(os.path.dirname(path))\n except OSError:\n pass\n with open(path, \"w\") as fh:\n toml.dump(config._deflate(), fh)\n",
"def write_targets(targets, **params):\n \"\"\"Writes version info into version file\"\"\"\n handler = ReplacementHandler(**params)\n for target, regexer in regexer_for_targets(targets):\n with open(target) as fh:\n lines = fh.readlines()\n lines = replace_lines(regexer, handler, lines)\n with open(target, \"w\") as fh:\n fh.writelines(lines)\n if handler.missing:\n raise Exception(\n \"Failed to complete all expected replacements: %r\" % handler.missing\n )\n",
"def read_targets(targets):\n \"\"\"Reads generic key-value pairs from input files\"\"\"\n results = {}\n for target, regexer in regexer_for_targets(targets):\n with open(target) as fh:\n results.update(extract_keypairs(fh.readlines(), regexer))\n return results\n",
"def get_all_triggers(bump, file_triggers):\n \"\"\"Aggregated set of significant figures to bump\"\"\"\n triggers = set()\n if file_triggers:\n triggers = triggers.union(detect_file_triggers(config.trigger_patterns))\n if bump:\n _LOG.debug(\"trigger: %s bump requested\", bump)\n triggers.add(bump)\n return triggers\n",
"def get_lock_behaviour(triggers, all_data, lock):\n \"\"\"Binary state lock protects from version increments if set\"\"\"\n updates = {}\n lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD)\n # if we are explicitly setting or locking the version, then set the lock field True anyway\n if lock:\n updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE\n elif (\n triggers\n and lock_key\n and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE)\n ):\n triggers.clear()\n updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE\n return updates\n",
"def get_final_version_string(release_mode, semver, commit_count=0):\n \"\"\"Generates update dictionary entries for the version string\"\"\"\n version_string = \".\".join(semver)\n maybe_dev_version_string = version_string\n updates = {}\n if release_mode:\n # in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True\n updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE\n else:\n # in dev mode, we have a dev marker e.g. `1.2.3.dev678`\n maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(\n version=version_string, count=commit_count\n )\n\n # make available all components of the semantic version including the full string\n updates[Constants.VERSION_FIELD] = maybe_dev_version_string\n updates[Constants.VERSION_STRICT_FIELD] = version_string\n return updates\n",
"def get_dvcs_info():\n \"\"\"Gets current repository info from git\"\"\"\n cmd = \"git rev-list --count HEAD\"\n commit_count = str(\n int(subprocess.check_output(shlex.split(cmd)).decode(\"utf8\").strip())\n )\n cmd = \"git rev-parse HEAD\"\n commit = str(subprocess.check_output(shlex.split(cmd)).decode(\"utf8\").strip())\n return {Constants.COMMIT_FIELD: commit, Constants.COMMIT_COUNT_FIELD: commit_count}\n",
"def get_current_semver(data):\n \"\"\"Given a dictionary of all version data available, determine the current version\"\"\"\n # get the not-none values from data\n known = {\n key: data.get(alias)\n for key, alias in config._forward_aliases.items()\n if data.get(alias) is not None\n }\n\n # prefer the strict field, if available\n potentials = [\n known.pop(Constants.VERSION_STRICT_FIELD, None),\n known.pop(Constants.VERSION_FIELD, None),\n ]\n\n from_components = [known.get(k) for k in SemVerSigFig._fields if k in known]\n if len(from_components) == 3:\n potentials.append(\".\".join(from_components))\n\n versions = set()\n for potential in potentials:\n if not potential:\n continue\n match = re_semver.match(potential)\n if match:\n parts = match.groupdict()\n parts.pop(\"tail\")\n versions.add(SemVer(**parts))\n\n if len(versions) > 1:\n raise ValueError(\"conflicting versions within project: %s\" % versions)\n\n if not versions:\n _LOG.debug(\"key pairs found: \\n%r\", known)\n raise ValueError(\"could not find existing semver\")\n return versions.pop()\n",
"def make_new_semver(current_semver, all_triggers, **overrides):\n \"\"\"Defines how to increment semver based on which significant figure is triggered\"\"\"\n new_semver = {}\n bumped = False\n for sig_fig in SemVerSigFig: # iterate sig figs in order of significance\n value = getattr(current_semver, sig_fig)\n override = overrides.get(sig_fig)\n if override is not None:\n new_semver[sig_fig] = override\n if int(override) > int(value):\n bumped = True\n elif bumped:\n new_semver[sig_fig] = \"0\"\n elif sig_fig in all_triggers:\n new_semver[sig_fig] = str(int(value) + 1)\n bumped = True\n else:\n new_semver[sig_fig] = value\n return SemVer(**new_semver)\n"
] | """Generates DVCS version information
see also:
https://git-scm.com/docs/git-shortlog
https://www.python.org/dev/peps/pep-0440/
https://pypi.python.org/pypi/semver
https://pypi.python.org/pypi/bumpversion
https://github.com/warner/python-versioneer
https://pypi.org/project/autoversion/
https://pypi.org/project/auto-version/
https://github.com/javrasya/version-manager
"""
import ast
import glob
import logging
import os
import pprint
import re
import shlex
import subprocess
import warnings
from auto_version.cli import get_cli
from auto_version.config import AutoVersionConfig as config
from auto_version.config import Constants
from auto_version.config import get_or_create_config
import auto_version.definitions
from auto_version.replacement_handler import ReplacementHandler
from auto_version import semver
from auto_version import __version__
_LOG = logging.getLogger(__file__)
def replace_lines(regexer, handler, lines):
    """Apply *handler* substitutions to each line via *regexer*.

    Each line is stripped of surrounding whitespace before substitution, and
    the substituted text is spliced back into the original line, so leading
    indentation and trailing newlines survive untouched.

    :param regexer: compiled regex whose matches are handed to *handler*.
    :param handler: replacement callable (or string) passed to ``re.sub``.
    :param lines: iterable of raw text lines.
    :returns: list of lines with replacements applied.
    """
    out = []
    for original in lines:
        stripped = original.strip()
        substituted = regexer.sub(handler, stripped)
        # Re-insert the substituted content into the untouched original line.
        out.append(original.replace(stripped, substituted, 1))
    return out
def write_targets(targets, **params):
    """Writes version info into version file.

    :param targets: iterable of file paths to rewrite in place.
    :param params: key/value replacements handed to ReplacementHandler.
    :raises Exception: if any expected replacement was never performed.
    """
    handler = ReplacementHandler(**params)
    for target, regexer in regexer_for_targets(targets):
        # Read, substitute line-by-line, then write the file back in place.
        with open(target) as fh:
            lines = fh.readlines()
        lines = replace_lines(regexer, handler, lines)
        with open(target, "w") as fh:
            fh.writelines(lines)
    # The handler records keys it was asked to write but never matched.
    if handler.missing:
        raise Exception(
            "Failed to complete all expected replacements: %r" % handler.missing
        )
def regexer_for_targets(targets):
    """Yield ``(target, regexer)`` pairs, matching each file path to the
    compiled regex configured for its extension."""
    for target in targets:
        _, extension = os.path.splitext(target)
        yield target, config.regexers[extension]
def extract_keypairs(lines, regexer):
    """Extract key/value pairs from *lines* using the named groups of *regexer*.

    :param lines: iterable of raw text lines.
    :param regexer: compiled regex with KEY/VALUE named groups.
    :returns: dict mapping extracted keys to values.
    """
    found = {}
    for raw in lines:
        # For consistency with the replacement pass, strip whitespace/newlines
        # before matching.
        match = regexer.match(raw.strip())
        if match:
            groups = match.groupdict()
            found[groups[Constants.KEY_GROUP]] = groups[Constants.VALUE_GROUP]
    return found
def read_targets(targets):
    """Read generic key-value pairs from every input file and merge them.

    Later files win on key collisions.
    """
    merged = {}
    for target, regexer in regexer_for_targets(targets):
        with open(target) as fh:
            merged.update(extract_keypairs(fh.readlines(), regexer))
    return merged
def detect_file_triggers(trigger_patterns):
    """Return the set of triggers whose glob pattern matches at least one file.

    The existence of files matching configured globs will trigger a version bump.

    :param trigger_patterns: mapping of trigger name -> glob pattern.
    """
    fired = set()
    for trigger, pattern in trigger_patterns.items():
        hits = glob.glob(pattern)
        if hits:
            _LOG.debug("trigger: %s bump from %r\n\t%s", trigger, pattern, hits)
            fired.add(trigger)
        else:
            _LOG.debug("trigger: no match on %r", pattern)
    return fired
def get_all_triggers(bump, file_triggers):
    """Aggregated set of significant figures to bump.

    :param bump: explicit sig-fig to bump (e.g. "patch"), or falsy.
    :param file_triggers: when truthy, also scan configured file globs.
    """
    result = set()
    if file_triggers:
        result |= detect_file_triggers(config.trigger_patterns)
    if bump:
        _LOG.debug("trigger: %s bump requested", bump)
        result.add(bump)
    return result
def get_lock_behaviour(triggers, all_data, lock):
    """Binary state lock protects from version increments if set.

    :param triggers: mutable set of pending bump triggers; cleared in place
        when the lock is currently engaged.
    :param all_data: key/value pairs previously read from the target files.
    :param lock: truthy to engage the lock for the next run.
    :returns: dict of field updates to write (may set or clear the lock field).
    """
    updates = {}
    lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD)
    # if we are explicitly setting or locking the version, then set the lock field True anyway
    if lock:
        updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE
    elif (
        triggers
        and lock_key
        and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE)
    ):
        # Lock was set on a previous run: swallow this bump and release the lock.
        triggers.clear()
        updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE
    return updates
def get_final_version_string(release_mode, semver, commit_count=0):
    """Generates update dictionary entries for the version string.

    :param release_mode: truthy for a production release (no dev marker).
    :param semver: iterable of semver components to join with dots.
    :param commit_count: commit count embedded in the dev marker.
    """
    strict = ".".join(semver)
    updates = {}
    if release_mode:
        # in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True
        updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
        full = strict
    else:
        # in dev mode, we have a dev marker e.g. `1.2.3.dev678`
        full = config.DEVMODE_TEMPLATE.format(version=strict, count=commit_count)
    # make available all components of the semantic version including the full string
    updates[Constants.VERSION_FIELD] = full
    updates[Constants.VERSION_STRICT_FIELD] = strict
    return updates
def get_dvcs_info():
    """Gets current repository info from git.

    Shells out to git; requires the working directory to be inside a repo.
    """
    count_output = subprocess.check_output(shlex.split("git rev-list --count HEAD"))
    # round-trip through int() to validate the output is numeric
    commit_count = str(int(count_output.decode("utf8").strip()))
    commit_output = subprocess.check_output(shlex.split("git rev-parse HEAD"))
    commit = str(commit_output.decode("utf8").strip())
    return {Constants.COMMIT_FIELD: commit, Constants.COMMIT_COUNT_FIELD: commit_count}
def parse_other_args(others):
    """Parse extra ``KEY=VALUE`` replacement pairs from the command line.

    e.g. ``TESTRUNNER_VERSION="1.2.3"``.  Values are evaluated with
    :func:`ast.literal_eval`; pairs that fail to parse are logged and skipped.

    :param others: iterable of raw ``KEY=VALUE`` strings.
    :returns: dict of parsed replacements.
    """
    updates = {}
    for kwargs in others:
        try:
            # split only on the first '=' so values may themselves contain '='
            k, v = kwargs.split("=", 1)
            _LOG.debug("parsing extra replacement from command line: %r = %r", k, v)
            updates[k.strip()] = ast.literal_eval(v.strip())
        except Exception:
            _LOG.exception(
                "Failed to unpack additional parameter pair: %r (ignored)", kwargs
            )
    return updates
def main_from_cli():
    """Main workflow.

    Load config from cli and file
    Detect "bump triggers" - things that cause a version increment
    Find the current version
    Create a new version
    Write out new version and any other requested variables
    """
    args, others = get_cli()
    if args.version:
        # self-version request: print auto_version's own version and stop
        print(__version__)
        exit(0)
    # each -v raises verbosity by one level (WARNING -> INFO -> DEBUG)
    log_level = logging.WARNING - 10 * args.verbosity
    logging.basicConfig(level=log_level, format="%(module)s %(levelname)8s %(message)s")
    command_line_updates = parse_other_args(others)
    old, new, updates = main(
        set_to=args.set,
        set_patch_count=args.set_patch_count,
        lock=args.lock,
        release=args.release,
        bump=args.bump,
        file_triggers=args.file_triggers,
        config_path=args.config,
        **command_line_updates
    )
    _LOG.info("previously: %s", old)
    _LOG.info("currently: %s", new)
    _LOG.debug("updates:\n%s", pprint.pformat(updates))
    # emit the new version string on stdout for consumption by CI scripts
    print(
        updates.get(config._forward_aliases.get(Constants.VERSION_FIELD))
        or updates.get(config._forward_aliases.get(Constants.VERSION_STRICT_FIELD))
    )
# Standard entry-point guard (clearer than the original `and` short-circuit trick).
if __name__ == "__main__":
    main_from_cli()
|
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | main_from_cli | python | def main_from_cli():
args, others = get_cli()
if args.version:
print(__version__)
exit(0)
log_level = logging.WARNING - 10 * args.verbosity
logging.basicConfig(level=log_level, format="%(module)s %(levelname)8s %(message)s")
command_line_updates = parse_other_args(others)
old, new, updates = main(
set_to=args.set,
set_patch_count=args.set_patch_count,
lock=args.lock,
release=args.release,
bump=args.bump,
file_triggers=args.file_triggers,
config_path=args.config,
**command_line_updates
)
_LOG.info("previously: %s", old)
_LOG.info("currently: %s", new)
_LOG.debug("updates:\n%s", pprint.pformat(updates))
print(
updates.get(config._forward_aliases.get(Constants.VERSION_FIELD))
or updates.get(config._forward_aliases.get(Constants.VERSION_STRICT_FIELD))
) | Main workflow.
Load config from cli and file
Detect "bump triggers" - things that cause a version increment
Find the current version
Create a new version
Write out new version and any other requested variables | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L284-L320 | [
"def main(\n set_to=None,\n set_patch_count=None,\n release=None,\n bump=None,\n lock=None,\n file_triggers=None,\n config_path=None,\n **extra_updates\n):\n \"\"\"Main workflow.\n\n Load config from cli and file\n Detect \"bump triggers\" - things that cause a version increment\n Find the current version\n Create a new version\n Write out new version and any other requested variables\n\n :param set_to: explicitly set semver to this version string\n :param set_patch_count: sets the patch number to the commit count\n :param release: marks with a production flag\n just sets a single flag as per config\n :param bump: string indicating major/minor/patch\n more significant bumps will zero the less significant ones\n :param lock: locks the version string for the next call to autoversion\n lock only removed if a version bump would have occurred\n :param file_triggers: whether to enable bumping based on file triggers\n bumping occurs once if any file(s) exist that match the config\n :param config_path: path to config file\n :param extra_updates:\n :return:\n \"\"\"\n updates = {}\n\n if config_path:\n get_or_create_config(config_path, config)\n\n for k, v in config.regexers.items():\n config.regexers[k] = re.compile(v)\n\n # a forward-mapping of the configured aliases\n # giving <our config param> : <the configured value>\n # if a value occurs multiple times, we take the last set value\n for k, v in config.key_aliases.items():\n config._forward_aliases[v] = k\n\n all_data = read_targets(config.targets)\n current_semver = semver.get_current_semver(all_data)\n\n triggers = get_all_triggers(bump, file_triggers)\n updates.update(get_lock_behaviour(triggers, all_data, lock))\n updates.update(get_dvcs_info())\n\n if set_to:\n _LOG.debug(\"setting version directly: %s\", set_to)\n new_semver = auto_version.definitions.SemVer(*set_to.split(\".\"))\n if not lock:\n warnings.warn(\n \"After setting version manually, does it need locking for a CI flow?\",\n UserWarning,\n )\n elif 
set_patch_count:\n _LOG.debug(\n \"auto-incrementing version, using commit count for patch: %s\",\n updates[Constants.COMMIT_COUNT_FIELD],\n )\n new_semver = semver.make_new_semver(\n current_semver, triggers, patch=updates[Constants.COMMIT_COUNT_FIELD]\n )\n else:\n _LOG.debug(\"auto-incrementing version\")\n new_semver = semver.make_new_semver(current_semver, triggers)\n\n updates.update(\n get_final_version_string(\n release_mode=release,\n semver=new_semver,\n commit_count=updates.get(Constants.COMMIT_COUNT_FIELD, 0),\n )\n )\n\n for part in semver.SemVerSigFig:\n updates[part] = getattr(new_semver, part)\n\n # only rewrite a field that the user has specified in the configuration\n native_updates = {\n native: updates[key]\n for native, key in config.key_aliases.items()\n if key in updates\n }\n\n # finally, add in commandline overrides\n native_updates.update(extra_updates)\n\n write_targets(config.targets, **native_updates)\n\n return current_semver, new_semver, native_updates\n",
"def get_cli():\n \"\"\"Load cli options\"\"\"\n parser = argparse.ArgumentParser(\n prog=\"auto_version\",\n description=\"auto version v%s: a tool to control version numbers\" % __version__,\n )\n parser.add_argument(\n \"--target\",\n action=\"append\",\n default=[],\n help=\"Files containing version info. \"\n \"Assumes unique variable names between files. (default: %s).\"\n % (config.targets,),\n )\n parser.add_argument(\n \"--bump\",\n choices=SemVerSigFig,\n help=\"Bumps the specified part of SemVer string. \"\n \"Use this locally to correctly modify the version file.\",\n )\n parser.add_argument(\n \"--news\",\n \"--file-triggers\",\n action=\"store_true\",\n dest=\"file_triggers\",\n help=\"Detects need to bump based on presence of files (as specified in config).\",\n )\n parser.add_argument(\n \"--set\",\n help=\"Set the SemVer string. Use this locally to set the project version explicitly.\",\n )\n parser.add_argument(\n \"--set-patch-count\",\n action=\"store_true\",\n help=\"Sets the patch number to the commit count.\",\n )\n parser.add_argument(\n \"--lock\",\n action=\"store_true\",\n help=\"Locks the SemVer string. \"\n \"Lock will remain for another call to autoversion before being cleared.\",\n )\n parser.add_argument(\n \"--release\",\n action=\"store_true\",\n default=False,\n help=\"Marks as a release build, which flags the build as released.\",\n )\n parser.add_argument(\n \"--version\",\n action=\"store_true\",\n default=False,\n help=\"Prints the version of auto_version itself (self-version).\",\n )\n parser.add_argument(\"--config\", help=\"Configuration file path.\")\n parser.add_argument(\n \"-v\",\n \"--verbosity\",\n action=\"count\",\n default=0,\n help=\"increase output verbosity. \" \"can be specified multiple times\",\n )\n return parser.parse_known_args()\n",
"def parse_other_args(others):\n # pull extra kwargs from commandline, e.g. TESTRUNNER_VERSION\n updates = {}\n for kwargs in others:\n try:\n k, v = kwargs.split(\"=\")\n _LOG.debug(\"parsing extra replacement from command line: %r = %r\", k, v)\n updates[k.strip()] = ast.literal_eval(v.strip())\n except Exception:\n _LOG.exception(\n \"Failed to unpack additional parameter pair: %r (ignored)\", kwargs\n )\n return updates\n"
] | """Generates DVCS version information
see also:
https://git-scm.com/docs/git-shortlog
https://www.python.org/dev/peps/pep-0440/
https://pypi.python.org/pypi/semver
https://pypi.python.org/pypi/bumpversion
https://github.com/warner/python-versioneer
https://pypi.org/project/autoversion/
https://pypi.org/project/auto-version/
https://github.com/javrasya/version-manager
"""
import ast
import glob
import logging
import os
import pprint
import re
import shlex
import subprocess
import warnings
from auto_version.cli import get_cli
from auto_version.config import AutoVersionConfig as config
from auto_version.config import Constants
from auto_version.config import get_or_create_config
import auto_version.definitions
from auto_version.replacement_handler import ReplacementHandler
from auto_version import semver
from auto_version import __version__
_LOG = logging.getLogger(__file__)
def replace_lines(regexer, handler, lines):
"""Uses replacement handler to perform replacements on lines of text
First we strip off all whitespace
We run the replacement on a clean 'content' string
Finally we replace the original content with the replaced version
This ensures that we retain the correct whitespace from the original line
"""
result = []
for line in lines:
content = line.strip()
replaced = regexer.sub(handler, content)
result.append(line.replace(content, replaced, 1))
return result
def write_targets(targets, **params):
"""Writes version info into version file"""
handler = ReplacementHandler(**params)
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
lines = fh.readlines()
lines = replace_lines(regexer, handler, lines)
with open(target, "w") as fh:
fh.writelines(lines)
if handler.missing:
raise Exception(
"Failed to complete all expected replacements: %r" % handler.missing
)
def regexer_for_targets(targets):
"""Pairs up target files with their correct regex"""
for target in targets:
path, file_ext = os.path.splitext(target)
regexer = config.regexers[file_ext]
yield target, regexer
def extract_keypairs(lines, regexer):
"""Given some lines of text, extract key-value pairs from them"""
updates = {}
for line in lines:
# for consistency we must match the replacer and strip whitespace / newlines
match = regexer.match(line.strip())
if not match:
continue
k_v = match.groupdict()
updates[k_v[Constants.KEY_GROUP]] = k_v[Constants.VALUE_GROUP]
return updates
def read_targets(targets):
"""Reads generic key-value pairs from input files"""
results = {}
for target, regexer in regexer_for_targets(targets):
with open(target) as fh:
results.update(extract_keypairs(fh.readlines(), regexer))
return results
def detect_file_triggers(trigger_patterns):
"""The existence of files matching configured globs will trigger a version bump"""
triggers = set()
for trigger, pattern in trigger_patterns.items():
matches = glob.glob(pattern)
if matches:
_LOG.debug("trigger: %s bump from %r\n\t%s", trigger, pattern, matches)
triggers.add(trigger)
else:
_LOG.debug("trigger: no match on %r", pattern)
return triggers
def get_all_triggers(bump, file_triggers):
"""Aggregated set of significant figures to bump"""
triggers = set()
if file_triggers:
triggers = triggers.union(detect_file_triggers(config.trigger_patterns))
if bump:
_LOG.debug("trigger: %s bump requested", bump)
triggers.add(bump)
return triggers
def get_lock_behaviour(triggers, all_data, lock):
"""Binary state lock protects from version increments if set"""
updates = {}
lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD)
# if we are explicitly setting or locking the version, then set the lock field True anyway
if lock:
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE
elif (
triggers
and lock_key
and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE)
):
triggers.clear()
updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE
return updates
def get_final_version_string(release_mode, semver, commit_count=0):
"""Generates update dictionary entries for the version string"""
version_string = ".".join(semver)
maybe_dev_version_string = version_string
updates = {}
if release_mode:
# in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True
updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
else:
# in dev mode, we have a dev marker e.g. `1.2.3.dev678`
maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(
version=version_string, count=commit_count
)
# make available all components of the semantic version including the full string
updates[Constants.VERSION_FIELD] = maybe_dev_version_string
updates[Constants.VERSION_STRICT_FIELD] = version_string
return updates
def get_dvcs_info():
"""Gets current repository info from git"""
cmd = "git rev-list --count HEAD"
commit_count = str(
int(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
)
cmd = "git rev-parse HEAD"
commit = str(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
return {Constants.COMMIT_FIELD: commit, Constants.COMMIT_COUNT_FIELD: commit_count}
def main(
    set_to=None,
    set_patch_count=None,
    release=None,
    bump=None,
    lock=None,
    file_triggers=None,
    config_path=None,
    **extra_updates
):
    """Main workflow.

    Load config from cli and file
    Detect "bump triggers" - things that cause a version increment
    Find the current version
    Create a new version
    Write out new version and any other requested variables

    :param set_to: explicitly set semver to this version string
    :param set_patch_count: sets the patch number to the commit count
    :param release: marks with a production flag
        just sets a single flag as per config
    :param bump: string indicating major/minor/patch
        more significant bumps will zero the less significant ones
    :param lock: locks the version string for the next call to autoversion
        lock only removed if a version bump would have occurred
    :param file_triggers: whether to enable bumping based on file triggers
        bumping occurs once if any file(s) exist that match the config
    :param config_path: path to config file
    :param extra_updates: extra key/value replacements written verbatim
    :return: (previous SemVer, new SemVer, dict of written replacements)
    """
    updates = {}
    if config_path:
        get_or_create_config(config_path, config)
    # compile the configured per-extension regex strings once, up front
    for k, v in config.regexers.items():
        config.regexers[k] = re.compile(v)
    # a forward-mapping of the configured aliases
    # giving <our config param> : <the configured value>
    # if a value occurs multiple times, we take the last set value
    for k, v in config.key_aliases.items():
        config._forward_aliases[v] = k
    all_data = read_targets(config.targets)
    current_semver = semver.get_current_semver(all_data)
    triggers = get_all_triggers(bump, file_triggers)
    updates.update(get_lock_behaviour(triggers, all_data, lock))
    updates.update(get_dvcs_info())
    if set_to:
        # explicit version wins over any triggers
        _LOG.debug("setting version directly: %s", set_to)
        new_semver = auto_version.definitions.SemVer(*set_to.split("."))
        if not lock:
            warnings.warn(
                "After setting version manually, does it need locking for a CI flow?",
                UserWarning,
            )
    elif set_patch_count:
        _LOG.debug(
            "auto-incrementing version, using commit count for patch: %s",
            updates[Constants.COMMIT_COUNT_FIELD],
        )
        new_semver = semver.make_new_semver(
            current_semver, triggers, patch=updates[Constants.COMMIT_COUNT_FIELD]
        )
    else:
        _LOG.debug("auto-incrementing version")
        new_semver = semver.make_new_semver(current_semver, triggers)
    updates.update(
        get_final_version_string(
            release_mode=release,
            semver=new_semver,
            commit_count=updates.get(Constants.COMMIT_COUNT_FIELD, 0),
        )
    )
    # expose each semver component (major/minor/patch) individually
    for part in semver.SemVerSigFig:
        updates[part] = getattr(new_semver, part)
    # only rewrite a field that the user has specified in the configuration
    native_updates = {
        native: updates[key]
        for native, key in config.key_aliases.items()
        if key in updates
    }
    # finally, add in commandline overrides
    native_updates.update(extra_updates)
    write_targets(config.targets, **native_updates)
    return current_semver, new_semver, native_updates
def parse_other_args(others):
# pull extra kwargs from commandline, e.g. TESTRUNNER_VERSION
updates = {}
for kwargs in others:
try:
k, v = kwargs.split("=")
_LOG.debug("parsing extra replacement from command line: %r = %r", k, v)
updates[k.strip()] = ast.literal_eval(v.strip())
except Exception:
_LOG.exception(
"Failed to unpack additional parameter pair: %r (ignored)", kwargs
)
return updates
__name__ == "__main__" and main_from_cli()
|
contentful-labs/contentful.py | contentful/cda/resources.py | Array.resolve_links | python | def resolve_links(self):
for resource in self.items_mapped['Entry'].values():
for dct in [getattr(resource, '_cf_cda', {}), resource.fields]:
for k, v in dct.items():
if isinstance(v, ResourceLink):
resolved = self._resolve_resource_link(v)
if resolved is not None:
dct[k] = resolved
elif isinstance(v, (MultipleAssets, MultipleEntries, list)):
for idx, ele in enumerate(v):
if not isinstance(ele, ResourceLink):
break
resolved = self._resolve_resource_link(ele)
if resolved is not None:
v[idx] = resolved | Attempt to resolve all internal links (locally).
In case the linked resources are found either as members of the array or within
the `includes` element, those will be replaced and reference the actual resources.
No network calls will be performed. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/resources.py#L90-L111 | [
"def _resolve_resource_link(self, link):\n return self.items_mapped[link.link_type].get(link.resource_id)\n"
] | class Array(Resource):
"""Collection of multiple :class:`.Resource` instances.
**Attributes**:
- limit (int): `limit` parameter.
- skip (int): `skip` parameter.
- total (int): Total number of resources returned from the API.
- items (list): Resources contained within the response.
- items_mapped (dict): All contained resources mapped by Assets/Entries using the resource ID.
"""
def __init__(self, sys=None):
    """Array constructor.

    :param sys: (dict) resource system attributes.
    :return: :class:`.Array` instance.
    """
    super(Array, self).__init__(sys)
    # Pagination metadata is unknown until the factory populates it.
    self.limit = self.skip = self.total = None
    # Flat list of resources, plus an id-keyed map split by resource type.
    self.items = []
    self.items_mapped = {}
def __iter__(self):
    """Iterate over the contained resources (proxies the `items` list)."""
    return iter(self.items)
def __getitem__(self, index):
    """Index into the contained resources (proxies the `items` list)."""
    return self.items[index]
def _resolve_resource_link(self, link):
    # Look up a linked resource locally by type ('Asset'/'Entry') and id;
    # returns None when the link target is not present in this array.
    return self.items_mapped[link.link_type].get(link.resource_id)
|
contentful-labs/contentful.py | contentful/cda/serialization.py | ResourceFactory.from_json | python | def from_json(self, json):
res_type = json['sys']['type']
if ResourceType.Array.value == res_type:
return self.create_array(json)
elif ResourceType.Entry.value == res_type:
return self.create_entry(json)
elif ResourceType.Asset.value == res_type:
return ResourceFactory.create_asset(json)
elif ResourceType.ContentType.value == res_type:
return ResourceFactory.create_content_type(json)
elif ResourceType.Space.value == res_type:
return ResourceFactory.create_space(json) | Create resource out of JSON data.
:param json: JSON dict.
:return: Resource with a type defined by the given JSON data. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L34-L51 | [
"def create_entry(self, json):\n \"\"\"Create :class:`.resources.Entry` from JSON.\n\n :param json: JSON dict.\n :return: Entry instance.\n \"\"\"\n sys = json['sys']\n ct = sys['contentType']['sys']['id']\n fields = json['fields']\n raw_fields = copy.deepcopy(fields)\n\n # Replace links with :class:`.resources.ResourceLink` objects.\n for k, v in fields.items():\n link = ResourceFactory._extract_link(v)\n if link is not None:\n fields[k] = link\n elif isinstance(v, list):\n for idx, ele in enumerate(v):\n link = ResourceFactory._extract_link(ele)\n if link is not None:\n v[idx] = link\n\n if ct in self.entries_mapping:\n clazz = self.entries_mapping[ct]\n result = clazz()\n\n for k, v in clazz.__entry_fields__.items():\n field_value = fields.get(v.field_id)\n if field_value is not None:\n setattr(result, k, ResourceFactory.convert_value(field_value, v))\n else:\n result = Entry()\n\n result.sys = sys\n result.fields = fields\n result.raw_fields = raw_fields\n\n return result\n",
"def create_asset(json):\n \"\"\"Create :class:`.resources.Asset` from JSON.\n\n :param json: JSON dict.\n :return: Asset instance.\n \"\"\"\n result = Asset(json['sys'])\n file_dict = json['fields']['file']\n result.fields = json['fields']\n result.url = file_dict['url']\n result.mimeType = file_dict['contentType']\n return result\n",
"def create_content_type(json):\n \"\"\"Create :class:`.resource.ContentType` from JSON.\n\n :param json: JSON dict.\n :return: ContentType instance.\n \"\"\"\n result = ContentType(json['sys'])\n\n for field in json['fields']:\n field_id = field['id']\n del field['id']\n result.fields[field_id] = field\n\n result.name = json['name']\n result.display_field = json.get('displayField')\n\n return result\n",
"def create_space(json):\n \"\"\"Create :class:`.resources.Space` from JSON.\n\n :param json: JSON dict.\n :return: Space instance.\n \"\"\"\n result = Space(json['sys'])\n result.name = json['name']\n return result\n",
"def create_array(self, json):\n \"\"\"Create :class:`.resources.Array` from JSON.\n\n :param json: JSON dict.\n :return: Array instance.\n \"\"\"\n result = Array(json['sys'])\n result.total = json['total']\n result.skip = json['skip']\n result.limit = json['limit']\n result.items = []\n result.items_mapped = {'Asset': {}, 'Entry': {}}\n\n self.process_array_items(result, json)\n self.process_array_includes(result, json)\n\n return result"
] | class ResourceFactory(object):
"""Factory for generating :class:`.resources.Resource` subclasses out of JSON data.
Attributes:
entries_mapping (dict): Mapping of Content Type IDs to custom Entry subclasses.
"""
def __init__(self, custom_entries):
"""ResourceFactory constructor.
:param custom_entries: list of custom Entry subclasses.
:return: ResourceFactory instance.
"""
super(ResourceFactory, self).__init__()
self.entries_mapping = {}
if custom_entries is not None:
for c in custom_entries:
ct = c.__content_type__
self.entries_mapping[ct] = c
@staticmethod
def _extract_link(obj):
    """Return a :class:`ResourceLink` if *obj* is a raw link dict, else None."""
    if isinstance(obj, dict):
        sys = obj.get('sys')
        if isinstance(sys, dict) and sys.get('type') == ResourceType.Link.value:
            return ResourceLink(sys)
    return None
def create_entry(self, json):
    """Create :class:`.resources.Entry` from JSON.

    :param json: JSON dict.
    :return: Entry instance.
    """
    sys = json['sys']
    ct = sys['contentType']['sys']['id']
    fields = json['fields']
    # Keep an untouched copy before links are replaced in `fields` below.
    raw_fields = copy.deepcopy(fields)
    # Replace links with :class:`.resources.ResourceLink` objects.
    for k, v in fields.items():
        link = ResourceFactory._extract_link(v)
        if link is not None:
            fields[k] = link
        elif isinstance(v, list):
            # Lists may mix links with plain values; replace links in place.
            for idx, ele in enumerate(v):
                link = ResourceFactory._extract_link(ele)
                if link is not None:
                    v[idx] = link
    if ct in self.entries_mapping:
        # A custom Entry subclass is registered for this Content Type:
        # populate its declared fields with type-converted values.
        clazz = self.entries_mapping[ct]
        result = clazz()
        for k, v in clazz.__entry_fields__.items():
            field_value = fields.get(v.field_id)
            if field_value is not None:
                setattr(result, k, ResourceFactory.convert_value(field_value, v))
    else:
        result = Entry()
    result.sys = sys
    result.fields = fields
    result.raw_fields = raw_fields
    return result
@staticmethod
def create_asset(json):
    """Create :class:`.resources.Asset` from JSON.

    :param json: JSON dict.
    :return: Asset instance.
    """
    asset = Asset(json['sys'])
    asset.fields = json['fields']
    file_info = json['fields']['file']
    asset.url = file_info['url']
    asset.mimeType = file_info['contentType']
    return asset
@staticmethod
def create_content_type(json):
    """Create :class:`.resource.ContentType` from JSON.

    :param json: JSON dict (not modified).
    :return: ContentType instance.
    """
    result = ContentType(json['sys'])
    for field in json['fields']:
        # Copy before dropping 'id' so the caller's parsed JSON is not
        # mutated (the original implementation deleted the key in place).
        field = dict(field)
        field_id = field.pop('id')
        result.fields[field_id] = field
    result.name = json['name']
    result.display_field = json.get('displayField')
    return result
@staticmethod
def create_space(json):
    """Create :class:`.resources.Space` from JSON.

    :param json: JSON dict.
    :return: Space instance.
    """
    space = Space(json['sys'])
    space.name = json['name']
    return space
@staticmethod
def convert_value(value, field):
    """Given a :class:`.fields.Field` and a value, ensure that the value matches the given type, otherwise
    attempt to convert it.

    :param value: field value.
    :param field: :class:`.fields.Field` instance.
    :return: Result value.
    """
    clz = field.field_type
    if clz is Boolean:
        if not isinstance(value, bool):
            return bool(value)
    elif clz is Date:
        # dateutil's parser accepts strings only, so stringify first.
        if not isinstance(value, str):
            value = str(value)
        return parser.parse(value)
    elif clz is Number:
        if not isinstance(value, int):
            return int(value)
    elif clz is Object:
        if not isinstance(value, dict):
            # NOTE(review): assumes the string is a Python-literal repr of a
            # dict — literal_eval raises on anything else; confirm upstream.
            return ast.literal_eval(value)
    elif clz is Text or clz is Symbol:
        if not isinstance(value, str):
            return str(value)
    elif clz is List or clz is MultipleAssets or clz is MultipleEntries:
        # Wrap a scalar into a one-element list.
        if not isinstance(value, list):
            return [value]
    # No need to convert :class:`.fields.Link` types as the expected value
    # should be of type :class:`.resources.ResourceLink` for links.
    return value
# Array
def process_array_items(self, array, json):
    """Create a resource for every entry of `items` and register it.

    Each created resource is appended to ``array.items``; Assets and Entries
    are additionally stored in ``array.items_mapped`` keyed by type and id.

    :param array: Array resource.
    :param json: Raw JSON dictionary.
    """
    for raw in json['items']:
        resource = self.from_json(raw)
        if isinstance(resource, Asset):
            bucket = 'Asset'
        elif isinstance(resource, Entry):
            bucket = 'Entry'
        else:
            bucket = None
        if bucket is not None:
            array.items_mapped[bucket][resource.sys['id']] = resource
        array.items.append(resource)
def process_array_includes(self, array, json):
    """Create a resource for every `includes` item and register it by id.

    :param array: Array resource.
    :param json: Raw JSON dictionary.
    """
    includes = json.get('includes') or {}
    for bucket in array.items_mapped.keys():
        for raw in includes.get(bucket, []):
            resource = self.from_json(raw)
            array.items_mapped[bucket][resource.sys['id']] = resource
def create_array(self, json):
    """Create :class:`.resources.Array` from JSON.

    :param json: JSON dict.
    :return: Array instance.
    """
    array = Array(json['sys'])
    array.total = json['total']
    array.skip = json['skip']
    array.limit = json['limit']
    array.items = []
    array.items_mapped = {'Asset': {}, 'Entry': {}}
    # Populate both top-level items and any linked `includes` resources.
    self.process_array_items(array, json)
    self.process_array_includes(array, json)
    return array
contentful-labs/contentful.py | contentful/cda/serialization.py | ResourceFactory.create_entry | python | def create_entry(self, json):
sys = json['sys']
ct = sys['contentType']['sys']['id']
fields = json['fields']
raw_fields = copy.deepcopy(fields)
# Replace links with :class:`.resources.ResourceLink` objects.
for k, v in fields.items():
link = ResourceFactory._extract_link(v)
if link is not None:
fields[k] = link
elif isinstance(v, list):
for idx, ele in enumerate(v):
link = ResourceFactory._extract_link(ele)
if link is not None:
v[idx] = link
if ct in self.entries_mapping:
clazz = self.entries_mapping[ct]
result = clazz()
for k, v in clazz.__entry_fields__.items():
field_value = fields.get(v.field_id)
if field_value is not None:
setattr(result, k, ResourceFactory.convert_value(field_value, v))
else:
result = Entry()
result.sys = sys
result.fields = fields
result.raw_fields = raw_fields
return result | Create :class:`.resources.Entry` from JSON.
:param json: JSON dict.
:return: Entry instance. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L64-L101 | [
"def _extract_link(obj):\n if not isinstance(obj, dict):\n return None\n\n sys = obj.get('sys')\n if isinstance(sys, dict) and sys.get('type') == ResourceType.Link.value:\n return ResourceLink(sys)\n\n return None\n",
"def convert_value(value, field):\n \"\"\"Given a :class:`.fields.Field` and a value, ensure that the value matches the given type, otherwise\n attempt to convert it.\n\n :param value: field value.\n :param field: :class:`.fields.Field` instance.\n :return: Result value.\n \"\"\"\n clz = field.field_type\n\n if clz is Boolean:\n if not isinstance(value, bool):\n return bool(value)\n\n elif clz is Date:\n if not isinstance(value, str):\n value = str(value)\n return parser.parse(value)\n\n elif clz is Number:\n if not isinstance(value, int):\n return int(value)\n\n elif clz is Object:\n if not isinstance(value, dict):\n return ast.literal_eval(value)\n\n elif clz is Text or clz is Symbol:\n if not isinstance(value, str):\n return str(value)\n\n elif clz is List or clz is MultipleAssets or clz is MultipleEntries:\n if not isinstance(value, list):\n return [value]\n\n # No need to convert :class:`.fields.Link` types as the expected value\n # should be of type :class:`.resources.ResourceLink` for links.\n\n return value\n"
] | class ResourceFactory(object):
"""Factory for generating :class:`.resources.Resource` subclasses out of JSON data.
Attributes:
entries_mapping (dict): Mapping of Content Type IDs to custom Entry subclasses.
"""
def __init__(self, custom_entries):
"""ResourceFactory constructor.
:param custom_entries: list of custom Entry subclasses.
:return: ResourceFactory instance.
"""
super(ResourceFactory, self).__init__()
self.entries_mapping = {}
if custom_entries is not None:
for c in custom_entries:
ct = c.__content_type__
self.entries_mapping[ct] = c
def from_json(self, json):
"""Create resource out of JSON data.
:param json: JSON dict.
:return: Resource with a type defined by the given JSON data.
"""
res_type = json['sys']['type']
if ResourceType.Array.value == res_type:
return self.create_array(json)
elif ResourceType.Entry.value == res_type:
return self.create_entry(json)
elif ResourceType.Asset.value == res_type:
return ResourceFactory.create_asset(json)
elif ResourceType.ContentType.value == res_type:
return ResourceFactory.create_content_type(json)
elif ResourceType.Space.value == res_type:
return ResourceFactory.create_space(json)
@staticmethod
def _extract_link(obj):
if not isinstance(obj, dict):
return None
sys = obj.get('sys')
if isinstance(sys, dict) and sys.get('type') == ResourceType.Link.value:
return ResourceLink(sys)
return None
@staticmethod
def create_asset(json):
"""Create :class:`.resources.Asset` from JSON.
:param json: JSON dict.
:return: Asset instance.
"""
result = Asset(json['sys'])
file_dict = json['fields']['file']
result.fields = json['fields']
result.url = file_dict['url']
result.mimeType = file_dict['contentType']
return result
@staticmethod
def create_content_type(json):
"""Create :class:`.resource.ContentType` from JSON.
:param json: JSON dict.
:return: ContentType instance.
"""
result = ContentType(json['sys'])
for field in json['fields']:
field_id = field['id']
del field['id']
result.fields[field_id] = field
result.name = json['name']
result.display_field = json.get('displayField')
return result
@staticmethod
def create_space(json):
"""Create :class:`.resources.Space` from JSON.
:param json: JSON dict.
:return: Space instance.
"""
result = Space(json['sys'])
result.name = json['name']
return result
@staticmethod
def convert_value(value, field):
"""Given a :class:`.fields.Field` and a value, ensure that the value matches the given type, otherwise
attempt to convert it.
:param value: field value.
:param field: :class:`.fields.Field` instance.
:return: Result value.
"""
clz = field.field_type
if clz is Boolean:
if not isinstance(value, bool):
return bool(value)
elif clz is Date:
if not isinstance(value, str):
value = str(value)
return parser.parse(value)
elif clz is Number:
if not isinstance(value, int):
return int(value)
elif clz is Object:
if not isinstance(value, dict):
return ast.literal_eval(value)
elif clz is Text or clz is Symbol:
if not isinstance(value, str):
return str(value)
elif clz is List or clz is MultipleAssets or clz is MultipleEntries:
if not isinstance(value, list):
return [value]
# No need to convert :class:`.fields.Link` types as the expected value
# should be of type :class:`.resources.ResourceLink` for links.
return value
# Array
def process_array_items(self, array, json):
"""Iterate through all `items` and create a resource for each.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary.
"""
for item in json['items']:
key = None
processed = self.from_json(item)
if isinstance(processed, Asset):
key = 'Asset'
elif isinstance(processed, Entry):
key = 'Entry'
if key is not None:
array.items_mapped[key][processed.sys['id']] = processed
array.items.append(processed)
def process_array_includes(self, array, json):
"""Iterate through all `includes` and create a resource for every item.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary.
"""
includes = json.get('includes') or {}
for key in array.items_mapped.keys():
if key in includes:
for resource in includes[key]:
processed = self.from_json(resource)
array.items_mapped[key][processed.sys['id']] = processed
def create_array(self, json):
"""Create :class:`.resources.Array` from JSON.
:param json: JSON dict.
:return: Array instance.
"""
result = Array(json['sys'])
result.total = json['total']
result.skip = json['skip']
result.limit = json['limit']
result.items = []
result.items_mapped = {'Asset': {}, 'Entry': {}}
self.process_array_items(result, json)
self.process_array_includes(result, json)
return result |
contentful-labs/contentful.py | contentful/cda/serialization.py | ResourceFactory.create_asset | python | def create_asset(json):
result = Asset(json['sys'])
file_dict = json['fields']['file']
result.fields = json['fields']
result.url = file_dict['url']
result.mimeType = file_dict['contentType']
return result | Create :class:`.resources.Asset` from JSON.
:param json: JSON dict.
:return: Asset instance. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L104-L115 | null | class ResourceFactory(object):
"""Factory for generating :class:`.resources.Resource` subclasses out of JSON data.
Attributes:
entries_mapping (dict): Mapping of Content Type IDs to custom Entry subclasses.
"""
def __init__(self, custom_entries):
"""ResourceFactory constructor.
:param custom_entries: list of custom Entry subclasses.
:return: ResourceFactory instance.
"""
super(ResourceFactory, self).__init__()
self.entries_mapping = {}
if custom_entries is not None:
for c in custom_entries:
ct = c.__content_type__
self.entries_mapping[ct] = c
def from_json(self, json):
"""Create resource out of JSON data.
:param json: JSON dict.
:return: Resource with a type defined by the given JSON data.
"""
res_type = json['sys']['type']
if ResourceType.Array.value == res_type:
return self.create_array(json)
elif ResourceType.Entry.value == res_type:
return self.create_entry(json)
elif ResourceType.Asset.value == res_type:
return ResourceFactory.create_asset(json)
elif ResourceType.ContentType.value == res_type:
return ResourceFactory.create_content_type(json)
elif ResourceType.Space.value == res_type:
return ResourceFactory.create_space(json)
@staticmethod
def _extract_link(obj):
if not isinstance(obj, dict):
return None
sys = obj.get('sys')
if isinstance(sys, dict) and sys.get('type') == ResourceType.Link.value:
return ResourceLink(sys)
return None
def create_entry(self, json):
"""Create :class:`.resources.Entry` from JSON.
:param json: JSON dict.
:return: Entry instance.
"""
sys = json['sys']
ct = sys['contentType']['sys']['id']
fields = json['fields']
raw_fields = copy.deepcopy(fields)
# Replace links with :class:`.resources.ResourceLink` objects.
for k, v in fields.items():
link = ResourceFactory._extract_link(v)
if link is not None:
fields[k] = link
elif isinstance(v, list):
for idx, ele in enumerate(v):
link = ResourceFactory._extract_link(ele)
if link is not None:
v[idx] = link
if ct in self.entries_mapping:
clazz = self.entries_mapping[ct]
result = clazz()
for k, v in clazz.__entry_fields__.items():
field_value = fields.get(v.field_id)
if field_value is not None:
setattr(result, k, ResourceFactory.convert_value(field_value, v))
else:
result = Entry()
result.sys = sys
result.fields = fields
result.raw_fields = raw_fields
return result
@staticmethod
@staticmethod
def create_content_type(json):
"""Create :class:`.resource.ContentType` from JSON.
:param json: JSON dict.
:return: ContentType instance.
"""
result = ContentType(json['sys'])
for field in json['fields']:
field_id = field['id']
del field['id']
result.fields[field_id] = field
result.name = json['name']
result.display_field = json.get('displayField')
return result
@staticmethod
def create_space(json):
"""Create :class:`.resources.Space` from JSON.
:param json: JSON dict.
:return: Space instance.
"""
result = Space(json['sys'])
result.name = json['name']
return result
@staticmethod
def convert_value(value, field):
"""Given a :class:`.fields.Field` and a value, ensure that the value matches the given type, otherwise
attempt to convert it.
:param value: field value.
:param field: :class:`.fields.Field` instance.
:return: Result value.
"""
clz = field.field_type
if clz is Boolean:
if not isinstance(value, bool):
return bool(value)
elif clz is Date:
if not isinstance(value, str):
value = str(value)
return parser.parse(value)
elif clz is Number:
if not isinstance(value, int):
return int(value)
elif clz is Object:
if not isinstance(value, dict):
return ast.literal_eval(value)
elif clz is Text or clz is Symbol:
if not isinstance(value, str):
return str(value)
elif clz is List or clz is MultipleAssets or clz is MultipleEntries:
if not isinstance(value, list):
return [value]
# No need to convert :class:`.fields.Link` types as the expected value
# should be of type :class:`.resources.ResourceLink` for links.
return value
# Array
def process_array_items(self, array, json):
"""Iterate through all `items` and create a resource for each.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary.
"""
for item in json['items']:
key = None
processed = self.from_json(item)
if isinstance(processed, Asset):
key = 'Asset'
elif isinstance(processed, Entry):
key = 'Entry'
if key is not None:
array.items_mapped[key][processed.sys['id']] = processed
array.items.append(processed)
def process_array_includes(self, array, json):
"""Iterate through all `includes` and create a resource for every item.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary.
"""
includes = json.get('includes') or {}
for key in array.items_mapped.keys():
if key in includes:
for resource in includes[key]:
processed = self.from_json(resource)
array.items_mapped[key][processed.sys['id']] = processed
def create_array(self, json):
"""Create :class:`.resources.Array` from JSON.
:param json: JSON dict.
:return: Array instance.
"""
result = Array(json['sys'])
result.total = json['total']
result.skip = json['skip']
result.limit = json['limit']
result.items = []
result.items_mapped = {'Asset': {}, 'Entry': {}}
self.process_array_items(result, json)
self.process_array_includes(result, json)
return result |
contentful-labs/contentful.py | contentful/cda/serialization.py | ResourceFactory.create_content_type | python | def create_content_type(json):
result = ContentType(json['sys'])
for field in json['fields']:
field_id = field['id']
del field['id']
result.fields[field_id] = field
result.name = json['name']
result.display_field = json.get('displayField')
return result | Create :class:`.resource.ContentType` from JSON.
:param json: JSON dict.
:return: ContentType instance. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L118-L134 | null | class ResourceFactory(object):
"""Factory for generating :class:`.resources.Resource` subclasses out of JSON data.
Attributes:
entries_mapping (dict): Mapping of Content Type IDs to custom Entry subclasses.
"""
def __init__(self, custom_entries):
"""ResourceFactory constructor.
:param custom_entries: list of custom Entry subclasses.
:return: ResourceFactory instance.
"""
super(ResourceFactory, self).__init__()
self.entries_mapping = {}
if custom_entries is not None:
for c in custom_entries:
ct = c.__content_type__
self.entries_mapping[ct] = c
def from_json(self, json):
"""Create resource out of JSON data.
:param json: JSON dict.
:return: Resource with a type defined by the given JSON data.
"""
res_type = json['sys']['type']
if ResourceType.Array.value == res_type:
return self.create_array(json)
elif ResourceType.Entry.value == res_type:
return self.create_entry(json)
elif ResourceType.Asset.value == res_type:
return ResourceFactory.create_asset(json)
elif ResourceType.ContentType.value == res_type:
return ResourceFactory.create_content_type(json)
elif ResourceType.Space.value == res_type:
return ResourceFactory.create_space(json)
@staticmethod
def _extract_link(obj):
if not isinstance(obj, dict):
return None
sys = obj.get('sys')
if isinstance(sys, dict) and sys.get('type') == ResourceType.Link.value:
return ResourceLink(sys)
return None
def create_entry(self, json):
"""Create :class:`.resources.Entry` from JSON.
:param json: JSON dict.
:return: Entry instance.
"""
sys = json['sys']
ct = sys['contentType']['sys']['id']
fields = json['fields']
raw_fields = copy.deepcopy(fields)
# Replace links with :class:`.resources.ResourceLink` objects.
for k, v in fields.items():
link = ResourceFactory._extract_link(v)
if link is not None:
fields[k] = link
elif isinstance(v, list):
for idx, ele in enumerate(v):
link = ResourceFactory._extract_link(ele)
if link is not None:
v[idx] = link
if ct in self.entries_mapping:
clazz = self.entries_mapping[ct]
result = clazz()
for k, v in clazz.__entry_fields__.items():
field_value = fields.get(v.field_id)
if field_value is not None:
setattr(result, k, ResourceFactory.convert_value(field_value, v))
else:
result = Entry()
result.sys = sys
result.fields = fields
result.raw_fields = raw_fields
return result
@staticmethod
def create_asset(json):
"""Create :class:`.resources.Asset` from JSON.
:param json: JSON dict.
:return: Asset instance.
"""
result = Asset(json['sys'])
file_dict = json['fields']['file']
result.fields = json['fields']
result.url = file_dict['url']
result.mimeType = file_dict['contentType']
return result
@staticmethod
@staticmethod
def create_space(json):
"""Create :class:`.resources.Space` from JSON.
:param json: JSON dict.
:return: Space instance.
"""
result = Space(json['sys'])
result.name = json['name']
return result
@staticmethod
def convert_value(value, field):
"""Given a :class:`.fields.Field` and a value, ensure that the value matches the given type, otherwise
attempt to convert it.
:param value: field value.
:param field: :class:`.fields.Field` instance.
:return: Result value.
"""
clz = field.field_type
if clz is Boolean:
if not isinstance(value, bool):
return bool(value)
elif clz is Date:
if not isinstance(value, str):
value = str(value)
return parser.parse(value)
elif clz is Number:
if not isinstance(value, int):
return int(value)
elif clz is Object:
if not isinstance(value, dict):
return ast.literal_eval(value)
elif clz is Text or clz is Symbol:
if not isinstance(value, str):
return str(value)
elif clz is List or clz is MultipleAssets or clz is MultipleEntries:
if not isinstance(value, list):
return [value]
# No need to convert :class:`.fields.Link` types as the expected value
# should be of type :class:`.resources.ResourceLink` for links.
return value
# Array
def process_array_items(self, array, json):
"""Iterate through all `items` and create a resource for each.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary.
"""
for item in json['items']:
key = None
processed = self.from_json(item)
if isinstance(processed, Asset):
key = 'Asset'
elif isinstance(processed, Entry):
key = 'Entry'
if key is not None:
array.items_mapped[key][processed.sys['id']] = processed
array.items.append(processed)
def process_array_includes(self, array, json):
"""Iterate through all `includes` and create a resource for every item.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary.
"""
includes = json.get('includes') or {}
for key in array.items_mapped.keys():
if key in includes:
for resource in includes[key]:
processed = self.from_json(resource)
array.items_mapped[key][processed.sys['id']] = processed
def create_array(self, json):
"""Create :class:`.resources.Array` from JSON.
:param json: JSON dict.
:return: Array instance.
"""
result = Array(json['sys'])
result.total = json['total']
result.skip = json['skip']
result.limit = json['limit']
result.items = []
result.items_mapped = {'Asset': {}, 'Entry': {}}
self.process_array_items(result, json)
self.process_array_includes(result, json)
return result |
contentful-labs/contentful.py | contentful/cda/serialization.py | ResourceFactory.convert_value | python | def convert_value(value, field):
clz = field.field_type
if clz is Boolean:
if not isinstance(value, bool):
return bool(value)
elif clz is Date:
if not isinstance(value, str):
value = str(value)
return parser.parse(value)
elif clz is Number:
if not isinstance(value, int):
return int(value)
elif clz is Object:
if not isinstance(value, dict):
return ast.literal_eval(value)
elif clz is Text or clz is Symbol:
if not isinstance(value, str):
return str(value)
elif clz is List or clz is MultipleAssets or clz is MultipleEntries:
if not isinstance(value, list):
return [value]
# No need to convert :class:`.fields.Link` types as the expected value
# should be of type :class:`.resources.ResourceLink` for links.
return value | Given a :class:`.fields.Field` and a value, ensure that the value matches the given type, otherwise
attempt to convert it.
:param value: field value.
:param field: :class:`.fields.Field` instance.
:return: Result value. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L148-L186 | null | class ResourceFactory(object):
"""Factory for generating :class:`.resources.Resource` subclasses out of JSON data.
Attributes:
entries_mapping (dict): Mapping of Content Type IDs to custom Entry subclasses.
"""
def __init__(self, custom_entries):
"""ResourceFactory constructor.
:param custom_entries: list of custom Entry subclasses.
:return: ResourceFactory instance.
"""
super(ResourceFactory, self).__init__()
self.entries_mapping = {}
if custom_entries is not None:
for c in custom_entries:
ct = c.__content_type__
self.entries_mapping[ct] = c
def from_json(self, json):
"""Create resource out of JSON data.
:param json: JSON dict.
:return: Resource with a type defined by the given JSON data.
"""
res_type = json['sys']['type']
if ResourceType.Array.value == res_type:
return self.create_array(json)
elif ResourceType.Entry.value == res_type:
return self.create_entry(json)
elif ResourceType.Asset.value == res_type:
return ResourceFactory.create_asset(json)
elif ResourceType.ContentType.value == res_type:
return ResourceFactory.create_content_type(json)
elif ResourceType.Space.value == res_type:
return ResourceFactory.create_space(json)
@staticmethod
def _extract_link(obj):
if not isinstance(obj, dict):
return None
sys = obj.get('sys')
if isinstance(sys, dict) and sys.get('type') == ResourceType.Link.value:
return ResourceLink(sys)
return None
def create_entry(self, json):
"""Create :class:`.resources.Entry` from JSON.
:param json: JSON dict.
:return: Entry instance.
"""
sys = json['sys']
ct = sys['contentType']['sys']['id']
fields = json['fields']
raw_fields = copy.deepcopy(fields)
# Replace links with :class:`.resources.ResourceLink` objects.
for k, v in fields.items():
link = ResourceFactory._extract_link(v)
if link is not None:
fields[k] = link
elif isinstance(v, list):
for idx, ele in enumerate(v):
link = ResourceFactory._extract_link(ele)
if link is not None:
v[idx] = link
if ct in self.entries_mapping:
clazz = self.entries_mapping[ct]
result = clazz()
for k, v in clazz.__entry_fields__.items():
field_value = fields.get(v.field_id)
if field_value is not None:
setattr(result, k, ResourceFactory.convert_value(field_value, v))
else:
result = Entry()
result.sys = sys
result.fields = fields
result.raw_fields = raw_fields
return result
@staticmethod
def create_asset(json):
"""Create :class:`.resources.Asset` from JSON.
:param json: JSON dict.
:return: Asset instance.
"""
result = Asset(json['sys'])
file_dict = json['fields']['file']
result.fields = json['fields']
result.url = file_dict['url']
result.mimeType = file_dict['contentType']
return result
@staticmethod
def create_content_type(json):
"""Create :class:`.resource.ContentType` from JSON.
:param json: JSON dict.
:return: ContentType instance.
"""
result = ContentType(json['sys'])
for field in json['fields']:
field_id = field['id']
del field['id']
result.fields[field_id] = field
result.name = json['name']
result.display_field = json.get('displayField')
return result
@staticmethod
def create_space(json):
"""Create :class:`.resources.Space` from JSON.
:param json: JSON dict.
:return: Space instance.
"""
result = Space(json['sys'])
result.name = json['name']
return result
@staticmethod
# Array
def process_array_items(self, array, json):
"""Iterate through all `items` and create a resource for each.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary.
"""
for item in json['items']:
key = None
processed = self.from_json(item)
if isinstance(processed, Asset):
key = 'Asset'
elif isinstance(processed, Entry):
key = 'Entry'
if key is not None:
array.items_mapped[key][processed.sys['id']] = processed
array.items.append(processed)
def process_array_includes(self, array, json):
"""Iterate through all `includes` and create a resource for every item.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary.
"""
includes = json.get('includes') or {}
for key in array.items_mapped.keys():
if key in includes:
for resource in includes[key]:
processed = self.from_json(resource)
array.items_mapped[key][processed.sys['id']] = processed
def create_array(self, json):
"""Create :class:`.resources.Array` from JSON.
:param json: JSON dict.
:return: Array instance.
"""
result = Array(json['sys'])
result.total = json['total']
result.skip = json['skip']
result.limit = json['limit']
result.items = []
result.items_mapped = {'Asset': {}, 'Entry': {}}
self.process_array_items(result, json)
self.process_array_includes(result, json)
return result |
contentful-labs/contentful.py | contentful/cda/serialization.py | ResourceFactory.process_array_items | python | def process_array_items(self, array, json):
for item in json['items']:
key = None
processed = self.from_json(item)
if isinstance(processed, Asset):
key = 'Asset'
elif isinstance(processed, Entry):
key = 'Entry'
if key is not None:
array.items_mapped[key][processed.sys['id']] = processed
array.items.append(processed) | Iterate through all `items` and create a resource for each.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L189-L209 | [
"def from_json(self, json):\n \"\"\"Create resource out of JSON data.\n\n :param json: JSON dict.\n :return: Resource with a type defined by the given JSON data.\n \"\"\"\n res_type = json['sys']['type']\n\n if ResourceType.Array.value == res_type:\n return self.create_array(json)\n elif ResourceType.Entry.value == res_type:\n return self.create_entry(json)\n elif ResourceType.Asset.value == res_type:\n return ResourceFactory.create_asset(json)\n elif ResourceType.ContentType.value == res_type:\n return ResourceFactory.create_content_type(json)\n elif ResourceType.Space.value == res_type:\n return ResourceFactory.create_space(json)\n"
] | class ResourceFactory(object):
"""Factory for generating :class:`.resources.Resource` subclasses out of JSON data.
Attributes:
entries_mapping (dict): Mapping of Content Type IDs to custom Entry subclasses.
"""
def __init__(self, custom_entries):
"""ResourceFactory constructor.
:param custom_entries: list of custom Entry subclasses.
:return: ResourceFactory instance.
"""
super(ResourceFactory, self).__init__()
self.entries_mapping = {}
if custom_entries is not None:
for c in custom_entries:
ct = c.__content_type__
self.entries_mapping[ct] = c
def from_json(self, json):
"""Create resource out of JSON data.
:param json: JSON dict.
:return: Resource with a type defined by the given JSON data.
"""
res_type = json['sys']['type']
if ResourceType.Array.value == res_type:
return self.create_array(json)
elif ResourceType.Entry.value == res_type:
return self.create_entry(json)
elif ResourceType.Asset.value == res_type:
return ResourceFactory.create_asset(json)
elif ResourceType.ContentType.value == res_type:
return ResourceFactory.create_content_type(json)
elif ResourceType.Space.value == res_type:
return ResourceFactory.create_space(json)
@staticmethod
def _extract_link(obj):
if not isinstance(obj, dict):
return None
sys = obj.get('sys')
if isinstance(sys, dict) and sys.get('type') == ResourceType.Link.value:
return ResourceLink(sys)
return None
def create_entry(self, json):
"""Create :class:`.resources.Entry` from JSON.
:param json: JSON dict.
:return: Entry instance.
"""
sys = json['sys']
ct = sys['contentType']['sys']['id']
fields = json['fields']
raw_fields = copy.deepcopy(fields)
# Replace links with :class:`.resources.ResourceLink` objects.
for k, v in fields.items():
link = ResourceFactory._extract_link(v)
if link is not None:
fields[k] = link
elif isinstance(v, list):
for idx, ele in enumerate(v):
link = ResourceFactory._extract_link(ele)
if link is not None:
v[idx] = link
if ct in self.entries_mapping:
clazz = self.entries_mapping[ct]
result = clazz()
for k, v in clazz.__entry_fields__.items():
field_value = fields.get(v.field_id)
if field_value is not None:
setattr(result, k, ResourceFactory.convert_value(field_value, v))
else:
result = Entry()
result.sys = sys
result.fields = fields
result.raw_fields = raw_fields
return result
@staticmethod
def create_asset(json):
"""Create :class:`.resources.Asset` from JSON.
:param json: JSON dict.
:return: Asset instance.
"""
result = Asset(json['sys'])
file_dict = json['fields']['file']
result.fields = json['fields']
result.url = file_dict['url']
result.mimeType = file_dict['contentType']
return result
@staticmethod
def create_content_type(json):
"""Create :class:`.resource.ContentType` from JSON.
:param json: JSON dict.
:return: ContentType instance.
"""
result = ContentType(json['sys'])
for field in json['fields']:
field_id = field['id']
del field['id']
result.fields[field_id] = field
result.name = json['name']
result.display_field = json.get('displayField')
return result
@staticmethod
def create_space(json):
"""Create :class:`.resources.Space` from JSON.
:param json: JSON dict.
:return: Space instance.
"""
result = Space(json['sys'])
result.name = json['name']
return result
@staticmethod
def convert_value(value, field):
"""Given a :class:`.fields.Field` and a value, ensure that the value matches the given type, otherwise
attempt to convert it.
:param value: field value.
:param field: :class:`.fields.Field` instance.
:return: Result value.
"""
clz = field.field_type
if clz is Boolean:
if not isinstance(value, bool):
return bool(value)
elif clz is Date:
if not isinstance(value, str):
value = str(value)
return parser.parse(value)
elif clz is Number:
if not isinstance(value, int):
return int(value)
elif clz is Object:
if not isinstance(value, dict):
return ast.literal_eval(value)
elif clz is Text or clz is Symbol:
if not isinstance(value, str):
return str(value)
elif clz is List or clz is MultipleAssets or clz is MultipleEntries:
if not isinstance(value, list):
return [value]
# No need to convert :class:`.fields.Link` types as the expected value
# should be of type :class:`.resources.ResourceLink` for links.
return value
# Array
def process_array_includes(self, array, json):
"""Iterate through all `includes` and create a resource for every item.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary.
"""
includes = json.get('includes') or {}
for key in array.items_mapped.keys():
if key in includes:
for resource in includes[key]:
processed = self.from_json(resource)
array.items_mapped[key][processed.sys['id']] = processed
def create_array(self, json):
"""Create :class:`.resources.Array` from JSON.
:param json: JSON dict.
:return: Array instance.
"""
result = Array(json['sys'])
result.total = json['total']
result.skip = json['skip']
result.limit = json['limit']
result.items = []
result.items_mapped = {'Asset': {}, 'Entry': {}}
self.process_array_items(result, json)
self.process_array_includes(result, json)
return result |
contentful-labs/contentful.py | contentful/cda/serialization.py | ResourceFactory.process_array_includes | python | def process_array_includes(self, array, json):
includes = json.get('includes') or {}
for key in array.items_mapped.keys():
if key in includes:
for resource in includes[key]:
processed = self.from_json(resource)
array.items_mapped[key][processed.sys['id']] = processed | Iterate through all `includes` and create a resource for every item.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L211-L224 | [
"def from_json(self, json):\n \"\"\"Create resource out of JSON data.\n\n :param json: JSON dict.\n :return: Resource with a type defined by the given JSON data.\n \"\"\"\n res_type = json['sys']['type']\n\n if ResourceType.Array.value == res_type:\n return self.create_array(json)\n elif ResourceType.Entry.value == res_type:\n return self.create_entry(json)\n elif ResourceType.Asset.value == res_type:\n return ResourceFactory.create_asset(json)\n elif ResourceType.ContentType.value == res_type:\n return ResourceFactory.create_content_type(json)\n elif ResourceType.Space.value == res_type:\n return ResourceFactory.create_space(json)\n"
] | class ResourceFactory(object):
"""Factory for generating :class:`.resources.Resource` subclasses out of JSON data.
Attributes:
entries_mapping (dict): Mapping of Content Type IDs to custom Entry subclasses.
"""
def __init__(self, custom_entries):
"""ResourceFactory constructor.
:param custom_entries: list of custom Entry subclasses.
:return: ResourceFactory instance.
"""
super(ResourceFactory, self).__init__()
self.entries_mapping = {}
if custom_entries is not None:
for c in custom_entries:
ct = c.__content_type__
self.entries_mapping[ct] = c
def from_json(self, json):
"""Create resource out of JSON data.
:param json: JSON dict.
:return: Resource with a type defined by the given JSON data.
"""
res_type = json['sys']['type']
if ResourceType.Array.value == res_type:
return self.create_array(json)
elif ResourceType.Entry.value == res_type:
return self.create_entry(json)
elif ResourceType.Asset.value == res_type:
return ResourceFactory.create_asset(json)
elif ResourceType.ContentType.value == res_type:
return ResourceFactory.create_content_type(json)
elif ResourceType.Space.value == res_type:
return ResourceFactory.create_space(json)
@staticmethod
def _extract_link(obj):
if not isinstance(obj, dict):
return None
sys = obj.get('sys')
if isinstance(sys, dict) and sys.get('type') == ResourceType.Link.value:
return ResourceLink(sys)
return None
def create_entry(self, json):
"""Create :class:`.resources.Entry` from JSON.
:param json: JSON dict.
:return: Entry instance.
"""
sys = json['sys']
ct = sys['contentType']['sys']['id']
fields = json['fields']
raw_fields = copy.deepcopy(fields)
# Replace links with :class:`.resources.ResourceLink` objects.
for k, v in fields.items():
link = ResourceFactory._extract_link(v)
if link is not None:
fields[k] = link
elif isinstance(v, list):
for idx, ele in enumerate(v):
link = ResourceFactory._extract_link(ele)
if link is not None:
v[idx] = link
if ct in self.entries_mapping:
clazz = self.entries_mapping[ct]
result = clazz()
for k, v in clazz.__entry_fields__.items():
field_value = fields.get(v.field_id)
if field_value is not None:
setattr(result, k, ResourceFactory.convert_value(field_value, v))
else:
result = Entry()
result.sys = sys
result.fields = fields
result.raw_fields = raw_fields
return result
@staticmethod
def create_asset(json):
"""Create :class:`.resources.Asset` from JSON.
:param json: JSON dict.
:return: Asset instance.
"""
result = Asset(json['sys'])
file_dict = json['fields']['file']
result.fields = json['fields']
result.url = file_dict['url']
result.mimeType = file_dict['contentType']
return result
@staticmethod
def create_content_type(json):
"""Create :class:`.resource.ContentType` from JSON.
:param json: JSON dict.
:return: ContentType instance.
"""
result = ContentType(json['sys'])
for field in json['fields']:
field_id = field['id']
del field['id']
result.fields[field_id] = field
result.name = json['name']
result.display_field = json.get('displayField')
return result
@staticmethod
def create_space(json):
"""Create :class:`.resources.Space` from JSON.
:param json: JSON dict.
:return: Space instance.
"""
result = Space(json['sys'])
result.name = json['name']
return result
@staticmethod
def convert_value(value, field):
"""Given a :class:`.fields.Field` and a value, ensure that the value matches the given type, otherwise
attempt to convert it.
:param value: field value.
:param field: :class:`.fields.Field` instance.
:return: Result value.
"""
clz = field.field_type
if clz is Boolean:
if not isinstance(value, bool):
return bool(value)
elif clz is Date:
if not isinstance(value, str):
value = str(value)
return parser.parse(value)
elif clz is Number:
if not isinstance(value, int):
return int(value)
elif clz is Object:
if not isinstance(value, dict):
return ast.literal_eval(value)
elif clz is Text or clz is Symbol:
if not isinstance(value, str):
return str(value)
elif clz is List or clz is MultipleAssets or clz is MultipleEntries:
if not isinstance(value, list):
return [value]
# No need to convert :class:`.fields.Link` types as the expected value
# should be of type :class:`.resources.ResourceLink` for links.
return value
# Array
def process_array_items(self, array, json):
"""Iterate through all `items` and create a resource for each.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary.
"""
for item in json['items']:
key = None
processed = self.from_json(item)
if isinstance(processed, Asset):
key = 'Asset'
elif isinstance(processed, Entry):
key = 'Entry'
if key is not None:
array.items_mapped[key][processed.sys['id']] = processed
array.items.append(processed)
def create_array(self, json):
"""Create :class:`.resources.Array` from JSON.
:param json: JSON dict.
:return: Array instance.
"""
result = Array(json['sys'])
result.total = json['total']
result.skip = json['skip']
result.limit = json['limit']
result.items = []
result.items_mapped = {'Asset': {}, 'Entry': {}}
self.process_array_items(result, json)
self.process_array_includes(result, json)
return result |
contentful-labs/contentful.py | contentful/cda/serialization.py | ResourceFactory.create_array | python | def create_array(self, json):
result = Array(json['sys'])
result.total = json['total']
result.skip = json['skip']
result.limit = json['limit']
result.items = []
result.items_mapped = {'Asset': {}, 'Entry': {}}
self.process_array_items(result, json)
self.process_array_includes(result, json)
return result | Create :class:`.resources.Array` from JSON.
:param json: JSON dict.
:return: Array instance. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L226-L242 | [
"def process_array_items(self, array, json):\n \"\"\"Iterate through all `items` and create a resource for each.\n\n In addition map the resources under the `items_mapped` by the resource id and type.\n\n :param array: Array resource.\n :param json: Raw JSON dictionary.\n \"\"\"\n for item in json['items']:\n key = None\n processed = self.from_json(item)\n\n if isinstance(processed, Asset):\n key = 'Asset'\n elif isinstance(processed, Entry):\n key = 'Entry'\n\n if key is not None:\n array.items_mapped[key][processed.sys['id']] = processed\n\n array.items.append(processed)\n",
"def process_array_includes(self, array, json):\n \"\"\"Iterate through all `includes` and create a resource for every item.\n\n In addition map the resources under the `items_mapped` by the resource id and type.\n\n :param array: Array resource.\n :param json: Raw JSON dictionary.\n \"\"\"\n includes = json.get('includes') or {}\n for key in array.items_mapped.keys():\n if key in includes:\n for resource in includes[key]:\n processed = self.from_json(resource)\n array.items_mapped[key][processed.sys['id']] = processed\n"
] | class ResourceFactory(object):
"""Factory for generating :class:`.resources.Resource` subclasses out of JSON data.
Attributes:
entries_mapping (dict): Mapping of Content Type IDs to custom Entry subclasses.
"""
def __init__(self, custom_entries):
"""ResourceFactory constructor.
:param custom_entries: list of custom Entry subclasses.
:return: ResourceFactory instance.
"""
super(ResourceFactory, self).__init__()
self.entries_mapping = {}
if custom_entries is not None:
for c in custom_entries:
ct = c.__content_type__
self.entries_mapping[ct] = c
def from_json(self, json):
"""Create resource out of JSON data.
:param json: JSON dict.
:return: Resource with a type defined by the given JSON data.
"""
res_type = json['sys']['type']
if ResourceType.Array.value == res_type:
return self.create_array(json)
elif ResourceType.Entry.value == res_type:
return self.create_entry(json)
elif ResourceType.Asset.value == res_type:
return ResourceFactory.create_asset(json)
elif ResourceType.ContentType.value == res_type:
return ResourceFactory.create_content_type(json)
elif ResourceType.Space.value == res_type:
return ResourceFactory.create_space(json)
@staticmethod
def _extract_link(obj):
if not isinstance(obj, dict):
return None
sys = obj.get('sys')
if isinstance(sys, dict) and sys.get('type') == ResourceType.Link.value:
return ResourceLink(sys)
return None
def create_entry(self, json):
"""Create :class:`.resources.Entry` from JSON.
:param json: JSON dict.
:return: Entry instance.
"""
sys = json['sys']
ct = sys['contentType']['sys']['id']
fields = json['fields']
raw_fields = copy.deepcopy(fields)
# Replace links with :class:`.resources.ResourceLink` objects.
for k, v in fields.items():
link = ResourceFactory._extract_link(v)
if link is not None:
fields[k] = link
elif isinstance(v, list):
for idx, ele in enumerate(v):
link = ResourceFactory._extract_link(ele)
if link is not None:
v[idx] = link
if ct in self.entries_mapping:
clazz = self.entries_mapping[ct]
result = clazz()
for k, v in clazz.__entry_fields__.items():
field_value = fields.get(v.field_id)
if field_value is not None:
setattr(result, k, ResourceFactory.convert_value(field_value, v))
else:
result = Entry()
result.sys = sys
result.fields = fields
result.raw_fields = raw_fields
return result
@staticmethod
def create_asset(json):
"""Create :class:`.resources.Asset` from JSON.
:param json: JSON dict.
:return: Asset instance.
"""
result = Asset(json['sys'])
file_dict = json['fields']['file']
result.fields = json['fields']
result.url = file_dict['url']
result.mimeType = file_dict['contentType']
return result
@staticmethod
def create_content_type(json):
"""Create :class:`.resource.ContentType` from JSON.
:param json: JSON dict.
:return: ContentType instance.
"""
result = ContentType(json['sys'])
for field in json['fields']:
field_id = field['id']
del field['id']
result.fields[field_id] = field
result.name = json['name']
result.display_field = json.get('displayField')
return result
@staticmethod
def create_space(json):
"""Create :class:`.resources.Space` from JSON.
:param json: JSON dict.
:return: Space instance.
"""
result = Space(json['sys'])
result.name = json['name']
return result
@staticmethod
def convert_value(value, field):
"""Given a :class:`.fields.Field` and a value, ensure that the value matches the given type, otherwise
attempt to convert it.
:param value: field value.
:param field: :class:`.fields.Field` instance.
:return: Result value.
"""
clz = field.field_type
if clz is Boolean:
if not isinstance(value, bool):
return bool(value)
elif clz is Date:
if not isinstance(value, str):
value = str(value)
return parser.parse(value)
elif clz is Number:
if not isinstance(value, int):
return int(value)
elif clz is Object:
if not isinstance(value, dict):
return ast.literal_eval(value)
elif clz is Text or clz is Symbol:
if not isinstance(value, str):
return str(value)
elif clz is List or clz is MultipleAssets or clz is MultipleEntries:
if not isinstance(value, list):
return [value]
# No need to convert :class:`.fields.Link` types as the expected value
# should be of type :class:`.resources.ResourceLink` for links.
return value
# Array
def process_array_items(self, array, json):
"""Iterate through all `items` and create a resource for each.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary.
"""
for item in json['items']:
key = None
processed = self.from_json(item)
if isinstance(processed, Asset):
key = 'Asset'
elif isinstance(processed, Entry):
key = 'Entry'
if key is not None:
array.items_mapped[key][processed.sys['id']] = processed
array.items.append(processed)
def process_array_includes(self, array, json):
"""Iterate through all `includes` and create a resource for every item.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary.
"""
includes = json.get('includes') or {}
for key in array.items_mapped.keys():
if key in includes:
for resource in includes[key]:
processed = self.from_json(resource)
array.items_mapped[key][processed.sys['id']] = processed
|
contentful-labs/contentful.py | contentful/cda/client.py | Client.validate_config | python | def validate_config(config):
non_null_params = ['space_id', 'access_token']
for param in non_null_params:
if getattr(config, param) is None:
raise Exception('Configuration for \"{0}\" must not be empty.'.format(param))
for clazz in config.custom_entries:
if not issubclass(clazz, Entry):
raise Exception(
'Provided class \"{0}\" must be a subclass of Entry.'.format(clazz.__name__))
elif clazz is Entry:
raise Exception('Cannot register "Entry" as a custom entry class.') | Verify sanity for a :class:`.Config` instance.
This will raise an exception in case conditions are not met, otherwise
will complete silently.
:param config: (:class:`.Config`) Configuration container. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L52-L70 | null | class Client(object):
"""Interface for retrieving resources from the Contentful Delivery API.
**Attributes**:
- dispatcher (:class:`.Dispatcher`): Dispatcher for invoking requests.
- config (:class:`.Config`): Configuration container.
"""
def __init__(self, space_id, access_token, custom_entries=None, secure=True, endpoint=None, resolve_links=True):
"""Client constructor.
:param space_id: (str) Space ID.
:param access_token: (str) Access Token.
:param custom_entries: (list) Optional list of :class:`.Entry` subclasses
used in order to register custom Entry subclasses to be instantiated by the client
when Entries of the given Content Type are retrieved from the server.
:param secure: (bool) Indicates whether the connection should be encrypted or not.
:param endpoint: (str) Custom remote API endpoint.
:param resolve_links: (bool) Indicates whether or not to resolve links automatically.
:return: :class:`Client` instance.
"""
super(Client, self).__init__()
config = Config(space_id, access_token, custom_entries, secure, endpoint, resolve_links)
self.config = config
self.validate_config(config)
self.dispatcher = Dispatcher(config, requests)
@staticmethod
def fetch(self, resource_class):
"""Construct a :class:`.Request` for the given resource type.
Provided an :class:`.Entry` subclass, the Content Type ID will be inferred and requested explicitly.
Examples::
client.fetch(Asset)
client.fetch(Entry)
client.fetch(ContentType)
client.fetch(CustomEntryClass)
:param resource_class: The type of resource to be fetched.
:return: :class:`.Request` instance.
"""
if issubclass(resource_class, Entry):
params = None
content_type = getattr(resource_class, '__content_type__', None)
if content_type is not None:
params = {'content_type': resource_class.__content_type__}
return RequestArray(self.dispatcher, utils.path_for_class(resource_class), self.config.resolve_links,
params=params)
else:
remote_path = utils.path_for_class(resource_class)
if remote_path is None:
raise Exception('Invalid resource type \"{0}\".'.format(resource_class))
return RequestArray(self.dispatcher, remote_path, self.config.resolve_links)
def fetch_space(self):
"""Fetch the Space associated with this client.
:return: :class:`.Space` result instance.
"""
return Request(self.dispatcher, '').invoke()
def resolve(self, link_resource_type, resource_id, array=None):
"""Resolve a link to a CDA resource.
Provided an `array` argument, attempt to retrieve the resource from the `mapped_items`
section of that array (containing both included and regular resources), in case the
resource cannot be found in the array (or if no `array` was provided) - attempt to fetch
the resource from the API by issuing a network request.
:param link_resource_type: (str) Resource type as str.
:param resource_id: (str) Remote ID of the linked resource.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
result = None
if array is not None:
container = array.items_mapped.get(link_resource_type)
result = container.get(resource_id)
if result is None:
clz = utils.class_for_type(link_resource_type)
result = self.fetch(clz).where({'sys.id': resource_id}).first()
return result
def resolve_resource_link(self, resource_link, array=None):
"""Convenience method for resolving links given a :class:`.resources.ResourceLink` object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param resource_link: (:class:`.ResourceLink`) instance.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
return self.resolve(resource_link.link_type, resource_link.resource_id, array)
def resolve_dict_link(self, dct, array=None):
"""Convenience method for resolving links given a dict object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param dct: (dict) Dictionary with the link data.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
sys = dct.get('sys')
return self.resolve(sys['linkType'], sys['id'], array) if sys is not None else None
|
contentful-labs/contentful.py | contentful/cda/client.py | Client.fetch | python | def fetch(self, resource_class):
if issubclass(resource_class, Entry):
params = None
content_type = getattr(resource_class, '__content_type__', None)
if content_type is not None:
params = {'content_type': resource_class.__content_type__}
return RequestArray(self.dispatcher, utils.path_for_class(resource_class), self.config.resolve_links,
params=params)
else:
remote_path = utils.path_for_class(resource_class)
if remote_path is None:
raise Exception('Invalid resource type \"{0}\".'.format(resource_class))
return RequestArray(self.dispatcher, remote_path, self.config.resolve_links) | Construct a :class:`.Request` for the given resource type.
Provided an :class:`.Entry` subclass, the Content Type ID will be inferred and requested explicitly.
Examples::
client.fetch(Asset)
client.fetch(Entry)
client.fetch(ContentType)
client.fetch(CustomEntryClass)
:param resource_class: The type of resource to be fetched.
:return: :class:`.Request` instance. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L72-L100 | [
"def path_for_class(clz):\n if issubclass(clz, Asset):\n return const.PATH_ASSETS\n elif issubclass(clz, ContentType):\n return const.PATH_CONTENT_TYPES\n elif issubclass(clz, Entry):\n return const.PATH_ENTRIES\n"
] | class Client(object):
"""Interface for retrieving resources from the Contentful Delivery API.
**Attributes**:
- dispatcher (:class:`.Dispatcher`): Dispatcher for invoking requests.
- config (:class:`.Config`): Configuration container.
"""
def __init__(self, space_id, access_token, custom_entries=None, secure=True, endpoint=None, resolve_links=True):
"""Client constructor.
:param space_id: (str) Space ID.
:param access_token: (str) Access Token.
:param custom_entries: (list) Optional list of :class:`.Entry` subclasses
used in order to register custom Entry subclasses to be instantiated by the client
when Entries of the given Content Type are retrieved from the server.
:param secure: (bool) Indicates whether the connection should be encrypted or not.
:param endpoint: (str) Custom remote API endpoint.
:param resolve_links: (bool) Indicates whether or not to resolve links automatically.
:return: :class:`Client` instance.
"""
super(Client, self).__init__()
config = Config(space_id, access_token, custom_entries, secure, endpoint, resolve_links)
self.config = config
self.validate_config(config)
self.dispatcher = Dispatcher(config, requests)
@staticmethod
def validate_config(config):
"""Verify sanity for a :class:`.Config` instance.
This will raise an exception in case conditions are not met, otherwise
will complete silently.
:param config: (:class:`.Config`) Configuration container.
"""
non_null_params = ['space_id', 'access_token']
for param in non_null_params:
if getattr(config, param) is None:
raise Exception('Configuration for \"{0}\" must not be empty.'.format(param))
for clazz in config.custom_entries:
if not issubclass(clazz, Entry):
raise Exception(
'Provided class \"{0}\" must be a subclass of Entry.'.format(clazz.__name__))
elif clazz is Entry:
raise Exception('Cannot register "Entry" as a custom entry class.')
def fetch_space(self):
"""Fetch the Space associated with this client.
:return: :class:`.Space` result instance.
"""
return Request(self.dispatcher, '').invoke()
def resolve(self, link_resource_type, resource_id, array=None):
"""Resolve a link to a CDA resource.
Provided an `array` argument, attempt to retrieve the resource from the `mapped_items`
section of that array (containing both included and regular resources), in case the
resource cannot be found in the array (or if no `array` was provided) - attempt to fetch
the resource from the API by issuing a network request.
:param link_resource_type: (str) Resource type as str.
:param resource_id: (str) Remote ID of the linked resource.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
result = None
if array is not None:
container = array.items_mapped.get(link_resource_type)
result = container.get(resource_id)
if result is None:
clz = utils.class_for_type(link_resource_type)
result = self.fetch(clz).where({'sys.id': resource_id}).first()
return result
def resolve_resource_link(self, resource_link, array=None):
"""Convenience method for resolving links given a :class:`.resources.ResourceLink` object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param resource_link: (:class:`.ResourceLink`) instance.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
return self.resolve(resource_link.link_type, resource_link.resource_id, array)
def resolve_dict_link(self, dct, array=None):
"""Convenience method for resolving links given a dict object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param dct: (dict) Dictionary with the link data.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
sys = dct.get('sys')
return self.resolve(sys['linkType'], sys['id'], array) if sys is not None else None
|
contentful-labs/contentful.py | contentful/cda/client.py | Client.resolve | python | def resolve(self, link_resource_type, resource_id, array=None):
result = None
if array is not None:
container = array.items_mapped.get(link_resource_type)
result = container.get(resource_id)
if result is None:
clz = utils.class_for_type(link_resource_type)
result = self.fetch(clz).where({'sys.id': resource_id}).first()
return result | Resolve a link to a CDA resource.
Provided an `array` argument, attempt to retrieve the resource from the `mapped_items`
section of that array (containing both included and regular resources), in case the
resource cannot be found in the array (or if no `array` was provided) - attempt to fetch
the resource from the API by issuing a network request.
:param link_resource_type: (str) Resource type as str.
:param resource_id: (str) Remote ID of the linked resource.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L109-L132 | [
"def class_for_type(resource_type):\n if resource_type == ResourceType.Asset.value:\n return Asset\n elif resource_type == ResourceType.ContentType.value:\n return ContentType\n elif resource_type == ResourceType.Entry.value:\n return Entry\n elif resource_type == ResourceType.Space.value:\n return Space\n",
"def fetch(self, resource_class):\n \"\"\"Construct a :class:`.Request` for the given resource type.\n\n Provided an :class:`.Entry` subclass, the Content Type ID will be inferred and requested explicitly.\n\n Examples::\n\n client.fetch(Asset)\n client.fetch(Entry)\n client.fetch(ContentType)\n client.fetch(CustomEntryClass)\n\n :param resource_class: The type of resource to be fetched.\n :return: :class:`.Request` instance.\n \"\"\"\n if issubclass(resource_class, Entry):\n params = None\n content_type = getattr(resource_class, '__content_type__', None)\n if content_type is not None:\n params = {'content_type': resource_class.__content_type__}\n return RequestArray(self.dispatcher, utils.path_for_class(resource_class), self.config.resolve_links,\n params=params)\n\n else:\n remote_path = utils.path_for_class(resource_class)\n if remote_path is None:\n raise Exception('Invalid resource type \\\"{0}\\\".'.format(resource_class))\n\n return RequestArray(self.dispatcher, remote_path, self.config.resolve_links)\n",
"def first(self):\n \"\"\"Attempt to retrieve only the first resource matching this request.\n\n :return: Result instance, or `None` if there are no matching resources.\n \"\"\"\n self.params['limit'] = 1\n result = self.all()\n return result.items[0] if result.total > 0 else None\n",
"def where(self, params):\n \"\"\"Set a dict of parameters to be passed to the API when invoking this request.\n\n :param params: (dict) query parameters.\n :return: this :class:`.RequestArray` instance for convenience.\n \"\"\"\n self.params = dict(self.params, **params) # params overrides self.params\n return self"
] | class Client(object):
"""Interface for retrieving resources from the Contentful Delivery API.
**Attributes**:
- dispatcher (:class:`.Dispatcher`): Dispatcher for invoking requests.
- config (:class:`.Config`): Configuration container.
"""
def __init__(self, space_id, access_token, custom_entries=None, secure=True, endpoint=None, resolve_links=True):
"""Client constructor.
:param space_id: (str) Space ID.
:param access_token: (str) Access Token.
:param custom_entries: (list) Optional list of :class:`.Entry` subclasses
used in order to register custom Entry subclasses to be instantiated by the client
when Entries of the given Content Type are retrieved from the server.
:param secure: (bool) Indicates whether the connection should be encrypted or not.
:param endpoint: (str) Custom remote API endpoint.
:param resolve_links: (bool) Indicates whether or not to resolve links automatically.
:return: :class:`Client` instance.
"""
super(Client, self).__init__()
config = Config(space_id, access_token, custom_entries, secure, endpoint, resolve_links)
self.config = config
self.validate_config(config)
self.dispatcher = Dispatcher(config, requests)
@staticmethod
def validate_config(config):
"""Verify sanity for a :class:`.Config` instance.
This will raise an exception in case conditions are not met, otherwise
will complete silently.
:param config: (:class:`.Config`) Configuration container.
"""
non_null_params = ['space_id', 'access_token']
for param in non_null_params:
if getattr(config, param) is None:
raise Exception('Configuration for \"{0}\" must not be empty.'.format(param))
for clazz in config.custom_entries:
if not issubclass(clazz, Entry):
raise Exception(
'Provided class \"{0}\" must be a subclass of Entry.'.format(clazz.__name__))
elif clazz is Entry:
raise Exception('Cannot register "Entry" as a custom entry class.')
def fetch(self, resource_class):
"""Construct a :class:`.Request` for the given resource type.
Provided an :class:`.Entry` subclass, the Content Type ID will be inferred and requested explicitly.
Examples::
client.fetch(Asset)
client.fetch(Entry)
client.fetch(ContentType)
client.fetch(CustomEntryClass)
:param resource_class: The type of resource to be fetched.
:return: :class:`.Request` instance.
"""
if issubclass(resource_class, Entry):
params = None
content_type = getattr(resource_class, '__content_type__', None)
if content_type is not None:
params = {'content_type': resource_class.__content_type__}
return RequestArray(self.dispatcher, utils.path_for_class(resource_class), self.config.resolve_links,
params=params)
else:
remote_path = utils.path_for_class(resource_class)
if remote_path is None:
raise Exception('Invalid resource type \"{0}\".'.format(resource_class))
return RequestArray(self.dispatcher, remote_path, self.config.resolve_links)
def fetch_space(self):
"""Fetch the Space associated with this client.
:return: :class:`.Space` result instance.
"""
return Request(self.dispatcher, '').invoke()
def resolve_resource_link(self, resource_link, array=None):
"""Convenience method for resolving links given a :class:`.resources.ResourceLink` object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param resource_link: (:class:`.ResourceLink`) instance.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
return self.resolve(resource_link.link_type, resource_link.resource_id, array)
def resolve_dict_link(self, dct, array=None):
"""Convenience method for resolving links given a dict object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param dct: (dict) Dictionary with the link data.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
sys = dct.get('sys')
return self.resolve(sys['linkType'], sys['id'], array) if sys is not None else None
|
contentful-labs/contentful.py | contentful/cda/client.py | Client.resolve_resource_link | python | def resolve_resource_link(self, resource_link, array=None):
return self.resolve(resource_link.link_type, resource_link.resource_id, array) | Convenience method for resolving links given a :class:`.resources.ResourceLink` object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param resource_link: (:class:`.ResourceLink`) instance.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L134-L143 | [
"def resolve(self, link_resource_type, resource_id, array=None):\n \"\"\"Resolve a link to a CDA resource.\n\n Provided an `array` argument, attempt to retrieve the resource from the `mapped_items`\n section of that array (containing both included and regular resources), in case the\n resource cannot be found in the array (or if no `array` was provided) - attempt to fetch\n the resource from the API by issuing a network request.\n\n :param link_resource_type: (str) Resource type as str.\n :param resource_id: (str) Remote ID of the linked resource.\n :param array: (:class:`.Array`) Optional array resource.\n :return: :class:`.Resource` subclass, `None` if it cannot be retrieved.\n \"\"\"\n result = None\n\n if array is not None:\n container = array.items_mapped.get(link_resource_type)\n result = container.get(resource_id)\n\n if result is None:\n clz = utils.class_for_type(link_resource_type)\n result = self.fetch(clz).where({'sys.id': resource_id}).first()\n\n return result\n"
] | class Client(object):
"""Interface for retrieving resources from the Contentful Delivery API.
**Attributes**:
- dispatcher (:class:`.Dispatcher`): Dispatcher for invoking requests.
- config (:class:`.Config`): Configuration container.
"""
def __init__(self, space_id, access_token, custom_entries=None, secure=True, endpoint=None, resolve_links=True):
"""Client constructor.
:param space_id: (str) Space ID.
:param access_token: (str) Access Token.
:param custom_entries: (list) Optional list of :class:`.Entry` subclasses
used in order to register custom Entry subclasses to be instantiated by the client
when Entries of the given Content Type are retrieved from the server.
:param secure: (bool) Indicates whether the connection should be encrypted or not.
:param endpoint: (str) Custom remote API endpoint.
:param resolve_links: (bool) Indicates whether or not to resolve links automatically.
:return: :class:`Client` instance.
"""
super(Client, self).__init__()
config = Config(space_id, access_token, custom_entries, secure, endpoint, resolve_links)
self.config = config
self.validate_config(config)
self.dispatcher = Dispatcher(config, requests)
@staticmethod
def validate_config(config):
"""Verify sanity for a :class:`.Config` instance.
This will raise an exception in case conditions are not met, otherwise
will complete silently.
:param config: (:class:`.Config`) Configuration container.
"""
non_null_params = ['space_id', 'access_token']
for param in non_null_params:
if getattr(config, param) is None:
raise Exception('Configuration for \"{0}\" must not be empty.'.format(param))
for clazz in config.custom_entries:
if not issubclass(clazz, Entry):
raise Exception(
'Provided class \"{0}\" must be a subclass of Entry.'.format(clazz.__name__))
elif clazz is Entry:
raise Exception('Cannot register "Entry" as a custom entry class.')
def fetch(self, resource_class):
"""Construct a :class:`.Request` for the given resource type.
Provided an :class:`.Entry` subclass, the Content Type ID will be inferred and requested explicitly.
Examples::
client.fetch(Asset)
client.fetch(Entry)
client.fetch(ContentType)
client.fetch(CustomEntryClass)
:param resource_class: The type of resource to be fetched.
:return: :class:`.Request` instance.
"""
if issubclass(resource_class, Entry):
params = None
content_type = getattr(resource_class, '__content_type__', None)
if content_type is not None:
params = {'content_type': resource_class.__content_type__}
return RequestArray(self.dispatcher, utils.path_for_class(resource_class), self.config.resolve_links,
params=params)
else:
remote_path = utils.path_for_class(resource_class)
if remote_path is None:
raise Exception('Invalid resource type \"{0}\".'.format(resource_class))
return RequestArray(self.dispatcher, remote_path, self.config.resolve_links)
def fetch_space(self):
"""Fetch the Space associated with this client.
:return: :class:`.Space` result instance.
"""
return Request(self.dispatcher, '').invoke()
def resolve(self, link_resource_type, resource_id, array=None):
"""Resolve a link to a CDA resource.
Provided an `array` argument, attempt to retrieve the resource from the `mapped_items`
section of that array (containing both included and regular resources), in case the
resource cannot be found in the array (or if no `array` was provided) - attempt to fetch
the resource from the API by issuing a network request.
:param link_resource_type: (str) Resource type as str.
:param resource_id: (str) Remote ID of the linked resource.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
result = None
if array is not None:
container = array.items_mapped.get(link_resource_type)
result = container.get(resource_id)
if result is None:
clz = utils.class_for_type(link_resource_type)
result = self.fetch(clz).where({'sys.id': resource_id}).first()
return result
def resolve_dict_link(self, dct, array=None):
"""Convenience method for resolving links given a dict object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param dct: (dict) Dictionary with the link data.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
sys = dct.get('sys')
return self.resolve(sys['linkType'], sys['id'], array) if sys is not None else None
|
contentful-labs/contentful.py | contentful/cda/client.py | Client.resolve_dict_link | python | def resolve_dict_link(self, dct, array=None):
sys = dct.get('sys')
return self.resolve(sys['linkType'], sys['id'], array) if sys is not None else None | Convenience method for resolving links given a dict object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param dct: (dict) Dictionary with the link data.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L145-L155 | [
"def resolve(self, link_resource_type, resource_id, array=None):\n \"\"\"Resolve a link to a CDA resource.\n\n Provided an `array` argument, attempt to retrieve the resource from the `mapped_items`\n section of that array (containing both included and regular resources), in case the\n resource cannot be found in the array (or if no `array` was provided) - attempt to fetch\n the resource from the API by issuing a network request.\n\n :param link_resource_type: (str) Resource type as str.\n :param resource_id: (str) Remote ID of the linked resource.\n :param array: (:class:`.Array`) Optional array resource.\n :return: :class:`.Resource` subclass, `None` if it cannot be retrieved.\n \"\"\"\n result = None\n\n if array is not None:\n container = array.items_mapped.get(link_resource_type)\n result = container.get(resource_id)\n\n if result is None:\n clz = utils.class_for_type(link_resource_type)\n result = self.fetch(clz).where({'sys.id': resource_id}).first()\n\n return result\n"
] | class Client(object):
"""Interface for retrieving resources from the Contentful Delivery API.
**Attributes**:
- dispatcher (:class:`.Dispatcher`): Dispatcher for invoking requests.
- config (:class:`.Config`): Configuration container.
"""
def __init__(self, space_id, access_token, custom_entries=None, secure=True, endpoint=None, resolve_links=True):
"""Client constructor.
:param space_id: (str) Space ID.
:param access_token: (str) Access Token.
:param custom_entries: (list) Optional list of :class:`.Entry` subclasses
used in order to register custom Entry subclasses to be instantiated by the client
when Entries of the given Content Type are retrieved from the server.
:param secure: (bool) Indicates whether the connection should be encrypted or not.
:param endpoint: (str) Custom remote API endpoint.
:param resolve_links: (bool) Indicates whether or not to resolve links automatically.
:return: :class:`Client` instance.
"""
super(Client, self).__init__()
config = Config(space_id, access_token, custom_entries, secure, endpoint, resolve_links)
self.config = config
self.validate_config(config)
self.dispatcher = Dispatcher(config, requests)
@staticmethod
def validate_config(config):
"""Verify sanity for a :class:`.Config` instance.
This will raise an exception in case conditions are not met, otherwise
will complete silently.
:param config: (:class:`.Config`) Configuration container.
"""
non_null_params = ['space_id', 'access_token']
for param in non_null_params:
if getattr(config, param) is None:
raise Exception('Configuration for \"{0}\" must not be empty.'.format(param))
for clazz in config.custom_entries:
if not issubclass(clazz, Entry):
raise Exception(
'Provided class \"{0}\" must be a subclass of Entry.'.format(clazz.__name__))
elif clazz is Entry:
raise Exception('Cannot register "Entry" as a custom entry class.')
def fetch(self, resource_class):
"""Construct a :class:`.Request` for the given resource type.
Provided an :class:`.Entry` subclass, the Content Type ID will be inferred and requested explicitly.
Examples::
client.fetch(Asset)
client.fetch(Entry)
client.fetch(ContentType)
client.fetch(CustomEntryClass)
:param resource_class: The type of resource to be fetched.
:return: :class:`.Request` instance.
"""
if issubclass(resource_class, Entry):
params = None
content_type = getattr(resource_class, '__content_type__', None)
if content_type is not None:
params = {'content_type': resource_class.__content_type__}
return RequestArray(self.dispatcher, utils.path_for_class(resource_class), self.config.resolve_links,
params=params)
else:
remote_path = utils.path_for_class(resource_class)
if remote_path is None:
raise Exception('Invalid resource type \"{0}\".'.format(resource_class))
return RequestArray(self.dispatcher, remote_path, self.config.resolve_links)
def fetch_space(self):
"""Fetch the Space associated with this client.
:return: :class:`.Space` result instance.
"""
return Request(self.dispatcher, '').invoke()
def resolve(self, link_resource_type, resource_id, array=None):
"""Resolve a link to a CDA resource.
Provided an `array` argument, attempt to retrieve the resource from the `mapped_items`
section of that array (containing both included and regular resources), in case the
resource cannot be found in the array (or if no `array` was provided) - attempt to fetch
the resource from the API by issuing a network request.
:param link_resource_type: (str) Resource type as str.
:param resource_id: (str) Remote ID of the linked resource.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
result = None
if array is not None:
container = array.items_mapped.get(link_resource_type)
result = container.get(resource_id)
if result is None:
clz = utils.class_for_type(link_resource_type)
result = self.fetch(clz).where({'sys.id': resource_id}).first()
return result
def resolve_resource_link(self, resource_link, array=None):
"""Convenience method for resolving links given a :class:`.resources.ResourceLink` object.
Extract link values and pass to the :func:`.resolve` method of this class.
:param resource_link: (:class:`.ResourceLink`) instance.
:param array: (:class:`.Array`) Optional array resource.
:return: :class:`.Resource` subclass, `None` if it cannot be retrieved.
"""
return self.resolve(resource_link.link_type, resource_link.resource_id, array)
|
contentful-labs/contentful.py | contentful/cda/client.py | Dispatcher.invoke | python | def invoke(self, request):
url = '{0}/{1}'.format(self.base_url, request.remote_path)
r = self.httpclient.get(url, params=request.params, headers=self.get_headers())
if 200 <= r.status_code < 300:
return self.resource_factory.from_json(r.json())
else:
if r.status_code in ErrorMapping.mapping:
raise ErrorMapping.mapping[r.status_code](r)
else:
raise ApiError(r) | Invoke the given :class:`.Request` instance using the associated :class:`.Dispatcher`.
:param request: :class:`.Request` instance to invoke.
:return: :class:`.Resource` subclass. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L209-L223 | [
"def get_headers(self):\n \"\"\"Create and return a base set of headers to be carried with all requests.\n\n :return: dict containing header values.\n \"\"\"\n return {'Authorization': 'Bearer {0}'.format(self.config.access_token), 'User-Agent': self.user_agent}\n"
] | class Dispatcher(object):
"""Responsible for invoking :class:`.Request` instances and delegating result processing.
**Attributes**:
- config (:class:`.Config`): Configuration settings.
- resource_factory (:class:`.ResourceFactory`): Factory to use for generating resources out of JSON responses.
- httpclient (module): HTTP client module.
- base_url (str): Base URL of the remote endpoint.
- user_agent (str): ``User-Agent`` header to pass with requests.
"""
def __init__(self, config, httpclient):
"""Dispatcher constructor.
:param config: Configuration container.
:param httpclient: HTTP client.
:return: :class:`.Dispatcher` instance.
"""
super(Dispatcher, self).__init__()
self.config = config
self.resource_factory = ResourceFactory(config.custom_entries)
self.httpclient = httpclient
self.user_agent = 'contentful.py/{0}'.format(__version__)
scheme = 'https' if config.secure else 'http'
self.base_url = '{0}://{1}/spaces/{2}'.format(scheme, config.endpoint, config.space_id)
def get_headers(self):
"""Create and return a base set of headers to be carried with all requests.
:return: dict containing header values.
"""
return {'Authorization': 'Bearer {0}'.format(self.config.access_token), 'User-Agent': self.user_agent}
|
contentful-labs/contentful.py | contentful/cda/client.py | RequestArray.all | python | def all(self):
result = self.invoke()
if self.resolve_links:
result.resolve_links()
return result | Attempt to retrieve all available resources matching this request.
:return: Result instance as returned by the :class:`.Dispatcher`. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L263-L272 | [
"def invoke(self):\n \"\"\"Invoke :class:`.Request` instance using the associated :class:`.Dispatcher`.\n\n :return: Result instance as returned by the :class:`.Dispatcher`.\n \"\"\"\n return self.dispatcher.invoke(self)\n"
] | class RequestArray(Request):
"""Represents a single request for retrieving multiple resources from the API."""
def __init__(self, dispatcher, remote_path, resolve_links, params=None):
super(RequestArray, self).__init__(dispatcher, remote_path, params)
self.resolve_links = resolve_links
def first(self):
"""Attempt to retrieve only the first resource matching this request.
:return: Result instance, or `None` if there are no matching resources.
"""
self.params['limit'] = 1
result = self.all()
return result.items[0] if result.total > 0 else None
def where(self, params):
"""Set a dict of parameters to be passed to the API when invoking this request.
:param params: (dict) query parameters.
:return: this :class:`.RequestArray` instance for convenience.
"""
self.params = dict(self.params, **params) # params overrides self.params
return self |
contentful-labs/contentful.py | contentful/cda/client.py | RequestArray.first | python | def first(self):
self.params['limit'] = 1
result = self.all()
return result.items[0] if result.total > 0 else None | Attempt to retrieve only the first resource matching this request.
:return: Result instance, or `None` if there are no matching resources. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L274-L281 | [
"def all(self):\n \"\"\"Attempt to retrieve all available resources matching this request.\n\n :return: Result instance as returned by the :class:`.Dispatcher`.\n \"\"\"\n result = self.invoke()\n if self.resolve_links:\n result.resolve_links()\n\n return result\n"
] | class RequestArray(Request):
"""Represents a single request for retrieving multiple resources from the API."""
def __init__(self, dispatcher, remote_path, resolve_links, params=None):
super(RequestArray, self).__init__(dispatcher, remote_path, params)
self.resolve_links = resolve_links
def all(self):
"""Attempt to retrieve all available resources matching this request.
:return: Result instance as returned by the :class:`.Dispatcher`.
"""
result = self.invoke()
if self.resolve_links:
result.resolve_links()
return result
def where(self, params):
"""Set a dict of parameters to be passed to the API when invoking this request.
:param params: (dict) query parameters.
:return: this :class:`.RequestArray` instance for convenience.
"""
self.params = dict(self.params, **params) # params overrides self.params
return self |
contentful-labs/contentful.py | contentful/cda/client.py | RequestArray.where | python | def where(self, params):
self.params = dict(self.params, **params) # params overrides self.params
return self | Set a dict of parameters to be passed to the API when invoking this request.
:param params: (dict) query parameters.
:return: this :class:`.RequestArray` instance for convenience. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L283-L290 | null | class RequestArray(Request):
"""Represents a single request for retrieving multiple resources from the API."""
def __init__(self, dispatcher, remote_path, resolve_links, params=None):
super(RequestArray, self).__init__(dispatcher, remote_path, params)
self.resolve_links = resolve_links
def all(self):
"""Attempt to retrieve all available resources matching this request.
:return: Result instance as returned by the :class:`.Dispatcher`.
"""
result = self.invoke()
if self.resolve_links:
result.resolve_links()
return result
def first(self):
"""Attempt to retrieve only the first resource matching this request.
:return: Result instance, or `None` if there are no matching resources.
"""
self.params['limit'] = 1
result = self.all()
return result.items[0] if result.total > 0 else None
|
contentful-labs/contentful.py | contentful/cda/errors.py | api_exception | python | def api_exception(http_code):
def wrapper(*args):
code = args[0]
ErrorMapping.mapping[http_code] = code
return code
return wrapper | Convenience decorator to associate HTTP status codes with :class:`.ApiError` subclasses.
:param http_code: (int) HTTP status code.
:return: wrapper function. | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/errors.py#L23-L33 | null | """errors module."""
class ErrorMapping(object):
"""Holds a mapping of HTTP status codes and :class:`.ApiError` subclasses."""
mapping = {}
class ApiError(Exception):
"""Class representing an error returned by the API."""
def __init__(self, result, message=None):
"""ApiError constructor.
:param result: Raw result object.
:param message: (str) Optional message.
:return: :class:`.ApiError` instance.
"""
self.result = result
super(ApiError, self).__init__(
message or result.text or 'Request failed with status \"{0}\".'.format(result.status_code))
@api_exception(400)
class BadRequest(ApiError):
"""Bad Request"""
@api_exception(401)
class Unauthorized(ApiError):
"""Unauthorized"""
@api_exception(403)
class AccessDenied(ApiError):
"""Access Denied"""
@api_exception(404)
class NotFound(ApiError):
"""Not Found"""
@api_exception(500)
class ServerError(ApiError):
"""Internal Server Error"""
@api_exception(503)
class ServiceUnavailable(ApiError):
"""Service Unavailable Error""" |
shmir/PyTrafficGenerator | trafficgenerator/tgn_utils.py | new_log_file | python | def new_log_file(logger, suffix, file_type='tcl'):
file_handler = None
for handler in logger.handlers:
if isinstance(handler, logging.FileHandler):
file_handler = handler
new_logger = logging.getLogger(file_type + suffix)
if file_handler:
logger_file_name = path.splitext(file_handler.baseFilename)[0]
tcl_logger_file_name = logger_file_name + '-' + suffix + '.' + file_type
new_logger.addHandler(logging.FileHandler(tcl_logger_file_name, 'w'))
new_logger.setLevel(logger.getEffectiveLevel())
return new_logger | Create new logger and log file from existing logger.
The new logger will be create in the same directory as the existing logger file and will be named
as the existing log file with the requested suffix.
:param logger: existing logger
:param suffix: string to add to the existing log file name to create the new log file name.
:param file_type: logger file type (tcl. txt. etc.)
:return: the newly created logger | train | https://github.com/shmir/PyTrafficGenerator/blob/382e5d549c83404af2a6571fe19c9e71df8bac14/trafficgenerator/tgn_utils.py#L81-L104 | null | """
TGN projects utilities and errors.
@author: yoram.shamir
"""
import logging
from os import path
from enum import Enum
import collections
class TgnType(Enum):
ixexplorer = 1
ixnetwork = 2
testcenter = 3
class ApiType(Enum):
tcl = 1
python = 2
rest = 3
socket = 4
def flatten(x):
if isinstance(x, collections.Iterable):
return [a for i in x for a in flatten(i)]
else:
return [x]
def is_true(str_value):
"""
:param str_value: String to evaluate.
:returns: True if string represents True TGN attribute value else return False.
"""
return str_value.lower() in ('true', 'yes', '1', '::ixnet::ok')
def is_false(str_value):
"""
:param str_value: String to evaluate.
:returns: True if string represents TGN attribute False value else return True.
"""
return str_value.lower() in ('false', 'no', '0', 'null', 'none', '::ixnet::obj-null')
def is_local_host(location):
"""
:param location: Location string in the format ip[/slot[/port]].
:returns: True if ip represents localhost or offilne else return False.
"""
return any(x in location.lower() for x in ('localhost', '127.0.0.1', 'offline', 'null'))
def is_ipv4(str_value):
"""
:param str_value: String to evaluate.
:returns: True if string represents IPv4 else return False.
"""
return str_value.lower() in ('ipv4', 'ipv4if')
def is_ipv6(str_value):
"""
:param str_value: String to evaluate.
:returns: True if string represents IPv6 else return False.
"""
return str_value.lower() in ('ipv6', 'ipv6if')
def is_ip(str_value):
"""
:param str_value: String to evaluate.
:returns: True if string is IPv4 or IPv6 else return False.
"""
return is_ipv4(str_value) or is_ipv6(str_value)
class TgnError(Exception):
""" Base exception for traffic generator exceptions. """
pass
|
shmir/PyTrafficGenerator | trafficgenerator/tgn_object.py | TgnObjectsDict.dumps | python | def dumps(self, indent=1):
str_keys_dict = OrderedDict({str(k): v for k, v in self.items()})
for k, v in str_keys_dict.items():
if isinstance(v, dict):
str_keys_dict[k] = OrderedDict({str(k1): v1 for k1, v1 in v.items()})
for k1, v1 in str_keys_dict[k].items():
if isinstance(v1, dict):
str_keys_dict[k][k1] = OrderedDict({str(k2): v2 for k2, v2 in v1.items()})
return json.dumps(str_keys_dict, indent=indent) | Returns nested string representation of the dictionary (like json.dumps).
:param indent: indentation level. | train | https://github.com/shmir/PyTrafficGenerator/blob/382e5d549c83404af2a6571fe19c9e71df8bac14/trafficgenerator/tgn_object.py#L44-L57 | null | class TgnObjectsDict(OrderedDict):
""" Dictionary to map from TgnObjects to whatever data.
Dictionary keys must be TgnObject but then it can be accessed by the object itself, the object reference or the
object name.
"""
def __setitem__(self, key, value):
if not isinstance(key, TgnObject):
raise TgnError('tgn_object_dict keys must be TgnObject, not {}'.format(type(key)))
return OrderedDict.__setitem__(self, key, value)
def __getitem__(self, key):
if key in self.keys():
return OrderedDict.__getitem__(self, key)
else:
for obj in self:
if obj.name == key or obj.ref == key:
return OrderedDict.__getitem__(self, obj)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.