| docstring | function | __index_level_0__ |
|---|---|---|
Retrieves a specific data type definition by name.
Args:
name (str): name of the data type definition.
Returns:
DataTypeDefinition: data type definition or None if not available. | def GetDefinitionByName(self, name):
lookup_name = name.lower()
if lookup_name not in self._definitions:
lookup_name = self._aliases.get(name, None)
return self._definitions.get(lookup_name, None) | 814,332 |
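A minimal, self-contained sketch of the lookup behaviour above, assuming a registry whose `_definitions` dict is keyed by lower-case name and whose `_aliases` dict maps alias names to definition keys (the data here is hypothetical):

```python
class DataTypeDefinitionsRegistry(object):
    """Hypothetical registry mirroring the lookup logic shown above."""

    def __init__(self):
        self._definitions = {}
        self._aliases = {}

    def GetDefinitionByName(self, name):
        lookup_name = name.lower()
        if lookup_name not in self._definitions:
            lookup_name = self._aliases.get(name, None)
        return self._definitions.get(lookup_name, None)

registry = DataTypeDefinitionsRegistry()
registry._definitions['int32'] = 'int32 definition'  # stand-in for a DataTypeDefinition
registry._aliases['LONG'] = 'int32'

print(registry.GetDefinitionByName('INT32'))  # found after lower-casing
print(registry.GetDefinitionByName('LONG'))   # found via the alias fallback
print(registry.GetDefinitionByName('float'))  # None - not registered
```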
Registers a data type definition.
The data type definitions are identified based on their lower case name.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Raises:
KeyError: if data type definition is already set for the corresponding
name. | def RegisterDefinition(self, data_type_definition):
name_lower = data_type_definition.name.lower()
if name_lower in self._definitions:
raise KeyError('Definition already set for name: {0:s}.'.format(
data_type_definition.name))
if data_type_definition.name in self._aliases:
raise... | 814,333 |
Set dictionary items but do not allow setting of resources
Args:
key (Any): Key in dictionary
value (Any): Value to put in dictionary
Returns:
None | def __setitem__(self, key, value):
# type: (Any, Any) -> None
if key == 'resources':
self.add_update_resources(value, ignore_datasetid=True)
return
super(Dataset, self).__setitem__(key, value) | 814,351 |
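The interception pattern in `__setitem__` can be isolated as below; the class and method names are stand-ins mirroring the code above, not the real `Dataset`:

```python
class DatasetLike(dict):
    """Hypothetical dict subclass that routes 'resources' assignments."""

    def add_update_resources(self, value, ignore_datasetid=True):
        print('routed %d resource(s) through add_update_resources' % len(value))

    def __setitem__(self, key, value):
        if key == 'resources':
            self.add_update_resources(value, ignore_datasetid=True)
            return
        super(DatasetLike, self).__setitem__(key, value)

ds = DatasetLike()
ds['title'] = 'My dataset'          # stored normally
ds['resources'] = [{'name': 'r'}]   # intercepted, never stored as a plain key
print('resources' in ds)            # False
```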
Resolve a resource id, Resource object or dictionary into a Resource object
Args:
resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary
Returns:
hdx.data.resource.Resource: Resource object | def _get_resource_from_obj(self, resource):
# type: (Union[hdx.data.resource.Resource,Dict,str]) -> hdx.data.resource.Resource
if isinstance(resource, str):
if is_valid_uuid(resource) is False:
raise HDXError('%s is not a valid resource id!' % resource)
r... | 814,353 |
Add new or update existing resource in dataset with new metadata
Args:
resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary
ignore_datasetid (bool): Whether to ignore dataset id in the resource
Ret... | def add_update_resource(self, resource, ignore_datasetid=False):
# type: (Union[hdx.data.resource.Resource,Dict,str], bool) -> None
resource = self._get_resource_from_obj(resource)
if 'package_id' in resource:
if not ignore_datasetid:
raise HDXError('Resource... | 814,354 |
Add new or update existing resources with new metadata to the dataset
Args:
resources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries
ignore_datasetid (bool): Whether to ignore dataset i... | def add_update_resources(self, resources, ignore_datasetid=False):
# type: (List[Union[hdx.data.resource.Resource,Dict,str]], bool) -> None
if not isinstance(resources, list):
raise HDXError('Resources should be a list!')
for resource in resources:
self.add_updat... | 814,355 |
Delete a resource from the dataset and also from HDX by default
Args:
resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary
delete (bool): Whether to delete the resource from HDX (not just the dataset). Defa... | def delete_resource(self, resource, delete=True):
# type: (Union[hdx.data.resource.Resource,Dict,str], bool) -> bool
if isinstance(resource, str):
if is_valid_uuid(resource) is False:
raise HDXError('%s is not a valid resource id!' % resource)
return self._re... | 814,356 |
Reorder resources in dataset according to provided list.
If only some resource ids are supplied then these are
assumed to be first and the other resources will stay in
their original order.
Args:
resource_ids (List[str]): List of resource ids
hxl_update (bool): W... | def reorder_resources(self, resource_ids, hxl_update=True):
# type: (List[str], bool) -> None
dataset_id = self.data.get('id')
if not dataset_id:
raise HDXError('Dataset has no id! It must be read, created or updated first.')
data = {'id': dataset_id,
... | 814,357 |
Update dataset metadata with static metadata from YAML file
Args:
path (str): Path to YAML dataset metadata. Defaults to config/hdx_dataset_static.yml.
Returns:
None | def update_from_yaml(self, path=join('config', 'hdx_dataset_static.yml')):
# type: (str) -> None
super(Dataset, self).update_from_yaml(path)
self.separate_resources() | 814,358 |
Update dataset metadata with static metadata from JSON file
Args:
path (str): Path to JSON dataset metadata. Defaults to config/hdx_dataset_static.json.
Returns:
None | def update_from_json(self, path=join('config', 'hdx_dataset_static.json')):
# type: (str) -> None
super(Dataset, self).update_from_json(path)
self.separate_resources() | 814,359 |
Reads the dataset given by identifier from HDX and returns Dataset object
Args:
identifier (str): Identifier of dataset
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[Dataset]: Dataset object if succes... | def read_from_hdx(identifier, configuration=None):
# type: (str, Optional[Configuration]) -> Optional['Dataset']
dataset = Dataset(configuration=configuration)
result = dataset._dataset_load_from_hdx(identifier)
if result:
return dataset
return None | 814,360 |
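A usage sketch for the static `read_from_hdx` pattern above; it assumes the hdx-python-api package is installed, and the import paths and dataset name are assumptions that may differ between library versions:

```python
# Module paths are assumed; newer library versions use hdx.api.configuration.
from hdx.hdx_configuration import Configuration
from hdx.data.dataset import Dataset

Configuration.create(hdx_site='prod', user_agent='example', hdx_read_only=True)
dataset = Dataset.read_from_hdx('hypothetical-dataset-name')  # id or name
if dataset is None:
    print('No dataset with that id or name')
else:
    print(dataset.get_dataset_date())
```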
Loads the dataset given by either id or name from HDX
Args:
id_or_name (str): Either id or name of dataset
Returns:
bool: True if loaded, False if not | def _dataset_load_from_hdx(self, id_or_name):
# type: (str) -> bool
if not self._load_from_hdx('dataset', id_or_name):
return False
self._dataset_create_resources()
return True | 814,362 |
Check that metadata for the dataset and its resources is complete. Set the ignore_fields
parameter to any fields that should be ignored for the particular operation.
Args:
ignore_fields (List[str]): Fields to ignore. Default is [].
allow_no_resources (bool): Wh... | def check_required_fields(self, ignore_fields=list(), allow_no_resources=False):
# type: (List[str], bool) -> None
if self.is_requestable():
self._check_required_fields('dataset-requestable', ignore_fields)
else:
self._check_required_fields('dataset', ignore_fiel... | 814,363 |
Helper method to add new resource from dataset including filestore.
Args:
new_resource (hdx.data.Resource): New resource from dataset
ignore_fields (List[str]): List of fields to ignore when checking resource
filestore_resources (List[hdx.data.Resource]): List of resources t... | def _dataset_merge_filestore_newresource(self, new_resource, ignore_fields, filestore_resources):
# type: (hdx.data.Resource, List[str], List[hdx.data.Resource]) -> None
new_resource.check_required_fields(ignore_fields=ignore_fields)
self.resources.append(new_resource)
if new_re... | 814,365 |
Helper method to create files in filestore by updating resources.
Args:
filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to)
create_default_views (bool): Whether to call package_create_default_resource_views.
hxl_update (boo... | def _add_filestore_resources(self, filestore_resources, create_default_views, hxl_update):
# type: (List[hdx.data.Resource], bool, bool) -> None
for resource in filestore_resources:
for created_resource in self.data['resources']:
if resource['name'] == created_resour... | 814,366 |
Get all dataset names in HDX
Args:
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
**kwargs: See below
limit (int): Number of rows to return. Defaults to all dataset names.
offset (int): Offset in the complete result ... | def get_all_dataset_names(configuration=None, **kwargs):
# type: (Optional[Configuration], Any) -> List[str]
dataset = Dataset(configuration=configuration)
dataset['id'] = 'all dataset names' # only for error message if produced
return dataset._write_to_hdx('list', kwargs, 'id'... | 814,371 |
Get all resources from a list of datasets (such as returned by search)
Args:
datasets (List[Dataset]): list of datasets
Returns:
List[hdx.data.resource.Resource]: list of resources within those datasets | def get_all_resources(datasets):
# type: (List['Dataset']) -> List[hdx.data.resource.Resource]
resources = []
for dataset in datasets:
for resource in dataset.get_resources():
resources.append(resource)
return resources | 814,373 |
Get supplied dataset date as string in specified format.
If no format is supplied, an ISO 8601 string is returned.
Args:
dataset_date (Optional[datetime.datetime]): dataset date in datetime.datetime format
date_format (Optional[str]): Date format. None is taken to be ISO 8601.... | def _get_formatted_date(dataset_date, date_format=None):
# type: (Optional[datetime], Optional[str]) -> Optional[str]
if dataset_date:
if date_format:
return dataset_date.strftime(date_format)
else:
return dataset_date.date().isoformat()
... | 814,376 |
Get dataset date as string in specified format. For range returns start date.
If no format is supplied, an ISO 8601 string is returned.
Args:
date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.
Returns:
Optional[str]: Dataset date s... | def get_dataset_date(self, date_format=None):
# type: (Optional[str]) -> Optional[str]
dataset_date = self.get_dataset_date_as_datetime()
return self._get_formatted_date(dataset_date, date_format) | 814,377 |
Get dataset date as string in specified format. For range returns start date.
If no format is supplied, an ISO 8601 string is returned.
Args:
date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.
Returns:
Optional[str]: Dataset date s... | def get_dataset_end_date(self, date_format=None):
# type: (Optional[str]) -> Optional[str]
dataset_date = self.get_dataset_end_date_as_datetime()
return self._get_formatted_date(dataset_date, date_format) | 814,378 |
Set dataset date from datetime.datetime object
Args:
dataset_date (datetime.datetime): Dataset date
dataset_end_date (Optional[datetime.datetime]): Dataset end date
Returns:
None | def set_dataset_date_from_datetime(self, dataset_date, dataset_end_date=None):
# type: (datetime, Optional[datetime]) -> None
start_date = dataset_date.strftime('%m/%d/%Y')
if dataset_end_date is None:
self.data['dataset_date'] = start_date
else:
end_date... | 814,379 |
Parse dataset date from string using specified format. If no format is supplied, the function will guess.
For unambiguous formats, this should be fine.
Args:
dataset_date (str): Dataset date string
date_format (Optional[str]): Date format. If None is given, will attempt to guess... | def _parse_date(dataset_date, date_format):
# type: (str, Optional[str]) -> datetime
if date_format is None:
try:
return parser.parse(dataset_date)
except (ValueError, OverflowError) as e:
raisefrom(HDXError, 'Invalid dataset date!', e)
... | 814,380 |
Set dataset date from string using specified format. If no format is supplied, the function will guess.
For unambiguous formats, this should be fine.
Args:
dataset_date (str): Dataset date string
dataset_end_date (Optional[str]): Dataset end date string
date_format (... | def set_dataset_date(self, dataset_date, dataset_end_date=None, date_format=None):
# type: (str, Optional[str], Optional[str]) -> None
parsed_date = self._parse_date(dataset_date, date_format)
if dataset_end_date is None:
self.set_dataset_date_from_datetime(parsed_date)
... | 814,381 |
Set dataset date as a range from year or start and end year.
Args:
dataset_year (Union[str, int]): Dataset year given as string or int
dataset_end_year (Optional[Union[str, int]]): Dataset end year given as string or int
Returns:
None | def set_dataset_year_range(self, dataset_year, dataset_end_year=None):
# type: (Union[str, int], Optional[Union[str, int]]) -> None
if isinstance(dataset_year, int):
dataset_date = '01/01/%d' % dataset_year
elif isinstance(dataset_year, str):
dataset_date = '01/0... | 814,382 |
Set expected update frequency
Args:
update_frequency (str): Update frequency
Returns:
None | def set_expected_update_frequency(self, update_frequency):
# type: (str) -> None
try:
int(update_frequency)
except ValueError:
update_frequency = Dataset.transform_update_frequency(update_frequency)
if not update_frequency:
raise HDXError('Inv... | 814,384 |
Remove a tag
Args:
tag (str): Tag to remove
Returns:
bool: True if tag removed or False if not | def remove_tag(self, tag):
# type: (str) -> bool
return self._remove_hdxobject(self.data.get('tags'), tag, matchon='name') | 814,385 |
Return the dataset's location
Args:
locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.
Returns:
List[str]: list of locations or [] if there are none | def get_location(self, locations=None):
# type: (Optional[List[str]]) -> List[str]
countries = self.data.get('groups', None)
if not countries:
return list()
return [Locations.get_location_from_HDX_code(x['name'], locations=locations,
... | 814,386 |
Remove a location. If the location is not present, it is ignored.
Args:
location (str): Location to remove
Returns:
bool: True if location removed or False if not | def remove_location(self, location):
# type: (str) -> bool
res = self._remove_hdxobject(self.data.get('groups'), location, matchon='name')
if not res:
res = self._remove_hdxobject(self.data.get('groups'), location.upper(), matchon='name')
if not res:
res ... | 814,391 |
Set the dataset's maintainer.
Args:
maintainer (Union[User,Dict,str]): Either a user id or User metadata from a User object or dictionary.
Returns:
None | def set_maintainer(self, maintainer):
# type: (Union[hdx.data.user.User,Dict,str]) -> None
if isinstance(maintainer, hdx.data.user.User) or isinstance(maintainer, dict):
if 'id' not in maintainer:
maintainer = hdx.data.user.User.read_from_hdx(maintainer['name'], conf... | 814,393 |
Set the dataset's organization.
Args:
organization (Union[Organization,Dict,str]): Either an Organization id or Organization metadata from an Organization object or dictionary.
Returns:
None | def set_organization(self, organization):
# type: (Union[hdx.data.organization.Organization,Dict,str]) -> None
if isinstance(organization, hdx.data.organization.Organization) or isinstance(organization, dict):
if 'id' not in organization:
organization = hdx.data.orga... | 814,395 |
Get dataset showcase dict
Args:
showcase (Union[Showcase,Dict,str]): Either a showcase id or Showcase metadata from a Showcase object or dictionary
Returns:
dict: dataset showcase dict | def _get_dataset_showcase_dict(self, showcase):
# type: (Union[hdx.data.showcase.Showcase, Dict,str]) -> Dict
if isinstance(showcase, hdx.data.showcase.Showcase) or isinstance(showcase, dict):
if 'id' not in showcase:
showcase = hdx.data.showcase.Showcase.read_from_h... | 814,397 |
Add dataset to showcase
Args:
showcase (Union[Showcase,Dict,str]): Either a showcase id or showcase metadata from a Showcase object or dictionary
showcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. Defaults to showcases containing dataset... | def add_showcase(self, showcase, showcases_to_check=None):
# type: (Union[hdx.data.showcase.Showcase,Dict,str], List[hdx.data.showcase.Showcase]) -> bool
dataset_showcase = self._get_dataset_showcase_dict(showcase)
if showcases_to_check is None:
showcases_to_check = self.get... | 814,398 |
Add dataset to multiple showcases
Args:
showcases (List[Union[Showcase,Dict,str]]): A list of either showcase ids or showcase metadata from Showcase objects or dictionaries
showcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. Defaults to s... | def add_showcases(self, showcases, showcases_to_check=None):
# type: (List[Union[hdx.data.showcase.Showcase,Dict,str]], List[hdx.data.showcase.Showcase]) -> bool
if showcases_to_check is None:
showcases_to_check = self.get_showcases()
allshowcasesadded = True
for sho... | 814,399 |
Remove dataset from showcase
Args:
showcase (Union[Showcase,Dict,str]): Either a showcase id string or showcase metadata from a Showcase object or dictionary
Returns:
None | def remove_showcase(self, showcase):
# type: (Union[hdx.data.showcase.Showcase,Dict,str]) -> None
dataset_showcase = self._get_dataset_showcase_dict(showcase)
showcase = hdx.data.showcase.Showcase({'id': dataset_showcase['showcase_id']}, configuration=self.configuration)
showcas... | 814,400 |
Set the dataset to be of type requestable or not
Args:
requestable (bool): Set whether dataset is requestable. Defaults to True.
Returns:
None | def set_requestable(self, requestable=True):
# type: (bool) -> None
self.data['is_requestdata_type'] = requestable
if requestable:
self.data['private'] = False | 814,401 |
Set the resource that will be used for displaying QuickCharts in dataset preview
Args:
resource (Union[hdx.data.resource.Resource,Dict,str,int]): Either resource id or name, resource metadata from a Resource object or a dictionary or position
Returns:
bool: Returns True if reso... | def set_quickchart_resource(self, resource):
# type: (Union[hdx.data.resource.Resource,Dict,str,int]) -> bool
if isinstance(resource, int) and not isinstance(resource, bool):
resource = self.get_resources()[resource]
if isinstance(resource, hdx.data.resource.Resource) or isi... | 814,404 |
Create default resource views for all resources in dataset
Args:
create_datastore_views (bool): Whether to try to create resource views that point to the datastore
Returns:
None | def create_default_views(self, create_datastore_views=False):
# type: (bool) -> None
package = deepcopy(self.data)
if self.resources:
package['resources'] = self._convert_hdxobjects(self.resources)
data = {'package': package, 'create_datastore_views': create_datasto... | 814,405 |
Calls the remote CKAN
Args:
*args: Arguments to pass to remote CKAN call_action method
**kwargs: Keyword arguments to pass to remote CKAN call_action method
Returns:
Dict: The response from the remote CKAN call_action method | def call_remoteckan(self, *args, **kwargs):
# type: (Any, Any) -> Dict
requests_kwargs = kwargs.get('requests_kwargs', dict())
credentials = self._get_credentials()
if credentials:
requests_kwargs['auth'] = credentials
kwargs['requests_kwargs'] = requests_kwa... | 814,408 |
Overwrite keyword arguments with environment variables
Args:
**kwargs: See below
hdx_url (str): HDX url to use. Overrides hdx_site.
hdx_site (str): HDX site to use eg. prod, test. Defaults to test.
hdx_key (str): Your HDX key. Ignored if hdx_read_only = True.
... | def _environment_variables(**kwargs):
# type: (Any) -> Any
hdx_key = os.getenv('HDX_KEY')
if hdx_key is not None:
kwargs['hdx_key'] = hdx_key
hdx_url = os.getenv('HDX_URL')
if hdx_url is not None:
kwargs['hdx_url'] = hdx_url
else:
... | 814,409 |
Set up remote CKAN from provided CKAN or by creating from configuration
Args:
remoteckan (Optional[ckanapi.RemoteCKAN]): CKAN instance. Defaults to setting one up from configuration.
Returns:
None | def setup_remoteckan(self, remoteckan=None, **kwargs):
# type: (Optional[ckanapi.RemoteCKAN], Any) -> None
if remoteckan is None:
self._remoteckan = self.create_remoteckan(self.get_hdx_site_url(), full_agent=self.get_user_agent(),
... | 814,411 |
Reads the resource view given by identifier from HDX and returns ResourceView object
Args:
identifier (str): Identifier of resource view
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[ResourceView]: Re... | def read_from_hdx(identifier, configuration=None):
# type: (str, Optional[Configuration]) -> Optional['ResourceView']
resourceview = ResourceView(configuration=configuration)
result = resourceview._load_from_hdx('resource view', identifier)
if result:
return resourc... | 814,428 |
Read all resource views for a resource given by identifier from HDX and returns list of ResourceView objects
Args:
identifier (str): Identifier of resource
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
List[Re... | def get_all_for_resource(identifier, configuration=None):
# type: (str, Optional[Configuration]) -> List['ResourceView']
resourceview = ResourceView(configuration=configuration)
success, result = resourceview._read_from_hdx('resource view', identifier, 'id', ResourceView.actions()['lis... | 814,429 |
Copies all fields except id, resource_id and package_id from another resource view.
Args:
resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary
Returns:
None | def copy(self, resource_view):
# type: (Union[ResourceView,Dict,str]) -> None
if isinstance(resource_view, str):
if is_valid_uuid(resource_view) is False:
raise HDXError('%s is not a valid resource view id!' % resource_view)
resource_view = ResourceView.r... | 814,432 |
Reads the user given by identifier from HDX and returns User object
Args:
identifier (str): Identifier of user
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[User]: User object if successful read, None... | def read_from_hdx(identifier, configuration=None):
# type: (str, Optional[Configuration]) -> Optional['User']
user = User(configuration=configuration)
result = user._load_from_hdx('user', identifier)
if result:
return user
return None | 814,436 |
Emails a user.
Args:
subject (str): Email subject
text_body (str): Plain text email body
html_body (str): HTML email body
sender (Optional[str]): Email sender. Defaults to SMTP username.
**kwargs: See below
mail_options (List): Mail option... | def email(self, subject, text_body, html_body=None, sender=None, **kwargs):
# type: (str, str, Optional[str], Optional[str], Any) -> None
self.configuration.emailer().send([self.data['email']], subject, text_body, html_body=html_body, sender=sender,
**k... | 814,439 |
Get all users in HDX
Args:
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
**kwargs: See below
q (str): Restrict to names containing a string. Defaults to all users.
order_by (str): Field by which to sort - any user f... | def get_all_users(configuration=None, **kwargs):
# type: (Optional[Configuration], Any) -> List['User']
user = User(configuration=configuration)
user['id'] = 'all users' # only for error message if produced
result = user._write_to_hdx('list', kwargs, 'id')
users = list(... | 814,440 |
Get organizations in HDX that this user is a member of.
Args:
permission (str): Permission to check for. Defaults to 'read'.
Returns:
List[Organization]: List of organizations in HDX that this user is a member of | def get_organizations(self, permission='read'):
# type: (str) -> List['Organization']
success, result = self._read_from_hdx('user', self.data['name'], 'id', self.actions()['listorgs'],
permission=permission)
organizations = list()
if... | 814,442 |
Facade to simplify project setup that calls project main function
Args:
projectmainfn ((None) -> None): main function of project
**kwargs: configuration parameters to pass to HDX Configuration class
Returns:
None | def facade(projectmainfn, **kwargs):
# type: (Callable[[None], None], Any) -> None
#
# Setting up configuration
#
site_url = Configuration._create(**kwargs)
logger.info('--------------------------------------------------')
logger.info('> Using HDX Python API Library %s' % Configuration.apiv... | 814,446 |
Initializes a Python struct-based byte stream operation.
Args:
format_string (str): format string as used by Python struct.
Raises:
FormatError: if the struct object cannot be created from the format string. | def __init__(self, format_string):
try:
struct_object = struct.Struct(format_string)
except (TypeError, struct.error) as exception:
raise errors.FormatError((
'Unable to create struct object from data type definition '
'with error: {0!s}').format(exception))
super(Struc... | 814,456 |
Read values from a byte stream.
Args:
byte_stream (bytes): byte stream.
Returns:
tuple[object, ...]: values copied from the byte stream.
Raises:
IOError: if byte stream cannot be read.
OSError: if byte stream cannot be read. | def ReadFrom(self, byte_stream):
try:
return self._struct.unpack_from(byte_stream)
except (TypeError, struct.error) as exception:
raise IOError('Unable to read byte stream with error: {0!s}'.format(
exception)) | 814,457 |
Writes values to a byte stream.
Args:
values (tuple[object, ...]): values to copy to the byte stream.
Returns:
bytes: byte stream.
Raises:
IOError: if byte stream cannot be written.
OSError: if byte stream cannot be written. | def WriteTo(self, values):
try:
return self._struct.pack(*values)
except (TypeError, struct.error) as exception:
raise IOError('Unable to write stream with error: {0!s}'.format(
exception)) | 814,458 |
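`ReadFrom` and `WriteTo` are thin wrappers over the stdlib `struct` module; this round-trip shows the underlying calls they delegate to (the `'<I'` format string is an example):

```python
import struct

packer = struct.Struct('<I')              # little-endian unsigned 32-bit integer
byte_stream = packer.pack(1234)           # what WriteTo produces
values = packer.unpack_from(byte_stream)  # what ReadFrom returns
print(byte_stream, values)                # b'\xd2\x04\x00\x00' (1234,)
```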
Reads the showcase given by identifier from HDX and returns Showcase object
Args:
identifier (str): Identifier of showcase
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[Showcase]: Showcase object if s... | def read_from_hdx(identifier, configuration=None):
# type: (str, Optional[Configuration]) -> Optional['Showcase']
showcase = Showcase(configuration=configuration)
result = showcase._load_from_hdx('showcase', identifier)
if result:
return showcase
return None | 814,480 |
Get showcase dataset dict
Args:
dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata from a Dataset object or dictionary
Returns:
Dict: showcase dataset dict | def _get_showcase_dataset_dict(self, dataset):
# type: (Union[hdx.data.dataset.Dataset,Dict,str]) -> Dict
if isinstance(dataset, hdx.data.dataset.Dataset) or isinstance(dataset, dict):
if 'id' not in dataset:
dataset = hdx.data.dataset.Dataset.read_from_hdx(dataset['... | 814,482 |
Add a dataset
Args:
dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata either from a Dataset object or a dictionary
datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.
Returns... | def add_dataset(self, dataset, datasets_to_check=None):
# type: (Union[hdx.data.dataset.Dataset,Dict,str], List[hdx.data.dataset.Dataset]) -> bool
showcase_dataset = self._get_showcase_dataset_dict(dataset)
if datasets_to_check is None:
datasets_to_check = self.get_datasets(... | 814,483 |
Add multiple datasets
Args:
datasets (List[Union[Dataset,Dict,str]]): A list of either dataset ids or dataset metadata from Dataset objects or dictionaries
datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.
... | def add_datasets(self, datasets, datasets_to_check=None):
# type: (List[Union[hdx.data.dataset.Dataset,Dict,str]], List[hdx.data.dataset.Dataset]) -> bool
if datasets_to_check is None:
datasets_to_check = self.get_datasets()
alldatasetsadded = True
for dataset in dat... | 814,484 |
Append values at the end of the list
Allow chaining.
Args:
values: values to be appended at the end.
Example:
>>> from ww import l
>>> lst = l([])
>>> lst.append(1)
[1]
>>> lst
[1]
>>> lst.append(2... | def append(self, *values):
for value in values:
list.append(self, value)
return self | 814,488 |
Add all values of all iterables at the end of the list
Args:
iterables: iterable which content to add at the end
Example:
>>> from ww import l
>>> lst = l([])
>>> lst.extend([1, 2])
[1, 2]
>>> lst
[1, 2]
>... | def extend(self, *iterables):
for value in iterables:
list.extend(self, value)
return self | 814,489 |
Process value for writing into a cell.
Args:
value: any type of variable
Returns:
json serialized value if value is list or dict, else value | def normalize_cell_value(value):
if isinstance(value, dict) or isinstance(value, list):
return json.dumps(value)
return value | 814,490 |
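The effect of `normalize_cell_value` on the three input cases it handles (values are illustrative):

```python
import json

def normalize_cell_value(value):
    # Containers are serialized to JSON so they fit in a single cell.
    if isinstance(value, (dict, list)):
        return json.dumps(value)
    return value

print(normalize_cell_value({'a': 1}))  # '{"a": 1}' - serialized for the cell
print(normalize_cell_value([1, 2]))    # '[1, 2]'
print(normalize_cell_value('plain'))   # 'plain' - passed through unchanged
```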
Creates an Excel file containing data returned by the Analytics API
Args:
data: Analytics API data as a list of dicts
output_file_name: File name for output Excel file (use .xlsx extension). | def export_analytics_data_to_excel(data, output_file_name, result_info_key, identifier_keys):
workbook = create_excel_workbook(data, result_info_key, identifier_keys)
workbook.save(output_file_name)
print('Saved Excel file to {}'.format(output_file_name)) | 814,590 |
Creates CSV files containing data returned by the Analytics API.
Creates one file per requested endpoint and saves it into the
specified output_folder
Args:
data: Analytics API data as a list of dicts
output_folder: Path to a folder to save the CSV files into | def export_analytics_data_to_csv(data, output_folder, result_info_key, identifier_keys):
workbook = create_excel_workbook(data, result_info_key, identifier_keys)
suffix = '.csv'
if not os.path.exists(output_folder):
os.makedirs(output_folder)
for worksheet in workbook.worksheets:
... | 814,591 |
Adjust column width in worksheet.
Args:
worksheet: worksheet to be adjusted | def adjust_column_width(worksheet):
dims = {}
padding = 1
for row in worksheet.rows:
for cell in row:
if not cell.value:
continue
dims[cell.column] = max(
dims.get(cell.column, 0),
len(str(cell.value))
)
for... | 814,600 |
Factory for creating the correct type of Response based on the data.
Args:
endpoint_name (str) - The endpoint of the request, such as "property/value"
json_body - The response body in json format.
original_response (response object) - server response returned from an http req... | def create(cls, endpoint_name, json_body, original_response):
if endpoint_name == "property/value_report":
return ValueReportResponse(endpoint_name, json_body, original_response)
if endpoint_name == "property/rental_report":
return RentalReportResponse(endpoint_name, j... | 814,605 |
Call the value_report component
Value Report only supports a single address.
Args:
- address
- zipcode
Kwargs:
- report_type - "full" or "summary". Default is "full".
- format_type - "json", "pdf", "xlsx" or "all". Default is "json". | def value_report(self, address, zipcode, report_type="full", format_type="json"):
query_params = {
"report_type": report_type,
"format": format_type,
"address": address,
"zipcode": zipcode
}
return self._api_client.fetch_synchronous("prop... | 814,724 |
Call the rental_report component
Rental Report only supports a single address.
Args:
- address
- zipcode
Kwargs:
- format_type - "json", "xlsx" or "all". Default is "json". | def rental_report(self, address, zipcode, format_type="json"):
# only json is supported by rental report.
query_params = {
"format": format_type,
"address": address,
"zipcode": zipcode
}
return self._api_client.fetch_synchronous("property/re... | 814,725 |
Call the zip component_mget endpoint
Args:
- zip_data - As described in the class docstring.
- components - A list of strings for each component to include in the request.
Example: ["zip/details", "zip/volatility"] | def component_mget(self, zip_data, components):
if not isinstance(components, list):
print("Components param must be a list")
return
query_params = {"components": ",".join(components)}
return self.fetch_identifier_component(
"zip/component_mget", zi... | 814,728 |
Deserialize property json data into a Property object
Args:
json_data (dict): The json data for this property
Returns:
Property object | def create_from_json(cls, json_data):
prop = Property()
address_info = json_data["address_info"]
prop.address = address_info["address"]
prop.block_id = address_info["block_id"]
prop.zipcode = address_info["zipcode"]
prop.zipcode_plus4 = address_info["zipcode_plus... | 814,744 |
Deserialize block json data into a Block object
Args:
json_data (dict): The json data for this block
Returns:
Block object | def create_from_json(cls, json_data):
block = Block()
block_info = json_data["block_info"]
block.block_id = block_info["block_id"]
block.num_bins = block_info["num_bins"] if "num_bins" in block_info else None
block.property_type = block_info["property_type"] if "property... | 814,746 |
Deserialize zipcode json data into a ZipCode object
Args:
json_data (dict): The json data for this zipcode
Returns:
Zip object | def create_from_json(cls, json_data):
zipcode = ZipCode()
zipcode.zipcode = json_data["zipcode_info"]["zipcode"]
zipcode.meta = json_data["meta"] if "meta" in json_data else None
zipcode.component_results = _create_component_results(json_data, "zipcode_info")
return zi... | 814,748 |
Deserialize msa json data into a Msa object
Args:
json_data (dict): The json data for this msa
Returns:
Msa object | def create_from_json(cls, json_data):
msa = Msa()
msa.msa = json_data["msa_info"]["msa"]
msa.meta = json_data["meta"] if "meta" in json_data else None
msa.component_results = _create_component_results(json_data, "msa_info")
return msa | 814,750 |
Start yielding items when a condition arises.
Args:
iterable: the iterable to filter.
condition: if the callable returns True once, start yielding
items. If it's not a callable, it will be converted
to one as `lambda item: item == condition`.
Example:
... | def starts_when(iterable, condition):
# type: (Iterable, Union[Callable, Any]) -> Iterable
if not callable(condition):
cond_value = condition
def condition(x):
return x == cond_value
return itertools.dropwhile(lambda x: not condition(x), iterable) | 814,755 |
Stop yielding items when a condition arises.
Args:
iterable: the iterable to filter.
condition: if the callable returns True once, stop yielding
items. If it's not a callable, it will be converted
to one as `lambda item: item == condition`.
Example:
... | def stops_when(iterable, condition):
# type: (Iterable, Union[Callable, Any]) -> Iterable
if not callable(condition):
cond_value = condition
def condition(x):
return x == cond_value
return itertools.takewhile(lambda x: not condition(x), iterable) | 814,756 |
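Both helpers reduce to `itertools` primitives; this self-contained sketch exercises the callable form of the condition for `starts_when` and the plain-value form for `stops_when`:

```python
import itertools

def starts_when(iterable, condition):
    # Non-callable conditions become an equality test.
    if not callable(condition):
        cond_value = condition
        def condition(x):
            return x == cond_value
    return itertools.dropwhile(lambda x: not condition(x), iterable)

def stops_when(iterable, condition):
    if not callable(condition):
        cond_value = condition
        def condition(x):
            return x == cond_value
    return itertools.takewhile(lambda x: not condition(x), iterable)

print(list(starts_when(range(10), lambda x: x > 5)))  # [6, 7, 8, 9]
print(list(stops_when(range(10), 7)))                 # [0, 1, 2, 3, 4, 5, 6]
```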
Return key, self[key] as generator for key in keys.
Raise KeyError if a key does not exist
Args:
keys: Iterable containing keys
Example:
>>> from ww import d
>>> list(d({1: 1, 2: 2, 3: 3}).isubset(1, 3))
[(1, 1), (3, 3)] | def isubset(self, *keys):
# type: (*Hashable) -> ww.g
return ww.g((key, self[key]) for key in keys) | 814,849 |
Create a new d from an iterable of keys
Args:
iterable: Iterable containing keys
value: value to associate with each key.
If callable, will be value(key)
Returns: new DictWrapper
Example:
>>> from ww import d
>>> sorted(d.fromkeys('123', value=4).items... | def fromkeys(cls, iterable, value=None):
# TODO : type: (Iterable, Union[Any, Callable]) -> DictWrapper
# https://github.com/python/mypy/issues/2254
if not callable(value):
return cls(dict.fromkeys(iterable, value))
return cls((key, value(key)) for key in iterable) | 814,851 |
Merge other into self and return a new dict
Args:
other: dict to add in self
Returns: Merged dict
Example:
>>> from ww import d
>>> current_dict = d({1: 1, 2: 2, 3: 3})
>>> to_merge_dict = {3: 4, 4: 5}
>>> current_dict + to_merge_dict
... | def __add__(self, other):
# type: (dict) -> DictWrapper
copy = self.__class__(self.copy())
return copy.merge(other) | 814,852 |
Merge other into self and return a new dict
Args:
other: dict to add in self
Returns: Merged dict
Example:
>>> from ww import d
>>> current_dict = {1: 1, 2: 2, 3: 3}
>>> to_merge_dict = d({3: 4, 4: 5})
>>> current_dict + to_merge_dict
... | def __radd__(self, other):
# type: (dict) -> DictWrapper
copy = self.__class__(other.copy())
return copy.merge(self) | 814,853 |
Generates an Excel workbook object given api_data returned by the Analytics API
Args:
api_data: Analytics API data as a list of dicts (one per identifier)
result_info_key: the key in api_data dicts that contains the data results
identifier_keys: the list of keys used as requested identifier... | def get_excel_workbook(api_data, result_info_key, identifier_keys):
cleaned_data = []
for item_data in api_data:
result_info = item_data.pop(result_info_key, {})
cleaned_item_data = {}
if 'meta' in item_data:
meta = item_data.pop('meta')
cleaned_item_data... | 814,855 |
Writes rest of the worksheets to workbook.
Args:
workbook: workbook to write into
data_list: Analytics API data as a list of dicts
result_info_key: the key in api_data dicts that contains the data results
identifier_keys: the list of keys used as requested identifiers
... | def write_worksheets(workbook, data_list, result_info_key, identifier_keys):
# we can use the first item to figure out the worksheet keys
worksheet_keys = get_worksheet_keys(data_list[0], result_info_key)
for key in worksheet_keys:
title = key.split('/')[1]
title = utilities.convert... | 814,856 |
Gets sorted keys from the dict, ignoring result_info_key and 'meta' key
Args:
data_dict: dict to pull keys from
Returns:
list of keys in the dict other than the result_info_key | def get_worksheet_keys(data_dict, result_info_key):
keys = set(data_dict.keys())
keys.remove(result_info_key)
if 'meta' in keys:
keys.remove('meta')
return sorted(keys) | 814,858 |
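A worked example of `get_worksheet_keys` with illustrative data; the result-info key and `'meta'` are dropped and the remainder sorted:

```python
def get_worksheet_keys(data_dict, result_info_key):
    keys = set(data_dict.keys())
    keys.remove(result_info_key)
    if 'meta' in keys:
        keys.remove('meta')
    return sorted(keys)

row = {'result_info': {}, 'meta': {}, 'zip/details': {}, 'zip/volatility': {}}
print(get_worksheet_keys(row, 'result_info'))  # ['zip/details', 'zip/volatility']
```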
Gets all possible keys from a list of dicts, sorting by leading_columns first
Args:
data_list: list of dicts to pull keys from
leading_columns: list of keys to put first in the result
Returns:
list of keys to be included as columns in excel worksheet | def get_keys(data_list, leading_columns=LEADING_COLUMNS):
all_keys = set().union(*(list(d.keys()) for d in data_list))
leading_keys = []
for key in leading_columns:
if key not in all_keys:
continue
leading_keys.append(key)
all_keys.remove(key)
return leading_k... | 814,859 |
Writes data into worksheet.
Args:
worksheet: worksheet to write into
data: data to be written | def write_data(worksheet, data):
if not data:
return
if isinstance(data, list):
rows = data
else:
rows = [data]
if isinstance(rows[0], dict):
keys = get_keys(rows)
worksheet.append([utilities.convert_snake_to_title_case(key) for key in keys])
for ro... | 814,860 |
Helper method to flatten a nested dict of dicts (one level)
Example:
{'a': {'b': 'bbb'}} becomes {'a_-_b': 'bbb'}
The separator '_-_' gets formatted later for the column headers
Args:
data: the dict to flatten
top_level_keys: a list of the top level key... | def flatten_top_level_keys(data, top_level_keys):
flattened_data = {}
for top_level_key in top_level_keys:
if data[top_level_key] is None:
flattened_data[top_level_key] = None
else:
for key in data[top_level_key]:
flattened_data['{}_-_{}'.format(top_... | 814,863 |
Create an authentication handler for HouseCanary API V1 requests
Args:
auth_key (string) - The HouseCanary API auth key
auth_secret (string) - The HouseCanary API secret | def __init__(self, auth_key, auth_secret):
self._auth_key = auth_key
self._auth_secret = auth_secret | 815,084 |
returns a blocking generator yielding Slack event objects
params:
- etypes(str): If defined, Slack event type(s) not matching
the filter will be ignored. See https://api.slack.com/events for
a listing of valid event types.
- idle_timeout(int): optional maximum amount of t... | def events(self, *etypes, idle_timeout=None):
while self._state != STATE_STOPPED:
try:
yield self.get_event(*etypes, timeout=idle_timeout)
except Queue.Empty:
log.info('idle timeout reached for events()')
return | 815,365 |
Send a message to a channel or group via Slack RTM socket, returning
the resulting message object
params:
- text(str): Message text to send
- channel(Channel): Target channel
- confirm(bool): If True, wait for a reply-to confirmation before returning. | def send_msg(self, text, channel, confirm=True):
self._send_id += 1
msg = SlackMsg(self._send_id, channel.id, text)
self.ws.send(msg.json)
self._stats['messages_sent'] += 1
if confirm:
# Wait for confirmation our message was received
for e in se... | 815,366 |
Convert a 2D feature to a 3D feature by sampling a raster
Parameters:
raster (rasterio): raster to provide the z coordinate
feature (dict): fiona feature record to convert
Returns:
result (Point or LineString): shapely Point or LineString of xyz coordinate triples | def drape(raster, feature):
coords = feature['geometry']['coordinates']
geom_type = feature['geometry']['type']
if geom_type == 'Point':
xyz = sample(raster, [coords])
result = Point(xyz[0])
elif geom_type == 'LineString':
xyz = sample(raster, coords)
points = [Poin... | 815,849 |
Sample a raster at given coordinates
Given a list of coordinates, return a list of x,y,z triples with z coordinates sampled from an input raster
Parameters:
raster (rasterio): raster dataset to sample
coords: array of tuples containing coordinate pairs (x,y) or triples (x,y,z)
Returns:
... | def sample(raster, coords):
if len(coords[0]) == 3:
logging.info('Input is a 3D geometry, z coordinate will be updated.')
z = raster.sample([(x, y) for x, y, z in coords], indexes=raster.indexes)
else:
z = raster.sample(coords, indexes=raster.indexes)
result = [(vert[0], vert[1... | 815,850 |
Annotate locations in a string that contain
periods as being true periods or periods
that are a part of shorthand (and thus should
not be treated as punctuation marks).
Arguments:
----------
text : str
split_locations : list<int>, same length as text. | def protect_shorthand(text, split_locations):
word_matches = list(re.finditer(word_with_period, text))
total_words = len(word_matches)
for i, match in enumerate(word_matches):
match_start = match.start()
match_end = match.end()
for char_pos in range(match_start, match_end):
... | 816,709 |
Use an integer list to split the string
contained in `text`.
Arguments:
----------
text : str, same length as locations.
locations : list<int>, contains values
'SHOULD_SPLIT', 'UNDECIDED', and
'SHOULD_NOT_SPLIT'. Will create
strings between each 'SHOULD_S... | def split_with_locations(text, locations):
start = 0
for pos, decision in enumerate(locations):
if decision == SHOULD_SPLIT:
if start != pos:
yield text[start:pos]
start = pos
if start != len(text):
yield text[start:] | 816,710 |
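A self-contained demonstration of `split_with_locations`; the sentinel constants are given illustrative integer values since their real definitions are not shown here:

```python
# Illustrative sentinel values; the real module defines its own constants.
SHOULD_SPLIT, UNDECIDED, SHOULD_NOT_SPLIT = 0, 1, 2

def split_with_locations(text, locations):
    start = 0
    for pos, decision in enumerate(locations):
        if decision == SHOULD_SPLIT:
            if start != pos:
                yield text[start:pos]
            start = pos
    if start != len(text):
        yield text[start:]

text = 'ab cd'
locations = [UNDECIDED, UNDECIDED, SHOULD_SPLIT, UNDECIDED, UNDECIDED]
print(list(split_with_locations(text, locations)))  # ['ab', ' cd']
```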
Convert a single string into a list of substrings
split along punctuation and word boundaries. Keep
whitespace intact by always attaching it to the
previous token.
Arguments:
----------
text : str
normalize_ascii : bool, perform some replacements
on non-ascii characters ... | def tokenize(text, normalize_ascii=True):
# 1. If there's no punctuation, return immediately
if no_punctuation.match(text):
return [text]
# 2. let's standardize the input text to ascii (if desired)
# Note: this will no longer respect input-to-output character positions
if normalize_asci... | 816,713 |
Description:
Set the input
Call with no arguments to get current setting
Arguments:
opt: string
Name provided from input list or key from yaml ("HDMI 1" or "hdmi_1") | def input(self, opt):
for key in self.command['input']:
if (key == opt) or (self.command['input'][key]['name'] == opt):
return self._send_command(['input', key, 'command'])
return False | 817,017 |
Description:
Change Channel (Digital)
Pass Channels "XX.YY" as TV.digital_channel_air(XX, YY)
Arguments:
opt1: integer
1-99: Major Channel
opt2: integer (optional)
1-99: Minor Channel | def digital_channel_air(self, opt1='?', opt2='?'):
if opt1 == '?':
parameter = '?'
elif opt2 == '?':
parameter = str(opt1).rjust(4, "0")
else:
parameter = '{:02d}{:02d}'.format(opt1, opt2)
return self._send_command('digital_channel_air', param... | 817,018 |
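The major/minor channel pair is packed into a four-digit parameter; this shows the two formatting branches used above (channel numbers are illustrative):

```python
opt1, opt2 = 12, 3
print('{:02d}{:02d}'.format(opt1, opt2))  # '1203' for channel 12.3
print(str(7).rjust(4, '0'))               # '0007' when only a major channel is given
```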
Description:
Change Channel (Digital)
Pass Channels "XXX.YYY" as TV.digital_channel_cable(XXX, YYY)
Arguments:
opt1: integer
1-999: Major Channel
opt2: integer (optional)
0-999: Minor Channel | def digital_channel_cable(self, opt1='?', opt2=0):
if opt1 == '?':
parameter = '?'
elif self.command['digital_channel_cable_minor'] == '':
parameter = str(opt1).rjust(4, "0")
else:
self._send_command('digital_channel_cable_minor', str(opt1).rjust(3, "... | 817,019 |
Perform aggregation
Arguments:
:_aggregations_params: Dict of aggregation params. Root key is an
aggregation name. Required.
:_raise_on_empty: Boolean indicating whether to raise an exception
when IndexNotFoundException occurs. Optional,
... | def aggregate(self, **params):
_aggregations_params = params.pop('_aggregations_params', None)
_raise_on_empty = params.pop('_raise_on_empty', False)
if not _aggregations_params:
raise Exception('Missing _aggregations_params')
# Set limit so ES won't complain. It i... | 817,191 |
Set proper headers.
Sets following headers:
Allow
Access-Control-Allow-Methods
Access-Control-Allow-Headers
Arguments:
:methods: Sequence of HTTP method names that are valid for the
requested URI | def _set_options_headers(self, methods):
request = self.request
response = request.response
response.headers['Allow'] = ', '.join(sorted(methods))
if 'Access-Control-Request-Method' in request.headers:
response.headers['Access-Control-Allow-Methods'] = \
... | 817,304 |
Get names of HTTP methods that can be used at requested URI.
Arguments:
:actions_map: Map of actions. Must have the same structure as
self._item_actions and self._collection_actions | def _get_handled_methods(self, actions_map):
methods = ('OPTIONS',)
defined_actions = []
for action_name in actions_map.keys():
view_method = getattr(self, action_name, None)
method_exists = view_method is not None
method_defined = view_method != sel... | 817,305 |
Return a list of generated strings.
Args:
cnt (int): length of list
unique (bool): whether to make entries unique
Returns:
list.
We keep track of total attempts because a template may
specify something impossible to attain, like [1-9]{} with cnt==10... | def render_list(self, cnt, unique=False, progress_callback=None, **kwargs):
rendered_list = []
i = 0
total_attempts = 0
while True:
if i >= cnt:
break
if total_attempts > cnt * self.unique_attempts_factor:
raise St... | 818,704 |
Get the path from a given url, including the querystring.
Args:
url (str)
Returns:
str | def get_path(url):
url = urlsplit(url)
path = url.path
if url.query:
path += "?{}".format(url.query)
return path | 818,848 |
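`get_path` round-trips through `urlsplit`; note the query string is re-attached but any fragment is dropped. This sketch assumes the Python 3 stdlib import, which the original may source differently:

```python
from urllib.parse import urlsplit

def get_path(url):
    url = urlsplit(url)
    path = url.path
    if url.query:
        path += '?{}'.format(url.query)
    return path

print(get_path('https://example.com/a/b?x=1#frag'))  # '/a/b?x=1' - fragment dropped
print(get_path('https://example.com/a/b'))           # '/a/b'
```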
Constructor
Args:
content (str): Markdown text
css (str): Custom CSS style. If not set, use default CSS style.
image_root (str): Root directory for inline images. | def __init__(self, content, css=None, image_root='.'):
self._md = markdown.Markdown(extensions=[
'markdown.extensions.tables',
'markdown.extensions.meta'])
self._html = None
self._inline_images = None
self._convert(content, css, image_root) | 818,920 |
Initialize the object and create the week day map.
Args:
workdays: List or tuple of week days considered 'work days'.
Anything not in this list is considered a rest day.
Defaults to [MO, TU, WE, TH, FR].
holidays: List or tuple of holidays (or strings)... | def __init__(self, workdays=None, holidays=None):
if workdays is None:
self.workdays = [MO, TU, WE, TH, FR]
else:
self.workdays = sorted(list(set(workdays))) # sorted and unique
if holidays is None:
holidays = []
# create week day ... | 818,993 |
Check if a given date is a work date, ignoring holidays.
Args:
date (date, datetime or str): Date to be checked.
Returns:
bool: True if the date is a work date, False otherwise. | def isworkday(self, date):
date = parsefun(date)
return self.weekdaymap[date.weekday()].isworkday | 818,994 |
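A reduced sketch of the weekday check, assuming `MO`..`FR` are the stdlib `weekday()` integers 0-4; the real class precomputes a richer `weekdaymap`:

```python
from datetime import date

MO, TU, WE, TH, FR = range(5)       # assumption: weekday() integers, Monday == 0
workdays = {MO, TU, WE, TH, FR}

def isworkday(d):
    # Holidays are ignored here, matching the docstring above.
    return d.weekday() in workdays

print(isworkday(date(2024, 1, 5)))  # True  - a Friday
print(isworkday(date(2024, 1, 6)))  # False - a Saturday
```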