Columns (types and value-length ranges):

repo              string  length 7 to 55
path              string  length 4 to 223
func_name         string  length 1 to 134
original_string   string  length 75 to 104k
language          string  1 value
code              string  length 75 to 104k
code_tokens       list    length 19 to 28.4k
docstring         string  length 1 to 46.9k
docstring_tokens  list    length 1 to 1.97k
sha               string  length 40 to 40
url               string  length 87 to 315
partition         string  1 value
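The column layout above matches the CodeSearchNet-style corpora published on the Hugging Face Hub. Assuming the split shown here is available in that form (the "code_search_net"/"python" identifier below is a placeholder, not confirmed by this excerpt), rows can be loaded and inspected with the `datasets` library:

# Minimal sketch; the dataset identifier is a placeholder.
from datasets import load_dataset

ds = load_dataset("code_search_net", "python", split="train")
row = ds[0]
print(row["repo"], row["path"], row["func_name"])
print(row["docstring"])
print(row["code"])  # for the records shown here, identical to row["original_string"]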
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
service_post_save
python
def service_post_save(instance, *args, **kwargs):
    """
    Used to do a service full check when saving it.
    """
    # check service
    if instance.is_monitored and settings.REGISTRY_SKIP_CELERY:
        check_service(instance.id)
    elif instance.is_monitored:
        check_service.delay(instance.id)
[ "def", "service_post_save", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# check service", "if", "instance", ".", "is_monitored", "and", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "check_service", "(", "instance", ".", "id", ")", "elif", "instance", ".", "is_monitored", ":", "check_service", ".", "delay", "(", "instance", ".", "id", ")" ]
Used to do a service full check when saving it.
[ "Used", "to", "do", "a", "service", "full", "check", "when", "saving", "it", "." ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1871-L1880
train
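service_post_save and the handlers in the next rows are Django signal receivers rather than model methods. The excerpt itself confirms the wiring style: Service.update_layers and Service.update_validity below temporarily disconnect and reconnect these handlers via signals.post_save.disconnect(...) / connect(...). A sketch of the registration models.py would perform (the Issue sender name is inferred from the issue_post_delete docstring and is not shown in this excerpt):

from django.db.models import signals

# Handlers defined in this dump; senders as used by the
# disconnect()/connect() calls visible in the Service methods below.
signals.post_save.connect(service_post_save, sender=Service)
signals.pre_save.connect(layer_pre_save, sender=Layer)
signals.post_save.connect(layer_post_save, sender=Layer)
signals.post_delete.connect(issue_post_delete, sender=Issue)  # Issue model assumed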
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
layer_pre_save
python
def layer_pre_save(instance, *args, **kwargs):
    """
    Used to check layer validity.
    """
    is_valid = True
    # we do not need to check validity for WM layers
    if not instance.service.type == 'Hypermap:WorldMap':
        # 0. a layer is invalid if its service its invalid as well
        if not instance.service.is_valid:
            is_valid = False
            LOGGER.debug('Layer with id %s is marked invalid because its service is invalid' % instance.id)
        # 1. a layer is invalid with an extent within (-2, -2, +2, +2)
        if instance.bbox_x0 > -2 and instance.bbox_x1 < 2 and instance.bbox_y0 > -2 and instance.bbox_y1 < 2:
            is_valid = False
            LOGGER.debug(
                'Layer with id %s is marked invalid because its extent is within (-2, -2, +2, +2)' % instance.id
            )
    instance.is_valid = is_valid
[ "def", "layer_pre_save", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "is_valid", "=", "True", "# we do not need to check validity for WM layers", "if", "not", "instance", ".", "service", ".", "type", "==", "'Hypermap:WorldMap'", ":", "# 0. a layer is invalid if its service its invalid as well", "if", "not", "instance", ".", "service", ".", "is_valid", ":", "is_valid", "=", "False", "LOGGER", ".", "debug", "(", "'Layer with id %s is marked invalid because its service is invalid'", "%", "instance", ".", "id", ")", "# 1. a layer is invalid with an extent within (-2, -2, +2, +2)", "if", "instance", ".", "bbox_x0", ">", "-", "2", "and", "instance", ".", "bbox_x1", "<", "2", "and", "instance", ".", "bbox_y0", ">", "-", "2", "and", "instance", ".", "bbox_y1", "<", "2", ":", "is_valid", "=", "False", "LOGGER", ".", "debug", "(", "'Layer with id %s is marked invalid because its extent is within (-2, -2, +2, +2)'", "%", "instance", ".", "id", ")", "instance", ".", "is_valid", "=", "is_valid" ]
Used to check layer validity.
[ "Used", "to", "check", "layer", "validity", "." ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1883-L1905
train
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
layer_post_save
python
def layer_post_save(instance, *args, **kwargs):
    """
    Used to do a layer full check when saving it.
    """
    if instance.is_monitored and instance.service.is_monitored:
        # index and monitor
        if not settings.REGISTRY_SKIP_CELERY:
            check_layer.delay(instance.id)
        else:
            check_layer(instance.id)
    else:
        # just index
        index_layer(instance.id)
[ "def", "layer_post_save", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "instance", ".", "is_monitored", "and", "instance", ".", "service", ".", "is_monitored", ":", "# index and monitor", "if", "not", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "check_layer", ".", "delay", "(", "instance", ".", "id", ")", "else", ":", "check_layer", "(", "instance", ".", "id", ")", "else", ":", "# just index", "index_layer", "(", "instance", ".", "id", ")" ]
Used to do a layer full check when saving it.
[ "Used", "to", "do", "a", "layer", "full", "check", "when", "saving", "it", "." ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1908-L1918
train
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
issue_post_delete
python
def issue_post_delete(instance, *args, **kwargs):
    """
    Used to reindex layers/services when an issue is removed from them.
    """
    LOGGER.debug('Re-adding layer/service to search engine index')
    if isinstance(instance.content_object, Service):
        if not settings.REGISTRY_SKIP_CELERY:
            index_service.delay(instance.content_object.id)
        else:
            index_service(instance.content_object.id)
    else:
        if not settings.REGISTRY_SKIP_CELERY:
            index_layer.delay(instance.content_object.id)
        else:
            index_layer(instance.content_object.id)
[ "def", "issue_post_delete", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "LOGGER", ".", "debug", "(", "'Re-adding layer/service to search engine index'", ")", "if", "isinstance", "(", "instance", ".", "content_object", ",", "Service", ")", ":", "if", "not", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "index_service", ".", "delay", "(", "instance", ".", "content_object", ".", "id", ")", "else", ":", "index_service", "(", "instance", ".", "content_object", ".", "id", ")", "else", ":", "if", "not", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "index_layer", ".", "delay", "(", "instance", ".", "content_object", ".", "id", ")", "else", ":", "index_layer", "(", "instance", ".", "content_object", ".", "id", ")" ]
Used to reindex layers/services when an issue is removed from them.
[ "Used", "to", "do", "reindex", "layers", "/", "services", "when", "a", "issue", "is", "removed", "form", "them", "." ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1921-L1935
train
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
Resource.get_checks_admin_reliability_warning_url
python
def get_checks_admin_reliability_warning_url(self):
    """
    When service Reliability is going down users should go to
    the check history to find problem causes.

    :return: admin url with check list for this instance
    """
    # TODO: cache this.
    path = self.get_checks_admin_url()
    content_type = ContentType.objects.get_for_model(self)
    params = "?content_type__id__exact={0}&q={1}&success__exact=0".format(
        content_type.id,
        self.id
    )
    url = path + params
    return url
[ "def", "get_checks_admin_reliability_warning_url", "(", "self", ")", ":", "# TODO: cache this.", "path", "=", "self", ".", "get_checks_admin_url", "(", ")", "content_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "self", ")", "params", "=", "\"?content_type__id__exact={0}&q={1}&success__exact=0\"", ".", "format", "(", "content_type", ".", "id", ",", "self", ".", "id", ")", "url", "=", "path", "+", "params", "return", "url" ]
When service Reliability is going down users should go to the check history to find problem causes. :return: admin url with check list for this instance
[ "When", "service", "Realiability", "is", "going", "down", "users", "should", "go", "to", "the", "the", "check", "history", "to", "find", "problem", "causes", ".", ":", "return", ":", "admin", "url", "with", "check", "list", "for", "this", "instance" ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L261-L275
train
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
Service.update_layers
python
def update_layers(self):
    """
    Update layers for a service.
    """
    signals.post_save.disconnect(layer_post_save, sender=Layer)
    try:
        LOGGER.debug('Updating layers for service id %s' % self.id)
        if self.type == 'OGC:WMS':
            update_layers_wms(self)
        elif self.type == 'OGC:WMTS':
            update_layers_wmts(self)
        elif self.type == 'ESRI:ArcGIS:MapServer':
            update_layers_esri_mapserver(self)
        elif self.type == 'ESRI:ArcGIS:ImageServer':
            update_layers_esri_imageserver(self)
        elif self.type == 'Hypermap:WorldMapLegacy':
            update_layers_wm_legacy(self)
        elif self.type == 'Hypermap:WorldMap':
            update_layers_geonode_wm(self)
        elif self.type == 'Hypermap:WARPER':
            update_layers_warper(self)
    except:
        LOGGER.error('Error updating layers for service %s' % self.uuid)
    signals.post_save.connect(layer_post_save, sender=Layer)
[ "def", "update_layers", "(", "self", ")", ":", "signals", ".", "post_save", ".", "disconnect", "(", "layer_post_save", ",", "sender", "=", "Layer", ")", "try", ":", "LOGGER", ".", "debug", "(", "'Updating layers for service id %s'", "%", "self", ".", "id", ")", "if", "self", ".", "type", "==", "'OGC:WMS'", ":", "update_layers_wms", "(", "self", ")", "elif", "self", ".", "type", "==", "'OGC:WMTS'", ":", "update_layers_wmts", "(", "self", ")", "elif", "self", ".", "type", "==", "'ESRI:ArcGIS:MapServer'", ":", "update_layers_esri_mapserver", "(", "self", ")", "elif", "self", ".", "type", "==", "'ESRI:ArcGIS:ImageServer'", ":", "update_layers_esri_imageserver", "(", "self", ")", "elif", "self", ".", "type", "==", "'Hypermap:WorldMapLegacy'", ":", "update_layers_wm_legacy", "(", "self", ")", "elif", "self", ".", "type", "==", "'Hypermap:WorldMap'", ":", "update_layers_geonode_wm", "(", "self", ")", "elif", "self", ".", "type", "==", "'Hypermap:WARPER'", ":", "update_layers_warper", "(", "self", ")", "except", ":", "LOGGER", ".", "error", "(", "'Error updating layers for service %s'", "%", "self", ".", "uuid", ")", "signals", ".", "post_save", ".", "connect", "(", "layer_post_save", ",", "sender", "=", "Layer", ")" ]
Update layers for a service.
[ "Update", "layers", "for", "a", "service", "." ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L309-L336
train
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
Service.check_available
python
def check_available(self):
    """
    Check for availability of a service and provide run metrics.
    """
    success = True
    start_time = datetime.datetime.utcnow()
    message = ''
    LOGGER.debug('Checking service id %s' % self.id)

    try:
        title = None
        abstract = None
        keywords = []
        wkt_geometry = None
        srs = '4326'
        if self.type == 'OGC:CSW':
            ows = CatalogueServiceWeb(self.url)
            title = ows.identification.title
            abstract = ows.identification.abstract
            keywords = ows.identification.keywords
        if self.type == 'OGC:WMS':
            ows = get_wms_version_negotiate(self.url)
            title = ows.identification.title
            abstract = ows.identification.abstract
            keywords = ows.identification.keywords
            for c in ows.contents:
                if ows.contents[c].parent is None:
                    wkt_geometry = bbox2wktpolygon(ows.contents[c].boundingBoxWGS84)
                    break
        if self.type == 'OGC:WMTS':
            ows = WebMapTileService(self.url)
            title = ows.identification.title
            abstract = ows.identification.abstract
            keywords = ows.identification.keywords
        if self.type == 'OSGeo:TMS':
            ows = TileMapService(self.url)
            title = ows.identification.title
            abstract = ows.identification.abstract
            keywords = ows.identification.keywords
        if self.type == 'ESRI:ArcGIS:MapServer':
            esri = ArcMapService(self.url)
            extent, srs = get_esri_extent(esri)
            title = esri.mapName
            if len(title) == 0:
                title = get_esri_service_name(self.url)
            wkt_geometry = bbox2wktpolygon([
                extent['xmin'],
                extent['ymin'],
                extent['xmax'],
                extent['ymax']
            ])
        if self.type == 'ESRI:ArcGIS:ImageServer':
            esri = ArcImageService(self.url)
            extent, srs = get_esri_extent(esri)
            title = esri._json_struct['name']
            if len(title) == 0:
                title = get_esri_service_name(self.url)
            wkt_geometry = bbox2wktpolygon([
                extent['xmin'],
                extent['ymin'],
                extent['xmax'],
                extent['ymax']
            ])
        if self.type == 'Hypermap:WorldMap':
            urllib2.urlopen(self.url)
        if self.type == 'Hypermap:WorldMapLegacy':
            urllib2.urlopen(self.url)
            title = 'Harvard WorldMap Legacy'
        if self.type == 'Hypermap:WARPER':
            urllib2.urlopen(self.url)

        # update title without raising a signal and recursion
        if title:
            self.title = title
            Service.objects.filter(id=self.id).update(title=title)
        if abstract:
            self.abstract = abstract
            Service.objects.filter(id=self.id).update(abstract=abstract)
        if keywords:
            for kw in keywords:
                # FIXME: persist keywords to Django model
                self.keywords.add(kw)
        if wkt_geometry:
            self.wkt_geometry = wkt_geometry
            Service.objects.filter(id=self.id).update(wkt_geometry=wkt_geometry)

        xml = create_metadata_record(
            identifier=self.id_string,
            source=self.url,
            links=[[self.type, self.url]],
            format=self.type,
            type='service',
            title=title,
            abstract=abstract,
            keywords=keywords,
            wkt_geometry=self.wkt_geometry,
            srs=srs
        )
        anytexts = gen_anytext(title, abstract, keywords)
        Service.objects.filter(id=self.id).update(anytext=anytexts, xml=xml, csw_type='service')
    except Exception, e:
        LOGGER.error(e, exc_info=True)
        message = str(e)
        success = False

    end_time = datetime.datetime.utcnow()
    delta = end_time - start_time
    response_time = '%s.%s' % (delta.seconds, delta.microseconds)

    check = Check(
        content_object=self,
        success=success,
        response_time=response_time,
        message=message
    )
    check.save()
    LOGGER.debug('Service checked in %s seconds, status is %s' % (response_time, success))
[ "def", "check_available", "(", "self", ")", ":", "success", "=", "True", "start_time", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "message", "=", "''", "LOGGER", ".", "debug", "(", "'Checking service id %s'", "%", "self", ".", "id", ")", "try", ":", "title", "=", "None", "abstract", "=", "None", "keywords", "=", "[", "]", "wkt_geometry", "=", "None", "srs", "=", "'4326'", "if", "self", ".", "type", "==", "'OGC:CSW'", ":", "ows", "=", "CatalogueServiceWeb", "(", "self", ".", "url", ")", "title", "=", "ows", ".", "identification", ".", "title", "abstract", "=", "ows", ".", "identification", ".", "abstract", "keywords", "=", "ows", ".", "identification", ".", "keywords", "if", "self", ".", "type", "==", "'OGC:WMS'", ":", "ows", "=", "get_wms_version_negotiate", "(", "self", ".", "url", ")", "title", "=", "ows", ".", "identification", ".", "title", "abstract", "=", "ows", ".", "identification", ".", "abstract", "keywords", "=", "ows", ".", "identification", ".", "keywords", "for", "c", "in", "ows", ".", "contents", ":", "if", "ows", ".", "contents", "[", "c", "]", ".", "parent", "is", "None", ":", "wkt_geometry", "=", "bbox2wktpolygon", "(", "ows", ".", "contents", "[", "c", "]", ".", "boundingBoxWGS84", ")", "break", "if", "self", ".", "type", "==", "'OGC:WMTS'", ":", "ows", "=", "WebMapTileService", "(", "self", ".", "url", ")", "title", "=", "ows", ".", "identification", ".", "title", "abstract", "=", "ows", ".", "identification", ".", "abstract", "keywords", "=", "ows", ".", "identification", ".", "keywords", "if", "self", ".", "type", "==", "'OSGeo:TMS'", ":", "ows", "=", "TileMapService", "(", "self", ".", "url", ")", "title", "=", "ows", ".", "identification", ".", "title", "abstract", "=", "ows", ".", "identification", ".", "abstract", "keywords", "=", "ows", ".", "identification", ".", "keywords", "if", "self", ".", "type", "==", "'ESRI:ArcGIS:MapServer'", ":", "esri", "=", "ArcMapService", "(", "self", ".", "url", ")", "extent", ",", "srs", "=", "get_esri_extent", "(", "esri", ")", "title", "=", "esri", ".", "mapName", "if", "len", "(", "title", ")", "==", "0", ":", "title", "=", "get_esri_service_name", "(", "self", ".", "url", ")", "wkt_geometry", "=", "bbox2wktpolygon", "(", "[", "extent", "[", "'xmin'", "]", ",", "extent", "[", "'ymin'", "]", ",", "extent", "[", "'xmax'", "]", ",", "extent", "[", "'ymax'", "]", "]", ")", "if", "self", ".", "type", "==", "'ESRI:ArcGIS:ImageServer'", ":", "esri", "=", "ArcImageService", "(", "self", ".", "url", ")", "extent", ",", "srs", "=", "get_esri_extent", "(", "esri", ")", "title", "=", "esri", ".", "_json_struct", "[", "'name'", "]", "if", "len", "(", "title", ")", "==", "0", ":", "title", "=", "get_esri_service_name", "(", "self", ".", "url", ")", "wkt_geometry", "=", "bbox2wktpolygon", "(", "[", "extent", "[", "'xmin'", "]", ",", "extent", "[", "'ymin'", "]", ",", "extent", "[", "'xmax'", "]", ",", "extent", "[", "'ymax'", "]", "]", ")", "if", "self", ".", "type", "==", "'Hypermap:WorldMap'", ":", "urllib2", ".", "urlopen", "(", "self", ".", "url", ")", "if", "self", ".", "type", "==", "'Hypermap:WorldMapLegacy'", ":", "urllib2", ".", "urlopen", "(", "self", ".", "url", ")", "title", "=", "'Harvard WorldMap Legacy'", "if", "self", ".", "type", "==", "'Hypermap:WARPER'", ":", "urllib2", ".", "urlopen", "(", "self", ".", "url", ")", "# update title without raising a signal and recursion", "if", "title", ":", "self", ".", "title", "=", "title", "Service", ".", "objects", ".", "filter", "(", "id", "=", "self", ".", "id", ")", 
".", "update", "(", "title", "=", "title", ")", "if", "abstract", ":", "self", ".", "abstract", "=", "abstract", "Service", ".", "objects", ".", "filter", "(", "id", "=", "self", ".", "id", ")", ".", "update", "(", "abstract", "=", "abstract", ")", "if", "keywords", ":", "for", "kw", "in", "keywords", ":", "# FIXME: persist keywords to Django model", "self", ".", "keywords", ".", "add", "(", "kw", ")", "if", "wkt_geometry", ":", "self", ".", "wkt_geometry", "=", "wkt_geometry", "Service", ".", "objects", ".", "filter", "(", "id", "=", "self", ".", "id", ")", ".", "update", "(", "wkt_geometry", "=", "wkt_geometry", ")", "xml", "=", "create_metadata_record", "(", "identifier", "=", "self", ".", "id_string", ",", "source", "=", "self", ".", "url", ",", "links", "=", "[", "[", "self", ".", "type", ",", "self", ".", "url", "]", "]", ",", "format", "=", "self", ".", "type", ",", "type", "=", "'service'", ",", "title", "=", "title", ",", "abstract", "=", "abstract", ",", "keywords", "=", "keywords", ",", "wkt_geometry", "=", "self", ".", "wkt_geometry", ",", "srs", "=", "srs", ")", "anytexts", "=", "gen_anytext", "(", "title", ",", "abstract", ",", "keywords", ")", "Service", ".", "objects", ".", "filter", "(", "id", "=", "self", ".", "id", ")", ".", "update", "(", "anytext", "=", "anytexts", ",", "xml", "=", "xml", ",", "csw_type", "=", "'service'", ")", "except", "Exception", ",", "e", ":", "LOGGER", ".", "error", "(", "e", ",", "exc_info", "=", "True", ")", "message", "=", "str", "(", "e", ")", "success", "=", "False", "end_time", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "delta", "=", "end_time", "-", "start_time", "response_time", "=", "'%s.%s'", "%", "(", "delta", ".", "seconds", ",", "delta", ".", "microseconds", ")", "check", "=", "Check", "(", "content_object", "=", "self", ",", "success", "=", "success", ",", "response_time", "=", "response_time", ",", "message", "=", "message", ")", "check", ".", "save", "(", ")", "LOGGER", ".", "debug", "(", "'Service checked in %s seconds, status is %s'", "%", "(", "response_time", ",", "success", ")", ")" ]
Check for availability of a service and provide run metrics.
[ "Check", "for", "availability", "of", "a", "service", "and", "provide", "run", "metrics", "." ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L338-L453
train
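One detail worth noting in check_available: response_time is built as '%s.%s' % (delta.seconds, delta.microseconds), which drops the zero padding of the microseconds component, so a delta of 2 seconds and 5 microseconds renders as "2.5" rather than "2.000005". A corrected sketch (editorial, not part of the recorded function):

import datetime

delta = datetime.timedelta(seconds=2, microseconds=5)
bad = '%s.%s' % (delta.seconds, delta.microseconds)     # '2.5' -- misleading
good = '%s.%06d' % (delta.seconds, delta.microseconds)  # '2.000005'
also = '%.6f' % delta.total_seconds()                   # '2.000005'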
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
Service.update_validity
python
def update_validity(self):
    """
    Update validity of a service.
    """
    # WM is always valid
    if self.type == 'Hypermap:WorldMap':
        return

    signals.post_save.disconnect(service_post_save, sender=Service)

    try:
        # some service now must be considered invalid:
        # 0. any service not exposed in SUPPORTED_SRS
        # 1. any WMTS service
        # 2. all of the NOAA layers

        is_valid = True

        # 0. any service not exposed in SUPPORTED_SRS
        if self.srs.filter(code__in=SUPPORTED_SRS).count() == 0:
            LOGGER.debug('Service with id %s is marked invalid because in not exposed in SUPPORTED_SRS' % self.id)
            is_valid = False

        # 1. any WMTS service
        if self.type == 'OGC:WMTS':
            LOGGER.debug('Service with id %s is marked invalid because it is of type OGC:WMTS' % self.id)
            is_valid = False

        # 2. all of the NOAA layers
        if 'noaa' in self.url.lower():
            LOGGER.debug('Service with id %s is marked invalid because it is from NOAA' % self.id)
            is_valid = False

        # now we save the service
        self.is_valid = is_valid
        self.save()
    except:
        LOGGER.error('Error updating validity of the service!')

    signals.post_save.connect(service_post_save, sender=Service)
[ "def", "update_validity", "(", "self", ")", ":", "# WM is always valid", "if", "self", ".", "type", "==", "'Hypermap:WorldMap'", ":", "return", "signals", ".", "post_save", ".", "disconnect", "(", "service_post_save", ",", "sender", "=", "Service", ")", "try", ":", "# some service now must be considered invalid:", "# 0. any service not exposed in SUPPORTED_SRS", "# 1. any WMTS service", "# 2. all of the NOAA layers", "is_valid", "=", "True", "# 0. any service not exposed in SUPPORTED_SRS", "if", "self", ".", "srs", ".", "filter", "(", "code__in", "=", "SUPPORTED_SRS", ")", ".", "count", "(", ")", "==", "0", ":", "LOGGER", ".", "debug", "(", "'Service with id %s is marked invalid because in not exposed in SUPPORTED_SRS'", "%", "self", ".", "id", ")", "is_valid", "=", "False", "# 1. any WMTS service", "if", "self", ".", "type", "==", "'OGC:WMTS'", ":", "LOGGER", ".", "debug", "(", "'Service with id %s is marked invalid because it is of type OGC:WMTS'", "%", "self", ".", "id", ")", "is_valid", "=", "False", "# 2. all of the NOAA layers", "if", "'noaa'", "in", "self", ".", "url", ".", "lower", "(", ")", ":", "LOGGER", ".", "debug", "(", "'Service with id %s is marked invalid because it is from NOAA'", "%", "self", ".", "id", ")", "is_valid", "=", "False", "# now we save the service", "self", ".", "is_valid", "=", "is_valid", "self", ".", "save", "(", ")", "except", ":", "LOGGER", ".", "error", "(", "'Error updating validity of the service!'", ")", "signals", ".", "post_save", ".", "connect", "(", "service_post_save", ",", "sender", "=", "Service", ")" ]
Update validity of a service.
[ "Update", "validity", "of", "a", "service", "." ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L455-L497
train
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
Catalog.get_search_url
python
def get_search_url(self):
    """
    resolve the search url no matter if local or remote.
    :return: url or exception
    """
    if self.is_remote:
        return self.url
    return reverse('search_api', args=[self.slug])
[ "def", "get_search_url", "(", "self", ")", ":", "if", "self", ".", "is_remote", ":", "return", "self", ".", "url", "return", "reverse", "(", "'search_api'", ",", "args", "=", "[", "self", ".", "slug", "]", ")" ]
resolve the search url no matter if local or remote. :return: url or exception
[ "resolve", "the", "search", "url", "no", "matter", "if", "local", "or", "remote", ".", ":", "return", ":", "url", "or", "exception" ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L525-L534
train
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
Layer.get_url_endpoint
python
def get_url_endpoint(self):
    """
    Returns the Hypermap endpoint for a layer.
    This endpoint will be the WMTS MapProxy endpoint, only for WM we use the original endpoint.
    """
    endpoint = self.url
    if self.type not in ('Hypermap:WorldMap',):
        endpoint = 'registry/%s/layer/%s/map/wmts/1.0.0/WMTSCapabilities.xml' % (
            self.catalog.slug,
            self.id
        )
    return endpoint
[ "def", "get_url_endpoint", "(", "self", ")", ":", "endpoint", "=", "self", ".", "url", "if", "self", ".", "type", "not", "in", "(", "'Hypermap:WorldMap'", ",", ")", ":", "endpoint", "=", "'registry/%s/layer/%s/map/wmts/1.0.0/WMTSCapabilities.xml'", "%", "(", "self", ".", "catalog", ".", "slug", ",", "self", ".", "id", ")", "return", "endpoint" ]
Returns the Hypermap endpoint for a layer. This endpoint will be the WMTS MapProxy endpoint, only for WM we use the original endpoint.
[ "Returns", "the", "Hypermap", "endpoint", "for", "a", "layer", ".", "This", "endpoint", "will", "be", "the", "WMTS", "MapProxy", "endpoint", "only", "for", "WM", "we", "use", "the", "original", "endpoint", "." ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L573-L584
train
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
Layer.check_available
python
def check_available(self):
    """
    Check for availability of a layer and provide run metrics.
    """
    success = True
    start_time = datetime.datetime.utcnow()
    message = ''
    LOGGER.debug('Checking layer id %s' % self.id)

    signals.post_save.disconnect(layer_post_save, sender=Layer)

    try:
        self.update_thumbnail()
    except ValueError, err:
        # caused by update_thumbnail()
        # self.href is empty in arcserver.ExportMap
        if str(err).startswith("unknown url type:"):
            LOGGER.debug('Thumbnail can not be updated: %s' % str(err))
    except Exception, err:
        message = str(err)
        success = False

    signals.post_save.connect(layer_post_save, sender=Layer)

    end_time = datetime.datetime.utcnow()
    delta = end_time - start_time
    response_time = '%s.%s' % (delta.seconds, delta.microseconds)

    check = Check(
        content_object=self,
        success=success,
        response_time=response_time,
        message=message
    )
    check.save()
    LOGGER.debug('Layer checked in %s seconds, status is %s' % (response_time, success))
    return success, message
[ "def", "check_available", "(", "self", ")", ":", "success", "=", "True", "start_time", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "message", "=", "''", "LOGGER", ".", "debug", "(", "'Checking layer id %s'", "%", "self", ".", "id", ")", "signals", ".", "post_save", ".", "disconnect", "(", "layer_post_save", ",", "sender", "=", "Layer", ")", "try", ":", "self", ".", "update_thumbnail", "(", ")", "except", "ValueError", ",", "err", ":", "# caused by update_thumbnail()", "# self.href is empty in arcserver.ExportMap", "if", "str", "(", "err", ")", ".", "startswith", "(", "\"unknown url type:\"", ")", ":", "LOGGER", ".", "debug", "(", "'Thumbnail can not be updated: %s'", "%", "str", "(", "err", ")", ")", "except", "Exception", ",", "err", ":", "message", "=", "str", "(", "err", ")", "success", "=", "False", "signals", ".", "post_save", ".", "connect", "(", "layer_post_save", ",", "sender", "=", "Layer", ")", "end_time", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "delta", "=", "end_time", "-", "start_time", "response_time", "=", "'%s.%s'", "%", "(", "delta", ".", "seconds", ",", "delta", ".", "microseconds", ")", "check", "=", "Check", "(", "content_object", "=", "self", ",", "success", "=", "success", ",", "response_time", "=", "response_time", ",", "message", "=", "message", ")", "check", ".", "save", "(", ")", "LOGGER", ".", "debug", "(", "'Layer checked in %s seconds, status is %s'", "%", "(", "response_time", ",", "success", ")", ")", "return", "success", ",", "message" ]
Check for availability of a layer and provide run metrics.
[ "Check", "for", "availability", "of", "a", "layer", "and", "provide", "run", "metrics", "." ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L800-L837
train
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
Layer.registry_tags
python
def registry_tags(self, query_string='{http://gis.harvard.edu/HHypermap/registry/0.1}property'):
    """
    Get extra metadata tagged with a registry keyword.
    For example:
    <registry:property name="nomination/serviceOwner" value="True"/>
    <registry:property name="nominator/name" value="Random Person"/>
    <registry:property name="nominator/email" value="contact@example.com"/>
    <registry:property name="lastmodifiedby" value="2016-10-23"/>
    <registry:property name="updateFreq" value="as needed"/>
    <registry:property name="mission" value="RIO"/>
    <registry:property name="authentication" value="Open"/>
    <registry:property name="crisis" value="False"/>
    <registry:property name="intlAgreement/multi" value="none"/>
    <registry:property name="intlAgreement/bilateral" value="none"/>
    <registry:property name="classificationRecord/classification" value="Unclassified"/>
    <registry:property name="classificationData/classification" value="Unclassified"/>
    <registry:property name="serviceName/classification/classification" value="Unclassified"/>
    <registry:property name="serviceName/classification/classifiedBy" value="TNT"/>
    <registry:property name="description/classification/classification" value="Unclassified"/>
    <registry:property name="description/classification/classifiedBy" value="TNT"/>
    <registry:property name="ContactInformation/Primary/owner" value="Help Desk"/>
    <registry:property name="ContactInformation/Primary/organization" value="Three Letter One"/>
    <registry:property name="ContactInformation/Email" value="contact@example.com"/>
    <registry:property name="ContactInformation/Phone" value="Toll-free: 1 800 555-5555"/>
    <registry:property name="license/restrictions" value="none"/>
    <registry:property name="license/copyright" value="Private. For testing purposes."/>
    """
    from pycsw.core.etree import etree
    parsed = etree.fromstring(self.xml, etree.XMLParser(resolve_entities=False))
    registry_tags = parsed.findall(query_string)
    registry_dict = {}
    for tag in registry_tags:
        try:
            registry_dict[tag.attrib['name']] = tag.attrib['value']
        except Exception, e:
            LOGGER.error(e, exc_info=True)
    return registry_dict
[ "def", "registry_tags", "(", "self", ",", "query_string", "=", "'{http://gis.harvard.edu/HHypermap/registry/0.1}property'", ")", ":", "from", "pycsw", ".", "core", ".", "etree", "import", "etree", "parsed", "=", "etree", ".", "fromstring", "(", "self", ".", "xml", ",", "etree", ".", "XMLParser", "(", "resolve_entities", "=", "False", ")", ")", "registry_tags", "=", "parsed", ".", "findall", "(", "query_string", ")", "registry_dict", "=", "{", "}", "for", "tag", "in", "registry_tags", ":", "try", ":", "registry_dict", "[", "tag", ".", "attrib", "[", "'name'", "]", "]", "=", "tag", ".", "attrib", "[", "'value'", "]", "except", "Exception", ",", "e", ":", "LOGGER", ".", "error", "(", "e", ",", "exc_info", "=", "True", ")", "return", "registry_dict" ]
Get extra metadata tagged with a registry keyword. For example: <registry:property name="nomination/serviceOwner" value="True"/> <registry:property name="nominator/name" value="Random Person"/> <registry:property name="nominator/email" value="contact@example.com"/> <registry:property name="lastmodifiedby" value="2016-10-23"/> <registry:property name="updateFreq" value="as needed"/> <registry:property name="mission" value="RIO"/> <registry:property name="authentication" value="Open"/> <registry:property name="crisis" value="False"/> <registry:property name="intlAgreement/multi" value="none"/> <registry:property name="intlAgreement/bilateral" value="none"/> <registry:property name="classificationRecord/classification" value="Unclassified"/> <registry:property name="classificationData/classification" value="Unclassified"/> <registry:property name="serviceName/classification/classification" value="Unclassified"/> <registry:property name="serviceName/classification/classifiedBy" value="TNT"/> <registry:property name="description/classification/classification" value="Unclassified"/> <registry:property name="description/classification/classifiedBy" value="TNT"/> <registry:property name="ContactInformation/Primary/owner" value="Help Desk"/> <registry:property name="ContactInformation/Primary/organization" value="Three Letter One"/> <registry:property name="ContactInformation/Email" value="contact@example.com"/> <registry:property name="ContactInformation/Phone" value="Toll-free: 1 800 555-5555"/> <registry:property name="license/restrictions" value="none"/> <registry:property name="license/copyright" value="Private. For testing purposes."/>
[ "Get", "extra", "metadata", "tagged", "with", "a", "registry", "keyword", ".", "For", "example", ":", "<registry", ":", "property", "name", "=", "nomination", "/", "serviceOwner", "value", "=", "True", "/", ">", "<registry", ":", "property", "name", "=", "nominator", "/", "name", "value", "=", "Random", "Person", "/", ">", "<registry", ":", "property", "name", "=", "nominator", "/", "email", "value", "=", "contact@example", ".", "com", "/", ">", "<registry", ":", "property", "name", "=", "lastmodifiedby", "value", "=", "2016", "-", "10", "-", "23", "/", ">", "<registry", ":", "property", "name", "=", "updateFreq", "value", "=", "as", "needed", "/", ">", "<registry", ":", "property", "name", "=", "mission", "value", "=", "RIO", "/", ">", "<registry", ":", "property", "name", "=", "authentication", "value", "=", "Open", "/", ">", "<registry", ":", "property", "name", "=", "crisis", "value", "=", "False", "/", ">", "<registry", ":", "property", "name", "=", "intlAgreement", "/", "multi", "value", "=", "none", "/", ">", "<registry", ":", "property", "name", "=", "intlAgreement", "/", "bilateral", "value", "=", "none", "/", ">", "<registry", ":", "property", "name", "=", "classificationRecord", "/", "classification", "value", "=", "Unclassified", "/", ">", "<registry", ":", "property", "name", "=", "classificationData", "/", "classification", "value", "=", "Unclassified", "/", ">", "<registry", ":", "property", "name", "=", "serviceName", "/", "classification", "/", "classification", "value", "=", "Unclassified", "/", ">", "<registry", ":", "property", "name", "=", "serviceName", "/", "classification", "/", "classifiedBy", "value", "=", "TNT", "/", ">", "<registry", ":", "property", "name", "=", "description", "/", "classification", "/", "classification", "value", "=", "Unclassified", "/", ">", "<registry", ":", "property", "name", "=", "description", "/", "classification", "/", "classifiedBy", "value", "=", "TNT", "/", ">", "<registry", ":", "property", "name", "=", "ContactInformation", "/", "Primary", "/", "owner", "value", "=", "Help", "Desk", "/", ">", "<registry", ":", "property", "name", "=", "ContactInformation", "/", "Primary", "/", "organization", "value", "=", "Three", "Letter", "One", "/", ">", "<registry", ":", "property", "name", "=", "ContactInformation", "/", "Email", "value", "=", "contact@example", ".", "com", "/", ">", "<registry", ":", "property", "name", "=", "ContactInformation", "/", "Phone", "value", "=", "Toll", "-", "free", ":", "1", "800", "555", "-", "5555", "/", ">", "<registry", ":", "property", "name", "=", "license", "/", "restrictions", "value", "=", "none", "/", ">", "<registry", ":", "property", "name", "=", "license", "/", "copyright", "value", "=", "Private", ".", "For", "testing", "purposes", ".", "/", ">" ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L839-L879
train
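The sample properties in the docstring show what registry_tags flattens into a plain dict. A standalone equivalent using only the standard library (the pycsw etree import in the recorded code is a thin wrapper; this sketch assumes plain ElementTree suffices for well-formed input):

import xml.etree.ElementTree as etree

QUERY = '{http://gis.harvard.edu/HHypermap/registry/0.1}property'
xml = ('<record xmlns:registry="http://gis.harvard.edu/HHypermap/registry/0.1">'
       '<registry:property name="mission" value="RIO"/>'
       '<registry:property name="authentication" value="Open"/>'
       '</record>')
parsed = etree.fromstring(xml)
registry_dict = {tag.attrib['name']: tag.attrib['value']
                 for tag in parsed.findall(QUERY)}
print(registry_dict)  # {'mission': 'RIO', 'authentication': 'Open'}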
sethmlarson/trytravis
trytravis.py
_input_github_repo
python
def _input_github_repo(url=None):
    """ Grabs input from the user and saves it as their trytravis target repo """
    if url is None:
        url = user_input('Input the URL of the GitHub repository '
                         'to use as a `trytravis` repository: ')
    url = url.strip()

    http_match = _HTTPS_REGEX.match(url)
    ssh_match = _SSH_REGEX.match(url)
    if not http_match and not ssh_match:
        raise RuntimeError('That URL doesn\'t look like a valid '
                           'GitHub URL. We expect something '
                           'of the form: `https://github.com/[USERNAME]/'
                           '[REPOSITORY]` or `ssh://git@github.com/'
                           '[USERNAME]/[REPOSITORY]')

    # Make sure that the user actually made a new repository on GitHub.
    if http_match:
        _, name = http_match.groups()
    else:
        _, name = ssh_match.groups()
    if 'trytravis' not in name:
        raise RuntimeError('You must have `trytravis` in the name of your '
                           'repository. This is a security feature to reduce '
                           'chances of running git push -f on a repository '
                           'you don\'t mean to.')

    # Make sure that the user actually wants to use this repository.
    accept = user_input('Remember that `trytravis` will make commits on your '
                        'behalf to `%s`. Are you sure you wish to use this '
                        'repository? Type `y` or `yes` to accept: ' % url)
    if accept.lower() not in ['y', 'yes']:
        raise RuntimeError('Operation aborted by user.')

    if not os.path.isdir(config_dir):
        os.makedirs(config_dir)
    with open(os.path.join(config_dir, 'repo'), 'w+') as f:
        f.truncate()
        f.write(url)

    print('Repository saved successfully.')
[ "def", "_input_github_repo", "(", "url", "=", "None", ")", ":", "if", "url", "is", "None", ":", "url", "=", "user_input", "(", "'Input the URL of the GitHub repository '", "'to use as a `trytravis` repository: '", ")", "url", "=", "url", ".", "strip", "(", ")", "http_match", "=", "_HTTPS_REGEX", ".", "match", "(", "url", ")", "ssh_match", "=", "_SSH_REGEX", ".", "match", "(", "url", ")", "if", "not", "http_match", "and", "not", "ssh_match", ":", "raise", "RuntimeError", "(", "'That URL doesn\\'t look like a valid '", "'GitHub URL. We expect something '", "'of the form: `https://github.com/[USERNAME]/'", "'[REPOSITORY]` or `ssh://git@github.com/'", "'[USERNAME]/[REPOSITORY]'", ")", "# Make sure that the user actually made a new repository on GitHub.", "if", "http_match", ":", "_", ",", "name", "=", "http_match", ".", "groups", "(", ")", "else", ":", "_", ",", "name", "=", "ssh_match", ".", "groups", "(", ")", "if", "'trytravis'", "not", "in", "name", ":", "raise", "RuntimeError", "(", "'You must have `trytravis` in the name of your '", "'repository. This is a security feature to reduce '", "'chances of running git push -f on a repository '", "'you don\\'t mean to.'", ")", "# Make sure that the user actually wants to use this repository.", "accept", "=", "user_input", "(", "'Remember that `trytravis` will make commits on your '", "'behalf to `%s`. Are you sure you wish to use this '", "'repository? Type `y` or `yes` to accept: '", "%", "url", ")", "if", "accept", ".", "lower", "(", ")", "not", "in", "[", "'y'", ",", "'yes'", "]", ":", "raise", "RuntimeError", "(", "'Operation aborted by user.'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "config_dir", ")", ":", "os", ".", "makedirs", "(", "config_dir", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "config_dir", ",", "'repo'", ")", ",", "'w+'", ")", "as", "f", ":", "f", ".", "truncate", "(", ")", "f", ".", "write", "(", "url", ")", "print", "(", "'Repository saved successfully.'", ")" ]
Grabs input from the user and saves it as their trytravis target repo
[ "Grabs", "input", "from", "the", "user", "and", "saves", "it", "as", "their", "trytravis", "target", "repo" ]
d92ed708fe71d8db93a6df8077d23ee39ec0364e
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L86-L125
train
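_input_github_repo validates against module-level _HTTPS_REGEX and _SSH_REGEX patterns that fall outside this excerpt. For illustration only, patterns of roughly the shape the function implies (two capture groups, with the repository name second) could look like:

import re

# Hypothetical reconstructions; the real patterns in trytravis.py
# are not shown in this excerpt.
_HTTPS_REGEX = re.compile(r'^https://github\.com/([^/]+)/([^/]+?)(?:\.git)?/?$')
_SSH_REGEX = re.compile(r'^ssh://git@github\.com/([^/]+)/([^/]+?)(?:\.git)?/?$')

match = _HTTPS_REGEX.match('https://github.com/user/trytravis-target')
print(match.groups())  # ('user', 'trytravis-target')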
sethmlarson/trytravis
trytravis.py
_load_github_repo
python
def _load_github_repo():
    """ Loads the GitHub repository from the users config. """
    if 'TRAVIS' in os.environ:
        raise RuntimeError('Detected that we are running in Travis. '
                           'Stopping to prevent infinite loops.')
    try:
        with open(os.path.join(config_dir, 'repo'), 'r') as f:
            return f.read()
    except (OSError, IOError):
        raise RuntimeError('Could not find your repository. '
                           'Have you ran `trytravis --repo`?')
[ "def", "_load_github_repo", "(", ")", ":", "if", "'TRAVIS'", "in", "os", ".", "environ", ":", "raise", "RuntimeError", "(", "'Detected that we are running in Travis. '", "'Stopping to prevent infinite loops.'", ")", "try", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "config_dir", ",", "'repo'", ")", ",", "'r'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", "except", "(", "OSError", ",", "IOError", ")", ":", "raise", "RuntimeError", "(", "'Could not find your repository. '", "'Have you ran `trytravis --repo`?'", ")" ]
Loads the GitHub repository from the users config.
[ "Loads", "the", "GitHub", "repository", "from", "the", "users", "config", "." ]
d92ed708fe71d8db93a6df8077d23ee39ec0364e
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L128-L138
train
sethmlarson/trytravis
trytravis.py
_submit_changes_to_github_repo
python
def _submit_changes_to_github_repo(path, url):
    """ Temporarily commits local changes and submits them to the
    GitHub repository that the user has specified. Then reverts
    the changes to the git repository if a commit was necessary. """
    try:
        repo = git.Repo(path)
    except Exception:
        raise RuntimeError('Couldn\'t locate a repository at `%s`.' % path)
    commited = False
    try:
        try:
            repo.delete_remote('trytravis')
        except Exception:
            pass

        print('Adding a temporary remote to '
              '`%s`...' % url)
        remote = repo.create_remote('trytravis', url)

        print('Adding all local changes...')
        repo.git.add('--all')
        try:
            print('Committing local changes...')
            timestamp = datetime.datetime.now().isoformat()
            repo.git.commit(m='trytravis-' + timestamp)
            commited = True
        except git.exc.GitCommandError as e:
            if 'nothing to commit' in str(e):
                commited = False
            else:
                raise

        commit = repo.head.commit.hexsha
        committed_at = repo.head.commit.committed_datetime

        print('Pushing to `trytravis` remote...')
        remote.push(force=True)
    finally:
        if commited:
            print('Reverting to old state...')
            repo.git.reset('HEAD^')
        try:
            repo.delete_remote('trytravis')
        except Exception:
            pass
    return commit, committed_at
[ "def", "_submit_changes_to_github_repo", "(", "path", ",", "url", ")", ":", "try", ":", "repo", "=", "git", ".", "Repo", "(", "path", ")", "except", "Exception", ":", "raise", "RuntimeError", "(", "'Couldn\\'t locate a repository at `%s`.'", "%", "path", ")", "commited", "=", "False", "try", ":", "try", ":", "repo", ".", "delete_remote", "(", "'trytravis'", ")", "except", "Exception", ":", "pass", "print", "(", "'Adding a temporary remote to '", "'`%s`...'", "%", "url", ")", "remote", "=", "repo", ".", "create_remote", "(", "'trytravis'", ",", "url", ")", "print", "(", "'Adding all local changes...'", ")", "repo", ".", "git", ".", "add", "(", "'--all'", ")", "try", ":", "print", "(", "'Committing local changes...'", ")", "timestamp", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "repo", ".", "git", ".", "commit", "(", "m", "=", "'trytravis-'", "+", "timestamp", ")", "commited", "=", "True", "except", "git", ".", "exc", ".", "GitCommandError", "as", "e", ":", "if", "'nothing to commit'", "in", "str", "(", "e", ")", ":", "commited", "=", "False", "else", ":", "raise", "commit", "=", "repo", ".", "head", ".", "commit", ".", "hexsha", "committed_at", "=", "repo", ".", "head", ".", "commit", ".", "committed_datetime", "print", "(", "'Pushing to `trytravis` remote...'", ")", "remote", ".", "push", "(", "force", "=", "True", ")", "finally", ":", "if", "commited", ":", "print", "(", "'Reverting to old state...'", ")", "repo", ".", "git", ".", "reset", "(", "'HEAD^'", ")", "try", ":", "repo", ".", "delete_remote", "(", "'trytravis'", ")", "except", "Exception", ":", "pass", "return", "commit", ",", "committed_at" ]
Temporarily commits local changes and submits them to the GitHub repository that the user has specified. Then reverts the changes to the git repository if a commit was necessary.
[ "Temporarily", "commits", "local", "changes", "and", "submits", "them", "to", "the", "GitHub", "repository", "that", "the", "user", "has", "specified", ".", "Then", "reverts", "the", "changes", "to", "the", "git", "repository", "if", "a", "commit", "was", "necessary", "." ]
d92ed708fe71d8db93a6df8077d23ee39ec0364e
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L141-L185
train
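A minimal sketch of the temporary-commit-and-push pattern the record above relies on, using only GitPython calls that appear in the recorded code; the repository path and remote URL are hypothetical placeholders, not values from this record.

import git

repo = git.Repo('/tmp/demo')                        # hypothetical local repo
remote = repo.create_remote('scratch', 'git@github.com:user/scratch.git')
repo.git.add('--all')
repo.git.commit(m='scratch-commit')                 # temporary commit
remote.push(force=True)                             # force-push to the scratch remote
repo.git.reset('HEAD^')                             # drop the commit, keep the changes
repo.delete_remote('scratch')                       # clean up the temporary remote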
sethmlarson/trytravis
trytravis.py
_wait_for_travis_build
def _wait_for_travis_build(url, commit, committed_at): """ Waits for a Travis build to appear with the given commit SHA """ print('Waiting for a Travis build to appear ' 'for `%s` after `%s`...' % (commit, committed_at)) import requests slug = _slug_from_url(url) start_time = time.time() build_id = None while time.time() - start_time < 60: with requests.get('https://api.travis-ci.org/repos/%s/builds' % slug, headers=_travis_headers()) as r: if not r.ok: raise RuntimeError('Could not reach the Travis API ' 'endpoint. Additional information: ' '%s' % str(r.content)) # Search through all commits and builds to find our build. commit_to_sha = {} json = r.json() for travis_commit in sorted(json['commits'], key=lambda x: x['committed_at']): travis_committed_at = datetime.datetime.strptime( travis_commit['committed_at'], '%Y-%m-%dT%H:%M:%SZ' ).replace(tzinfo=utc) if travis_committed_at < committed_at: continue commit_to_sha[travis_commit['id']] = travis_commit['sha'] for build in json['builds']: if (build['commit_id'] in commit_to_sha and commit_to_sha[build['commit_id']] == commit): build_id = build['id'] print('Travis build id: `%d`' % build_id) print('Travis build URL: `https://travis-ci.org/' '%s/builds/%d`' % (slug, build_id)) if build_id is not None: break time.sleep(3.0) else: raise RuntimeError('Timed out while waiting for a Travis build ' 'to start. Is Travis configured for `%s`?' % url) return build_id
python
def _wait_for_travis_build(url, commit, committed_at): """ Waits for a Travis build to appear with the given commit SHA """ print('Waiting for a Travis build to appear ' 'for `%s` after `%s`...' % (commit, committed_at)) import requests slug = _slug_from_url(url) start_time = time.time() build_id = None while time.time() - start_time < 60: with requests.get('https://api.travis-ci.org/repos/%s/builds' % slug, headers=_travis_headers()) as r: if not r.ok: raise RuntimeError('Could not reach the Travis API ' 'endpoint. Additional information: ' '%s' % str(r.content)) # Search through all commits and builds to find our build. commit_to_sha = {} json = r.json() for travis_commit in sorted(json['commits'], key=lambda x: x['committed_at']): travis_committed_at = datetime.datetime.strptime( travis_commit['committed_at'], '%Y-%m-%dT%H:%M:%SZ' ).replace(tzinfo=utc) if travis_committed_at < committed_at: continue commit_to_sha[travis_commit['id']] = travis_commit['sha'] for build in json['builds']: if (build['commit_id'] in commit_to_sha and commit_to_sha[build['commit_id']] == commit): build_id = build['id'] print('Travis build id: `%d`' % build_id) print('Travis build URL: `https://travis-ci.org/' '%s/builds/%d`' % (slug, build_id)) if build_id is not None: break time.sleep(3.0) else: raise RuntimeError('Timed out while waiting for a Travis build ' 'to start. Is Travis configured for `%s`?' % url) return build_id
[ "def", "_wait_for_travis_build", "(", "url", ",", "commit", ",", "committed_at", ")", ":", "print", "(", "'Waiting for a Travis build to appear '", "'for `%s` after `%s`...'", "%", "(", "commit", ",", "committed_at", ")", ")", "import", "requests", "slug", "=", "_slug_from_url", "(", "url", ")", "start_time", "=", "time", ".", "time", "(", ")", "build_id", "=", "None", "while", "time", ".", "time", "(", ")", "-", "start_time", "<", "60", ":", "with", "requests", ".", "get", "(", "'https://api.travis-ci.org/repos/%s/builds'", "%", "slug", ",", "headers", "=", "_travis_headers", "(", ")", ")", "as", "r", ":", "if", "not", "r", ".", "ok", ":", "raise", "RuntimeError", "(", "'Could not reach the Travis API '", "'endpoint. Additional information: '", "'%s'", "%", "str", "(", "r", ".", "content", ")", ")", "# Search through all commits and builds to find our build.", "commit_to_sha", "=", "{", "}", "json", "=", "r", ".", "json", "(", ")", "for", "travis_commit", "in", "sorted", "(", "json", "[", "'commits'", "]", ",", "key", "=", "lambda", "x", ":", "x", "[", "'committed_at'", "]", ")", ":", "travis_committed_at", "=", "datetime", ".", "datetime", ".", "strptime", "(", "travis_commit", "[", "'committed_at'", "]", ",", "'%Y-%m-%dT%H:%M:%SZ'", ")", ".", "replace", "(", "tzinfo", "=", "utc", ")", "if", "travis_committed_at", "<", "committed_at", ":", "continue", "commit_to_sha", "[", "travis_commit", "[", "'id'", "]", "]", "=", "travis_commit", "[", "'sha'", "]", "for", "build", "in", "json", "[", "'builds'", "]", ":", "if", "(", "build", "[", "'commit_id'", "]", "in", "commit_to_sha", "and", "commit_to_sha", "[", "build", "[", "'commit_id'", "]", "]", "==", "commit", ")", ":", "build_id", "=", "build", "[", "'id'", "]", "print", "(", "'Travis build id: `%d`'", "%", "build_id", ")", "print", "(", "'Travis build URL: `https://travis-ci.org/'", "'%s/builds/%d`'", "%", "(", "slug", ",", "build_id", ")", ")", "if", "build_id", "is", "not", "None", ":", "break", "time", ".", "sleep", "(", "3.0", ")", "else", ":", "raise", "RuntimeError", "(", "'Timed out while waiting for a Travis build '", "'to start. Is Travis configured for `%s`?'", "%", "url", ")", "return", "build_id" ]
Waits for a Travis build to appear with the given commit SHA
[ "Waits", "for", "a", "Travis", "build", "to", "appear", "with", "the", "given", "commit", "SHA" ]
d92ed708fe71d8db93a6df8077d23ee39ec0364e
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L188-L234
train
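The polling loop above uses Python's while/else idiom: the else branch runs only when the loop condition goes false without a break, i.e. on timeout. A generic, self-contained sketch of that idiom, not tied to the Travis API:

import time

def poll(predicate, timeout=60.0, interval=3.0):
    """Call `predicate` until it returns a non-None value or `timeout` elapses."""
    start = time.time()
    while time.time() - start < timeout:
        result = predicate()
        if result is not None:
            break
        time.sleep(interval)
    else:
        # reached only if the while condition failed, i.e. we timed out
        raise RuntimeError('timed out while polling')
    return result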
sethmlarson/trytravis
trytravis.py
_watch_travis_build
def _watch_travis_build(build_id): """ Watches and progressively outputs information about a given Travis build """ import requests try: build_size = None # type: int running = True while running: with requests.get('https://api.travis-ci.org/builds/%d' % build_id, headers=_travis_headers()) as r: json = r.json() if build_size is not None: if build_size > 1: sys.stdout.write('\r\x1b[%dA' % build_size) else: sys.stdout.write('\r') build_size = len(json['jobs']) running = False current_number = 1 for job in json['jobs']: # pragma: no coverage color, state, is_running = _travis_job_state(job['state']) if is_running: running = True platform = job['config']['os'] if platform == 'osx': platform = ' osx ' env = job['config'].get('env', '') sudo = 's' if job['config'].get('sudo', True) else 'c' lang = job['config'].get('language', 'generic') padding = ' ' * (len(str(build_size)) - len(str(current_number))) number = str(current_number) + padding current_number += 1 job_display = '#' + ' '.join([number, state, platform, sudo, lang, env]) print(color + job_display + colorama.Style.RESET_ALL) time.sleep(3.0) except KeyboardInterrupt: pass
python
def _watch_travis_build(build_id): """ Watches and progressively outputs information about a given Travis build """ import requests try: build_size = None # type: int running = True while running: with requests.get('https://api.travis-ci.org/builds/%d' % build_id, headers=_travis_headers()) as r: json = r.json() if build_size is not None: if build_size > 1: sys.stdout.write('\r\x1b[%dA' % build_size) else: sys.stdout.write('\r') build_size = len(json['jobs']) running = False current_number = 1 for job in json['jobs']: # pragma: no coverage color, state, is_running = _travis_job_state(job['state']) if is_running: running = True platform = job['config']['os'] if platform == 'osx': platform = ' osx ' env = job['config'].get('env', '') sudo = 's' if job['config'].get('sudo', True) else 'c' lang = job['config'].get('language', 'generic') padding = ' ' * (len(str(build_size)) - len(str(current_number))) number = str(current_number) + padding current_number += 1 job_display = '#' + ' '.join([number, state, platform, sudo, lang, env]) print(color + job_display + colorama.Style.RESET_ALL) time.sleep(3.0) except KeyboardInterrupt: pass
[ "def", "_watch_travis_build", "(", "build_id", ")", ":", "import", "requests", "try", ":", "build_size", "=", "None", "# type: int", "running", "=", "True", "while", "running", ":", "with", "requests", ".", "get", "(", "'https://api.travis-ci.org/builds/%d'", "%", "build_id", ",", "headers", "=", "_travis_headers", "(", ")", ")", "as", "r", ":", "json", "=", "r", ".", "json", "(", ")", "if", "build_size", "is", "not", "None", ":", "if", "build_size", ">", "1", ":", "sys", ".", "stdout", ".", "write", "(", "'\\r\\x1b[%dA'", "%", "build_size", ")", "else", ":", "sys", ".", "stdout", ".", "write", "(", "'\\r'", ")", "build_size", "=", "len", "(", "json", "[", "'jobs'", "]", ")", "running", "=", "False", "current_number", "=", "1", "for", "job", "in", "json", "[", "'jobs'", "]", ":", "# pragma: no coverage", "color", ",", "state", ",", "is_running", "=", "_travis_job_state", "(", "job", "[", "'state'", "]", ")", "if", "is_running", ":", "running", "=", "True", "platform", "=", "job", "[", "'config'", "]", "[", "'os'", "]", "if", "platform", "==", "'osx'", ":", "platform", "=", "' osx '", "env", "=", "job", "[", "'config'", "]", ".", "get", "(", "'env'", ",", "''", ")", "sudo", "=", "'s'", "if", "job", "[", "'config'", "]", ".", "get", "(", "'sudo'", ",", "True", ")", "else", "'c'", "lang", "=", "job", "[", "'config'", "]", ".", "get", "(", "'language'", ",", "'generic'", ")", "padding", "=", "' '", "*", "(", "len", "(", "str", "(", "build_size", ")", ")", "-", "len", "(", "str", "(", "current_number", ")", ")", ")", "number", "=", "str", "(", "current_number", ")", "+", "padding", "current_number", "+=", "1", "job_display", "=", "'#'", "+", "' '", ".", "join", "(", "[", "number", ",", "state", ",", "platform", ",", "sudo", ",", "lang", ",", "env", "]", ")", "print", "(", "color", "+", "job_display", "+", "colorama", ".", "Style", ".", "RESET_ALL", ")", "time", ".", "sleep", "(", "3.0", ")", "except", "KeyboardInterrupt", ":", "pass" ]
Watches and progressively outputs information about a given Travis build
[ "Watches", "and", "progressively", "outputs", "information", "about", "a", "given", "Travis", "build" ]
d92ed708fe71d8db93a6df8077d23ee39ec0364e
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L237-L286
train
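The in-place job table above depends on the ANSI "cursor up" escape (ESC[nA), which colorama makes portable to Windows. A small self-contained sketch of the same redraw trick; the job names are made up:

import sys
import time

import colorama

colorama.init()                                     # make ANSI escapes work on Windows
jobs = ['job 1', 'job 2', 'job 3']
for tick in range(3):
    if tick:
        sys.stdout.write('\r\x1b[%dA' % len(jobs))  # move the cursor back to the top row
    for job in jobs:
        print('%s: tick %d' % (job, tick))
    time.sleep(0.5)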
sethmlarson/trytravis
trytravis.py
_travis_job_state
def _travis_job_state(state): """ Converts a Travis state into a state character, color, and whether it's still running or a stopped state. """ if state in [None, 'queued', 'created', 'received']: return colorama.Fore.YELLOW, '*', True elif state in ['started', 'running']: return colorama.Fore.LIGHTYELLOW_EX, '*', True elif state == 'passed': return colorama.Fore.LIGHTGREEN_EX, 'P', False elif state == 'failed': return colorama.Fore.LIGHTRED_EX, 'X', False elif state == 'errored': return colorama.Fore.LIGHTRED_EX, '!', False elif state == 'canceled': return colorama.Fore.LIGHTBLACK_EX, 'X', False else: raise RuntimeError('unknown state: %s' % str(state))
python
def _travis_job_state(state): """ Converts a Travis state into a state character, color, and whether it's still running or a stopped state. """ if state in [None, 'queued', 'created', 'received']: return colorama.Fore.YELLOW, '*', True elif state in ['started', 'running']: return colorama.Fore.LIGHTYELLOW_EX, '*', True elif state == 'passed': return colorama.Fore.LIGHTGREEN_EX, 'P', False elif state == 'failed': return colorama.Fore.LIGHTRED_EX, 'X', False elif state == 'errored': return colorama.Fore.LIGHTRED_EX, '!', False elif state == 'canceled': return colorama.Fore.LIGHTBLACK_EX, 'X', False else: raise RuntimeError('unknown state: %s' % str(state))
[ "def", "_travis_job_state", "(", "state", ")", ":", "if", "state", "in", "[", "None", ",", "'queued'", ",", "'created'", ",", "'received'", "]", ":", "return", "colorama", ".", "Fore", ".", "YELLOW", ",", "'*'", ",", "True", "elif", "state", "in", "[", "'started'", ",", "'running'", "]", ":", "return", "colorama", ".", "Fore", ".", "LIGHTYELLOW_EX", ",", "'*'", ",", "True", "elif", "state", "==", "'passed'", ":", "return", "colorama", ".", "Fore", ".", "LIGHTGREEN_EX", ",", "'P'", ",", "False", "elif", "state", "==", "'failed'", ":", "return", "colorama", ".", "Fore", ".", "LIGHTRED_EX", ",", "'X'", ",", "False", "elif", "state", "==", "'errored'", ":", "return", "colorama", ".", "Fore", ".", "LIGHTRED_EX", ",", "'!'", ",", "False", "elif", "state", "==", "'canceled'", ":", "return", "colorama", ".", "Fore", ".", "LIGHTBLACK_EX", ",", "'X'", ",", "False", "else", ":", "raise", "RuntimeError", "(", "'unknown state: %s'", "%", "str", "(", "state", ")", ")" ]
Converts a Travis state into a state character, color, and whether it's still running or a stopped state.
[ "Converts", "a", "Travis", "state", "into", "a", "state", "character", "color", "and", "whether", "it", "s", "still", "running", "or", "a", "stopped", "state", "." ]
d92ed708fe71d8db93a6df8077d23ee39ec0364e
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L289-L305
train
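Hypothetical usage of the mapping above, assuming _travis_job_state is importable in the current scope; a 'passed' state yields a green 'P' marker and a stopped (not running) flag:

import colorama

color, marker, is_running = _travis_job_state('passed')
print(color + marker + colorama.Style.RESET_ALL)    # prints a green 'P'
assert is_running is False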
sethmlarson/trytravis
trytravis.py
_slug_from_url
def _slug_from_url(url): """ Parses a project slug out of either an HTTPS or SSH URL. """ http_match = _HTTPS_REGEX.match(url) ssh_match = _SSH_REGEX.match(url) if not http_match and not ssh_match: raise RuntimeError('Could not parse the URL (`%s`) ' 'for your repository.' % url) if http_match: return '/'.join(http_match.groups()) else: return '/'.join(ssh_match.groups())
python
def _slug_from_url(url): """ Parses a project slug out of either an HTTPS or SSH URL. """ http_match = _HTTPS_REGEX.match(url) ssh_match = _SSH_REGEX.match(url) if not http_match and not ssh_match: raise RuntimeError('Could not parse the URL (`%s`) ' 'for your repository.' % url) if http_match: return '/'.join(http_match.groups()) else: return '/'.join(ssh_match.groups())
[ "def", "_slug_from_url", "(", "url", ")", ":", "http_match", "=", "_HTTPS_REGEX", ".", "match", "(", "url", ")", "ssh_match", "=", "_SSH_REGEX", ".", "match", "(", "url", ")", "if", "not", "http_match", "and", "not", "ssh_match", ":", "raise", "RuntimeError", "(", "'Could not parse the URL (`%s`) '", "'for your repository.'", "%", "url", ")", "if", "http_match", ":", "return", "'/'", ".", "join", "(", "http_match", ".", "groups", "(", ")", ")", "else", ":", "return", "'/'", ".", "join", "(", "ssh_match", ".", "groups", "(", ")", ")" ]
Parses a project slug out of either an HTTPS or SSH URL.
[ "Parses", "a", "project", "slug", "out", "of", "either", "an", "HTTPS", "or", "SSH", "URL", "." ]
d92ed708fe71d8db93a6df8077d23ee39ec0364e
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L308-L318
train
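The _HTTPS_REGEX and _SSH_REGEX constants the function references are not included in this record; the patterns below are approximations that only illustrate the owner/name capture groups they would need:

import re

HTTPS = re.compile(r'^https://github\.com/([^/]+)/([^/]+?)(?:\.git)?/?$')
SSH = re.compile(r'^git@github\.com:([^/]+)/([^/]+?)(?:\.git)?$')

for url in ('https://github.com/sethmlarson/trytravis',
            'git@github.com:sethmlarson/trytravis.git'):
    match = HTTPS.match(url) or SSH.match(url)
    print('/'.join(match.groups()))                 # -> sethmlarson/trytravis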
sethmlarson/trytravis
trytravis.py
_version_string
def _version_string(): """ Gets the output for `trytravis --version`. """ platform_system = platform.system() if platform_system == 'Linux': os_name, os_version, _ = platform.dist() else: os_name = platform_system os_version = platform.version() python_version = platform.python_version() return 'trytravis %s (%s %s, python %s)' % (__version__, os_name.lower(), os_version, python_version)
python
def _version_string(): """ Gets the output for `trytravis --version`. """ platform_system = platform.system() if platform_system == 'Linux': os_name, os_version, _ = platform.dist() else: os_name = platform_system os_version = platform.version() python_version = platform.python_version() return 'trytravis %s (%s %s, python %s)' % (__version__, os_name.lower(), os_version, python_version)
[ "def", "_version_string", "(", ")", ":", "platform_system", "=", "platform", ".", "system", "(", ")", "if", "platform_system", "==", "'Linux'", ":", "os_name", ",", "os_version", ",", "_", "=", "platform", ".", "dist", "(", ")", "else", ":", "os_name", "=", "platform_system", "os_version", "=", "platform", ".", "version", "(", ")", "python_version", "=", "platform", ".", "python_version", "(", ")", "return", "'trytravis %s (%s %s, python %s)'", "%", "(", "__version__", ",", "os_name", ".", "lower", "(", ")", ",", "os_version", ",", "python_version", ")" ]
Gets the output for `trytravis --version`.
[ "Gets", "the", "output", "for", "trytravis", "--", "version", "." ]
d92ed708fe71d8db93a6df8077d23ee39ec0364e
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L321-L333
train
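Note that platform.dist(), used in the Linux branch above, was deprecated and removed in Python 3.8, so this function raises AttributeError on modern interpreters. A hedged alternative; the third-party distro package is an assumption, not a dependency of this project:

import platform

try:
    import distro                                   # third-party: pip install distro
    os_name, os_version = distro.id(), distro.version()
except ImportError:
    os_name, os_version = platform.system().lower(), platform.release()

print('python %s on %s %s' % (platform.python_version(), os_name, os_version))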
sethmlarson/trytravis
trytravis.py
_main
def _main(argv): """ Function that acts just like main() except doesn't catch exceptions. """ repo_input_argv = len(argv) == 2 and argv[0] in ['--repo', '-r', '-R'] # We only support a single argv parameter. if len(argv) > 1 and not repo_input_argv: _main(['--help']) # Parse the command and do the right thing. if len(argv) == 1 or repo_input_argv: arg = argv[0] # Help/usage if arg in ['-h', '--help', '-H']: print(_USAGE) # Version elif arg in ['-v', '--version', '-V']: print(_version_string()) # Token elif arg in ['-r', '--repo', '-R']: if len(argv) == 2: url = argv[1] else: url = None _input_github_repo(url) # No wait elif arg in ['--no-wait', '-nw']: url = _load_github_repo() commit, committed = _submit_changes_to_github_repo(os.getcwd(), url) build_id = _wait_for_travis_build(url, commit, committed) # Help string else: _main(['--help']) # No arguments means we're trying to submit to Travis. elif len(argv) == 0: url = _load_github_repo() commit, committed = _submit_changes_to_github_repo(os.getcwd(), url) build_id = _wait_for_travis_build(url, commit, committed) _watch_travis_build(build_id)
python
def _main(argv): """ Function that acts just like main() except doesn't catch exceptions. """ repo_input_argv = len(argv) == 2 and argv[0] in ['--repo', '-r', '-R'] # We only support a single argv parameter. if len(argv) > 1 and not repo_input_argv: _main(['--help']) # Parse the command and do the right thing. if len(argv) == 1 or repo_input_argv: arg = argv[0] # Help/usage if arg in ['-h', '--help', '-H']: print(_USAGE) # Version elif arg in ['-v', '--version', '-V']: print(_version_string()) # Token elif arg in ['-r', '--repo', '-R']: if len(argv) == 2: url = argv[1] else: url = None _input_github_repo(url) # No wait elif arg in ['--no-wait', '-nw']: url = _load_github_repo() commit, committed = _submit_changes_to_github_repo(os.getcwd(), url) build_id = _wait_for_travis_build(url, commit, committed) # Help string else: _main(['--help']) # No arguments means we're trying to submit to Travis. elif len(argv) == 0: url = _load_github_repo() commit, committed = _submit_changes_to_github_repo(os.getcwd(), url) build_id = _wait_for_travis_build(url, commit, committed) _watch_travis_build(build_id)
[ "def", "_main", "(", "argv", ")", ":", "repo_input_argv", "=", "len", "(", "argv", ")", "==", "2", "and", "argv", "[", "0", "]", "in", "[", "'--repo'", ",", "'-r'", ",", "'-R'", "]", "# We only support a single argv parameter.", "if", "len", "(", "argv", ")", ">", "1", "and", "not", "repo_input_argv", ":", "_main", "(", "[", "'--help'", "]", ")", "# Parse the command and do the right thing.", "if", "len", "(", "argv", ")", "==", "1", "or", "repo_input_argv", ":", "arg", "=", "argv", "[", "0", "]", "# Help/usage", "if", "arg", "in", "[", "'-h'", ",", "'--help'", ",", "'-H'", "]", ":", "print", "(", "_USAGE", ")", "# Version", "elif", "arg", "in", "[", "'-v'", ",", "'--version'", ",", "'-V'", "]", ":", "print", "(", "_version_string", "(", ")", ")", "# Token", "elif", "arg", "in", "[", "'-r'", ",", "'--repo'", ",", "'-R'", "]", ":", "if", "len", "(", "argv", ")", "==", "2", ":", "url", "=", "argv", "[", "1", "]", "else", ":", "url", "=", "None", "_input_github_repo", "(", "url", ")", "# No wait", "elif", "arg", "in", "[", "'--no-wait'", ",", "'-nw'", "]", ":", "url", "=", "_load_github_repo", "(", ")", "commit", ",", "committed", "=", "_submit_changes_to_github_repo", "(", "os", ".", "getcwd", "(", ")", ",", "url", ")", "build_id", "=", "_wait_for_travis_build", "(", "url", ",", "commit", ",", "committed", ")", "# Help string", "else", ":", "_main", "(", "[", "'--help'", "]", ")", "# No arguments means we're trying to submit to Travis.", "elif", "len", "(", "argv", ")", "==", "0", ":", "url", "=", "_load_github_repo", "(", ")", "commit", ",", "committed", "=", "_submit_changes_to_github_repo", "(", "os", ".", "getcwd", "(", ")", ",", "url", ")", "build_id", "=", "_wait_for_travis_build", "(", "url", ",", "commit", ",", "committed", ")", "_watch_travis_build", "(", "build_id", ")" ]
Function that acts just like main() except doesn't catch exceptions.
[ "Function", "that", "acts", "just", "like", "main", "()", "except", "doesn", "t", "catch", "exceptions", "." ]
d92ed708fe71d8db93a6df8077d23ee39ec0364e
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L343-L388
train
sethmlarson/trytravis
trytravis.py
main
def main(argv=None): # pragma: no coverage """ Main entry point when the user runs the `trytravis` command. """ try: colorama.init() if argv is None: argv = sys.argv[1:] _main(argv) except RuntimeError as e: print(colorama.Fore.RED + 'ERROR: ' + str(e) + colorama.Style.RESET_ALL) sys.exit(1) else: sys.exit(0)
python
def main(argv=None): # pragma: no coverage """ Main entry point when the user runs the `trytravis` command. """ try: colorama.init() if argv is None: argv = sys.argv[1:] _main(argv) except RuntimeError as e: print(colorama.Fore.RED + 'ERROR: ' + str(e) + colorama.Style.RESET_ALL) sys.exit(1) else: sys.exit(0)
[ "def", "main", "(", "argv", "=", "None", ")", ":", "# pragma: no coverage", "try", ":", "colorama", ".", "init", "(", ")", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", "_main", "(", "argv", ")", "except", "RuntimeError", "as", "e", ":", "print", "(", "colorama", ".", "Fore", ".", "RED", "+", "'ERROR: '", "+", "str", "(", "e", ")", "+", "colorama", ".", "Style", ".", "RESET_ALL", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "sys", ".", "exit", "(", "0", ")" ]
Main entry point when the user runs the `trytravis` command.
[ "Main", "entry", "point", "when", "the", "user", "runs", "the", "trytravis", "command", "." ]
d92ed708fe71d8db93a6df8077d23ee39ec0364e
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L391-L403
train
cga-harvard/Hypermap-Registry
hypermap/search/views.py
csw_global_dispatch
def csw_global_dispatch(request, url=None, catalog_id=None): """pycsw wrapper""" if request.user.is_authenticated(): # turn on CSW-T settings.REGISTRY_PYCSW['manager']['transactions'] = 'true' env = request.META.copy() # TODO: remove this workaround # HH should be able to pass env['wsgi.input'] without hanging # details at https://github.com/cga-harvard/HHypermap/issues/94 if request.method == 'POST': from StringIO import StringIO env['wsgi.input'] = StringIO(request.body) env.update({'local.app_root': os.path.dirname(__file__), 'REQUEST_URI': request.build_absolute_uri()}) # if this is a catalog based CSW, then update settings if url is not None: settings.REGISTRY_PYCSW['server']['url'] = url if catalog_id is not None: settings.REGISTRY_PYCSW['repository']['filter'] = 'catalog_id = %d' % catalog_id csw = server.Csw(settings.REGISTRY_PYCSW, env) content = csw.dispatch_wsgi() # pycsw 2.0 has an API break: # pycsw < 2.0: content = xml_response # pycsw >= 2.0: content = [http_status_code, content] # deal with the API break if isinstance(content, list): # pycsw 2.0+ content = content[1] response = HttpResponse(content, content_type=csw.contenttype) # TODO: Fix before 1.0 release. CORS should not be enabled blindly like this. response['Access-Control-Allow-Origin'] = '*' return response
python
def csw_global_dispatch(request, url=None, catalog_id=None): """pycsw wrapper""" if request.user.is_authenticated(): # turn on CSW-T settings.REGISTRY_PYCSW['manager']['transactions'] = 'true' env = request.META.copy() # TODO: remove this workaround # HH should be able to pass env['wsgi.input'] without hanging # details at https://github.com/cga-harvard/HHypermap/issues/94 if request.method == 'POST': from StringIO import StringIO env['wsgi.input'] = StringIO(request.body) env.update({'local.app_root': os.path.dirname(__file__), 'REQUEST_URI': request.build_absolute_uri()}) # if this is a catalog based CSW, then update settings if url is not None: settings.REGISTRY_PYCSW['server']['url'] = url if catalog_id is not None: settings.REGISTRY_PYCSW['repository']['filter'] = 'catalog_id = %d' % catalog_id csw = server.Csw(settings.REGISTRY_PYCSW, env) content = csw.dispatch_wsgi() # pycsw 2.0 has an API break: # pycsw < 2.0: content = xml_response # pycsw >= 2.0: content = [http_status_code, content] # deal with the API break if isinstance(content, list): # pycsw 2.0+ content = content[1] response = HttpResponse(content, content_type=csw.contenttype) # TODO: Fix before 1.0 release. CORS should not be enabled blindly like this. response['Access-Control-Allow-Origin'] = '*' return response
[ "def", "csw_global_dispatch", "(", "request", ",", "url", "=", "None", ",", "catalog_id", "=", "None", ")", ":", "if", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "# turn on CSW-T", "settings", ".", "REGISTRY_PYCSW", "[", "'manager'", "]", "[", "'transactions'", "]", "=", "'true'", "env", "=", "request", ".", "META", ".", "copy", "(", ")", "# TODO: remove this workaround", "# HH should be able to pass env['wsgi.input'] without hanging", "# details at https://github.com/cga-harvard/HHypermap/issues/94", "if", "request", ".", "method", "==", "'POST'", ":", "from", "StringIO", "import", "StringIO", "env", "[", "'wsgi.input'", "]", "=", "StringIO", "(", "request", ".", "body", ")", "env", ".", "update", "(", "{", "'local.app_root'", ":", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'REQUEST_URI'", ":", "request", ".", "build_absolute_uri", "(", ")", "}", ")", "# if this is a catalog based CSW, then update settings", "if", "url", "is", "not", "None", ":", "settings", ".", "REGISTRY_PYCSW", "[", "'server'", "]", "[", "'url'", "]", "=", "url", "if", "catalog_id", "is", "not", "None", ":", "settings", ".", "REGISTRY_PYCSW", "[", "'repository'", "]", "[", "'filter'", "]", "=", "'catalog_id = %d'", "%", "catalog_id", "csw", "=", "server", ".", "Csw", "(", "settings", ".", "REGISTRY_PYCSW", ",", "env", ")", "content", "=", "csw", ".", "dispatch_wsgi", "(", ")", "# pycsw 2.0 has an API break:", "# pycsw < 2.0: content = xml_response", "# pycsw >= 2.0: content = [http_status_code, content]", "# deal with the API break", "if", "isinstance", "(", "content", ",", "list", ")", ":", "# pycsw 2.0+", "content", "=", "content", "[", "1", "]", "response", "=", "HttpResponse", "(", "content", ",", "content_type", "=", "csw", ".", "contenttype", ")", "# TODO: Fix before 1.0 release. CORS should not be enabled blindly like this.", "response", "[", "'Access-Control-Allow-Origin'", "]", "=", "'*'", "return", "response" ]
pycsw wrapper
[ "pycsw", "wrapper" ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/search/views.py#L19-L59
train
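A minimal sketch of absorbing the pycsw 2.0 return-shape change that the view above handles inline; PYCSW_CONFIG and environ are placeholders for a configured pycsw settings dict and a WSGI environ, not values from this record:

from pycsw import server

csw = server.Csw(PYCSW_CONFIG, environ)             # placeholders, see lead-in
content = csw.dispatch_wsgi()
if isinstance(content, list):                       # pycsw >= 2.0: [status, body]
    status, content = content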
cga-harvard/Hypermap-Registry
hypermap/search/views.py
csw_global_dispatch_by_catalog
def csw_global_dispatch_by_catalog(request, catalog_slug): """pycsw wrapper for catalogs""" catalog = get_object_or_404(Catalog, slug=catalog_slug) if catalog: # define catalog specific settings url = settings.SITE_URL.rstrip('/') + request.path.rstrip('/') return csw_global_dispatch(request, url=url, catalog_id=catalog.id)
python
def csw_global_dispatch_by_catalog(request, catalog_slug): """pycsw wrapper for catalogs""" catalog = get_object_or_404(Catalog, slug=catalog_slug) if catalog: # define catalog specific settings url = settings.SITE_URL.rstrip('/') + request.path.rstrip('/') return csw_global_dispatch(request, url=url, catalog_id=catalog.id)
[ "def", "csw_global_dispatch_by_catalog", "(", "request", ",", "catalog_slug", ")", ":", "catalog", "=", "get_object_or_404", "(", "Catalog", ",", "slug", "=", "catalog_slug", ")", "if", "catalog", ":", "# define catalog specific settings", "url", "=", "settings", ".", "SITE_URL", ".", "rstrip", "(", "'/'", ")", "+", "request", ".", "path", ".", "rstrip", "(", "'/'", ")", "return", "csw_global_dispatch", "(", "request", ",", "url", "=", "url", ",", "catalog_id", "=", "catalog", ".", "id", ")" ]
pycsw wrapper for catalogs
[ "pycsw", "wrapper", "for", "catalogs" ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/search/views.py#L63-L70
train
cga-harvard/Hypermap-Registry
hypermap/search/views.py
opensearch_dispatch
def opensearch_dispatch(request): """OpenSearch wrapper""" ctx = { 'shortname': settings.REGISTRY_PYCSW['metadata:main']['identification_title'], 'description': settings.REGISTRY_PYCSW['metadata:main']['identification_abstract'], 'developer': settings.REGISTRY_PYCSW['metadata:main']['contact_name'], 'contact': settings.REGISTRY_PYCSW['metadata:main']['contact_email'], 'attribution': settings.REGISTRY_PYCSW['metadata:main']['provider_name'], 'tags': settings.REGISTRY_PYCSW['metadata:main']['identification_keywords'].replace(',', ' '), 'url': settings.SITE_URL.rstrip('/') } return render_to_response('search/opensearch_description.xml', ctx, content_type='application/opensearchdescription+xml')
python
def opensearch_dispatch(request): """OpenSearch wrapper""" ctx = { 'shortname': settings.REGISTRY_PYCSW['metadata:main']['identification_title'], 'description': settings.REGISTRY_PYCSW['metadata:main']['identification_abstract'], 'developer': settings.REGISTRY_PYCSW['metadata:main']['contact_name'], 'contact': settings.REGISTRY_PYCSW['metadata:main']['contact_email'], 'attribution': settings.REGISTRY_PYCSW['metadata:main']['provider_name'], 'tags': settings.REGISTRY_PYCSW['metadata:main']['identification_keywords'].replace(',', ' '), 'url': settings.SITE_URL.rstrip('/') } return render_to_response('search/opensearch_description.xml', ctx, content_type='application/opensearchdescription+xml')
[ "def", "opensearch_dispatch", "(", "request", ")", ":", "ctx", "=", "{", "'shortname'", ":", "settings", ".", "REGISTRY_PYCSW", "[", "'metadata:main'", "]", "[", "'identification_title'", "]", ",", "'description'", ":", "settings", ".", "REGISTRY_PYCSW", "[", "'metadata:main'", "]", "[", "'identification_abstract'", "]", ",", "'developer'", ":", "settings", ".", "REGISTRY_PYCSW", "[", "'metadata:main'", "]", "[", "'contact_name'", "]", ",", "'contact'", ":", "settings", ".", "REGISTRY_PYCSW", "[", "'metadata:main'", "]", "[", "'contact_email'", "]", ",", "'attribution'", ":", "settings", ".", "REGISTRY_PYCSW", "[", "'metadata:main'", "]", "[", "'provider_name'", "]", ",", "'tags'", ":", "settings", ".", "REGISTRY_PYCSW", "[", "'metadata:main'", "]", "[", "'identification_keywords'", "]", ".", "replace", "(", "','", ",", "' '", ")", ",", "'url'", ":", "settings", ".", "SITE_URL", ".", "rstrip", "(", "'/'", ")", "}", "return", "render_to_response", "(", "'search/opensearch_description.xml'", ",", "ctx", ",", "content_type", "=", "'application/opensearchdescription+xml'", ")" ]
OpenSearch wrapper
[ "OpenSearch", "wrapper" ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/search/views.py#L73-L87
train
cga-harvard/Hypermap-Registry
hypermap/aggregator/elasticsearch_client.py
ESHypermap.good_coords
def good_coords(coords): """ passed a string array """ if (len(coords) != 4): return False for coord in coords[0:3]: try: num = float(coord) if (math.isnan(num)): return False if (math.isinf(num)): return False except ValueError: return False return True
python
def good_coords(coords): """ passed a string array """ if (len(coords) != 4): return False for coord in coords[0:3]: try: num = float(coord) if (math.isnan(num)): return False if (math.isinf(num)): return False except ValueError: return False return True
[ "def", "good_coords", "(", "coords", ")", ":", "if", "(", "len", "(", "coords", ")", "!=", "4", ")", ":", "return", "False", "for", "coord", "in", "coords", "[", "0", ":", "3", "]", ":", "try", ":", "num", "=", "float", "(", "coord", ")", "if", "(", "math", ".", "isnan", "(", "num", ")", ")", ":", "return", "False", "if", "(", "math", ".", "isinf", "(", "num", ")", ")", ":", "return", "False", "except", "ValueError", ":", "return", "False", "return", "True" ]
passed a string array
[ "passed", "a", "string", "array" ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/elasticsearch_client.py#L36-L49
train
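Note that coords[0:3] in the loop above slices only the first three entries, so the fourth coordinate is never validated. A corrected sketch, which deliberately diverges from the recorded source:

import math

def good_coords(coords):
    """Return True if coords is a 4-element sequence of finite numbers."""
    if len(coords) != 4:
        return False
    for coord in coords:                            # check all four values
        try:
            num = float(coord)
        except ValueError:
            return False
        if math.isnan(num) or math.isinf(num):
            return False
    return True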
cga-harvard/Hypermap-Registry
hypermap/aggregator/elasticsearch_client.py
ESHypermap.clear_es
def clear_es(): """Clear all indexes in the es core""" # TODO: should receive a catalog slug. ESHypermap.es.indices.delete(ESHypermap.index_name, ignore=[400, 404]) LOGGER.debug('Elasticsearch: Index cleared')
python
def clear_es(): """Clear all indexes in the es core""" # TODO: should receive a catalog slug. ESHypermap.es.indices.delete(ESHypermap.index_name, ignore=[400, 404]) LOGGER.debug('Elasticsearch: Index cleared')
[ "def", "clear_es", "(", ")", ":", "# TODO: should receive a catalog slug.", "ESHypermap", ".", "es", ".", "indices", ".", "delete", "(", "ESHypermap", ".", "index_name", ",", "ignore", "=", "[", "400", ",", "404", "]", ")", "LOGGER", ".", "debug", "(", "'Elasticsearch: Index cleared'", ")" ]
Clear all indexes in the es core
[ "Clear", "all", "indexes", "in", "the", "es", "core" ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/elasticsearch_client.py#L221-L225
train
cga-harvard/Hypermap-Registry
hypermap/aggregator/elasticsearch_client.py
ESHypermap.create_indices
def create_indices(catalog_slug): """Create ES core indices """ # TODO: enable auto_create_index in the ES nodes to make this implicit. # https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation # http://support.searchly.com/customer/en/portal/questions/ # 16312889-is-automatic-index-creation-disabled-?new=16312889 mapping = { "mappings": { "layer": { "properties": { "layer_geoshape": { "type": "geo_shape", "tree": "quadtree", "precision": REGISTRY_MAPPING_PRECISION } } } } } ESHypermap.es.indices.create(catalog_slug, ignore=[400, 404], body=mapping)
python
def create_indices(catalog_slug): """Create ES core indices """ # TODO: enable auto_create_index in the ES nodes to make this implicit. # https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation # http://support.searchly.com/customer/en/portal/questions/ # 16312889-is-automatic-index-creation-disabled-?new=16312889 mapping = { "mappings": { "layer": { "properties": { "layer_geoshape": { "type": "geo_shape", "tree": "quadtree", "precision": REGISTRY_MAPPING_PRECISION } } } } } ESHypermap.es.indices.create(catalog_slug, ignore=[400, 404], body=mapping)
[ "def", "create_indices", "(", "catalog_slug", ")", ":", "# TODO: enable auto_create_index in the ES nodes to make this implicit.", "# https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation", "# http://support.searchly.com/customer/en/portal/questions/", "# 16312889-is-automatic-index-creation-disabled-?new=16312889", "mapping", "=", "{", "\"mappings\"", ":", "{", "\"layer\"", ":", "{", "\"properties\"", ":", "{", "\"layer_geoshape\"", ":", "{", "\"type\"", ":", "\"geo_shape\"", ",", "\"tree\"", ":", "\"quadtree\"", ",", "\"precision\"", ":", "REGISTRY_MAPPING_PRECISION", "}", "}", "}", "}", "}", "ESHypermap", ".", "es", ".", "indices", ".", "create", "(", "catalog_slug", ",", "ignore", "=", "[", "400", ",", "404", "]", ",", "body", "=", "mapping", ")" ]
Create ES core indices
[ "Create", "ES", "core", "indices" ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/elasticsearch_client.py#L228-L247
train
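A hedged example of the kind of query the geo_shape mapping above enables: filtering layers whose shapes intersect an envelope (upper-left, lower-right corner order, per the Elasticsearch docs). The index name and client call are illustrative only:

query = {
    "query": {
        "bool": {
            "filter": {
                "geo_shape": {
                    "layer_geoshape": {
                        "shape": {
                            "type": "envelope",
                            "coordinates": [[-10.0, 50.0], [10.0, 30.0]],
                        },
                        "relation": "intersects",
                    }
                }
            }
        }
    }
}
# e.g. ESHypermap.es.search(index='hypermap', body=query)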
cga-harvard/Hypermap-Registry
pavement.py
kill_process
def kill_process(procname, scriptname): """kill WSGI processes that may be running in development""" # from http://stackoverflow.com/a/2940878 import signal import subprocess p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE) out, err = p.communicate() for line in out.decode().splitlines(): if procname in line and scriptname in line: pid = int(line.split()[1]) info('Stopping %s %s %d' % (procname, scriptname, pid)) os.kill(pid, signal.SIGKILL)
python
def kill_process(procname, scriptname): """kill WSGI processes that may be running in development""" # from http://stackoverflow.com/a/2940878 import signal import subprocess p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE) out, err = p.communicate() for line in out.decode().splitlines(): if procname in line and scriptname in line: pid = int(line.split()[1]) info('Stopping %s %s %d' % (procname, scriptname, pid)) os.kill(pid, signal.SIGKILL)
[ "def", "kill_process", "(", "procname", ",", "scriptname", ")", ":", "# from http://stackoverflow.com/a/2940878", "import", "signal", "import", "subprocess", "p", "=", "subprocess", ".", "Popen", "(", "[", "'ps'", ",", "'aux'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "out", ",", "err", "=", "p", ".", "communicate", "(", ")", "for", "line", "in", "out", ".", "decode", "(", ")", ".", "splitlines", "(", ")", ":", "if", "procname", "in", "line", "and", "scriptname", "in", "line", ":", "pid", "=", "int", "(", "line", ".", "split", "(", ")", "[", "1", "]", ")", "info", "(", "'Stopping %s %s %d'", "%", "(", "procname", ",", "scriptname", ",", "pid", ")", ")", "os", ".", "kill", "(", "pid", ",", "signal", ".", "SIGKILL", ")" ]
kill WSGI processes that may be running in development
[ "kill", "WSGI", "processes", "that", "may", "be", "running", "in", "development" ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/pavement.py#L50-L64
train
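Scraping `ps aux` output as above is fragile (column layout and command truncation vary by platform). A hedged alternative using the third-party psutil package, which this project does not actually use:

import psutil

def kill_process(procname, scriptname):
    """Kill processes whose name contains procname and cmdline contains scriptname."""
    for proc in psutil.process_iter(['name', 'cmdline']):
        cmdline = ' '.join(proc.info['cmdline'] or [])
        if procname in (proc.info['name'] or '') and scriptname in cmdline:
            proc.kill()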
cga-harvard/Hypermap-Registry
hypermap/aggregator/populate_database.py
populate_initial_services
def populate_initial_services(): """ Populate a fresh installed Hypermap instances with basic services. """ services_list = ( ( 'Harvard WorldMap', 'Harvard WorldMap open source web geospatial platform', 'Hypermap:WorldMap', 'http://worldmap.harvard.edu' ), ( 'NYPL MapWarper', 'The New York Public Library (NYPL) MapWarper web site', 'Hypermap:WARPER', 'http://maps.nypl.org/warper/maps' ), ( 'Map Warper', 'The MapWarper web site developed, hosted and maintained by Tim Waters', 'Hypermap:WARPER', 'http://mapwarper.net/maps' ), ( 'WorldMap Warp', 'The MapWarper instance part of the Harvard WorldMap project', 'Hypermap:WARPER', 'http://warp.worldmap.harvard.edu/maps' ), ( 'WFP GeoNode', 'World Food Programme GeoNode', 'OGC:WMS', 'http://geonode.wfp.org/geoserver/ows?' ), ( 'NASA EARTHDATA', 'NASA EARTHDATA, powered by EOSDIS', 'OGC:WMTS', 'http://map1.vis.earthdata.nasa.gov/wmts-geo/1.0.0/WMTSCapabilities.xml' ), ) esri_endpoint = 'https://gis.ngdc.noaa.gov/arcgis/rest/services' LOGGER.debug('*** Importing esri endpoint: %s' % esri_endpoint) create_services_from_endpoint(esri_endpoint) for service in services_list: LOGGER.debug('*** Importing %s' % service[0]) service = Service( title=service[0], abstract=service[1], type=service[2], url=service[3] ) service.save()
python
def populate_initial_services(): """ Populate a fresh installed Hypermap instances with basic services. """ services_list = ( ( 'Harvard WorldMap', 'Harvard WorldMap open source web geospatial platform', 'Hypermap:WorldMap', 'http://worldmap.harvard.edu' ), ( 'NYPL MapWarper', 'The New York Public Library (NYPL) MapWarper web site', 'Hypermap:WARPER', 'http://maps.nypl.org/warper/maps' ), ( 'Map Warper', 'The MapWarper web site developed, hosted and maintained by Tim Waters', 'Hypermap:WARPER', 'http://mapwarper.net/maps' ), ( 'WorldMap Warp', 'The MapWarper instance part of the Harvard WorldMap project', 'Hypermap:WARPER', 'http://warp.worldmap.harvard.edu/maps' ), ( 'WFP GeoNode', 'World Food Programme GeoNode', 'OGC:WMS', 'http://geonode.wfp.org/geoserver/ows?' ), ( 'NASA EARTHDATA', 'NASA EARTHDATA, powered by EOSDIS', 'OGC:WMTS', 'http://map1.vis.earthdata.nasa.gov/wmts-geo/1.0.0/WMTSCapabilities.xml' ), ) esri_endpoint = 'https://gis.ngdc.noaa.gov/arcgis/rest/services' LOGGER.debug('*** Importing esri endpoint: %s' % esri_endpoint) create_services_from_endpoint(esri_endpoint) for service in services_list: LOGGER.debug('*** Importing %s' % service[0]) service = Service( title=service[0], abstract=service[1], type=service[2], url=service[3] ) service.save()
[ "def", "populate_initial_services", "(", ")", ":", "services_list", "=", "(", "(", "'Harvard WorldMap'", ",", "'Harvard WorldMap open source web geospatial platform'", ",", "'Hypermap:WorldMap'", ",", "'http://worldmap.harvard.edu'", ")", ",", "(", "'NYPL MapWarper'", ",", "'The New York Public Library (NYPL) MapWarper web site'", ",", "'Hypermap:WARPER'", ",", "'http://maps.nypl.org/warper/maps'", ")", ",", "(", "'Map Warper'", ",", "'The MapWarper web site developed, hosted and maintained by Tim Waters'", ",", "'Hypermap:WARPER'", ",", "'http://mapwarper.net/maps'", ")", ",", "(", "'WorldMap Warp'", ",", "'The MapWarper instance part of the Harvard WorldMap project'", ",", "'Hypermap:WARPER'", ",", "'http://warp.worldmap.harvard.edu/maps'", ")", ",", "(", "'WFP GeoNode'", ",", "'World Food Programme GeoNode'", ",", "'OGC:WMS'", ",", "'http://geonode.wfp.org/geoserver/ows?'", ")", ",", "(", "'NASA EARTHDATA'", ",", "'NASA EARTHDATA, powered by EOSDIS'", ",", "'OGC:WMTS'", ",", "'http://map1.vis.earthdata.nasa.gov/wmts-geo/1.0.0/WMTSCapabilities.xml'", ")", ",", ")", "esri_endpoint", "=", "'https://gis.ngdc.noaa.gov/arcgis/rest/services'", "LOGGER", ".", "debug", "(", "'*** Importing esri endpoint: %s'", "%", "esri_endpoint", ")", "create_services_from_endpoint", "(", "esri_endpoint", ")", "for", "service", "in", "services_list", ":", "LOGGER", ".", "debug", "(", "'*** Importing %s'", "%", "service", "[", "0", "]", ")", "service", "=", "Service", "(", "title", "=", "service", "[", "0", "]", ",", "abstract", "=", "service", "[", "1", "]", ",", "type", "=", "service", "[", "2", "]", ",", "url", "=", "service", "[", "3", "]", ")", "service", ".", "save", "(", ")" ]
Populate a fresh installed Hypermap instances with basic services.
[ "Populate", "a", "fresh", "installed", "Hypermap", "instances", "with", "basic", "services", "." ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/populate_database.py#L10-L65
train
cga-harvard/Hypermap-Registry
hypermap/search_api/views.py
elasticsearch
def elasticsearch(serializer, catalog): """ https://www.elastic.co/guide/en/elasticsearch/reference/current/_the_search_api.html :param serializer: :return: """ search_engine_endpoint = "{0}/{1}/_search".format(SEARCH_URL, catalog.slug) q_text = serializer.validated_data.get("q_text") q_time = serializer.validated_data.get("q_time") q_geo = serializer.validated_data.get("q_geo") q_user = serializer.validated_data.get("q_user") d_docs_sort = serializer.validated_data.get("d_docs_sort") d_docs_limit = int(serializer.validated_data.get("d_docs_limit")) d_docs_page = int(serializer.validated_data.get("d_docs_page")) a_text_limit = serializer.validated_data.get("a_text_limit") a_user_limit = serializer.validated_data.get("a_user_limit") a_time_gap = serializer.validated_data.get("a_time_gap") a_time_limit = serializer.validated_data.get("a_time_limit") original_response = serializer.validated_data.get("original_response") # Dict for search on Elastic engine must_array = [] filter_dic = {} aggs_dic = {} # get ES version to make the query builder to be backward compatible with # diffs versions. # TODO: move this to a proper place. maybe ES client?. # TODO: cache it to avoid overwhelm ES with this call. # TODO: ask for ES_VERSION when building queries with an elegant way. ES_VERSION = 2 response = requests.get(SEARCH_URL) if response.ok: # looks ugly but will work on normal ES response for "/". ES_VERSION = int(response.json()["version"]["number"][0]) # String searching if q_text: # Wrapping query string into a query filter. if ES_VERSION >= 2: query_string = { "query_string": { "query": q_text } } else: query_string = { "query": { "query_string": { "query": q_text } } } # add string searching must_array.append(query_string) if q_time: # check if q_time exists q_time = str(q_time) # check string shortener = q_time[1:-1] shortener = shortener.split(" TO ") gte = shortener[0] # greater than lte = shortener[1] # less than layer_date = {} if gte == '*' and lte != '*': layer_date["lte"] = lte range_time = { "layer_date": layer_date } range_time = {"range": range_time} must_array.append(range_time) if gte != '*' and lte == '*': layer_date["gte"] = gte range_time = { "layer_date": layer_date } range_time = {"range": range_time} must_array.append(range_time) if gte != '*' and lte != '*': layer_date["gte"] = gte layer_date["lte"] = lte range_time = { "layer_date": layer_date } range_time = {"range": range_time} must_array.append(range_time) # geo_shape searching if q_geo: q_geo = str(q_geo) q_geo = q_geo[1:-1] Ymin, Xmin = q_geo.split(" TO ")[0].split(",") Ymax, Xmax = q_geo.split(" TO ")[1].split(",") geoshape_query = { "layer_geoshape": { "shape": { "type": "envelope", "coordinates": [[Xmin, Ymax], [Xmax, Ymin]] }, "relation": "intersects" } } filter_dic["geo_shape"] = geoshape_query if q_user: # Using q_user user_searching = { "match": { "layer_originator": q_user } } must_array.append(user_searching) if ES_VERSION >= 2: dic_query = { "query": { "bool": { "must": must_array, "filter": filter_dic } } } else: dic_query = { "query": { "filtered": { "filter": { "bool": { "must": must_array, "should": filter_dic } } } } } # Page if d_docs_limit: dic_query["size"] = d_docs_limit if d_docs_page: dic_query["from"] = d_docs_limit * d_docs_page - d_docs_limit if d_docs_sort == "score": dic_query["sort"] = {"_score": {"order": "desc"}} if d_docs_sort == "time": dic_query["sort"] = {"layer_date": {"order": "desc"}} if d_docs_sort == "distance": if q_geo: # distance_x = float(((float(Xmin) - float(Xmax)) ** 2.0) ** (0.5)) # distance_y = float(((float(Ymin) - float(Ymax)) ** 2.0) ** (0.5)) msg = ("Sorting by distance is different on ElasticSearch than Solr, because this" "feature on elastic is unavailable to geo_shape type.ElasticSearch docs said:" "Due to the complex input structure and index representation of shapes," "it is not currently possible to sort shapes or retrieve their fields directly." "The geo_shape value is only retrievable through the _source field." " Link: https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html") return {"error": {"msg": msg}} else: msg = "q_qeo MUST BE NO ZERO if you wanna sort by distance" return {"error": {"msg": msg}} if a_text_limit: # getting most frequently occurring users. text_limit = { "terms": { "field": "abstract", "size": a_text_limit } } aggs_dic['popular_text'] = text_limit if a_user_limit: # getting most frequently occurring users. users_limit = { "terms": { "field": "layer_originator", "size": a_user_limit } } aggs_dic['popular_users'] = users_limit if a_time_limit: # TODO: Work in progress, a_time_limit is incomplete. # TODO: when times are * it does not work. also a a_time_gap is not required. if q_time: if not a_time_gap: # getting time limit histogram. time_limt = { "date_range": { "field": "layer_date", "format": "yyyy-MM-dd'T'HH:mm:ssZ", "ranges": [ {"from": gte, "to": lte} ] } } aggs_dic['range'] = time_limt else: pass else: msg = "If you want to use a_time_limit feature, q_time MUST BE initialized" return {"error": {"msg": msg}} if a_time_gap: interval = gap_to_elastic(a_time_gap) time_gap = { "date_histogram": { "field": "layer_date", "format": "yyyy-MM-dd'T'HH:mm:ssZ", "interval": interval } } aggs_dic['articles_over_time'] = time_gap # adding aggreations on body query if aggs_dic: dic_query['aggs'] = aggs_dic try: res = requests.post(search_engine_endpoint, data=json.dumps(dic_query)) except Exception as e: return 500, {"error": {"msg": str(e)}} es_response = res.json() if original_response: return es_response data = {} if 'error' in es_response: data["error"] = es_response["error"] return 400, data data["request_url"] = res.url data["request_body"] = json.dumps(dic_query) data["a.matchDocs"] = es_response['hits']['total'] docs = [] # aggreations response: facets searching if 'aggregations' in es_response: aggs = es_response['aggregations'] # getting the most frequently occurring users. if 'popular_users' in aggs: a_users_list_array = [] users_resp = aggs["popular_users"]["buckets"] for item in users_resp: temp = {} temp['count'] = item['doc_count'] temp['value'] = item['key'] a_users_list_array.append(temp) data["a.user"] = a_users_list_array # getting most frequently ocurring words if 'popular_text' in aggs: a_text_list_array = [] text_resp = es_response["aggregations"]["popular_text"]["buckets"] for item in text_resp: temp = {} temp['count'] = item['doc_count'] temp['value'] = item['key'] a_text_list_array.append(temp) data["a.text"] = a_text_list_array if 'articles_over_time' in aggs: gap_count = [] a_gap = {} gap_resp = aggs["articles_over_time"]["buckets"] start = "*" end = "*" if len(gap_resp) > 0: start = gap_resp[0]['key_as_string'].replace('+0000', 'z') end = gap_resp[-1]['key_as_string'].replace('+0000', 'z') a_gap['start'] = start a_gap['end'] = end a_gap['gap'] = a_time_gap for item in gap_resp: temp = {} if item['doc_count'] != 0: temp['count'] = item['doc_count'] temp['value'] = item['key_as_string'].replace('+0000', 'z') gap_count.append(temp) a_gap['counts'] = gap_count data['a.time'] = a_gap if 'range' in aggs: # Work in progress # Pay attention in the following code lines: Make it better!!!! time_count = [] time_resp = aggs["range"]["buckets"] a_time = {} a_time['start'] = gte a_time['end'] = lte a_time['gap'] = None for item in time_resp: temp = {} if item['doc_count'] != 0: temp['count'] = item['doc_count'] temp['value'] = item['key'].replace('+0000', 'z') time_count.append(temp) a_time['counts'] = time_count data['a.time'] = a_time if not int(d_docs_limit) == 0: for item in es_response['hits']['hits']: # data temp = item['_source']['abstract'] temp = temp.replace(u'\u201c', "\"") temp = temp.replace(u'\u201d', "\"") temp = temp.replace('"', "\"") temp = temp.replace("'", "\'") temp = temp.replace(u'\u2019', "\'") item['_source']['abstract'] = temp docs.append(item['_source']) data["d.docs"] = docs return data
python
def elasticsearch(serializer, catalog): """ https://www.elastic.co/guide/en/elasticsearch/reference/current/_the_search_api.html :param serializer: :return: """ search_engine_endpoint = "{0}/{1}/_search".format(SEARCH_URL, catalog.slug) q_text = serializer.validated_data.get("q_text") q_time = serializer.validated_data.get("q_time") q_geo = serializer.validated_data.get("q_geo") q_user = serializer.validated_data.get("q_user") d_docs_sort = serializer.validated_data.get("d_docs_sort") d_docs_limit = int(serializer.validated_data.get("d_docs_limit")) d_docs_page = int(serializer.validated_data.get("d_docs_page")) a_text_limit = serializer.validated_data.get("a_text_limit") a_user_limit = serializer.validated_data.get("a_user_limit") a_time_gap = serializer.validated_data.get("a_time_gap") a_time_limit = serializer.validated_data.get("a_time_limit") original_response = serializer.validated_data.get("original_response") # Dict for search on Elastic engine must_array = [] filter_dic = {} aggs_dic = {} # get ES version to make the query builder to be backward compatible with # diffs versions. # TODO: move this to a proper place. maybe ES client?. # TODO: cache it to avoid overwhelm ES with this call. # TODO: ask for ES_VERSION when building queries with an elegant way. ES_VERSION = 2 response = requests.get(SEARCH_URL) if response.ok: # looks ugly but will work on normal ES response for "/". ES_VERSION = int(response.json()["version"]["number"][0]) # String searching if q_text: # Wrapping query string into a query filter. if ES_VERSION >= 2: query_string = { "query_string": { "query": q_text } } else: query_string = { "query": { "query_string": { "query": q_text } } } # add string searching must_array.append(query_string) if q_time: # check if q_time exists q_time = str(q_time) # check string shortener = q_time[1:-1] shortener = shortener.split(" TO ") gte = shortener[0] # greater than lte = shortener[1] # less than layer_date = {} if gte == '*' and lte != '*': layer_date["lte"] = lte range_time = { "layer_date": layer_date } range_time = {"range": range_time} must_array.append(range_time) if gte != '*' and lte == '*': layer_date["gte"] = gte range_time = { "layer_date": layer_date } range_time = {"range": range_time} must_array.append(range_time) if gte != '*' and lte != '*': layer_date["gte"] = gte layer_date["lte"] = lte range_time = { "layer_date": layer_date } range_time = {"range": range_time} must_array.append(range_time) # geo_shape searching if q_geo: q_geo = str(q_geo) q_geo = q_geo[1:-1] Ymin, Xmin = q_geo.split(" TO ")[0].split(",") Ymax, Xmax = q_geo.split(" TO ")[1].split(",") geoshape_query = { "layer_geoshape": { "shape": { "type": "envelope", "coordinates": [[Xmin, Ymax], [Xmax, Ymin]] }, "relation": "intersects" } } filter_dic["geo_shape"] = geoshape_query if q_user: # Using q_user user_searching = { "match": { "layer_originator": q_user } } must_array.append(user_searching) if ES_VERSION >= 2: dic_query = { "query": { "bool": { "must": must_array, "filter": filter_dic } } } else: dic_query = { "query": { "filtered": { "filter": { "bool": { "must": must_array, "should": filter_dic } } } } } # Page if d_docs_limit: dic_query["size"] = d_docs_limit if d_docs_page: dic_query["from"] = d_docs_limit * d_docs_page - d_docs_limit if d_docs_sort == "score": dic_query["sort"] = {"_score": {"order": "desc"}} if d_docs_sort == "time": dic_query["sort"] = {"layer_date": {"order": "desc"}} if d_docs_sort == "distance": if q_geo: # distance_x = float(((float(Xmin) - float(Xmax)) ** 2.0) ** (0.5)) # distance_y = float(((float(Ymin) - float(Ymax)) ** 2.0) ** (0.5)) msg = ("Sorting by distance is different on ElasticSearch than Solr, because this" "feature on elastic is unavailable to geo_shape type.ElasticSearch docs said:" "Due to the complex input structure and index representation of shapes," "it is not currently possible to sort shapes or retrieve their fields directly." "The geo_shape value is only retrievable through the _source field." " Link: https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html") return {"error": {"msg": msg}} else: msg = "q_qeo MUST BE NO ZERO if you wanna sort by distance" return {"error": {"msg": msg}} if a_text_limit: # getting most frequently occurring users. text_limit = { "terms": { "field": "abstract", "size": a_text_limit } } aggs_dic['popular_text'] = text_limit if a_user_limit: # getting most frequently occurring users. users_limit = { "terms": { "field": "layer_originator", "size": a_user_limit } } aggs_dic['popular_users'] = users_limit if a_time_limit: # TODO: Work in progress, a_time_limit is incomplete. # TODO: when times are * it does not work. also a a_time_gap is not required. if q_time: if not a_time_gap: # getting time limit histogram. time_limt = { "date_range": { "field": "layer_date", "format": "yyyy-MM-dd'T'HH:mm:ssZ", "ranges": [ {"from": gte, "to": lte} ] } } aggs_dic['range'] = time_limt else: pass else: msg = "If you want to use a_time_limit feature, q_time MUST BE initialized" return {"error": {"msg": msg}} if a_time_gap: interval = gap_to_elastic(a_time_gap) time_gap = { "date_histogram": { "field": "layer_date", "format": "yyyy-MM-dd'T'HH:mm:ssZ", "interval": interval } } aggs_dic['articles_over_time'] = time_gap # adding aggreations on body query if aggs_dic: dic_query['aggs'] = aggs_dic try: res = requests.post(search_engine_endpoint, data=json.dumps(dic_query)) except Exception as e: return 500, {"error": {"msg": str(e)}} es_response = res.json() if original_response: return es_response data = {} if 'error' in es_response: data["error"] = es_response["error"] return 400, data data["request_url"] = res.url data["request_body"] = json.dumps(dic_query) data["a.matchDocs"] = es_response['hits']['total'] docs = [] # aggreations response: facets searching if 'aggregations' in es_response: aggs = es_response['aggregations'] # getting the most frequently occurring users. if 'popular_users' in aggs: a_users_list_array = [] users_resp = aggs["popular_users"]["buckets"] for item in users_resp: temp = {} temp['count'] = item['doc_count'] temp['value'] = item['key'] a_users_list_array.append(temp) data["a.user"] = a_users_list_array # getting most frequently ocurring words if 'popular_text' in aggs: a_text_list_array = [] text_resp = es_response["aggregations"]["popular_text"]["buckets"] for item in text_resp: temp = {} temp['count'] = item['doc_count'] temp['value'] = item['key'] a_text_list_array.append(temp) data["a.text"] = a_text_list_array if 'articles_over_time' in aggs: gap_count = [] a_gap = {} gap_resp = aggs["articles_over_time"]["buckets"] start = "*" end = "*" if len(gap_resp) > 0: start = gap_resp[0]['key_as_string'].replace('+0000', 'z') end = gap_resp[-1]['key_as_string'].replace('+0000', 'z') a_gap['start'] = start a_gap['end'] = end a_gap['gap'] = a_time_gap for item in gap_resp: temp = {} if item['doc_count'] != 0: temp['count'] = item['doc_count'] temp['value'] = item['key_as_string'].replace('+0000', 'z') gap_count.append(temp) a_gap['counts'] = gap_count data['a.time'] = a_gap if 'range' in aggs: # Work in progress # Pay attention in the following code lines: Make it better!!!! time_count = [] time_resp = aggs["range"]["buckets"] a_time = {} a_time['start'] = gte a_time['end'] = lte a_time['gap'] = None for item in time_resp: temp = {} if item['doc_count'] != 0: temp['count'] = item['doc_count'] temp['value'] = item['key'].replace('+0000', 'z') time_count.append(temp) a_time['counts'] = time_count data['a.time'] = a_time if not int(d_docs_limit) == 0: for item in es_response['hits']['hits']: # data temp = item['_source']['abstract'] temp = temp.replace(u'\u201c', "\"") temp = temp.replace(u'\u201d', "\"") temp = temp.replace('"', "\"") temp = temp.replace("'", "\'") temp = temp.replace(u'\u2019', "\'") item['_source']['abstract'] = temp docs.append(item['_source']) data["d.docs"] = docs return data
[ "def", "elasticsearch", "(", "serializer", ",", "catalog", ")", ":", "search_engine_endpoint", "=", "\"{0}/{1}/_search\"", ".", "format", "(", "SEARCH_URL", ",", "catalog", ".", "slug", ")", "q_text", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"q_text\"", ")", "q_time", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"q_time\"", ")", "q_geo", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"q_geo\"", ")", "q_user", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"q_user\"", ")", "d_docs_sort", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"d_docs_sort\"", ")", "d_docs_limit", "=", "int", "(", "serializer", ".", "validated_data", ".", "get", "(", "\"d_docs_limit\"", ")", ")", "d_docs_page", "=", "int", "(", "serializer", ".", "validated_data", ".", "get", "(", "\"d_docs_page\"", ")", ")", "a_text_limit", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_text_limit\"", ")", "a_user_limit", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_user_limit\"", ")", "a_time_gap", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_time_gap\"", ")", "a_time_limit", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_time_limit\"", ")", "original_response", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"original_response\"", ")", "# Dict for search on Elastic engine", "must_array", "=", "[", "]", "filter_dic", "=", "{", "}", "aggs_dic", "=", "{", "}", "# get ES version to make the query builder to be backward compatible with", "# diffs versions.", "# TODO: move this to a proper place. maybe ES client?.", "# TODO: cache it to avoid overwhelm ES with this call.", "# TODO: ask for ES_VERSION when building queries with an elegant way.", "ES_VERSION", "=", "2", "response", "=", "requests", ".", "get", "(", "SEARCH_URL", ")", "if", "response", ".", "ok", ":", "# looks ugly but will work on normal ES response for \"/\".", "ES_VERSION", "=", "int", "(", "response", ".", "json", "(", ")", "[", "\"version\"", "]", "[", "\"number\"", "]", "[", "0", "]", ")", "# String searching", "if", "q_text", ":", "# Wrapping query string into a query filter.", "if", "ES_VERSION", ">=", "2", ":", "query_string", "=", "{", "\"query_string\"", ":", "{", "\"query\"", ":", "q_text", "}", "}", "else", ":", "query_string", "=", "{", "\"query\"", ":", "{", "\"query_string\"", ":", "{", "\"query\"", ":", "q_text", "}", "}", "}", "# add string searching", "must_array", ".", "append", "(", "query_string", ")", "if", "q_time", ":", "# check if q_time exists", "q_time", "=", "str", "(", "q_time", ")", "# check string", "shortener", "=", "q_time", "[", "1", ":", "-", "1", "]", "shortener", "=", "shortener", ".", "split", "(", "\" TO \"", ")", "gte", "=", "shortener", "[", "0", "]", "# greater than", "lte", "=", "shortener", "[", "1", "]", "# less than", "layer_date", "=", "{", "}", "if", "gte", "==", "'*'", "and", "lte", "!=", "'*'", ":", "layer_date", "[", "\"lte\"", "]", "=", "lte", "range_time", "=", "{", "\"layer_date\"", ":", "layer_date", "}", "range_time", "=", "{", "\"range\"", ":", "range_time", "}", "must_array", ".", "append", "(", "range_time", ")", "if", "gte", "!=", "'*'", "and", "lte", "==", "'*'", ":", "layer_date", "[", "\"gte\"", "]", "=", "gte", "range_time", "=", "{", "\"layer_date\"", ":", "layer_date", "}", "range_time", "=", "{", "\"range\"", ":", "range_time", "}", "must_array", ".", "append", "(", "range_time", ")", "if", "gte", "!=", "'*'", "and", "lte", "!=", "'*'", ":", 
"layer_date", "[", "\"gte\"", "]", "=", "gte", "layer_date", "[", "\"lte\"", "]", "=", "lte", "range_time", "=", "{", "\"layer_date\"", ":", "layer_date", "}", "range_time", "=", "{", "\"range\"", ":", "range_time", "}", "must_array", ".", "append", "(", "range_time", ")", "# geo_shape searching", "if", "q_geo", ":", "q_geo", "=", "str", "(", "q_geo", ")", "q_geo", "=", "q_geo", "[", "1", ":", "-", "1", "]", "Ymin", ",", "Xmin", "=", "q_geo", ".", "split", "(", "\" TO \"", ")", "[", "0", "]", ".", "split", "(", "\",\"", ")", "Ymax", ",", "Xmax", "=", "q_geo", ".", "split", "(", "\" TO \"", ")", "[", "1", "]", ".", "split", "(", "\",\"", ")", "geoshape_query", "=", "{", "\"layer_geoshape\"", ":", "{", "\"shape\"", ":", "{", "\"type\"", ":", "\"envelope\"", ",", "\"coordinates\"", ":", "[", "[", "Xmin", ",", "Ymax", "]", ",", "[", "Xmax", ",", "Ymin", "]", "]", "}", ",", "\"relation\"", ":", "\"intersects\"", "}", "}", "filter_dic", "[", "\"geo_shape\"", "]", "=", "geoshape_query", "if", "q_user", ":", "# Using q_user", "user_searching", "=", "{", "\"match\"", ":", "{", "\"layer_originator\"", ":", "q_user", "}", "}", "must_array", ".", "append", "(", "user_searching", ")", "if", "ES_VERSION", ">=", "2", ":", "dic_query", "=", "{", "\"query\"", ":", "{", "\"bool\"", ":", "{", "\"must\"", ":", "must_array", ",", "\"filter\"", ":", "filter_dic", "}", "}", "}", "else", ":", "dic_query", "=", "{", "\"query\"", ":", "{", "\"filtered\"", ":", "{", "\"filter\"", ":", "{", "\"bool\"", ":", "{", "\"must\"", ":", "must_array", ",", "\"should\"", ":", "filter_dic", "}", "}", "}", "}", "}", "# Page", "if", "d_docs_limit", ":", "dic_query", "[", "\"size\"", "]", "=", "d_docs_limit", "if", "d_docs_page", ":", "dic_query", "[", "\"from\"", "]", "=", "d_docs_limit", "*", "d_docs_page", "-", "d_docs_limit", "if", "d_docs_sort", "==", "\"score\"", ":", "dic_query", "[", "\"sort\"", "]", "=", "{", "\"_score\"", ":", "{", "\"order\"", ":", "\"desc\"", "}", "}", "if", "d_docs_sort", "==", "\"time\"", ":", "dic_query", "[", "\"sort\"", "]", "=", "{", "\"layer_date\"", ":", "{", "\"order\"", ":", "\"desc\"", "}", "}", "if", "d_docs_sort", "==", "\"distance\"", ":", "if", "q_geo", ":", "# distance_x = float(((float(Xmin) - float(Xmax)) ** 2.0) ** (0.5))", "# distance_y = float(((float(Ymin) - float(Ymax)) ** 2.0) ** (0.5))", "msg", "=", "(", "\"Sorting by distance is different on ElasticSearch than Solr, because this\"", "\"feature on elastic is unavailable to geo_shape type.ElasticSearch docs said:\"", "\"Due to the complex input structure and index representation of shapes,\"", "\"it is not currently possible to sort shapes or retrieve their fields directly.\"", "\"The geo_shape value is only retrievable through the _source field.\"", "\" Link: https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html\"", ")", "return", "{", "\"error\"", ":", "{", "\"msg\"", ":", "msg", "}", "}", "else", ":", "msg", "=", "\"q_qeo MUST BE NO ZERO if you wanna sort by distance\"", "return", "{", "\"error\"", ":", "{", "\"msg\"", ":", "msg", "}", "}", "if", "a_text_limit", ":", "# getting most frequently occurring users.", "text_limit", "=", "{", "\"terms\"", ":", "{", "\"field\"", ":", "\"abstract\"", ",", "\"size\"", ":", "a_text_limit", "}", "}", "aggs_dic", "[", "'popular_text'", "]", "=", "text_limit", "if", "a_user_limit", ":", "# getting most frequently occurring users.", "users_limit", "=", "{", "\"terms\"", ":", "{", "\"field\"", ":", "\"layer_originator\"", ",", "\"size\"", ":", "a_user_limit", "}", 
"}", "aggs_dic", "[", "'popular_users'", "]", "=", "users_limit", "if", "a_time_limit", ":", "# TODO: Work in progress, a_time_limit is incomplete.", "# TODO: when times are * it does not work. also a a_time_gap is not required.", "if", "q_time", ":", "if", "not", "a_time_gap", ":", "# getting time limit histogram.", "time_limt", "=", "{", "\"date_range\"", ":", "{", "\"field\"", ":", "\"layer_date\"", ",", "\"format\"", ":", "\"yyyy-MM-dd'T'HH:mm:ssZ\"", ",", "\"ranges\"", ":", "[", "{", "\"from\"", ":", "gte", ",", "\"to\"", ":", "lte", "}", "]", "}", "}", "aggs_dic", "[", "'range'", "]", "=", "time_limt", "else", ":", "pass", "else", ":", "msg", "=", "\"If you want to use a_time_limit feature, q_time MUST BE initialized\"", "return", "{", "\"error\"", ":", "{", "\"msg\"", ":", "msg", "}", "}", "if", "a_time_gap", ":", "interval", "=", "gap_to_elastic", "(", "a_time_gap", ")", "time_gap", "=", "{", "\"date_histogram\"", ":", "{", "\"field\"", ":", "\"layer_date\"", ",", "\"format\"", ":", "\"yyyy-MM-dd'T'HH:mm:ssZ\"", ",", "\"interval\"", ":", "interval", "}", "}", "aggs_dic", "[", "'articles_over_time'", "]", "=", "time_gap", "# adding aggreations on body query", "if", "aggs_dic", ":", "dic_query", "[", "'aggs'", "]", "=", "aggs_dic", "try", ":", "res", "=", "requests", ".", "post", "(", "search_engine_endpoint", ",", "data", "=", "json", ".", "dumps", "(", "dic_query", ")", ")", "except", "Exception", "as", "e", ":", "return", "500", ",", "{", "\"error\"", ":", "{", "\"msg\"", ":", "str", "(", "e", ")", "}", "}", "es_response", "=", "res", ".", "json", "(", ")", "if", "original_response", ":", "return", "es_response", "data", "=", "{", "}", "if", "'error'", "in", "es_response", ":", "data", "[", "\"error\"", "]", "=", "es_response", "[", "\"error\"", "]", "return", "400", ",", "data", "data", "[", "\"request_url\"", "]", "=", "res", ".", "url", "data", "[", "\"request_body\"", "]", "=", "json", ".", "dumps", "(", "dic_query", ")", "data", "[", "\"a.matchDocs\"", "]", "=", "es_response", "[", "'hits'", "]", "[", "'total'", "]", "docs", "=", "[", "]", "# aggreations response: facets searching", "if", "'aggregations'", "in", "es_response", ":", "aggs", "=", "es_response", "[", "'aggregations'", "]", "# getting the most frequently occurring users.", "if", "'popular_users'", "in", "aggs", ":", "a_users_list_array", "=", "[", "]", "users_resp", "=", "aggs", "[", "\"popular_users\"", "]", "[", "\"buckets\"", "]", "for", "item", "in", "users_resp", ":", "temp", "=", "{", "}", "temp", "[", "'count'", "]", "=", "item", "[", "'doc_count'", "]", "temp", "[", "'value'", "]", "=", "item", "[", "'key'", "]", "a_users_list_array", ".", "append", "(", "temp", ")", "data", "[", "\"a.user\"", "]", "=", "a_users_list_array", "# getting most frequently ocurring words", "if", "'popular_text'", "in", "aggs", ":", "a_text_list_array", "=", "[", "]", "text_resp", "=", "es_response", "[", "\"aggregations\"", "]", "[", "\"popular_text\"", "]", "[", "\"buckets\"", "]", "for", "item", "in", "text_resp", ":", "temp", "=", "{", "}", "temp", "[", "'count'", "]", "=", "item", "[", "'doc_count'", "]", "temp", "[", "'value'", "]", "=", "item", "[", "'key'", "]", "a_text_list_array", ".", "append", "(", "temp", ")", "data", "[", "\"a.text\"", "]", "=", "a_text_list_array", "if", "'articles_over_time'", "in", "aggs", ":", "gap_count", "=", "[", "]", "a_gap", "=", "{", "}", "gap_resp", "=", "aggs", "[", "\"articles_over_time\"", "]", "[", "\"buckets\"", "]", "start", "=", "\"*\"", "end", "=", "\"*\"", "if", "len", "(", 
"gap_resp", ")", ">", "0", ":", "start", "=", "gap_resp", "[", "0", "]", "[", "'key_as_string'", "]", ".", "replace", "(", "'+0000'", ",", "'z'", ")", "end", "=", "gap_resp", "[", "-", "1", "]", "[", "'key_as_string'", "]", ".", "replace", "(", "'+0000'", ",", "'z'", ")", "a_gap", "[", "'start'", "]", "=", "start", "a_gap", "[", "'end'", "]", "=", "end", "a_gap", "[", "'gap'", "]", "=", "a_time_gap", "for", "item", "in", "gap_resp", ":", "temp", "=", "{", "}", "if", "item", "[", "'doc_count'", "]", "!=", "0", ":", "temp", "[", "'count'", "]", "=", "item", "[", "'doc_count'", "]", "temp", "[", "'value'", "]", "=", "item", "[", "'key_as_string'", "]", ".", "replace", "(", "'+0000'", ",", "'z'", ")", "gap_count", ".", "append", "(", "temp", ")", "a_gap", "[", "'counts'", "]", "=", "gap_count", "data", "[", "'a.time'", "]", "=", "a_gap", "if", "'range'", "in", "aggs", ":", "# Work in progress", "# Pay attention in the following code lines: Make it better!!!!", "time_count", "=", "[", "]", "time_resp", "=", "aggs", "[", "\"range\"", "]", "[", "\"buckets\"", "]", "a_time", "=", "{", "}", "a_time", "[", "'start'", "]", "=", "gte", "a_time", "[", "'end'", "]", "=", "lte", "a_time", "[", "'gap'", "]", "=", "None", "for", "item", "in", "time_resp", ":", "temp", "=", "{", "}", "if", "item", "[", "'doc_count'", "]", "!=", "0", ":", "temp", "[", "'count'", "]", "=", "item", "[", "'doc_count'", "]", "temp", "[", "'value'", "]", "=", "item", "[", "'key'", "]", ".", "replace", "(", "'+0000'", ",", "'z'", ")", "time_count", ".", "append", "(", "temp", ")", "a_time", "[", "'counts'", "]", "=", "time_count", "data", "[", "'a.time'", "]", "=", "a_time", "if", "not", "int", "(", "d_docs_limit", ")", "==", "0", ":", "for", "item", "in", "es_response", "[", "'hits'", "]", "[", "'hits'", "]", ":", "# data", "temp", "=", "item", "[", "'_source'", "]", "[", "'abstract'", "]", "temp", "=", "temp", ".", "replace", "(", "u'\\u201c'", ",", "\"\\\"\"", ")", "temp", "=", "temp", ".", "replace", "(", "u'\\u201d'", ",", "\"\\\"\"", ")", "temp", "=", "temp", ".", "replace", "(", "'\"'", ",", "\"\\\"\"", ")", "temp", "=", "temp", ".", "replace", "(", "\"'\"", ",", "\"\\'\"", ")", "temp", "=", "temp", ".", "replace", "(", "u'\\u2019'", ",", "\"\\'\"", ")", "item", "[", "'_source'", "]", "[", "'abstract'", "]", "=", "temp", "docs", ".", "append", "(", "item", "[", "'_source'", "]", ")", "data", "[", "\"d.docs\"", "]", "=", "docs", "return", "data" ]
https://www.elastic.co/guide/en/elasticsearch/reference/current/_the_search_api.html
:param serializer:
:param catalog:
:return:
[ "https", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "current", "/", "_the_search_api", ".", "html", ":", "param", "serializer", ":", ":", "return", ":" ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/search_api/views.py#L32-L361
train
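Not part of the dataset row: a minimal standalone sketch of the two query shapes the `elasticsearch` entry above emits, depending on the detected engine version (a top-level `bool` query with a `filter` clause for ES >= 2, the legacy `filtered` wrapper for ES 1.x). The text query and envelope coordinates are placeholder values.

import json

def build_query(q_text, geo_filter, es_version):
    # ES >= 2: top-level bool query with a filter clause
    if es_version >= 2:
        must = [{"query_string": {"query": q_text}}]
        return {"query": {"bool": {"must": must, "filter": geo_filter}}}
    # ES 1.x: legacy filtered query; note the extra "query" wrapper
    must = [{"query": {"query_string": {"query": q_text}}}]
    return {"query": {"filtered": {"filter": {"bool": {"must": must, "should": geo_filter}}}}}

# placeholder envelope covering the whole world, in the same
# upper-left / lower-right coordinate order the entry uses
geo = {"geo_shape": {"layer_geoshape": {
    "shape": {"type": "envelope", "coordinates": [[-180, 90], [180, -90]]},
    "relation": "intersects"}}}

for version in (1, 2):
    print(json.dumps(build_query("roads", geo, version), indent=2))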
cga-harvard/Hypermap-Registry
hypermap/search_api/views.py
solr
def solr(serializer):
    """
    Search on solr endpoint
    :param serializer:
    :return:
    """
    search_engine_endpoint = serializer.validated_data.get("search_engine_endpoint")
    q_time = serializer.validated_data.get("q_time")
    q_geo = serializer.validated_data.get("q_geo")
    q_text = serializer.validated_data.get("q_text")
    q_user = serializer.validated_data.get("q_user")
    d_docs_limit = serializer.validated_data.get("d_docs_limit")
    d_docs_page = serializer.validated_data.get("d_docs_page")
    d_docs_sort = serializer.validated_data.get("d_docs_sort")
    a_time_limit = serializer.validated_data.get("a_time_limit")
    a_time_gap = serializer.validated_data.get("a_time_gap")
    a_time_filter = serializer.validated_data.get("a_time_filter")
    a_hm_limit = serializer.validated_data.get("a_hm_limit")
    a_hm_gridlevel = serializer.validated_data.get("a_hm_gridlevel")
    a_hm_filter = serializer.validated_data.get("a_hm_filter")
    a_text_limit = serializer.validated_data.get("a_text_limit")
    a_user_limit = serializer.validated_data.get("a_user_limit")
    original_response = serializer.validated_data.get("original_response")

    # query params to be sent via restful solr
    params = {
        "q": "*:*",
        "indent": "on",
        "wt": "json",
        "rows": d_docs_limit,
        "facet": "off",
        "facet.field": [],
        "debug": "timing"
    }

    if q_text:
        params["q"] = q_text

    if d_docs_limit >= 0:
        d_docs_page -= 1
        d_docs_page = d_docs_limit * d_docs_page
        params["start"] = d_docs_page

    # query params for filters
    filters = []
    if q_time:
        # TODO: when user sends incomplete dates like 2000, it's completed: 2000-(TODAY-MONTH)-(TODAY-DAY)T00:00:00Z
        # TODO: "Invalid Date in Date Math String:'[* TO 2000-12-05T00:00:00Z]'"
        # Kotlin like: "{!field f=layer_date tag=layer_date}[* TO 2000-12-05T00:00:00Z]"
        # then do it simple:
        filters.append("{0}:{1}".format(TIME_FILTER_FIELD, q_time))
    if q_geo:
        filters.append("{0}:{1}".format(GEO_FILTER_FIELD, q_geo))
    if q_user:
        filters.append("{{!field f={0} tag={0}}}{1}".format(USER_FIELD, q_user))
    if filters:
        params["fq"] = filters

    # query params for ordering
    if d_docs_sort == 'score' and q_text:
        params["sort"] = 'score desc'
    elif d_docs_sort == 'time':
        params["sort"] = '{} desc'.format(TIME_SORT_FIELD)
    elif d_docs_sort == 'distance':
        rectangle = parse_geo_box(q_geo)
        params["sort"] = 'geodist() asc'
        params["sfield"] = GEO_SORT_FIELD
        params["pt"] = '{0},{1}'.format(rectangle.centroid.x, rectangle.centroid.y)

    # query params for facets
    if a_time_limit > 0:
        params["facet"] = 'on'
        time_filter = a_time_filter or q_time or None
        # translate * to actual min/max dates.
        time_filter = asterisk_to_min_max(TIME_FILTER_FIELD, time_filter, search_engine_endpoint)
        # create the range faceting params.
        facet_parms = request_time_facet(TIME_FILTER_FIELD, time_filter, a_time_gap, a_time_limit)
        params.update(facet_parms)

    if a_hm_limit > 0:
        params["facet"] = 'on'
        hm_facet_params = request_heatmap_facet(GEO_HEATMAP_FIELD, a_hm_filter, a_hm_gridlevel, a_hm_limit)
        params.update(hm_facet_params)

    if a_text_limit > 0:
        params["facet"] = 'on'
        params["facet.field"].append(TEXT_FIELD)
        params["f.{}.facet.limit".format(TEXT_FIELD)] = a_text_limit

    if a_user_limit > 0:
        params["facet"] = 'on'
        params["facet.field"].append("{{! ex={0}}}{0}".format(USER_FIELD))
        params["f.{}.facet.limit".format(USER_FIELD)] = a_user_limit

    try:
        res = requests.get(
            search_engine_endpoint,
            params=params
        )
    except Exception as e:
        return 500, {"error": {"msg": str(e)}}

    print '>', res.url

    solr_response = res.json()
    solr_response["solr_request"] = res.url

    if original_response > 0:
        return solr_response

    # create the response dict following the swagger model:
    data = {}

    if 'error' in solr_response:
        data["error"] = solr_response["error"]
        return 400, data

    response = solr_response["response"]
    data["a.matchDocs"] = response.get("numFound")

    if response.get("docs"):
        data["d.docs"] = response.get("docs")

    if a_time_limit > 0:
        date_facet = solr_response["facet_counts"]["facet_ranges"][TIME_FILTER_FIELD]
        counts = []
        value_count = iter(date_facet.get("counts"))
        for value, count in zip(value_count, value_count):
            counts.append({
                "value": value,
                "count": count
            })
        a_time = {
            "start": date_facet.get("start"),
            "end": date_facet.get("end"),
            "gap": date_facet.get("gap"),
            "counts": counts
        }
        data["a.time"] = a_time

    if a_hm_limit > 0:
        hm_facet_raw = solr_response["facet_counts"]["facet_heatmaps"][GEO_HEATMAP_FIELD]
        hm_facet = {
            'gridLevel': hm_facet_raw[1],
            'columns': hm_facet_raw[3],
            'rows': hm_facet_raw[5],
            'minX': hm_facet_raw[7],
            'maxX': hm_facet_raw[9],
            'minY': hm_facet_raw[11],
            'maxY': hm_facet_raw[13],
            'counts_ints2D': hm_facet_raw[15],
            'projection': 'EPSG:4326'
        }
        data["a.hm"] = hm_facet

    if a_user_limit > 0:
        user_facet = solr_response["facet_counts"]["facet_fields"][USER_FIELD]
        counts = []
        value_count = iter(user_facet)
        for value, count in zip(value_count, value_count):
            counts.append({
                "value": value,
                "count": count
            })
        data["a.user"] = counts

    if a_text_limit > 0:
        text_facet = solr_response["facet_counts"]["facet_fields"][TEXT_FIELD]
        counts = []
        value_count = iter(text_facet)
        for value, count in zip(value_count, value_count):
            counts.append({
                "value": value,
                "count": count
            })
        data["a.text"] = counts

    subs = []
    for label, values in solr_response["debug"]["timing"].iteritems():
        if type(values) is not dict:
            continue
        subs_data = {"label": label, "subs": []}
        for label, values in values.iteritems():
            if type(values) is not dict:
                subs_data["millis"] = values
                continue
            subs_data["subs"].append({
                "label": label,
                "millis": values.get("time")
            })
        subs.append(subs_data)

    timing = {
        "label": "requests.get.elapsed",
        "millis": res.elapsed,
        "subs": [{
            "label": "QTime",
            "millis": solr_response["responseHeader"].get("QTime"),
            "subs": subs
        }]
    }
    data["timing"] = timing
    data["request_url"] = res.url

    return data
python
def solr(serializer):
    """
    Search on solr endpoint
    :param serializer:
    :return:
    """
    search_engine_endpoint = serializer.validated_data.get("search_engine_endpoint")
    q_time = serializer.validated_data.get("q_time")
    q_geo = serializer.validated_data.get("q_geo")
    q_text = serializer.validated_data.get("q_text")
    q_user = serializer.validated_data.get("q_user")
    d_docs_limit = serializer.validated_data.get("d_docs_limit")
    d_docs_page = serializer.validated_data.get("d_docs_page")
    d_docs_sort = serializer.validated_data.get("d_docs_sort")
    a_time_limit = serializer.validated_data.get("a_time_limit")
    a_time_gap = serializer.validated_data.get("a_time_gap")
    a_time_filter = serializer.validated_data.get("a_time_filter")
    a_hm_limit = serializer.validated_data.get("a_hm_limit")
    a_hm_gridlevel = serializer.validated_data.get("a_hm_gridlevel")
    a_hm_filter = serializer.validated_data.get("a_hm_filter")
    a_text_limit = serializer.validated_data.get("a_text_limit")
    a_user_limit = serializer.validated_data.get("a_user_limit")
    original_response = serializer.validated_data.get("original_response")

    # query params to be sent via restful solr
    params = {
        "q": "*:*",
        "indent": "on",
        "wt": "json",
        "rows": d_docs_limit,
        "facet": "off",
        "facet.field": [],
        "debug": "timing"
    }

    if q_text:
        params["q"] = q_text

    if d_docs_limit >= 0:
        d_docs_page -= 1
        d_docs_page = d_docs_limit * d_docs_page
        params["start"] = d_docs_page

    # query params for filters
    filters = []
    if q_time:
        # TODO: when user sends incomplete dates like 2000, it's completed: 2000-(TODAY-MONTH)-(TODAY-DAY)T00:00:00Z
        # TODO: "Invalid Date in Date Math String:'[* TO 2000-12-05T00:00:00Z]'"
        # Kotlin like: "{!field f=layer_date tag=layer_date}[* TO 2000-12-05T00:00:00Z]"
        # then do it simple:
        filters.append("{0}:{1}".format(TIME_FILTER_FIELD, q_time))
    if q_geo:
        filters.append("{0}:{1}".format(GEO_FILTER_FIELD, q_geo))
    if q_user:
        filters.append("{{!field f={0} tag={0}}}{1}".format(USER_FIELD, q_user))
    if filters:
        params["fq"] = filters

    # query params for ordering
    if d_docs_sort == 'score' and q_text:
        params["sort"] = 'score desc'
    elif d_docs_sort == 'time':
        params["sort"] = '{} desc'.format(TIME_SORT_FIELD)
    elif d_docs_sort == 'distance':
        rectangle = parse_geo_box(q_geo)
        params["sort"] = 'geodist() asc'
        params["sfield"] = GEO_SORT_FIELD
        params["pt"] = '{0},{1}'.format(rectangle.centroid.x, rectangle.centroid.y)

    # query params for facets
    if a_time_limit > 0:
        params["facet"] = 'on'
        time_filter = a_time_filter or q_time or None
        # translate * to actual min/max dates.
        time_filter = asterisk_to_min_max(TIME_FILTER_FIELD, time_filter, search_engine_endpoint)
        # create the range faceting params.
        facet_parms = request_time_facet(TIME_FILTER_FIELD, time_filter, a_time_gap, a_time_limit)
        params.update(facet_parms)

    if a_hm_limit > 0:
        params["facet"] = 'on'
        hm_facet_params = request_heatmap_facet(GEO_HEATMAP_FIELD, a_hm_filter, a_hm_gridlevel, a_hm_limit)
        params.update(hm_facet_params)

    if a_text_limit > 0:
        params["facet"] = 'on'
        params["facet.field"].append(TEXT_FIELD)
        params["f.{}.facet.limit".format(TEXT_FIELD)] = a_text_limit

    if a_user_limit > 0:
        params["facet"] = 'on'
        params["facet.field"].append("{{! ex={0}}}{0}".format(USER_FIELD))
        params["f.{}.facet.limit".format(USER_FIELD)] = a_user_limit

    try:
        res = requests.get(
            search_engine_endpoint,
            params=params
        )
    except Exception as e:
        return 500, {"error": {"msg": str(e)}}

    print '>', res.url

    solr_response = res.json()
    solr_response["solr_request"] = res.url

    if original_response > 0:
        return solr_response

    # create the response dict following the swagger model:
    data = {}

    if 'error' in solr_response:
        data["error"] = solr_response["error"]
        return 400, data

    response = solr_response["response"]
    data["a.matchDocs"] = response.get("numFound")

    if response.get("docs"):
        data["d.docs"] = response.get("docs")

    if a_time_limit > 0:
        date_facet = solr_response["facet_counts"]["facet_ranges"][TIME_FILTER_FIELD]
        counts = []
        value_count = iter(date_facet.get("counts"))
        for value, count in zip(value_count, value_count):
            counts.append({
                "value": value,
                "count": count
            })
        a_time = {
            "start": date_facet.get("start"),
            "end": date_facet.get("end"),
            "gap": date_facet.get("gap"),
            "counts": counts
        }
        data["a.time"] = a_time

    if a_hm_limit > 0:
        hm_facet_raw = solr_response["facet_counts"]["facet_heatmaps"][GEO_HEATMAP_FIELD]
        hm_facet = {
            'gridLevel': hm_facet_raw[1],
            'columns': hm_facet_raw[3],
            'rows': hm_facet_raw[5],
            'minX': hm_facet_raw[7],
            'maxX': hm_facet_raw[9],
            'minY': hm_facet_raw[11],
            'maxY': hm_facet_raw[13],
            'counts_ints2D': hm_facet_raw[15],
            'projection': 'EPSG:4326'
        }
        data["a.hm"] = hm_facet

    if a_user_limit > 0:
        user_facet = solr_response["facet_counts"]["facet_fields"][USER_FIELD]
        counts = []
        value_count = iter(user_facet)
        for value, count in zip(value_count, value_count):
            counts.append({
                "value": value,
                "count": count
            })
        data["a.user"] = counts

    if a_text_limit > 0:
        text_facet = solr_response["facet_counts"]["facet_fields"][TEXT_FIELD]
        counts = []
        value_count = iter(text_facet)
        for value, count in zip(value_count, value_count):
            counts.append({
                "value": value,
                "count": count
            })
        data["a.text"] = counts

    subs = []
    for label, values in solr_response["debug"]["timing"].iteritems():
        if type(values) is not dict:
            continue
        subs_data = {"label": label, "subs": []}
        for label, values in values.iteritems():
            if type(values) is not dict:
                subs_data["millis"] = values
                continue
            subs_data["subs"].append({
                "label": label,
                "millis": values.get("time")
            })
        subs.append(subs_data)

    timing = {
        "label": "requests.get.elapsed",
        "millis": res.elapsed,
        "subs": [{
            "label": "QTime",
            "millis": solr_response["responseHeader"].get("QTime"),
            "subs": subs
        }]
    }
    data["timing"] = timing
    data["request_url"] = res.url

    return data
[ "def", "solr", "(", "serializer", ")", ":", "search_engine_endpoint", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"search_engine_endpoint\"", ")", "q_time", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"q_time\"", ")", "q_geo", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"q_geo\"", ")", "q_text", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"q_text\"", ")", "q_user", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"q_user\"", ")", "d_docs_limit", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"d_docs_limit\"", ")", "d_docs_page", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"d_docs_page\"", ")", "d_docs_sort", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"d_docs_sort\"", ")", "a_time_limit", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_time_limit\"", ")", "a_time_gap", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_time_gap\"", ")", "a_time_filter", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_time_filter\"", ")", "a_hm_limit", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_hm_limit\"", ")", "a_hm_gridlevel", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_hm_gridlevel\"", ")", "a_hm_filter", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_hm_filter\"", ")", "a_text_limit", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_text_limit\"", ")", "a_user_limit", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_user_limit\"", ")", "original_response", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"original_response\"", ")", "# query params to be sent via restful solr", "params", "=", "{", "\"q\"", ":", "\"*:*\"", ",", "\"indent\"", ":", "\"on\"", ",", "\"wt\"", ":", "\"json\"", ",", "\"rows\"", ":", "d_docs_limit", ",", "\"facet\"", ":", "\"off\"", ",", "\"facet.field\"", ":", "[", "]", ",", "\"debug\"", ":", "\"timing\"", "}", "if", "q_text", ":", "params", "[", "\"q\"", "]", "=", "q_text", "if", "d_docs_limit", ">=", "0", ":", "d_docs_page", "-=", "1", "d_docs_page", "=", "d_docs_limit", "*", "d_docs_page", "params", "[", "\"start\"", "]", "=", "d_docs_page", "# query params for filters", "filters", "=", "[", "]", "if", "q_time", ":", "# TODO: when user sends incomplete dates like 2000, its completed: 2000-(TODAY-MONTH)-(TODAY-DAY)T00:00:00Z", "# TODO: \"Invalid Date in Date Math String:'[* TO 2000-12-05T00:00:00Z]'\"", "# Kotlin like: \"{!field f=layer_date tag=layer_date}[* TO 2000-12-05T00:00:00Z]\"", "# then do it simple:", "filters", ".", "append", "(", "\"{0}:{1}\"", ".", "format", "(", "TIME_FILTER_FIELD", ",", "q_time", ")", ")", "if", "q_geo", ":", "filters", ".", "append", "(", "\"{0}:{1}\"", ".", "format", "(", "GEO_FILTER_FIELD", ",", "q_geo", ")", ")", "if", "q_user", ":", "filters", ".", "append", "(", "\"{{!field f={0} tag={0}}}{1}\"", ".", "format", "(", "USER_FIELD", ",", "q_user", ")", ")", "if", "filters", ":", "params", "[", "\"fq\"", "]", "=", "filters", "# query params for ordering", "if", "d_docs_sort", "==", "'score'", "and", "q_text", ":", "params", "[", "\"sort\"", "]", "=", "'score desc'", "elif", "d_docs_sort", "==", "'time'", ":", "params", "[", "\"sort\"", "]", "=", "'{} desc'", ".", "format", "(", "TIME_SORT_FIELD", ")", "elif", "d_docs_sort", "==", "'distance'", ":", "rectangle", "=", "parse_geo_box", "(", "q_geo", ")", "params", "[", "\"sort\"", "]", "=", "'geodist() asc'", "params", "[", 
"\"sfield\"", "]", "=", "GEO_SORT_FIELD", "params", "[", "\"pt\"", "]", "=", "'{0},{1}'", ".", "format", "(", "rectangle", ".", "centroid", ".", "x", ",", "rectangle", ".", "centroid", ".", "y", ")", "# query params for facets", "if", "a_time_limit", ">", "0", ":", "params", "[", "\"facet\"", "]", "=", "'on'", "time_filter", "=", "a_time_filter", "or", "q_time", "or", "None", "# traduce * to actual min/max dates.", "time_filter", "=", "asterisk_to_min_max", "(", "TIME_FILTER_FIELD", ",", "time_filter", ",", "search_engine_endpoint", ")", "# create the range faceting params.", "facet_parms", "=", "request_time_facet", "(", "TIME_FILTER_FIELD", ",", "time_filter", ",", "a_time_gap", ",", "a_time_limit", ")", "params", ".", "update", "(", "facet_parms", ")", "if", "a_hm_limit", ">", "0", ":", "params", "[", "\"facet\"", "]", "=", "'on'", "hm_facet_params", "=", "request_heatmap_facet", "(", "GEO_HEATMAP_FIELD", ",", "a_hm_filter", ",", "a_hm_gridlevel", ",", "a_hm_limit", ")", "params", ".", "update", "(", "hm_facet_params", ")", "if", "a_text_limit", ">", "0", ":", "params", "[", "\"facet\"", "]", "=", "'on'", "params", "[", "\"facet.field\"", "]", ".", "append", "(", "TEXT_FIELD", ")", "params", "[", "\"f.{}.facet.limit\"", ".", "format", "(", "TEXT_FIELD", ")", "]", "=", "a_text_limit", "if", "a_user_limit", ">", "0", ":", "params", "[", "\"facet\"", "]", "=", "'on'", "params", "[", "\"facet.field\"", "]", ".", "append", "(", "\"{{! ex={0}}}{0}\"", ".", "format", "(", "USER_FIELD", ")", ")", "params", "[", "\"f.{}.facet.limit\"", ".", "format", "(", "USER_FIELD", ")", "]", "=", "a_user_limit", "try", ":", "res", "=", "requests", ".", "get", "(", "search_engine_endpoint", ",", "params", "=", "params", ")", "except", "Exception", "as", "e", ":", "return", "500", ",", "{", "\"error\"", ":", "{", "\"msg\"", ":", "str", "(", "e", ")", "}", "}", "print", "'>'", ",", "res", ".", "url", "solr_response", "=", "res", ".", "json", "(", ")", "solr_response", "[", "\"solr_request\"", "]", "=", "res", ".", "url", "if", "original_response", ">", "0", ":", "return", "solr_response", "# create the response dict following the swagger model:", "data", "=", "{", "}", "if", "'error'", "in", "solr_response", ":", "data", "[", "\"error\"", "]", "=", "solr_response", "[", "\"error\"", "]", "return", "400", ",", "data", "response", "=", "solr_response", "[", "\"response\"", "]", "data", "[", "\"a.matchDocs\"", "]", "=", "response", ".", "get", "(", "\"numFound\"", ")", "if", "response", ".", "get", "(", "\"docs\"", ")", ":", "data", "[", "\"d.docs\"", "]", "=", "response", ".", "get", "(", "\"docs\"", ")", "if", "a_time_limit", ">", "0", ":", "date_facet", "=", "solr_response", "[", "\"facet_counts\"", "]", "[", "\"facet_ranges\"", "]", "[", "TIME_FILTER_FIELD", "]", "counts", "=", "[", "]", "value_count", "=", "iter", "(", "date_facet", ".", "get", "(", "\"counts\"", ")", ")", "for", "value", ",", "count", "in", "zip", "(", "value_count", ",", "value_count", ")", ":", "counts", ".", "append", "(", "{", "\"value\"", ":", "value", ",", "\"count\"", ":", "count", "}", ")", "a_time", "=", "{", "\"start\"", ":", "date_facet", ".", "get", "(", "\"start\"", ")", ",", "\"end\"", ":", "date_facet", ".", "get", "(", "\"end\"", ")", ",", "\"gap\"", ":", "date_facet", ".", "get", "(", "\"gap\"", ")", ",", "\"counts\"", ":", "counts", "}", "data", "[", "\"a.time\"", "]", "=", "a_time", "if", "a_hm_limit", ">", "0", ":", "hm_facet_raw", "=", "solr_response", "[", "\"facet_counts\"", "]", "[", "\"facet_heatmaps\"", "]", "[", 
"GEO_HEATMAP_FIELD", "]", "hm_facet", "=", "{", "'gridLevel'", ":", "hm_facet_raw", "[", "1", "]", ",", "'columns'", ":", "hm_facet_raw", "[", "3", "]", ",", "'rows'", ":", "hm_facet_raw", "[", "5", "]", ",", "'minX'", ":", "hm_facet_raw", "[", "7", "]", ",", "'maxX'", ":", "hm_facet_raw", "[", "9", "]", ",", "'minY'", ":", "hm_facet_raw", "[", "11", "]", ",", "'maxY'", ":", "hm_facet_raw", "[", "13", "]", ",", "'counts_ints2D'", ":", "hm_facet_raw", "[", "15", "]", ",", "'projection'", ":", "'EPSG:4326'", "}", "data", "[", "\"a.hm\"", "]", "=", "hm_facet", "if", "a_user_limit", ">", "0", ":", "user_facet", "=", "solr_response", "[", "\"facet_counts\"", "]", "[", "\"facet_fields\"", "]", "[", "USER_FIELD", "]", "counts", "=", "[", "]", "value_count", "=", "iter", "(", "user_facet", ")", "for", "value", ",", "count", "in", "zip", "(", "value_count", ",", "value_count", ")", ":", "counts", ".", "append", "(", "{", "\"value\"", ":", "value", ",", "\"count\"", ":", "count", "}", ")", "data", "[", "\"a.user\"", "]", "=", "counts", "if", "a_text_limit", ">", "0", ":", "text_facet", "=", "solr_response", "[", "\"facet_counts\"", "]", "[", "\"facet_fields\"", "]", "[", "TEXT_FIELD", "]", "counts", "=", "[", "]", "value_count", "=", "iter", "(", "text_facet", ")", "for", "value", ",", "count", "in", "zip", "(", "value_count", ",", "value_count", ")", ":", "counts", ".", "append", "(", "{", "\"value\"", ":", "value", ",", "\"count\"", ":", "count", "}", ")", "data", "[", "\"a.text\"", "]", "=", "counts", "subs", "=", "[", "]", "for", "label", ",", "values", "in", "solr_response", "[", "\"debug\"", "]", "[", "\"timing\"", "]", ".", "iteritems", "(", ")", ":", "if", "type", "(", "values", ")", "is", "not", "dict", ":", "continue", "subs_data", "=", "{", "\"label\"", ":", "label", ",", "\"subs\"", ":", "[", "]", "}", "for", "label", ",", "values", "in", "values", ".", "iteritems", "(", ")", ":", "if", "type", "(", "values", ")", "is", "not", "dict", ":", "subs_data", "[", "\"millis\"", "]", "=", "values", "continue", "subs_data", "[", "\"subs\"", "]", ".", "append", "(", "{", "\"label\"", ":", "label", ",", "\"millis\"", ":", "values", ".", "get", "(", "\"time\"", ")", "}", ")", "subs", ".", "append", "(", "subs_data", ")", "timing", "=", "{", "\"label\"", ":", "\"requests.get.elapsed\"", ",", "\"millis\"", ":", "res", ".", "elapsed", ",", "\"subs\"", ":", "[", "{", "\"label\"", ":", "\"QTime\"", ",", "\"millis\"", ":", "solr_response", "[", "\"responseHeader\"", "]", ".", "get", "(", "\"QTime\"", ")", ",", "\"subs\"", ":", "subs", "}", "]", "}", "data", "[", "\"timing\"", "]", "=", "timing", "data", "[", "\"request_url\"", "]", "=", "res", ".", "url", "return", "data" ]
Search on solr endpoint
:param serializer:
:return:
[ "Search", "on", "solr", "endpoint", ":", "param", "serializer", ":", ":", "return", ":" ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/search_api/views.py#L364-L573
train
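The facet handling in the `solr` entry above relies on a pairing idiom worth spelling out: Solr returns field facets as a flat `[value, count, value, count, ...]` list, and `zip()` over the *same* iterator consumes two items per step, yielding (value, count) pairs. A self-contained sketch with made-up facet data:

# Solr returns facet counts as a flat list: [value1, count1, value2, count2, ...]
flat_facet = ["alice", 12, "bob", 7, "carol", 3]

# zip() pulls from the *same* iterator twice per loop, yielding (value, count) pairs
value_count = iter(flat_facet)
pairs = [{"value": v, "count": c} for v, c in zip(value_count, value_count)]
print(pairs)
# [{'value': 'alice', 'count': 12}, {'value': 'bob', 'count': 7}, {'value': 'carol', 'count': 3}]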
cga-harvard/Hypermap-Registry
hypermap/search_api/views.py
parse_get_params
def parse_get_params(request):
    """
    parse all url get params that contain dots in a representation of
    serializer field names, for example: d.docs.limit to d_docs_limit.
    this makes an actual API client compatible with django-rest-framework
    serializers.
    :param request:
    :return: QueryDict with parsed get params.
    """
    get = request.GET.copy()
    new_get = request.GET.copy()
    for key in get.iterkeys():
        if key.count(".") > 0:
            new_key = key.replace(".", "_")
            new_get[new_key] = get.get(key)
            del new_get[key]
    return new_get
python
def parse_get_params(request):
    """
    parse all url get params that contain dots in a representation of
    serializer field names, for example: d.docs.limit to d_docs_limit.
    this makes an actual API client compatible with django-rest-framework
    serializers.
    :param request:
    :return: QueryDict with parsed get params.
    """
    get = request.GET.copy()
    new_get = request.GET.copy()
    for key in get.iterkeys():
        if key.count(".") > 0:
            new_key = key.replace(".", "_")
            new_get[new_key] = get.get(key)
            del new_get[key]
    return new_get
[ "def", "parse_get_params", "(", "request", ")", ":", "get", "=", "request", ".", "GET", ".", "copy", "(", ")", "new_get", "=", "request", ".", "GET", ".", "copy", "(", ")", "for", "key", "in", "get", ".", "iterkeys", "(", ")", ":", "if", "key", ".", "count", "(", "\".\"", ")", ">", "0", ":", "new_key", "=", "key", ".", "replace", "(", "\".\"", ",", "\"_\"", ")", "new_get", "[", "new_key", "]", "=", "get", ".", "get", "(", "key", ")", "del", "new_get", "[", "key", "]", "return", "new_get" ]
parse all url get params that contain dots in a representation of
serializer field names, for example: d.docs.limit to d_docs_limit.
this makes an actual API client compatible with django-rest-framework
serializers.
:param request:
:return: QueryDict with parsed get params.
[ "parse", "all", "url", "get", "params", "that", "contains", "dots", "in", "a", "representation", "of", "serializer", "field", "names", "for", "example", ":", "d", ".", "docs", ".", "limit", "to", "d_docs_limit", ".", "that", "makes", "compatible", "an", "actual", "API", "client", "with", "django", "-", "rest", "-", "framework", "serializers", ".", ":", "param", "request", ":", ":", "return", ":", "QueryDict", "with", "parsed", "get", "params", "." ]
899a5385b15af7fba190ab4fae1d41e47d155a1b
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/search_api/views.py#L576-L594
train
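A quick sketch of the transform `parse_get_params` performs, using a plain dict in place of Django's `QueryDict` (the real function operates on `request.GET` and uses the Python 2 `iterkeys`); the parameter names here are illustrative:

params = {"q.text": "roads", "d.docs.limit": "10", "original_response": "0"}

parsed = {}
for key, value in params.items():
    # replace dots so the key matches a serializer field name
    parsed[key.replace(".", "_")] = value

print(parsed)
# {'q_text': 'roads', 'd_docs_limit': '10', 'original_response': '0'}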
konikvranik/pyCEC
pycec/tcp.py
main
def main():
    """For testing purposes"""
    tcp_adapter = TcpAdapter("192.168.1.3", name="HASS", activate_source=False)
    hdmi_network = HDMINetwork(tcp_adapter)
    hdmi_network.start()
    while True:
        for d in hdmi_network.devices:
            _LOGGER.info("Device: %s", d)

        time.sleep(7)
python
def main():
    """For testing purposes"""
    tcp_adapter = TcpAdapter("192.168.1.3", name="HASS", activate_source=False)
    hdmi_network = HDMINetwork(tcp_adapter)
    hdmi_network.start()
    while True:
        for d in hdmi_network.devices:
            _LOGGER.info("Device: %s", d)

        time.sleep(7)
[ "def", "main", "(", ")", ":", "tcp_adapter", "=", "TcpAdapter", "(", "\"192.168.1.3\"", ",", "name", "=", "\"HASS\"", ",", "activate_source", "=", "False", ")", "hdmi_network", "=", "HDMINetwork", "(", "tcp_adapter", ")", "hdmi_network", ".", "start", "(", ")", "while", "True", ":", "for", "d", "in", "hdmi_network", ".", "devices", ":", "_LOGGER", ".", "info", "(", "\"Device: %s\"", ",", "d", ")", "time", ".", "sleep", "(", "7", ")" ]
For testing purposes
[ "For", "testing", "purpose" ]
acf42a842d8a912ed68d63d8d6b653e6c405b29b
https://github.com/konikvranik/pyCEC/blob/acf42a842d8a912ed68d63d8d6b653e6c405b29b/pycec/tcp.py#L143-L152
train
diffeo/py-nilsimsa
nilsimsa/deprecated/_deprecated_nilsimsa.py
compare_hexdigests
def compare_hexdigests(digest1, digest2):
    """Compute difference in bits between digest1 and digest2
    returns -127 to 128; 128 is the same, -127 is different"""
    # convert to 32-tuple of unsigned one-byte ints
    digest1 = tuple([int(digest1[i:i+2], 16) for i in range(0, 63, 2)])
    digest2 = tuple([int(digest2[i:i+2], 16) for i in range(0, 63, 2)])
    bits = 0
    for i in range(32):
        bits += POPC[255 & digest1[i] ^ digest2[i]]
    return 128 - bits
python
def compare_hexdigests(digest1, digest2):
    """Compute difference in bits between digest1 and digest2
    returns -127 to 128; 128 is the same, -127 is different"""
    # convert to 32-tuple of unsigned one-byte ints
    digest1 = tuple([int(digest1[i:i+2], 16) for i in range(0, 63, 2)])
    digest2 = tuple([int(digest2[i:i+2], 16) for i in range(0, 63, 2)])
    bits = 0
    for i in range(32):
        bits += POPC[255 & digest1[i] ^ digest2[i]]
    return 128 - bits
[ "def", "compare_hexdigests", "(", "digest1", ",", "digest2", ")", ":", "# convert to 32-tuple of unsighed two-byte INTs", "digest1", "=", "tuple", "(", "[", "int", "(", "digest1", "[", "i", ":", "i", "+", "2", "]", ",", "16", ")", "for", "i", "in", "range", "(", "0", ",", "63", ",", "2", ")", "]", ")", "digest2", "=", "tuple", "(", "[", "int", "(", "digest2", "[", "i", ":", "i", "+", "2", "]", ",", "16", ")", "for", "i", "in", "range", "(", "0", ",", "63", ",", "2", ")", "]", ")", "bits", "=", "0", "for", "i", "in", "range", "(", "32", ")", ":", "bits", "+=", "POPC", "[", "255", "&", "digest1", "[", "i", "]", "^", "digest2", "[", "i", "]", "]", "return", "128", "-", "bits" ]
Compute difference in bits between digest1 and digest2
returns -127 to 128; 128 is the same, -127 is different
[ "Compute", "difference", "in", "bits", "between", "digest1", "and", "digest2", "returns", "-", "127", "to", "128", ";", "128", "is", "the", "same", "-", "127", "is", "different" ]
c652f4bbfd836f7aebf292dcea676cc925ec315a
https://github.com/diffeo/py-nilsimsa/blob/c652f4bbfd836f7aebf292dcea676cc925ec315a/nilsimsa/deprecated/_deprecated_nilsimsa.py#L196-L205
train
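A worked example of the bit-difference score. `POPC` is the module's precomputed population-count (set-bit) table; the sketch below rebuilds an equivalent table locally and restates the function so it runs standalone:

# equivalent of the module's POPC lookup table, assuming it maps a byte
# to its number of set bits
POPC = [bin(i).count("1") for i in range(256)]

def compare_hexdigests(digest1, digest2):
    digest1 = tuple(int(digest1[i:i+2], 16) for i in range(0, 63, 2))
    digest2 = tuple(int(digest2[i:i+2], 16) for i in range(0, 63, 2))
    bits = sum(POPC[255 & digest1[i] ^ digest2[i]] for i in range(32))
    return 128 - bits

d = "ab" * 32                          # a dummy 64-hex-char digest
print(compare_hexdigests(d, d))        # 128 -> identical
flipped = "aa" + d[2:]                 # 0xab ^ 0xaa differs in exactly 1 bit
print(compare_hexdigests(d, flipped))  # 127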
diffeo/py-nilsimsa
nilsimsa/deprecated/_deprecated_nilsimsa.py
Nilsimsa.tran3
def tran3(self, a, b, c, n):
    """Get accumulator for a transition n between chars a, b, c."""
    return (((TRAN[(a+n)&255]^TRAN[b]*(n+n+1))+TRAN[(c)^TRAN[n]])&255)
python
def tran3(self, a, b, c, n):
    """Get accumulator for a transition n between chars a, b, c."""
    return (((TRAN[(a+n)&255]^TRAN[b]*(n+n+1))+TRAN[(c)^TRAN[n]])&255)
[ "def", "tran3", "(", "self", ",", "a", ",", "b", ",", "c", ",", "n", ")", ":", "return", "(", "(", "(", "TRAN", "[", "(", "a", "+", "n", ")", "&", "255", "]", "^", "TRAN", "[", "b", "]", "*", "(", "n", "+", "n", "+", "1", ")", ")", "+", "TRAN", "[", "(", "c", ")", "^", "TRAN", "[", "n", "]", "]", ")", "&", "255", ")" ]
Get accumulator for a transition n between chars a, b, c.
[ "Get", "accumulator", "for", "a", "transition", "n", "between", "chars", "a", "b", "c", "." ]
c652f4bbfd836f7aebf292dcea676cc925ec315a
https://github.com/diffeo/py-nilsimsa/blob/c652f4bbfd836f7aebf292dcea676cc925ec315a/nilsimsa/deprecated/_deprecated_nilsimsa.py#L117-L119
train
diffeo/py-nilsimsa
nilsimsa/deprecated/_deprecated_nilsimsa.py
Nilsimsa.update
def update(self, data):
    """Add data to running digest, increasing the accumulators for 0-8
    triplets formed by this char and the previous 0-3 chars."""
    for character in data:
        if PY3:
            ch = character
        else:
            ch = ord(character)
        self.count += 1

        # incr accumulators for triplets
        if self.lastch[1] > -1:
            self.acc[self.tran3(ch, self.lastch[0], self.lastch[1], 0)] += 1
        if self.lastch[2] > -1:
            self.acc[self.tran3(ch, self.lastch[0], self.lastch[2], 1)] += 1
            self.acc[self.tran3(ch, self.lastch[1], self.lastch[2], 2)] += 1
        if self.lastch[3] > -1:
            self.acc[self.tran3(ch, self.lastch[0], self.lastch[3], 3)] += 1
            self.acc[self.tran3(ch, self.lastch[1], self.lastch[3], 4)] += 1
            self.acc[self.tran3(ch, self.lastch[2], self.lastch[3], 5)] += 1
            self.acc[self.tran3(self.lastch[3], self.lastch[0], ch, 6)] += 1
            self.acc[self.tran3(self.lastch[3], self.lastch[2], ch, 7)] += 1

        # adjust last seen chars
        self.lastch = [ch] + self.lastch[:3]
python
def update(self, data):
    """Add data to running digest, increasing the accumulators for 0-8
    triplets formed by this char and the previous 0-3 chars."""
    for character in data:
        if PY3:
            ch = character
        else:
            ch = ord(character)
        self.count += 1

        # incr accumulators for triplets
        if self.lastch[1] > -1:
            self.acc[self.tran3(ch, self.lastch[0], self.lastch[1], 0)] += 1
        if self.lastch[2] > -1:
            self.acc[self.tran3(ch, self.lastch[0], self.lastch[2], 1)] += 1
            self.acc[self.tran3(ch, self.lastch[1], self.lastch[2], 2)] += 1
        if self.lastch[3] > -1:
            self.acc[self.tran3(ch, self.lastch[0], self.lastch[3], 3)] += 1
            self.acc[self.tran3(ch, self.lastch[1], self.lastch[3], 4)] += 1
            self.acc[self.tran3(ch, self.lastch[2], self.lastch[3], 5)] += 1
            self.acc[self.tran3(self.lastch[3], self.lastch[0], ch, 6)] += 1
            self.acc[self.tran3(self.lastch[3], self.lastch[2], ch, 7)] += 1

        # adjust last seen chars
        self.lastch = [ch] + self.lastch[:3]
[ "def", "update", "(", "self", ",", "data", ")", ":", "for", "character", "in", "data", ":", "if", "PY3", ":", "ch", "=", "character", "else", ":", "ch", "=", "ord", "(", "character", ")", "self", ".", "count", "+=", "1", "# incr accumulators for triplets", "if", "self", ".", "lastch", "[", "1", "]", ">", "-", "1", ":", "self", ".", "acc", "[", "self", ".", "tran3", "(", "ch", ",", "self", ".", "lastch", "[", "0", "]", ",", "self", ".", "lastch", "[", "1", "]", ",", "0", ")", "]", "+=", "1", "if", "self", ".", "lastch", "[", "2", "]", ">", "-", "1", ":", "self", ".", "acc", "[", "self", ".", "tran3", "(", "ch", ",", "self", ".", "lastch", "[", "0", "]", ",", "self", ".", "lastch", "[", "2", "]", ",", "1", ")", "]", "+=", "1", "self", ".", "acc", "[", "self", ".", "tran3", "(", "ch", ",", "self", ".", "lastch", "[", "1", "]", ",", "self", ".", "lastch", "[", "2", "]", ",", "2", ")", "]", "+=", "1", "if", "self", ".", "lastch", "[", "3", "]", ">", "-", "1", ":", "self", ".", "acc", "[", "self", ".", "tran3", "(", "ch", ",", "self", ".", "lastch", "[", "0", "]", ",", "self", ".", "lastch", "[", "3", "]", ",", "3", ")", "]", "+=", "1", "self", ".", "acc", "[", "self", ".", "tran3", "(", "ch", ",", "self", ".", "lastch", "[", "1", "]", ",", "self", ".", "lastch", "[", "3", "]", ",", "4", ")", "]", "+=", "1", "self", ".", "acc", "[", "self", ".", "tran3", "(", "ch", ",", "self", ".", "lastch", "[", "2", "]", ",", "self", ".", "lastch", "[", "3", "]", ",", "5", ")", "]", "+=", "1", "self", ".", "acc", "[", "self", ".", "tran3", "(", "self", ".", "lastch", "[", "3", "]", ",", "self", ".", "lastch", "[", "0", "]", ",", "ch", ",", "6", ")", "]", "+=", "1", "self", ".", "acc", "[", "self", ".", "tran3", "(", "self", ".", "lastch", "[", "3", "]", ",", "self", ".", "lastch", "[", "2", "]", ",", "ch", ",", "7", ")", "]", "+=", "1", "# adjust last seen chars", "self", ".", "lastch", "=", "[", "ch", "]", "+", "self", ".", "lastch", "[", ":", "3", "]" ]
Add data to running digest, increasing the accumulators for 0-8
triplets formed by this char and the previous 0-3 chars.
[ "Add", "data", "to", "running", "digest", "increasing", "the", "accumulators", "for", "0", "-", "8", "triplets", "formed", "by", "this", "char", "and", "the", "previous", "0", "-", "3", "chars", "." ]
c652f4bbfd836f7aebf292dcea676cc925ec315a
https://github.com/diffeo/py-nilsimsa/blob/c652f4bbfd836f7aebf292dcea676cc925ec315a/nilsimsa/deprecated/_deprecated_nilsimsa.py#L121-L145
train
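For reference, once the four-character window is full, each new character increments eight accumulators, indexed by `tran3` over the (a, b, c, n) combinations below; this sketch just enumerates them with placeholder names:

# lastch holds the previous four chars, most recent first; ch is the new char
lastch = ["c1", "c2", "c3", "c4"]
ch = "ch"

patterns = [
    (ch, lastch[0], lastch[1], 0),
    (ch, lastch[0], lastch[2], 1),
    (ch, lastch[1], lastch[2], 2),
    (ch, lastch[0], lastch[3], 3),
    (ch, lastch[1], lastch[3], 4),
    (ch, lastch[2], lastch[3], 5),
    (lastch[3], lastch[0], ch, 6),
    (lastch[3], lastch[2], ch, 7),
]
for a, b, c, n in patterns:
    print("tran3({}, {}, {}, n={})".format(a, b, c, n))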
diffeo/py-nilsimsa
nilsimsa/deprecated/_deprecated_nilsimsa.py
Nilsimsa.digest
def digest(self):
    """Get digest of data seen thus far as a list of bytes."""
    total = 0                        # number of triplets seen
    if self.count == 3:              # 3 chars = 1 triplet
        total = 1
    elif self.count == 4:            # 4 chars = 4 triplets
        total = 4
    elif self.count > 4:             # otherwise 8 triplets/char less
        total = 8 * self.count - 28  # 28 'missed' during 'ramp-up'

    threshold = total / 256          # threshold for accumulators, using the mean

    code = [0] * 32                  # start with all zero bits
    for i in range(256):             # for all 256 accumulators
        if self.acc[i] > threshold:  # if it meets the threshold
            code[i >> 3] += 1 << (i & 7)  # set corresponding digest bit, equivalent to i/8, 2 ** (i % 8)

    return code[::-1]
python
def digest(self):
    """Get digest of data seen thus far as a list of bytes."""
    total = 0                        # number of triplets seen
    if self.count == 3:              # 3 chars = 1 triplet
        total = 1
    elif self.count == 4:            # 4 chars = 4 triplets
        total = 4
    elif self.count > 4:             # otherwise 8 triplets/char less
        total = 8 * self.count - 28  # 28 'missed' during 'ramp-up'

    threshold = total / 256          # threshold for accumulators, using the mean

    code = [0] * 32                  # start with all zero bits
    for i in range(256):             # for all 256 accumulators
        if self.acc[i] > threshold:  # if it meets the threshold
            code[i >> 3] += 1 << (i & 7)  # set corresponding digest bit, equivalent to i/8, 2 ** (i % 8)

    return code[::-1]
[ "def", "digest", "(", "self", ")", ":", "total", "=", "0", "# number of triplets seen", "if", "self", ".", "count", "==", "3", ":", "# 3 chars = 1 triplet", "total", "=", "1", "elif", "self", ".", "count", "==", "4", ":", "# 4 chars = 4 triplets", "total", "=", "4", "elif", "self", ".", "count", ">", "4", ":", "# otherwise 8 triplets/char less", "total", "=", "8", "*", "self", ".", "count", "-", "28", "# 28 'missed' during 'ramp-up'", "threshold", "=", "total", "/", "256", "# threshold for accumulators, using the mean", "code", "=", "[", "0", "]", "*", "32", "# start with all zero bits", "for", "i", "in", "range", "(", "256", ")", ":", "# for all 256 accumulators", "if", "self", ".", "acc", "[", "i", "]", ">", "threshold", ":", "# if it meets the threshold", "code", "[", "i", ">>", "3", "]", "+=", "1", "<<", "(", "i", "&", "7", ")", "# set corresponding digest bit, equivalent to i/8, 2 ** (i % 8)", "return", "code", "[", ":", ":", "-", "1", "]" ]
Get digest of data seen thus far as a list of bytes.
[ "Get", "digest", "of", "data", "seen", "thus", "far", "as", "a", "list", "of", "bytes", "." ]
c652f4bbfd836f7aebf292dcea676cc925ec315a
https://github.com/diffeo/py-nilsimsa/blob/c652f4bbfd836f7aebf292dcea676cc925ec315a/nilsimsa/deprecated/_deprecated_nilsimsa.py#L147-L164
train
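The triplet totals in `digest` follow from the ramp-up: the 3rd char contributes 1 trigram, the 4th contributes 3 more, and every later char contributes 8, hence `8 * count - 28`. A small check, plus a portability caveat: `total / 256` floors under Python 2 but is float division under Python 3, so the threshold (and thus digests of short inputs) can differ between interpreters.

def total_triplets(count):
    # mirrors the branch structure of digest() above
    if count == 3:
        return 1
    if count == 4:
        return 4
    if count > 4:
        return 8 * count - 28
    return 0

for n in (3, 4, 5, 100):
    print(n, total_triplets(n))
# 3 -> 1, 4 -> 4, 5 -> 12, 100 -> 772

# division caveat between interpreters:
print(772 / 256)  # Python 3: 3.015625 (Python 2 would give 3)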
diffeo/py-nilsimsa
nilsimsa/deprecated/_deprecated_nilsimsa.py
Nilsimsa.from_file
def from_file(self, filename):
    """Update running digest with content of named file."""
    f = open(filename, 'rb')
    while True:
        data = f.read(10480)
        if not data:
            break
        self.update(data)
    f.close()
python
def from_file(self, filename):
    """Update running digest with content of named file."""
    f = open(filename, 'rb')
    while True:
        data = f.read(10480)
        if not data:
            break
        self.update(data)
    f.close()
[ "def", "from_file", "(", "self", ",", "filename", ")", ":", "f", "=", "open", "(", "filename", ",", "'rb'", ")", "while", "True", ":", "data", "=", "f", ".", "read", "(", "10480", ")", "if", "not", "data", ":", "break", "self", ".", "update", "(", "data", ")", "f", ".", "close", "(", ")" ]
Update running digest with content of named file.
[ "Update", "running", "digest", "with", "content", "of", "named", "file", "." ]
c652f4bbfd836f7aebf292dcea676cc925ec315a
https://github.com/diffeo/py-nilsimsa/blob/c652f4bbfd836f7aebf292dcea676cc925ec315a/nilsimsa/deprecated/_deprecated_nilsimsa.py#L174-L182
train
diffeo/py-nilsimsa
nilsimsa/deprecated/_deprecated_nilsimsa.py
Nilsimsa.compare
def compare(self, otherdigest, ishex=False):
    """Compute difference in bits between own digest and another.
    returns -127 to 128; 128 is the same, -127 is different"""
    bits = 0
    myd = self.digest()
    if ishex:
        # convert to 32-tuple of unsigned one-byte ints
        otherdigest = tuple([int(otherdigest[i:i+2], 16) for i in range(0, 63, 2)])
    for i in range(32):
        bits += POPC[255 & myd[i] ^ otherdigest[i]]
    return 128 - bits
python
def compare(self, otherdigest, ishex=False):
    """Compute difference in bits between own digest and another.
    returns -127 to 128; 128 is the same, -127 is different"""
    bits = 0
    myd = self.digest()
    if ishex:
        # convert to 32-tuple of unsigned one-byte ints
        otherdigest = tuple([int(otherdigest[i:i+2], 16) for i in range(0, 63, 2)])
    for i in range(32):
        bits += POPC[255 & myd[i] ^ otherdigest[i]]
    return 128 - bits
[ "def", "compare", "(", "self", ",", "otherdigest", ",", "ishex", "=", "False", ")", ":", "bits", "=", "0", "myd", "=", "self", ".", "digest", "(", ")", "if", "ishex", ":", "# convert to 32-tuple of unsighed two-byte INTs", "otherdigest", "=", "tuple", "(", "[", "int", "(", "otherdigest", "[", "i", ":", "i", "+", "2", "]", ",", "16", ")", "for", "i", "in", "range", "(", "0", ",", "63", ",", "2", ")", "]", ")", "for", "i", "in", "range", "(", "32", ")", ":", "bits", "+=", "POPC", "[", "255", "&", "myd", "[", "i", "]", "^", "otherdigest", "[", "i", "]", "]", "return", "128", "-", "bits" ]
Compute difference in bits between own digest and another.
returns -127 to 128; 128 is the same, -127 is different
[ "Compute", "difference", "in", "bits", "between", "own", "digest", "and", "another", ".", "returns", "-", "127", "to", "128", ";", "128", "is", "the", "same", "-", "127", "is", "different" ]
c652f4bbfd836f7aebf292dcea676cc925ec315a
https://github.com/diffeo/py-nilsimsa/blob/c652f4bbfd836f7aebf292dcea676cc925ec315a/nilsimsa/deprecated/_deprecated_nilsimsa.py#L184-L194
train
CloudGenix/sdk-python
cloudgenix/__init__.py
jdout
def jdout(api_response):
    """
    JD Output function. Does quick pretty printing of a CloudGenix Response body. This function returns a
    string instead of directly printing content.

    **Parameters:**

      - **api_response:** A CloudGenix-attribute extended `requests.Response` object

    **Returns:** Pretty-formatted text of the Response body
    """
    try:
        # attempt to output the cgx_content. should always be a Dict if it exists.
        output = json.dumps(api_response.cgx_content, indent=4)
    except (TypeError, ValueError, AttributeError):
        # cgx_content did not exist, or was not JSON serializable. Try pretty output the base obj.
        try:
            output = json.dumps(api_response, indent=4)
        except (TypeError, ValueError, AttributeError):
            # Same issue, just raw output the passed data. Let any exceptions happen here.
            output = api_response
    return output
python
def jdout(api_response):
    """
    JD Output function. Does quick pretty printing of a CloudGenix Response body. This function returns a
    string instead of directly printing content.

    **Parameters:**

      - **api_response:** A CloudGenix-attribute extended `requests.Response` object

    **Returns:** Pretty-formatted text of the Response body
    """
    try:
        # attempt to output the cgx_content. should always be a Dict if it exists.
        output = json.dumps(api_response.cgx_content, indent=4)
    except (TypeError, ValueError, AttributeError):
        # cgx_content did not exist, or was not JSON serializable. Try pretty output the base obj.
        try:
            output = json.dumps(api_response, indent=4)
        except (TypeError, ValueError, AttributeError):
            # Same issue, just raw output the passed data. Let any exceptions happen here.
            output = api_response
    return output
[ "def", "jdout", "(", "api_response", ")", ":", "try", ":", "# attempt to output the cgx_content. should always be a Dict if it exists.", "output", "=", "json", ".", "dumps", "(", "api_response", ".", "cgx_content", ",", "indent", "=", "4", ")", "except", "(", "TypeError", ",", "ValueError", ",", "AttributeError", ")", ":", "# cgx_content did not exist, or was not JSON serializable. Try pretty output the base obj.", "try", ":", "output", "=", "json", ".", "dumps", "(", "api_response", ",", "indent", "=", "4", ")", "except", "(", "TypeError", ",", "ValueError", ",", "AttributeError", ")", ":", "# Same issue, just raw output the passed data. Let any exceptions happen here.", "output", "=", "api_response", "return", "output" ]
JD Output function. Does quick pretty printing of a CloudGenix Response body. This function returns a
string instead of directly printing content.

**Parameters:**

  - **api_response:** A CloudGenix-attribute extended `requests.Response` object

**Returns:** Pretty-formatted text of the Response body
[ "JD", "Output", "function", ".", "Does", "quick", "pretty", "printing", "of", "a", "CloudGenix", "Response", "body", ".", "This", "function", "returns", "a", "string", "instead", "of", "directly", "printing", "content", "." ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L159-L180
train
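A short, hedged usage sketch for jdout(); the login call and endpoint are illustrative, since any CloudGenix-attribute extended requests.Response object works here.

import cloudgenix

sdk = cloudgenix.API()
sdk.interactive.login("user@example.com", "password")  # illustrative credentials
resp = sdk.get.profile()                               # illustrative endpoint
print(cloudgenix.jdout(resp))                          # pretty-printed cgx_content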
CloudGenix/sdk-python
cloudgenix/__init__.py
jdout_detailed
def jdout_detailed(api_response, sensitive=False): """ JD Output Detailed function. Meant for quick DETAILED pretty-printing of CloudGenix Request and Response objects for troubleshooting. This function returns a string instead of directly printing content. **Parameters:** - **api_response:** A CloudGenix-attribute extended `requests.Response` object - **sensitive:** Boolean, if True will print sensitive content (specifically, authentication cookies/headers). **Returns:** Pretty-formatted text of the Request, Request Headers, Request body, Response, Response Headers, and Response Body. """ try: # try to be super verbose. output = "REQUEST: {0} {1}\n".format(api_response.request.method, api_response.request.path_url) output += "REQUEST HEADERS:\n" for key, value in api_response.request.headers.items(): # look for sensitive values if key.lower() in ['cookie'] and not sensitive: # we need to do some work to watch for the AUTH_TOKEN cookie. Split on cookie separator cookie_list = value.split('; ') muted_cookie_list = [] for cookie in cookie_list: # check if cookie starts with a permutation of AUTH_TOKEN/whitespace. if cookie.lower().strip().startswith('auth_token='): # first 11 chars of cookie with whitespace removed + mute string. newcookie = cookie.strip()[:11] + "\"<SENSITIVE - NOT SHOWN BY DEFAULT>\"" muted_cookie_list.append(newcookie) else: muted_cookie_list.append(cookie) # got list of cookies, muted as needed. recombine. muted_value = "; ".join(muted_cookie_list) output += "\t{0}: {1}\n".format(key, muted_value) elif key.lower() in ['x-auth-token'] and not sensitive: output += "\t{0}: {1}\n".format(key, "<SENSITIVE - NOT SHOWN BY DEFAULT>") else: output += "\t{0}: {1}\n".format(key, value) # if body not present, output blank. if not api_response.request.body: output += "REQUEST BODY:\n{0}\n\n".format({}) else: try: # Attempt to load JSON from string to make it look better. output += "REQUEST BODY:\n{0}\n\n".format(json.dumps(json.loads(api_response.request.body), indent=4)) except (TypeError, ValueError, AttributeError): # if pretty call above didn't work, just toss it to jdout to best effort it. output += "REQUEST BODY:\n{0}\n\n".format(jdout(api_response.request.body)) output += "RESPONSE: {0} {1}\n".format(api_response.status_code, api_response.reason) output += "RESPONSE HEADERS:\n" for key, value in api_response.headers.items(): output += "\t{0}: {1}\n".format(key, value) try: # look for CGX content first. output += "RESPONSE DATA:\n{0}".format(json.dumps(api_response.cgx_content, indent=4)) except (TypeError, ValueError, AttributeError): # look for standard response data. output += "RESPONSE DATA:\n{0}".format(json.dumps(json.loads(api_response.content), indent=4)) except (TypeError, ValueError, AttributeError, UnicodeDecodeError): # cgx_content did not exist, or was not JSON serializable. Try pretty output the base obj. try: output = json.dumps(api_response, indent=4) except (TypeError, ValueError, AttributeError): # Same issue, just raw output the passed data. Let any exceptions happen here. output = api_response return output
python
def jdout_detailed(api_response, sensitive=False): """ JD Output Detailed function. Meant for quick DETAILED pretty-printing of CloudGenix Request and Response objects for troubleshooting. This function returns a string instead of directly printing content. **Parameters:** - **api_response:** A CloudGenix-attribute extended `requests.Response` object - **sensitive:** Boolean, if True will print sensitive content (specifically, authentication cookies/headers). **Returns:** Pretty-formatted text of the Request, Request Headers, Request body, Response, Response Headers, and Response Body. """ try: # try to be super verbose. output = "REQUEST: {0} {1}\n".format(api_response.request.method, api_response.request.path_url) output += "REQUEST HEADERS:\n" for key, value in api_response.request.headers.items(): # look for sensitive values if key.lower() in ['cookie'] and not sensitive: # we need to do some work to watch for the AUTH_TOKEN cookie. Split on cookie separator cookie_list = value.split('; ') muted_cookie_list = [] for cookie in cookie_list: # check if cookie starts with a permutation of AUTH_TOKEN/whitespace. if cookie.lower().strip().startswith('auth_token='): # first 11 chars of cookie with whitespace removed + mute string. newcookie = cookie.strip()[:11] + "\"<SENSITIVE - NOT SHOWN BY DEFAULT>\"" muted_cookie_list.append(newcookie) else: muted_cookie_list.append(cookie) # got list of cookies, muted as needed. recombine. muted_value = "; ".join(muted_cookie_list) output += "\t{0}: {1}\n".format(key, muted_value) elif key.lower() in ['x-auth-token'] and not sensitive: output += "\t{0}: {1}\n".format(key, "<SENSITIVE - NOT SHOWN BY DEFAULT>") else: output += "\t{0}: {1}\n".format(key, value) # if body not present, output blank. if not api_response.request.body: output += "REQUEST BODY:\n{0}\n\n".format({}) else: try: # Attempt to load JSON from string to make it look better. output += "REQUEST BODY:\n{0}\n\n".format(json.dumps(json.loads(api_response.request.body), indent=4)) except (TypeError, ValueError, AttributeError): # if pretty call above didn't work, just toss it to jdout to best effort it. output += "REQUEST BODY:\n{0}\n\n".format(jdout(api_response.request.body)) output += "RESPONSE: {0} {1}\n".format(api_response.status_code, api_response.reason) output += "RESPONSE HEADERS:\n" for key, value in api_response.headers.items(): output += "\t{0}: {1}\n".format(key, value) try: # look for CGX content first. output += "RESPONSE DATA:\n{0}".format(json.dumps(api_response.cgx_content, indent=4)) except (TypeError, ValueError, AttributeError): # look for standard response data. output += "RESPONSE DATA:\n{0}".format(json.dumps(json.loads(api_response.content), indent=4)) except (TypeError, ValueError, AttributeError, UnicodeDecodeError): # cgx_content did not exist, or was not JSON serializable. Try pretty output the base obj. try: output = json.dumps(api_response, indent=4) except (TypeError, ValueError, AttributeError): # Same issue, just raw output the passed data. Let any exceptions happen here. output = api_response return output
[ "def", "jdout_detailed", "(", "api_response", ",", "sensitive", "=", "False", ")", ":", "try", ":", "# try to be super verbose.", "output", "=", "\"REQUEST: {0} {1}\\n\"", ".", "format", "(", "api_response", ".", "request", ".", "method", ",", "api_response", ".", "request", ".", "path_url", ")", "output", "+=", "\"REQUEST HEADERS:\\n\"", "for", "key", ",", "value", "in", "api_response", ".", "request", ".", "headers", ".", "items", "(", ")", ":", "# look for sensitive values", "if", "key", ".", "lower", "(", ")", "in", "[", "'cookie'", "]", "and", "not", "sensitive", ":", "# we need to do some work to watch for the AUTH_TOKEN cookie. Split on cookie separator", "cookie_list", "=", "value", ".", "split", "(", "'; '", ")", "muted_cookie_list", "=", "[", "]", "for", "cookie", "in", "cookie_list", ":", "# check if cookie starts with a permutation of AUTH_TOKEN/whitespace.", "if", "cookie", ".", "lower", "(", ")", ".", "strip", "(", ")", ".", "startswith", "(", "'auth_token='", ")", ":", "# first 11 chars of cookie with whitespace removed + mute string.", "newcookie", "=", "cookie", ".", "strip", "(", ")", "[", ":", "11", "]", "+", "\"\\\"<SENSITIVE - NOT SHOWN BY DEFAULT>\\\"\"", "muted_cookie_list", ".", "append", "(", "newcookie", ")", "else", ":", "muted_cookie_list", ".", "append", "(", "cookie", ")", "# got list of cookies, muted as needed. recombine.", "muted_value", "=", "\"; \"", ".", "join", "(", "muted_cookie_list", ")", "output", "+=", "\"\\t{0}: {1}\\n\"", ".", "format", "(", "key", ",", "muted_value", ")", "elif", "key", ".", "lower", "(", ")", "in", "[", "'x-auth-token'", "]", "and", "not", "sensitive", ":", "output", "+=", "\"\\t{0}: {1}\\n\"", ".", "format", "(", "key", ",", "\"<SENSITIVE - NOT SHOWN BY DEFAULT>\"", ")", "else", ":", "output", "+=", "\"\\t{0}: {1}\\n\"", ".", "format", "(", "key", ",", "value", ")", "# if body not present, output blank.", "if", "not", "api_response", ".", "request", ".", "body", ":", "output", "+=", "\"REQUEST BODY:\\n{0}\\n\\n\"", ".", "format", "(", "{", "}", ")", "else", ":", "try", ":", "# Attempt to load JSON from string to make it look beter.", "output", "+=", "\"REQUEST BODY:\\n{0}\\n\\n\"", ".", "format", "(", "json", ".", "dumps", "(", "json", ".", "loads", "(", "api_response", ".", "request", ".", "body", ")", ",", "indent", "=", "4", ")", ")", "except", "(", "TypeError", ",", "ValueError", ",", "AttributeError", ")", ":", "# if pretty call above didn't work, just toss it to jdout to best effort it.", "output", "+=", "\"REQUEST BODY:\\n{0}\\n\\n\"", ".", "format", "(", "jdout", "(", "api_response", ".", "request", ".", "body", ")", ")", "output", "+=", "\"RESPONSE: {0} {1}\\n\"", ".", "format", "(", "api_response", ".", "status_code", ",", "api_response", ".", "reason", ")", "output", "+=", "\"RESPONSE HEADERS:\\n\"", "for", "key", ",", "value", "in", "api_response", ".", "headers", ".", "items", "(", ")", ":", "output", "+=", "\"\\t{0}: {1}\\n\"", ".", "format", "(", "key", ",", "value", ")", "try", ":", "# look for CGX content first.", "output", "+=", "\"RESPONSE DATA:\\n{0}\"", ".", "format", "(", "json", ".", "dumps", "(", "api_response", ".", "cgx_content", ",", "indent", "=", "4", ")", ")", "except", "(", "TypeError", ",", "ValueError", ",", "AttributeError", ")", ":", "# look for standard response data.", "output", "+=", "\"RESPONSE DATA:\\n{0}\"", ".", "format", "(", "json", ".", "dumps", "(", "json", ".", "loads", "(", "api_response", ".", "content", ")", ",", "indent", "=", "4", ")", ")", "except", "(", "TypeError", ",", 
"ValueError", ",", "AttributeError", ",", "UnicodeDecodeError", ")", ":", "# cgx_content did not exist, or was not JSON serializable. Try pretty output the base obj.", "try", ":", "output", "=", "json", ".", "dumps", "(", "api_response", ",", "indent", "=", "4", ")", "except", "(", "TypeError", ",", "ValueError", ",", "AttributeError", ")", ":", "# Same issue, just raw output the passed data. Let any exceptions happen here.", "output", "=", "api_response", "return", "output" ]
JD Output Detailed function. Meant for quick DETAILED pretty-printing of CloudGenix Request and Response objects for troubleshooting. This function returns a string instead of directly printing content. **Parameters:** - **api_response:** A CloudGenix-attribute extended `requests.Response` object - **sensitive:** Boolean, if True will print sensitive content (specifically, authentication cookies/headers). **Returns:** Pretty-formatted text of the Request, Request Headers, Request body, Response, Response Headers, and Response Body.
[ "JD", "Output", "Detailed", "function", ".", "Meant", "for", "quick", "DETAILED", "pretty", "-", "printing", "of", "CloudGenix", "Request", "and", "Response", "objects", "for", "troubleshooting", ".", "This", "function", "returns", "a", "string", "instead", "of", "directly", "printing", "content", "." ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L201-L266
train
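Following the same pattern, a small sketch contrasting the default masked output with sensitive=True (which reveals the AUTH_TOKEN cookie and x-auth-token header); resp is reused from the jdout sketch above.

# masked by default: auth cookie and x-auth-token header are hidden
print(cloudgenix.jdout_detailed(resp))

# opt in to the full detail only when you actually need the auth material
print(cloudgenix.jdout_detailed(resp, sensitive=True))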
CloudGenix/sdk-python
cloudgenix/__init__.py
API.notify_for_new_version
def notify_for_new_version(self): """ Check for a new version of the SDK on API constructor instantiation. If new version found, print Notification to STDERR. On failure of this check, fail silently. **Returns:** No item returned, directly prints notification to `sys.stderr`. """ # broad exception clause, if this fails for any reason just return. try: recommend_update = False update_check_resp = requests.get(self.update_info_url, timeout=3) web_version = update_check_resp.json()["info"]["version"] api_logger.debug("RETRIEVED_VERSION: %s", web_version) available_version = SDK_BUILD_REGEX.search(web_version).groupdict() current_version = SDK_BUILD_REGEX.search(self.version).groupdict() available_major = available_version.get('major') available_minor = available_version.get('minor') available_patch = available_version.get('patch') available_build = available_version.get('build') current_major = current_version.get('major') current_minor = current_version.get('minor') current_patch = current_version.get('patch') current_build = current_version.get('build') api_logger.debug("AVAILABLE_VERSION: %s", available_version) api_logger.debug("CURRENT_VERSION: %s", current_version) # check for major/minor version differences, do not alert for build differences. if available_major > current_major: recommend_update = True elif available_major >= current_major and available_minor > current_minor: recommend_update = True elif available_major >= current_major and available_minor >= current_minor and \ available_patch > current_patch: recommend_update = True api_logger.debug("NEED_UPDATE: %s", recommend_update) # notify. if recommend_update: sys.stderr.write("WARNING: CloudGenix Python SDK upgrade available. SDKs are typically deprecated 6 " "months after release of a new version.\n" "\tLatest Version: {0}\n" "\tCurrent Version: {1}\n" "\tFor more info, see 'https://github.com/cloudgenix/sdk-python'. Additionally, this " "message can be suppressed by instantiating the API with API(update_check=False).\n\n" "".format(web_version, self.version)) return except Exception: # just return and continue. return
python
def notify_for_new_version(self): """ Check for a new version of the SDK on API constructor instantiation. If new version found, print Notification to STDERR. On failure of this check, fail silently. **Returns:** No item returned, directly prints notification to `sys.stderr`. """ # broad exception clause, if this fails for any reason just return. try: recommend_update = False update_check_resp = requests.get(self.update_info_url, timeout=3) web_version = update_check_resp.json()["info"]["version"] api_logger.debug("RETRIEVED_VERSION: %s", web_version) available_version = SDK_BUILD_REGEX.search(web_version).groupdict() current_version = SDK_BUILD_REGEX.search(self.version).groupdict() available_major = available_version.get('major') available_minor = available_version.get('minor') available_patch = available_version.get('patch') available_build = available_version.get('build') current_major = current_version.get('major') current_minor = current_version.get('minor') current_patch = current_version.get('patch') current_build = current_version.get('build') api_logger.debug("AVAILABLE_VERSION: %s", available_version) api_logger.debug("CURRENT_VERSION: %s", current_version) # check for major/minor version differences, do not alert for build differences. if available_major > current_major: recommend_update = True elif available_major >= current_major and available_minor > current_minor: recommend_update = True elif available_major >= current_major and available_minor >= current_minor and \ available_patch > current_patch: recommend_update = True api_logger.debug("NEED_UPDATE: %s", recommend_update) # notify. if recommend_update: sys.stderr.write("WARNING: CloudGenix Python SDK upgrade available. SDKs are typically deprecated 6 " "months after release of a new version.\n" "\tLatest Version: {0}\n" "\tCurrent Version: {1}\n" "\tFor more info, see 'https://github.com/cloudgenix/sdk-python'. Additionally, this " "message can be suppressed by instantiating the API with API(update_check=False).\n\n" "".format(web_version, self.version)) return except Exception: # just return and continue. return
[ "def", "notify_for_new_version", "(", "self", ")", ":", "# broad exception clause, if this fails for any reason just return.", "try", ":", "recommend_update", "=", "False", "update_check_resp", "=", "requests", ".", "get", "(", "self", ".", "update_info_url", ",", "timeout", "=", "3", ")", "web_version", "=", "update_check_resp", ".", "json", "(", ")", "[", "\"info\"", "]", "[", "\"version\"", "]", "api_logger", ".", "debug", "(", "\"RETRIEVED_VERSION: %s\"", ",", "web_version", ")", "available_version", "=", "SDK_BUILD_REGEX", ".", "search", "(", "web_version", ")", ".", "groupdict", "(", ")", "current_version", "=", "SDK_BUILD_REGEX", ".", "search", "(", "self", ".", "version", ")", ".", "groupdict", "(", ")", "available_major", "=", "available_version", ".", "get", "(", "'major'", ")", "available_minor", "=", "available_version", ".", "get", "(", "'minor'", ")", "available_patch", "=", "available_version", ".", "get", "(", "'patch'", ")", "available_build", "=", "available_version", ".", "get", "(", "'build'", ")", "current_major", "=", "current_version", ".", "get", "(", "'major'", ")", "current_minor", "=", "current_version", ".", "get", "(", "'minor'", ")", "current_patch", "=", "current_version", ".", "get", "(", "'patch'", ")", "current_build", "=", "current_version", ".", "get", "(", "'build'", ")", "api_logger", ".", "debug", "(", "\"AVAILABLE_VERSION: %s\"", ",", "available_version", ")", "api_logger", ".", "debug", "(", "\"CURRENT_VERSION: %s\"", ",", "current_version", ")", "# check for major/minor version differences, do not alert for build differences.", "if", "available_major", ">", "current_major", ":", "recommend_update", "=", "True", "elif", "available_major", ">=", "current_major", "and", "available_minor", ">", "current_minor", ":", "recommend_update", "=", "True", "elif", "available_major", ">=", "current_major", "and", "available_minor", ">=", "current_minor", "and", "available_patch", ">", "current_patch", ":", "recommend_update", "=", "True", "api_logger", ".", "debug", "(", "\"NEED_UPDATE: %s\"", ",", "recommend_update", ")", "# notify.", "if", "recommend_update", ":", "sys", ".", "stderr", ".", "write", "(", "\"WARNING: CloudGenix Python SDK upgrade available. SDKs are typically deprecated 6 \"", "\"months after release of a new version.\\n\"", "\"\\tLatest Version: {0}\\n\"", "\"\\tCurrent Version: {1}\\n\"", "\"\\tFor more info, see 'https://github.com/cloudgenix/sdk-python'. Additionally, this \"", "\"message can be suppressed by instantiating the API with API(update_check=False).\\n\\n\"", "\"\"", ".", "format", "(", "web_version", ",", "self", ".", "version", ")", ")", "return", "except", "Exception", ":", "# just return and continue.", "return" ]
Check for a new version of the SDK on API constructor instantiation. If new version found, print Notification to STDERR. On failure of this check, fail silently. **Returns:** No item returned, directly prints notification to `sys.stderr`.
[ "Check", "for", "a", "new", "version", "of", "the", "SDK", "on", "API", "constructor", "instantiation", ".", "If", "new", "version", "found", "print", "Notification", "to", "STDERR", "." ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L446-L503
train
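The update check runs from the API constructor; per the warning text embedded in the function itself, it can be suppressed at instantiation time.

import cloudgenix

sdk = cloudgenix.API(update_check=False)  # skips notify_for_new_version() entirely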
CloudGenix/sdk-python
cloudgenix/__init__.py
API.ssl_verify
def ssl_verify(self, ssl_verify): """ Modify ssl verification settings **Parameters:** - ssl_verify: - True: Verify using builtin BYTE_CA_BUNDLE. - False: No SSL Verification. - Str: Full path to an x509 PEM CA File or bundle. **Returns:** Mutates API object in place, no return. """ self.verify = ssl_verify # if verify true/false, set ca_verify_file appropriately if isinstance(self.verify, bool): if self.verify: # True if os.name == 'nt': # Windows does not allow tmpfile access w/out close. Close file then delete it when done. self._ca_verify_file_handle = temp_ca_bundle(delete=False) self._ca_verify_file_handle.write(BYTE_CA_BUNDLE) self._ca_verify_file_handle.flush() self.ca_verify_filename = self._ca_verify_file_handle.name self._ca_verify_file_handle.close() # Other (POSIX/Unix/Linux/OSX) else: self._ca_verify_file_handle = temp_ca_bundle() self._ca_verify_file_handle.write(BYTE_CA_BUNDLE) self._ca_verify_file_handle.flush() self.ca_verify_filename = self._ca_verify_file_handle.name # register cleanup function for temp file. atexit.register(self._cleanup_ca_temp_file) else: # False # disable warnings for SSL certs. urllib3.disable_warnings() self.ca_verify_filename = False else: # Not True/False, assume path to file/dir for Requests self.ca_verify_filename = self.verify return
python
def ssl_verify(self, ssl_verify): """ Modify ssl verification settings **Parameters:** - ssl_verify: - True: Verify using builtin BYTE_CA_BUNDLE. - False: No SSL Verification. - Str: Full path to an x509 PEM CA File or bundle. **Returns:** Mutates API object in place, no return. """ self.verify = ssl_verify # if verify true/false, set ca_verify_file appropriately if isinstance(self.verify, bool): if self.verify: # True if os.name == 'nt': # Windows does not allow tmpfile access w/out close. Close file then delete it when done. self._ca_verify_file_handle = temp_ca_bundle(delete=False) self._ca_verify_file_handle.write(BYTE_CA_BUNDLE) self._ca_verify_file_handle.flush() self.ca_verify_filename = self._ca_verify_file_handle.name self._ca_verify_file_handle.close() # Other (POSIX/Unix/Linux/OSX) else: self._ca_verify_file_handle = temp_ca_bundle() self._ca_verify_file_handle.write(BYTE_CA_BUNDLE) self._ca_verify_file_handle.flush() self.ca_verify_filename = self._ca_verify_file_handle.name # register cleanup function for temp file. atexit.register(self._cleanup_ca_temp_file) else: # False # disable warnings for SSL certs. urllib3.disable_warnings() self.ca_verify_filename = False else: # Not True/False, assume path to file/dir for Requests self.ca_verify_filename = self.verify return
[ "def", "ssl_verify", "(", "self", ",", "ssl_verify", ")", ":", "self", ".", "verify", "=", "ssl_verify", "# if verify true/false, set ca_verify_file appropriately", "if", "isinstance", "(", "self", ".", "verify", ",", "bool", ")", ":", "if", "self", ".", "verify", ":", "# True", "if", "os", ".", "name", "==", "'nt'", ":", "# Windows does not allow tmpfile access w/out close. Close file then delete it when done.", "self", ".", "_ca_verify_file_handle", "=", "temp_ca_bundle", "(", "delete", "=", "False", ")", "self", ".", "_ca_verify_file_handle", ".", "write", "(", "BYTE_CA_BUNDLE", ")", "self", ".", "_ca_verify_file_handle", ".", "flush", "(", ")", "self", ".", "ca_verify_filename", "=", "self", ".", "_ca_verify_file_handle", ".", "name", "self", ".", "_ca_verify_file_handle", ".", "close", "(", ")", "# Other (POSIX/Unix/Linux/OSX)", "else", ":", "self", ".", "_ca_verify_file_handle", "=", "temp_ca_bundle", "(", ")", "self", ".", "_ca_verify_file_handle", ".", "write", "(", "BYTE_CA_BUNDLE", ")", "self", ".", "_ca_verify_file_handle", ".", "flush", "(", ")", "self", ".", "ca_verify_filename", "=", "self", ".", "_ca_verify_file_handle", ".", "name", "# register cleanup function for temp file.", "atexit", ".", "register", "(", "self", ".", "_cleanup_ca_temp_file", ")", "else", ":", "# False", "# disable warnings for SSL certs.", "urllib3", ".", "disable_warnings", "(", ")", "self", ".", "ca_verify_filename", "=", "False", "else", ":", "# Not True/False, assume path to file/dir for Requests", "self", ".", "ca_verify_filename", "=", "self", ".", "verify", "return" ]
Modify ssl verification settings **Parameters:** - ssl_verify: - True: Verify using builtin BYTE_CA_BUNDLE. - False: No SSL Verification. - Str: Full path to an x509 PEM CA File or bundle. **Returns:** Mutates API object in place, no return.
[ "Modify", "ssl", "verification", "settings" ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L505-L546
train
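A brief sketch of the three accepted ssl_verify() inputs, matching the branches above; the CA path is illustrative.

sdk = cloudgenix.API()
sdk.ssl_verify(True)                      # builtin BYTE_CA_BUNDLE via a temp file
sdk.ssl_verify("/path/to/custom-ca.pem")  # illustrative path to a PEM CA file/bundle
sdk.ssl_verify(False)                     # no verification; urllib3 warnings muted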
CloudGenix/sdk-python
cloudgenix/__init__.py
API.modify_rest_retry
def modify_rest_retry(self, total=8, connect=None, read=None, redirect=None, status=None, method_whitelist=urllib3.util.retry.Retry.DEFAULT_METHOD_WHITELIST, status_forcelist=None, backoff_factor=0.705883, raise_on_redirect=True, raise_on_status=True, respect_retry_after_header=True, adapter_url="https://"): """ Modify retry parameters for the SDK's rest call object. Parameters are directly from and passed directly to `urllib3.util.retry.Retry`, and get applied directly to the underlying `requests.Session` object. Default retry with total=8 and backoff_factor=0.705883: - Try 1, 0 delay (0 total seconds) - Try 2, 0 delay (0 total seconds) - Try 3, 0.705883 delay (0.705883 total seconds) - Try 4, 1.411766 delay (2.117649 total seconds) - Try 5, 2.823532 delay (4.941181 total seconds) - Try 6, 5.647064 delay (10.588245 total seconds) - Try 7, 11.294128 delay (21.882373 total seconds) - Try 8, 22.588256 delay (44.470629 total seconds) - Try 9, 45.176512 delay (89.647141 total seconds) - Try 10, 90.353024 delay (180.000165 total seconds) **Parameters:** - **total:** int, Total number of retries to allow. Takes precedence over other counts. - **connect:** int, How many connection-related errors to retry on. - **read:** int, How many times to retry on read errors. - **redirect:** int, How many redirects to perform. Limit this to avoid infinite redirect loops. - **status:** int, How many times to retry on bad status codes. - **method_whitelist:** iterable, Set of uppercased HTTP method verbs that we should retry on. - **status_forcelist:** iterable, A set of integer HTTP status codes that we should force a retry on. - **backoff_factor:** float, A backoff factor to apply between attempts after the second try. - **raise_on_redirect:** bool, True = raise a MaxRetryError, False = return latest 3xx response. - **raise_on_status:** bool, Similar logic to ``raise_on_redirect`` but for status responses. - **respect_retry_after_header:** bool, Whether to respect Retry-After header on status codes. - **adapter_url:** string, URL match for these retry values (default `https://`) **Returns:** No return, mutates the session directly """ # Cloudgenix responses with 502/504 are usually recoverable. Use them if no list specified. if status_forcelist is None: status_forcelist = (413, 429, 502, 503, 504) retry = urllib3.util.retry.Retry(total=total, connect=connect, read=read, redirect=redirect, status=status, method_whitelist=method_whitelist, status_forcelist=status_forcelist, backoff_factor=backoff_factor, raise_on_redirect=raise_on_redirect, raise_on_status=raise_on_status, respect_retry_after_header=respect_retry_after_header) adapter = requests.adapters.HTTPAdapter(max_retries=retry) self._session.mount(adapter_url, adapter) return
python
def modify_rest_retry(self, total=8, connect=None, read=None, redirect=None, status=None, method_whitelist=urllib3.util.retry.Retry.DEFAULT_METHOD_WHITELIST, status_forcelist=None, backoff_factor=0.705883, raise_on_redirect=True, raise_on_status=True, respect_retry_after_header=True, adapter_url="https://"): """ Modify retry parameters for the SDK's rest call object. Parameters are directly from and passed directly to `urllib3.util.retry.Retry`, and get applied directly to the underlying `requests.Session` object. Default retry with total=8 and backoff_factor=0.705883: - Try 1, 0 delay (0 total seconds) - Try 2, 0 delay (0 total seconds) - Try 3, 0.705883 delay (0.705883 total seconds) - Try 4, 1.411766 delay (2.117649 total seconds) - Try 5, 2.823532 delay (4.941181 total seconds) - Try 6, 5.647064 delay (10.588245 total seconds) - Try 7, 11.294128 delay (21.882373 total seconds) - Try 8, 22.588256 delay (44.470629 total seconds) - Try 9, 45.176512 delay (89.647141 total seconds) - Try 10, 90.353024 delay (180.000165 total seconds) **Parameters:** - **total:** int, Total number of retries to allow. Takes precedence over other counts. - **connect:** int, How many connection-related errors to retry on. - **read:** int, How many times to retry on read errors. - **redirect:** int, How many redirects to perform. Limit this to avoid infinite redirect loops. - **status:** int, How many times to retry on bad status codes. - **method_whitelist:** iterable, Set of uppercased HTTP method verbs that we should retry on. - **status_forcelist:** iterable, A set of integer HTTP status codes that we should force a retry on. - **backoff_factor:** float, A backoff factor to apply between attempts after the second try. - **raise_on_redirect:** bool, True = raise a MaxRetryError, False = return latest 3xx response. - **raise_on_status:** bool, Similar logic to ``raise_on_redirect`` but for status responses. - **respect_retry_after_header:** bool, Whether to respect Retry-After header on status codes. - **adapter_url:** string, URL match for these retry values (default `https://`) **Returns:** No return, mutates the session directly """ # Cloudgenix responses with 502/504 are usually recoverable. Use them if no list specified. if status_forcelist is None: status_forcelist = (413, 429, 502, 503, 504) retry = urllib3.util.retry.Retry(total=total, connect=connect, read=read, redirect=redirect, status=status, method_whitelist=method_whitelist, status_forcelist=status_forcelist, backoff_factor=backoff_factor, raise_on_redirect=raise_on_redirect, raise_on_status=raise_on_status, respect_retry_after_header=respect_retry_after_header) adapter = requests.adapters.HTTPAdapter(max_retries=retry) self._session.mount(adapter_url, adapter) return
[ "def", "modify_rest_retry", "(", "self", ",", "total", "=", "8", ",", "connect", "=", "None", ",", "read", "=", "None", ",", "redirect", "=", "None", ",", "status", "=", "None", ",", "method_whitelist", "=", "urllib3", ".", "util", ".", "retry", ".", "Retry", ".", "DEFAULT_METHOD_WHITELIST", ",", "status_forcelist", "=", "None", ",", "backoff_factor", "=", "0.705883", ",", "raise_on_redirect", "=", "True", ",", "raise_on_status", "=", "True", ",", "respect_retry_after_header", "=", "True", ",", "adapter_url", "=", "\"https://\"", ")", ":", "# Cloudgenix responses with 502/504 are usually recoverable. Use them if no list specified.", "if", "status_forcelist", "is", "None", ":", "status_forcelist", "=", "(", "413", ",", "429", ",", "502", ",", "503", ",", "504", ")", "retry", "=", "urllib3", ".", "util", ".", "retry", ".", "Retry", "(", "total", "=", "total", ",", "connect", "=", "connect", ",", "read", "=", "read", ",", "redirect", "=", "redirect", ",", "status", "=", "status", ",", "method_whitelist", "=", "method_whitelist", ",", "status_forcelist", "=", "status_forcelist", ",", "backoff_factor", "=", "backoff_factor", ",", "raise_on_redirect", "=", "raise_on_redirect", ",", "raise_on_status", "=", "raise_on_status", ",", "respect_retry_after_header", "=", "respect_retry_after_header", ")", "adapter", "=", "requests", ".", "adapters", ".", "HTTPAdapter", "(", "max_retries", "=", "retry", ")", "self", ".", "_session", ".", "mount", "(", "adapter_url", ",", "adapter", ")", "return" ]
Modify retry parameters for the SDK's rest call object. Parameters are directly from and passed directly to `urllib3.util.retry.Retry`, and get applied directly to the underlying `requests.Session` object. Default retry with total=8 and backoff_factor=0.705883: - Try 1, 0 delay (0 total seconds) - Try 2, 0 delay (0 total seconds) - Try 3, 0.705883 delay (0.705883 total seconds) - Try 4, 1.411766 delay (2.117649 total seconds) - Try 5, 2.823532 delay (4.941181 total seconds) - Try 6, 5.647064 delay (10.588245 total seconds) - Try 7, 11.294128 delay (21.882373 total seconds) - Try 8, 22.588256 delay (44.470629 total seconds) - Try 9, 45.176512 delay (89.647141 total seconds) - Try 10, 90.353024 delay (180.000165 total seconds) **Parameters:** - **total:** int, Total number of retries to allow. Takes precedence over other counts. - **connect:** int, How many connection-related errors to retry on. - **read:** int, How many times to retry on read errors. - **redirect:** int, How many redirects to perform. Limit this to avoid infinite redirect loops. - **status:** int, How many times to retry on bad status codes. - **method_whitelist:** iterable, Set of uppercased HTTP method verbs that we should retry on. - **status_forcelist:** iterable, A set of integer HTTP status codes that we should force a retry on. - **backoff_factor:** float, A backoff factor to apply between attempts after the second try. - **raise_on_redirect:** bool, True = raise a MaxRetryError, False = return latest 3xx response. - **raise_on_status:** bool, Similar logic to ``raise_on_redirect`` but for status responses. - **respect_retry_after_header:** bool, Whether to respect Retry-After header on status codes. - **adapter_url:** string, URL match for these retry values (default `https://`) **Returns:** No return, mutates the session directly
[ "Modify", "retry", "parameters", "for", "the", "SDK", "s", "rest", "call", "object", "." ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L548-L604
train
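A hedged example of tightening the defaults above for latency-sensitive scripts; the values are illustrative, and every keyword maps straight onto the signature shown in the record.

# 3 total tries with a short backoff, retrying only throttling and brief outages
sdk.modify_rest_retry(total=3, backoff_factor=0.3, status_forcelist=(429, 502, 503))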
CloudGenix/sdk-python
cloudgenix/__init__.py
API.view_rest_retry
def view_rest_retry(self, url=None): """ View current rest retry settings in the `requests.Session()` object **Parameters:** - **url:** URL to use to determine retry methods for. Defaults to 'https://' **Returns:** Dict of retry settings from the `urllib3.util.retry.Retry` object on the matching adapter. """ if url is None: url = "https://" return vars(self._session.get_adapter(url).max_retries)
python
def view_rest_retry(self, url=None): """ View current rest retry settings in the `requests.Session()` object **Parameters:** - **url:** URL to use to determine retry methods for. Defaults to 'https://' **Returns:** Dict of retry settings from the `urllib3.util.retry.Retry` object on the matching adapter. """ if url is None: url = "https://" return vars(self._session.get_adapter(url).max_retries)
[ "def", "view_rest_retry", "(", "self", ",", "url", "=", "None", ")", ":", "if", "url", "is", "None", ":", "url", "=", "\"https://\"", "return", "vars", "(", "self", ".", "_session", ".", "get_adapter", "(", "url", ")", ".", "max_retries", ")" ]
View current rest retry settings in the `requests.Session()` object **Parameters:** - **url:** URL to use to determine retry methods for. Defaults to 'https://' **Returns:** Dict of retry settings from the `urllib3.util.retry.Retry` object on the matching adapter.
[ "View", "current", "rest", "retry", "settings", "in", "the", "requests", ".", "Session", "()", "object" ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L606-L618
train
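Pairing naturally with modify_rest_retry(), a quick inspection sketch.

print(sdk.view_rest_retry())           # retry settings on the 'https://' adapter
print(sdk.view_rest_retry("http://"))  # settings on the adapter matched for plain HTTP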
CloudGenix/sdk-python
cloudgenix/__init__.py
API.view_cookies
def view_cookies(self): """ View current cookies in the `requests.Session()` object **Returns:** List of Dicts, one cookie per Dict. """ return_list = [] for cookie in self._session.cookies: return_list.append(vars(cookie)) return return_list
python
def view_cookies(self): """ View current cookies in the `requests.Session()` object **Returns:** List of Dicts, one cookie per Dict. """ return_list = [] for cookie in self._session.cookies: return_list.append(vars(cookie)) return return_list
[ "def", "view_cookies", "(", "self", ")", ":", "return_list", "=", "[", "]", "for", "cookie", "in", "self", ".", "_session", ".", "cookies", ":", "return_list", ".", "append", "(", "vars", "(", "cookie", ")", ")", "return", "return_list" ]
View current cookies in the `requests.Session()` object **Returns:** List of Dicts, one cookie per Dict.
[ "View", "current", "cookies", "in", "the", "requests", ".", "Session", "()", "object" ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L662-L672
train
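A small sketch iterating the per-cookie dicts; since each entry is vars() of a cookielib Cookie object, the keys are attribute names such as 'name', 'value', 'domain', and 'expires'.

for cookie in sdk.view_cookies():
    print(cookie.get("name"), cookie.get("domain"), cookie.get("expires"))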
CloudGenix/sdk-python
cloudgenix/__init__.py
API.set_debug
def set_debug(self, debuglevel): """ Change the debug level of the API **Returns:** No item returned. """ if isinstance(debuglevel, int): self._debuglevel = debuglevel if self._debuglevel == 1: logging.basicConfig(level=logging.INFO, format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s") api_logger.setLevel(logging.INFO) elif self._debuglevel == 2: logging.basicConfig(level=logging.DEBUG, format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s") requests.cookies.cookielib.debug = True api_logger.setLevel(logging.DEBUG) elif self._debuglevel >= 3: logging.basicConfig(level=logging.DEBUG, format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s") requests.cookies.cookielib.debug = True api_logger.setLevel(logging.DEBUG) urllib3_logger = logging.getLogger("requests.packages.urllib3") urllib3_logger.setLevel(logging.DEBUG) urllib3_logger.propagate = True else: # Remove all handlers for handler in logging.root.handlers[:]: logging.root.removeHandler(handler) # set logging level to default requests.cookies.cookielib.debug = False api_logger.setLevel(logging.WARNING) return
python
def set_debug(self, debuglevel): """ Change the debug level of the API **Returns:** No item returned. """ if isinstance(debuglevel, int): self._debuglevel = debuglevel if self._debuglevel == 1: logging.basicConfig(level=logging.INFO, format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s") api_logger.setLevel(logging.INFO) elif self._debuglevel == 2: logging.basicConfig(level=logging.DEBUG, format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s") requests.cookies.cookielib.debug = True api_logger.setLevel(logging.DEBUG) elif self._debuglevel >= 3: logging.basicConfig(level=logging.DEBUG, format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s") requests.cookies.cookielib.debug = True api_logger.setLevel(logging.DEBUG) urllib3_logger = logging.getLogger("requests.packages.urllib3") urllib3_logger.setLevel(logging.DEBUG) urllib3_logger.propagate = True else: # Remove all handlers for handler in logging.root.handlers[:]: logging.root.removeHandler(handler) # set logging level to default requests.cookies.cookielib.debug = False api_logger.setLevel(logging.WARNING) return
[ "def", "set_debug", "(", "self", ",", "debuglevel", ")", ":", "if", "isinstance", "(", "debuglevel", ",", "int", ")", ":", "self", ".", "_debuglevel", "=", "debuglevel", "if", "self", ".", "_debuglevel", "==", "1", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ",", "format", "=", "\"%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s\"", ")", "api_logger", ".", "setLevel", "(", "logging", ".", "INFO", ")", "elif", "self", ".", "_debuglevel", "==", "2", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "DEBUG", ",", "format", "=", "\"%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s\"", ")", "requests", ".", "cookies", ".", "cookielib", ".", "debug", "=", "True", "api_logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "elif", "self", ".", "_debuglevel", ">=", "3", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "DEBUG", ",", "format", "=", "\"%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s\"", ")", "requests", ".", "cookies", ".", "cookielib", ".", "debug", "=", "True", "api_logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "urllib3_logger", "=", "logging", ".", "getLogger", "(", "\"requests.packages.urllib3\"", ")", "urllib3_logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "urllib3_logger", ".", "propagate", "=", "True", "else", ":", "# Remove all handlers", "for", "handler", "in", "logging", ".", "root", ".", "handlers", "[", ":", "]", ":", "logging", ".", "root", ".", "removeHandler", "(", "handler", ")", "# set logging level to default", "requests", ".", "cookies", ".", "cookielib", ".", "debug", "=", "False", "api_logger", ".", "setLevel", "(", "logging", ".", "WARNING", ")", "return" ]
Change the debug level of the API **Returns:** No item returned.
[ "Change", "the", "debug", "level", "of", "the", "API" ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L674-L708
train
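For reference, a sketch of the debug levels handled by the branches above.

sdk.set_debug(1)  # INFO-level SDK logging
sdk.set_debug(2)  # DEBUG plus cookielib debug output
sdk.set_debug(3)  # DEBUG plus urllib3 wire-level logging
sdk.set_debug(0)  # reset: handlers removed, WARNING level restored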
CloudGenix/sdk-python
cloudgenix/__init__.py
API._subclass_container
def _subclass_container(self): """ Call subclasses via function to allow passing parent namespace to subclasses. **Returns:** dict with subclass references. """ _parent_class = self class GetWrapper(Get): def __init__(self): self._parent_class = _parent_class class PostWrapper(Post): def __init__(self): self._parent_class = _parent_class class PutWrapper(Put): def __init__(self): self._parent_class = _parent_class class PatchWrapper(Patch): def __init__(self): self._parent_class = _parent_class class DeleteWrapper(Delete): def __init__(self): self._parent_class = _parent_class class InteractiveWrapper(Interactive): def __init__(self): self._parent_class = _parent_class return {"get": GetWrapper, "post": PostWrapper, "put": PutWrapper, "patch": PatchWrapper, "delete": DeleteWrapper, "interactive": InteractiveWrapper}
python
def _subclass_container(self): """ Call subclasses via function to allow passing parent namespace to subclasses. **Returns:** dict with subclass references. """ _parent_class = self class GetWrapper(Get): def __init__(self): self._parent_class = _parent_class class PostWrapper(Post): def __init__(self): self._parent_class = _parent_class class PutWrapper(Put): def __init__(self): self._parent_class = _parent_class class PatchWrapper(Patch): def __init__(self): self._parent_class = _parent_class class DeleteWrapper(Delete): def __init__(self): self._parent_class = _parent_class class InteractiveWrapper(Interactive): def __init__(self): self._parent_class = _parent_class return {"get": GetWrapper, "post": PostWrapper, "put": PutWrapper, "patch": PatchWrapper, "delete": DeleteWrapper, "interactive": InteractiveWrapper}
[ "def", "_subclass_container", "(", "self", ")", ":", "_parent_class", "=", "self", "class", "GetWrapper", "(", "Get", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "_parent_class", "=", "_parent_class", "class", "PostWrapper", "(", "Post", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "_parent_class", "=", "_parent_class", "class", "PutWrapper", "(", "Put", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "_parent_class", "=", "_parent_class", "class", "PatchWrapper", "(", "Patch", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "_parent_class", "=", "_parent_class", "class", "DeleteWrapper", "(", "Delete", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "_parent_class", "=", "_parent_class", "class", "InteractiveWrapper", "(", "Interactive", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "_parent_class", "=", "_parent_class", "return", "{", "\"get\"", ":", "GetWrapper", ",", "\"post\"", ":", "PostWrapper", ",", "\"put\"", ":", "PutWrapper", ",", "\"patch\"", ":", "PatchWrapper", ",", "\"delete\"", ":", "DeleteWrapper", ",", "\"interactive\"", ":", "InteractiveWrapper", "}" ]
Call subclasses via function to allow passing parent namespace to subclasses. **Returns:** dict with subclass references.
[ "Call", "subclasses", "via", "function", "to", "allow", "passing", "parent", "namespace", "to", "subclasses", "." ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L710-L753
train
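To make the wrapper pattern concrete, a minimal sketch of how the returned container binds each subclass to the parent API namespace; calling the private method directly is illustrative here, since the SDK constructor normally consumes the container itself (e.g. sdk.get = container["get"]()).

container = sdk._subclass_container()
get_api = container["get"]()         # instantiate the Get wrapper
assert get_api._parent_class is sdk  # wrapper reaches the shared session/state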
CloudGenix/sdk-python
cloudgenix/__init__.py
API.rest_call
def rest_call(self, url, method, data=None, sensitive=False, timeout=None, content_json=True, retry=None, max_retry=None, retry_sleep=None): """ Generic REST call worker function **Parameters:** - **url:** URL for the REST call - **method:** METHOD for the REST call - **data:** Optional DATA for the call (for POST/PUT/etc.) - **sensitive:** Flag if content request/response should be hidden from logging functions - **timeout:** Requests Timeout - **content_json:** Bool on whether the Content-Type header should be set to application/json - **retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. - **max_retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. - **retry_sleep:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. **Returns:** Requests.Response object, extended with: - **cgx_status**: Bool, True if a successful CloudGenix response, False if error. - **cgx_content**: Content of the response, guaranteed to be in Dict format. Empty/invalid responses will be converted to a Dict response. """ # pull retry related items from Constructor if not specified. if timeout is None: timeout = self.rest_call_timeout if retry is not None: # Someone using deprecated retry code. Notify. sys.stderr.write("WARNING: 'retry' option of rest_call() has been deprecated. " "Please use 'API.modify_rest_retry()' instead.") if max_retry is not None: # Someone using deprecated retry code. Notify. sys.stderr.write("WARNING: 'max_retry' option of rest_call() has been deprecated. " "Please use 'API.modify_rest_retry()' instead.") if retry_sleep is not None: # Someone using deprecated retry code. Notify. sys.stderr.write("WARNING: 'retry_sleep' option of rest_call() has been deprecated. " "Please use 'API.modify_rest_retry()' instead.") # Get logging level, use this to bypass logging functions with possible large content if not set. logger_level = api_logger.getEffectiveLevel() # populate headers and cookies from session. if content_json and method.lower() not in ['get', 'delete']: headers = { 'Content-Type': 'application/json' } else: headers = {} # add session headers headers.update(self._session.headers) cookie = self._session.cookies.get_dict() # make sure data is populated if present. if isinstance(data, (list, dict)): data = json.dumps(data) api_logger.debug('REST_CALL URL = %s', url) # make request try: if not sensitive: api_logger.debug('\n\tREQUEST: %s %s\n\tHEADERS: %s\n\tCOOKIES: %s\n\tDATA: %s\n', method.upper(), url, headers, cookie, data) # Actual request response = self._session.request(method, url, data=data, verify=self.ca_verify_filename, stream=True, timeout=timeout, headers=headers, allow_redirects=False) # Request complete - let's parse.
# if it's a non-CGX-good response, return with cgx_status = False if response.status_code not in [requests.codes.ok, requests.codes.no_content, requests.codes.found, requests.codes.moved]: # Simple JSON debug if not sensitive: try: api_logger.debug('RESPONSE HEADERS: %s\n', json.dumps( json.loads(text_type(response.headers)), indent=4)) except ValueError: api_logger.debug('RESPONSE HEADERS: %s\n', text_type(response.headers)) try: api_logger.debug('RESPONSE: %s\n', json.dumps(response.json(), indent=4)) except ValueError: api_logger.debug('RESPONSE: %s\n', text_type(response.text)) else: api_logger.debug('RESPONSE NOT LOGGED (sensitive content)') api_logger.debug("Error, non-200 response received: %s", response.status_code) # CGX extend requests.Response for return response.cgx_status = False response.cgx_content = self._catch_nonjson_streamresponse(response.text) return response else: # Simple JSON debug if not sensitive and (logger_level <= logging.DEBUG and logger_level != logging.NOTSET): try: api_logger.debug('RESPONSE HEADERS: %s\n', json.dumps( json.loads(text_type(response.headers)), indent=4)) api_logger.debug('RESPONSE: %s\n', json.dumps(response.json(), indent=4)) except ValueError: api_logger.debug('RESPONSE HEADERS: %s\n', text_type(response.headers)) api_logger.debug('RESPONSE: %s\n', text_type(response.text)) elif sensitive: api_logger.debug('RESPONSE NOT LOGGED (sensitive content)') # CGX extend requests.Response for return response.cgx_status = True response.cgx_content = self._catch_nonjson_streamresponse(response.text) return response except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, urllib3.exceptions.MaxRetryError)\ as e: api_logger.info("Error, %s.", text_type(e)) # make a requests.Response object for return since we didn't get one. response = requests.Response # CGX extend requests.Response for return response.cgx_status = False response.cgx_content = { '_error': [ { 'message': 'REST Request Exception: {}'.format(e), 'data': {}, } ] } return response
python
def rest_call(self, url, method, data=None, sensitive=False, timeout=None, content_json=True, retry=None, max_retry=None, retry_sleep=None): """ Generic REST call worker function **Parameters:** - **url:** URL for the REST call - **method:** METHOD for the REST call - **data:** Optional DATA for the call (for POST/PUT/etc.) - **sensitive:** Flag if content request/response should be hidden from logging functions - **timeout:** Requests Timeout - **content_json:** Bool on whether the Content-Type header should be set to application/json - **retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. - **max_retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. - **retry_sleep:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. **Returns:** Requests.Response object, extended with: - **cgx_status**: Bool, True if a successful CloudGenix response, False if error. - **cgx_content**: Content of the response, guaranteed to be in Dict format. Empty/invalid responses will be converted to a Dict response. """ # pull retry related items from Constructor if not specified. if timeout is None: timeout = self.rest_call_timeout if retry is not None: # Someone using deprecated retry code. Notify. sys.stderr.write("WARNING: 'retry' option of rest_call() has been deprecated. " "Please use 'API.modify_rest_retry()' instead.") if max_retry is not None: # Someone using deprecated retry code. Notify. sys.stderr.write("WARNING: 'max_retry' option of rest_call() has been deprecated. " "Please use 'API.modify_rest_retry()' instead.") if retry_sleep is not None: # Someone using deprecated retry code. Notify. sys.stderr.write("WARNING: 'retry_sleep' option of rest_call() has been deprecated. " "Please use 'API.modify_rest_retry()' instead.") # Get logging level, use this to bypass logging functions with possible large content if not set. logger_level = api_logger.getEffectiveLevel() # populate headers and cookies from session. if content_json and method.lower() not in ['get', 'delete']: headers = { 'Content-Type': 'application/json' } else: headers = {} # add session headers headers.update(self._session.headers) cookie = self._session.cookies.get_dict() # make sure data is populated if present. if isinstance(data, (list, dict)): data = json.dumps(data) api_logger.debug('REST_CALL URL = %s', url) # make request try: if not sensitive: api_logger.debug('\n\tREQUEST: %s %s\n\tHEADERS: %s\n\tCOOKIES: %s\n\tDATA: %s\n', method.upper(), url, headers, cookie, data) # Actual request response = self._session.request(method, url, data=data, verify=self.ca_verify_filename, stream=True, timeout=timeout, headers=headers, allow_redirects=False) # Request complete - let's parse.
# if it's a non-CGX-good response, return with cgx_status = False if response.status_code not in [requests.codes.ok, requests.codes.no_content, requests.codes.found, requests.codes.moved]: # Simple JSON debug if not sensitive: try: api_logger.debug('RESPONSE HEADERS: %s\n', json.dumps( json.loads(text_type(response.headers)), indent=4)) except ValueError: api_logger.debug('RESPONSE HEADERS: %s\n', text_type(response.headers)) try: api_logger.debug('RESPONSE: %s\n', json.dumps(response.json(), indent=4)) except ValueError: api_logger.debug('RESPONSE: %s\n', text_type(response.text)) else: api_logger.debug('RESPONSE NOT LOGGED (sensitive content)') api_logger.debug("Error, non-200 response received: %s", response.status_code) # CGX extend requests.Response for return response.cgx_status = False response.cgx_content = self._catch_nonjson_streamresponse(response.text) return response else: # Simple JSON debug if not sensitive and (logger_level <= logging.DEBUG and logger_level != logging.NOTSET): try: api_logger.debug('RESPONSE HEADERS: %s\n', json.dumps( json.loads(text_type(response.headers)), indent=4)) api_logger.debug('RESPONSE: %s\n', json.dumps(response.json(), indent=4)) except ValueError: api_logger.debug('RESPONSE HEADERS: %s\n', text_type(response.headers)) api_logger.debug('RESPONSE: %s\n', text_type(response.text)) elif sensitive: api_logger.debug('RESPONSE NOT LOGGED (sensitive content)') # CGX extend requests.Response for return response.cgx_status = True response.cgx_content = self._catch_nonjson_streamresponse(response.text) return response except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, urllib3.exceptions.MaxRetryError)\ as e: api_logger.info("Error, %s.", text_type(e)) # make a requests.Response object for return since we didn't get one. response = requests.Response # CGX extend requests.Response for return response.cgx_status = False response.cgx_content = { '_error': [ { 'message': 'REST Request Exception: {}'.format(e), 'data': {}, } ] } return response
[ "def", "rest_call", "(", "self", ",", "url", ",", "method", ",", "data", "=", "None", ",", "sensitive", "=", "False", ",", "timeout", "=", "None", ",", "content_json", "=", "True", ",", "retry", "=", "None", ",", "max_retry", "=", "None", ",", "retry_sleep", "=", "None", ")", ":", "# pull retry related items from Constructor if not specified.", "if", "timeout", "is", "None", ":", "timeout", "=", "self", ".", "rest_call_timeout", "if", "retry", "is", "not", "None", ":", "# Someone using deprecated retry code. Notify.", "sys", ".", "stderr", ".", "write", "(", "\"WARNING: 'retry' option of rest_call() has been deprecated. \"", "\"Please use 'API.modify_rest_retry()' instead.\"", ")", "if", "max_retry", "is", "not", "None", ":", "# Someone using deprecated retry code. Notify.", "sys", ".", "stderr", ".", "write", "(", "\"WARNING: 'max_retry' option of rest_call() has been deprecated. \"", "\"Please use 'API.modify_rest_retry()' instead.\"", ")", "if", "retry_sleep", "is", "not", "None", ":", "# Someone using deprecated retry code. Notify.", "sys", ".", "stderr", ".", "write", "(", "\"WARNING: 'max_retry' option of rest_call() has been deprecated. \"", "\"Please use 'API.modify_rest_retry()' instead.\"", ")", "# Get logging level, use this to bypass logging functions with possible large content if not set.", "logger_level", "=", "api_logger", ".", "getEffectiveLevel", "(", ")", "# populate headers and cookies from session.", "if", "content_json", "and", "method", ".", "lower", "(", ")", "not", "in", "[", "'get'", ",", "'delete'", "]", ":", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", "else", ":", "headers", "=", "{", "}", "# add session headers", "headers", ".", "update", "(", "self", ".", "_session", ".", "headers", ")", "cookie", "=", "self", ".", "_session", ".", "cookies", ".", "get_dict", "(", ")", "# make sure data is populated if present.", "if", "isinstance", "(", "data", ",", "(", "list", ",", "dict", ")", ")", ":", "data", "=", "json", ".", "dumps", "(", "data", ")", "api_logger", ".", "debug", "(", "'REST_CALL URL = %s'", ",", "url", ")", "# make request", "try", ":", "if", "not", "sensitive", ":", "api_logger", ".", "debug", "(", "'\\n\\tREQUEST: %s %s\\n\\tHEADERS: %s\\n\\tCOOKIES: %s\\n\\tDATA: %s\\n'", ",", "method", ".", "upper", "(", ")", ",", "url", ",", "headers", ",", "cookie", ",", "data", ")", "# Actual request", "response", "=", "self", ".", "_session", ".", "request", "(", "method", ",", "url", ",", "data", "=", "data", ",", "verify", "=", "self", ".", "ca_verify_filename", ",", "stream", "=", "True", ",", "timeout", "=", "timeout", ",", "headers", "=", "headers", ",", "allow_redirects", "=", "False", ")", "# Request complete - lets parse.", "# if it's a non-CGX-good response, return with cgx_status = False", "if", "response", ".", "status_code", "not", "in", "[", "requests", ".", "codes", ".", "ok", ",", "requests", ".", "codes", ".", "no_content", ",", "requests", ".", "codes", ".", "found", ",", "requests", ".", "codes", ".", "moved", "]", ":", "# Simple JSON debug", "if", "not", "sensitive", ":", "try", ":", "api_logger", ".", "debug", "(", "'RESPONSE HEADERS: %s\\n'", ",", "json", ".", "dumps", "(", "json", ".", "loads", "(", "text_type", "(", "response", ".", "headers", ")", ")", ",", "indent", "=", "4", ")", ")", "except", "ValueError", ":", "api_logger", ".", "debug", "(", "'RESPONSE HEADERS: %s\\n'", ",", "text_type", "(", "response", ".", "headers", ")", ")", "try", ":", "api_logger", ".", "debug", "(", "'RESPONSE: %s\\n'", ",", 
"json", ".", "dumps", "(", "response", ".", "json", "(", ")", ",", "indent", "=", "4", ")", ")", "except", "ValueError", ":", "api_logger", ".", "debug", "(", "'RESPONSE: %s\\n'", ",", "text_type", "(", "response", ".", "text", ")", ")", "else", ":", "api_logger", ".", "debug", "(", "'RESPONSE NOT LOGGED (sensitive content)'", ")", "api_logger", ".", "debug", "(", "\"Error, non-200 response received: %s\"", ",", "response", ".", "status_code", ")", "# CGX extend requests.Response for return", "response", ".", "cgx_status", "=", "False", "response", ".", "cgx_content", "=", "self", ".", "_catch_nonjson_streamresponse", "(", "response", ".", "text", ")", "return", "response", "else", ":", "# Simple JSON debug", "if", "not", "sensitive", "and", "(", "logger_level", "<=", "logging", ".", "DEBUG", "and", "logger_level", "!=", "logging", ".", "NOTSET", ")", ":", "try", ":", "api_logger", ".", "debug", "(", "'RESPONSE HEADERS: %s\\n'", ",", "json", ".", "dumps", "(", "json", ".", "loads", "(", "text_type", "(", "response", ".", "headers", ")", ")", ",", "indent", "=", "4", ")", ")", "api_logger", ".", "debug", "(", "'RESPONSE: %s\\n'", ",", "json", ".", "dumps", "(", "response", ".", "json", "(", ")", ",", "indent", "=", "4", ")", ")", "except", "ValueError", ":", "api_logger", ".", "debug", "(", "'RESPONSE HEADERS: %s\\n'", ",", "text_type", "(", "response", ".", "headers", ")", ")", "api_logger", ".", "debug", "(", "'RESPONSE: %s\\n'", ",", "text_type", "(", "response", ".", "text", ")", ")", "elif", "sensitive", ":", "api_logger", ".", "debug", "(", "'RESPONSE NOT LOGGED (sensitive content)'", ")", "# CGX extend requests.Response for return", "response", ".", "cgx_status", "=", "True", "response", ".", "cgx_content", "=", "self", ".", "_catch_nonjson_streamresponse", "(", "response", ".", "text", ")", "return", "response", "except", "(", "requests", ".", "exceptions", ".", "Timeout", ",", "requests", ".", "exceptions", ".", "ConnectionError", ",", "urllib3", ".", "exceptions", ".", "MaxRetryError", ")", "as", "e", ":", "api_logger", ".", "info", "(", "\"Error, %s.\"", ",", "text_type", "(", "e", ")", ")", "# make a requests.Response object for return since we didn't get one.", "response", "=", "requests", ".", "Response", "# CGX extend requests.Response for return", "response", ".", "cgx_status", "=", "False", "response", ".", "cgx_content", "=", "{", "'_error'", ":", "[", "{", "'message'", ":", "'REST Request Exception: {}'", ".", "format", "(", "e", ")", ",", "'data'", ":", "{", "}", ",", "}", "]", "}", "return", "response" ]
Generic REST call worker function **Parameters:** - **url:** URL for the REST call - **method:** METHOD for the REST call - **data:** Optional DATA for the call (for POST/PUT/etc.) - **sensitive:** Flag if content request/response should be hidden from logging functions - **timeout:** Requests Timeout - **content_json:** Bool on whether the Content-Type header should be set to application/json - **retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. - **max_retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. - **retry_sleep:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. **Returns:** Requests.Response object, extended with: - **cgx_status**: Bool, True if a successful CloudGenix response, False if error. - **cgx_content**: Content of the response, guaranteed to be in Dict format. Empty/invalid responses will be converted to a Dict response.
[ "Generic", "REST", "call", "worker", "function" ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L755-L890
train
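A minimal offline sketch of the pattern rest_call() implements above: branch on HTTP status, then attach cgx_status / cgx_content to the response before returning it. FakeResponse and finish_rest_call are illustrative stand-ins, not part of the CloudGenix SDK; the numeric codes are the values of the requests.codes names used in the record.

import json

class FakeResponse:
    """Stand-in for requests.Response so this sketch runs without a network."""
    def __init__(self, status_code, text):
        self.status_code = status_code
        self.text = text

GOOD_CODES = {200, 204, 301, 302}  # ok, no_content, moved, found -- as in the record

def finish_rest_call(response):
    # mirror the cgx_status / cgx_content convention documented above
    response.cgx_status = response.status_code in GOOD_CODES
    try:
        response.cgx_content = json.loads(response.text)
    except (ValueError, TypeError):
        response.cgx_content = {} if not response.text else {
            '_error': [{'message': 'Response not in JSON format.',
                        'data': response.text}]}
    return response

ok = finish_rest_call(FakeResponse(200, '{"items": [1, 2]}'))
print(ok.cgx_status, ok.cgx_content['items'])                   # True [1, 2]
bad = finish_rest_call(FakeResponse(502, 'bad gateway'))
print(bad.cgx_status, bad.cgx_content['_error'][0]['message'])  # False Response not in JSON format.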
CloudGenix/sdk-python
cloudgenix/__init__.py
API._cleanup_ca_temp_file
def _cleanup_ca_temp_file(self): """ Function to clean up ca temp file for requests. **Returns:** Removes TEMP ca file, no return """ if os.name == 'nt': if isinstance(self.ca_verify_filename, (binary_type, text_type)): # windows requires file to be closed for access. Have to manually remove os.unlink(self.ca_verify_filename) else: # other OS's allow close and delete of file. self._ca_verify_file_handle.close()
python
def _cleanup_ca_temp_file(self): """ Function to clean up ca temp file for requests. **Returns:** Removes TEMP ca file, no return """ if os.name == 'nt': if isinstance(self.ca_verify_filename, (binary_type, text_type)): # windows requires file to be closed for access. Have to manually remove os.unlink(self.ca_verify_filename) else: # other OS's allow close and delete of file. self._ca_verify_file_handle.close()
[ "def", "_cleanup_ca_temp_file", "(", "self", ")", ":", "if", "os", ".", "name", "==", "'nt'", ":", "if", "isinstance", "(", "self", ".", "ca_verify_filename", ",", "(", "binary_type", ",", "text_type", ")", ")", ":", "# windows requires file to be closed for access. Have to manually remove", "os", ".", "unlink", "(", "self", ".", "ca_verify_filename", ")", "else", ":", "# other OS's allow close and delete of file.", "self", ".", "_ca_verify_file_handle", ".", "close", "(", ")" ]
Function to clean up ca temp file for requests. **Returns:** Removes TEMP ca file, no return
[ "Function", "to", "clean", "up", "ca", "temp", "file", "for", "requests", "." ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L892-L904
train
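A small sketch of the temp-file lifecycle this cleanup implies: on Windows a NamedTemporaryFile cannot be reopened while the handle is open, so the portable pattern is delete=False plus a manual os.unlink, which is what the nt branch above does. The bundle contents here are placeholders.

import os
import tempfile

# create the bundle closed-and-persistent so Requests can open it on any OS
handle = tempfile.NamedTemporaryFile(suffix='.pem', delete=False)
handle.write(b'# CA bundle contents would go here\n')
handle.close()
ca_verify_filename = handle.name   # pass this as requests' verify=... argument

# ... REST calls using the bundle would happen here ...

# cleanup, mirroring the nt branch of _cleanup_ca_temp_file above
os.unlink(ca_verify_filename)
print('removed', ca_verify_filename)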
CloudGenix/sdk-python
cloudgenix/__init__.py
API.parse_auth_token
def parse_auth_token(self, auth_token):
    """
    Break auth_token up into its constituent values.

    **Parameters:**

      - **auth_token:** Auth_token string

    **Returns:** dict with Auth Token constituents
    """
    # remove the random security key value from the front of the auth_token
    auth_token_cleaned = auth_token.split('-', 1)[1]
    # URL Decode the Auth Token
    auth_token_decoded = self.url_decode(auth_token_cleaned)
    # Create a new dict to hold the response.
    auth_dict = {}
    # Parse the token
    for key_value in auth_token_decoded.split("&"):
        key_value_list = key_value.split("=")
        # check for valid token parts
        if len(key_value_list) == 2 and type(key_value_list[0]) in [text_type, binary_type]:
            auth_dict[key_value_list[0]] = key_value_list[1]
    # Return the dict of key/values in the token.
    return auth_dict
python
def parse_auth_token(self, auth_token):
    """
    Break auth_token up into its constituent values.

    **Parameters:**

      - **auth_token:** Auth_token string

    **Returns:** dict with Auth Token constituents
    """
    # remove the random security key value from the front of the auth_token
    auth_token_cleaned = auth_token.split('-', 1)[1]
    # URL Decode the Auth Token
    auth_token_decoded = self.url_decode(auth_token_cleaned)
    # Create a new dict to hold the response.
    auth_dict = {}
    # Parse the token
    for key_value in auth_token_decoded.split("&"):
        key_value_list = key_value.split("=")
        # check for valid token parts
        if len(key_value_list) == 2 and type(key_value_list[0]) in [text_type, binary_type]:
            auth_dict[key_value_list[0]] = key_value_list[1]
    # Return the dict of key/values in the token.
    return auth_dict
[ "def", "parse_auth_token", "(", "self", ",", "auth_token", ")", ":", "# remove the random security key value from the front of the auth_token", "auth_token_cleaned", "=", "auth_token", ".", "split", "(", "'-'", ",", "1", ")", "[", "1", "]", "# URL Decode the Auth Token", "auth_token_decoded", "=", "self", ".", "url_decode", "(", "auth_token_cleaned", ")", "# Create a new dict to hold the response.", "auth_dict", "=", "{", "}", "# Parse the token", "for", "key_value", "in", "auth_token_decoded", ".", "split", "(", "\"&\"", ")", ":", "key_value_list", "=", "key_value", ".", "split", "(", "\"=\"", ")", "# check for valid token parts", "if", "len", "(", "key_value_list", ")", "==", "2", "and", "type", "(", "key_value_list", "[", "0", "]", ")", "in", "[", "text_type", ",", "binary_type", "]", ":", "auth_dict", "[", "key_value_list", "[", "0", "]", "]", "=", "key_value_list", "[", "1", "]", "# Return the dict of key/values in the token.", "return", "auth_dict" ]
Break auth_token up into its constituent values.

**Parameters:**

  - **auth_token:** Auth_token string

**Returns:** dict with Auth Token constituents
[ "Break", "auth_token", "up", "into", "it", "s", "constituent", "values", "." ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L906-L931
train
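The same parse, exercised on a made-up token. The "securitykey-urlencoded_pairs" layout is inferred from the function above, and urllib.parse.unquote stands in for the SDK's own url_decode helper.

from urllib.parse import unquote

def parse_auth_token(auth_token):
    cleaned = auth_token.split('-', 1)[1]        # drop the leading security key
    decoded = unquote(cleaned)                   # URL-decode the remainder
    auth_dict = {}
    for key_value in decoded.split('&'):
        parts = key_value.split('=')
        if len(parts) == 2:                      # keep only well-formed pairs
            auth_dict[parts[0]] = parts[1]
    return auth_dict

token = 'k3y-region%3Dwestus2%26tenant_id%3D42'  # entirely fabricated token
print(parse_auth_token(token))                   # {'region': 'westus2', 'tenant_id': '42'}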
CloudGenix/sdk-python
cloudgenix/__init__.py
API.update_region_to_controller
def update_region_to_controller(self, region): """ Update the controller string with dynamic region info. Controller string should end up as `<name[-env]>.<region>.cloudgenix.com` **Parameters:** - **region:** region string. **Returns:** No return value, mutates the controller in the class namespace """ # default region position in a list region_position = 1 # Check for a global "ignore region" flag if self.ignore_region: # bypass api_logger.debug("IGNORE_REGION set, not updating controller region.") return api_logger.debug("Updating Controller Region") api_logger.debug("CONTROLLER = %s", self.controller) api_logger.debug("CONTROLLER_ORIG = %s", self.controller_orig) api_logger.debug("CONTROLLER_REGION = %s", self.controller_region) # Check if this is an initial region use or an update region use if self.controller_orig: controller_base = self.controller_orig else: controller_base = self.controller self.controller_orig = self.controller # splice controller string controller_full_part_list = controller_base.split('.') for idx, part in enumerate(controller_full_part_list): # is the region already in the controller string? if region == part: # yes, controller already has apropriate region api_logger.debug("REGION %s ALREADY IN CONTROLLER AT INDEX = %s", region, idx) # update region if it is not already set. if self.controller_region != region: self.controller_region = region api_logger.debug("UPDATED_CONTROLLER_REGION = %s", self.controller_region) return controller_part_count = len(controller_full_part_list) # handle short domain case if controller_part_count > 1: # insert region controller_full_part_list[region_position] = region self.controller = ".".join(controller_full_part_list) else: # short domain, just add region self.controller = ".".join(controller_full_part_list) + '.' + region # update SDK vars with region info self.controller_orig = controller_base self.controller_region = region api_logger.debug("UPDATED_CONTROLLER = %s", self.controller) api_logger.debug("UPDATED_CONTROLLER_ORIG = %s", self.controller_orig) api_logger.debug("UPDATED_CONTROLLER_REGION = %s", self.controller_region) return
python
def update_region_to_controller(self, region): """ Update the controller string with dynamic region info. Controller string should end up as `<name[-env]>.<region>.cloudgenix.com` **Parameters:** - **region:** region string. **Returns:** No return value, mutates the controller in the class namespace """ # default region position in a list region_position = 1 # Check for a global "ignore region" flag if self.ignore_region: # bypass api_logger.debug("IGNORE_REGION set, not updating controller region.") return api_logger.debug("Updating Controller Region") api_logger.debug("CONTROLLER = %s", self.controller) api_logger.debug("CONTROLLER_ORIG = %s", self.controller_orig) api_logger.debug("CONTROLLER_REGION = %s", self.controller_region) # Check if this is an initial region use or an update region use if self.controller_orig: controller_base = self.controller_orig else: controller_base = self.controller self.controller_orig = self.controller # splice controller string controller_full_part_list = controller_base.split('.') for idx, part in enumerate(controller_full_part_list): # is the region already in the controller string? if region == part: # yes, controller already has apropriate region api_logger.debug("REGION %s ALREADY IN CONTROLLER AT INDEX = %s", region, idx) # update region if it is not already set. if self.controller_region != region: self.controller_region = region api_logger.debug("UPDATED_CONTROLLER_REGION = %s", self.controller_region) return controller_part_count = len(controller_full_part_list) # handle short domain case if controller_part_count > 1: # insert region controller_full_part_list[region_position] = region self.controller = ".".join(controller_full_part_list) else: # short domain, just add region self.controller = ".".join(controller_full_part_list) + '.' + region # update SDK vars with region info self.controller_orig = controller_base self.controller_region = region api_logger.debug("UPDATED_CONTROLLER = %s", self.controller) api_logger.debug("UPDATED_CONTROLLER_ORIG = %s", self.controller_orig) api_logger.debug("UPDATED_CONTROLLER_REGION = %s", self.controller_region) return
[ "def", "update_region_to_controller", "(", "self", ",", "region", ")", ":", "# default region position in a list", "region_position", "=", "1", "# Check for a global \"ignore region\" flag", "if", "self", ".", "ignore_region", ":", "# bypass", "api_logger", ".", "debug", "(", "\"IGNORE_REGION set, not updating controller region.\"", ")", "return", "api_logger", ".", "debug", "(", "\"Updating Controller Region\"", ")", "api_logger", ".", "debug", "(", "\"CONTROLLER = %s\"", ",", "self", ".", "controller", ")", "api_logger", ".", "debug", "(", "\"CONTROLLER_ORIG = %s\"", ",", "self", ".", "controller_orig", ")", "api_logger", ".", "debug", "(", "\"CONTROLLER_REGION = %s\"", ",", "self", ".", "controller_region", ")", "# Check if this is an initial region use or an update region use", "if", "self", ".", "controller_orig", ":", "controller_base", "=", "self", ".", "controller_orig", "else", ":", "controller_base", "=", "self", ".", "controller", "self", ".", "controller_orig", "=", "self", ".", "controller", "# splice controller string", "controller_full_part_list", "=", "controller_base", ".", "split", "(", "'.'", ")", "for", "idx", ",", "part", "in", "enumerate", "(", "controller_full_part_list", ")", ":", "# is the region already in the controller string?", "if", "region", "==", "part", ":", "# yes, controller already has apropriate region", "api_logger", ".", "debug", "(", "\"REGION %s ALREADY IN CONTROLLER AT INDEX = %s\"", ",", "region", ",", "idx", ")", "# update region if it is not already set.", "if", "self", ".", "controller_region", "!=", "region", ":", "self", ".", "controller_region", "=", "region", "api_logger", ".", "debug", "(", "\"UPDATED_CONTROLLER_REGION = %s\"", ",", "self", ".", "controller_region", ")", "return", "controller_part_count", "=", "len", "(", "controller_full_part_list", ")", "# handle short domain case", "if", "controller_part_count", ">", "1", ":", "# insert region", "controller_full_part_list", "[", "region_position", "]", "=", "region", "self", ".", "controller", "=", "\".\"", ".", "join", "(", "controller_full_part_list", ")", "else", ":", "# short domain, just add region", "self", ".", "controller", "=", "\".\"", ".", "join", "(", "controller_full_part_list", ")", "+", "'.'", "+", "region", "# update SDK vars with region info", "self", ".", "controller_orig", "=", "controller_base", "self", ".", "controller_region", "=", "region", "api_logger", ".", "debug", "(", "\"UPDATED_CONTROLLER = %s\"", ",", "self", ".", "controller", ")", "api_logger", ".", "debug", "(", "\"UPDATED_CONTROLLER_ORIG = %s\"", ",", "self", ".", "controller_orig", ")", "api_logger", ".", "debug", "(", "\"UPDATED_CONTROLLER_REGION = %s\"", ",", "self", ".", "controller_region", ")", "return" ]
Update the controller string with dynamic region info. Controller string should end up as `<name[-env]>.<region>.cloudgenix.com` **Parameters:** - **region:** region string. **Returns:** No return value, mutates the controller in the class namespace
[ "Update", "the", "controller", "string", "with", "dynamic", "region", "info", ".", "Controller", "string", "should", "end", "up", "as", "<name", "[", "-", "env", "]", ">", ".", "<region", ">", ".", "cloudgenix", ".", "com" ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L933-L997
train
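Stripped of the logging and instance state, the hostname splice above reduces to a few lines; a pure-function sketch with the same index-1 region slot the record uses (the real method also tracks controller_orig across calls, which this omits):

def with_region(controller, region, region_position=1):
    parts = controller.split('.')
    if region in parts:
        return controller                        # already carries this region
    if len(parts) > 1:
        parts[region_position] = region          # swap the region label in
        return '.'.join(parts)
    return controller + '.' + region             # bare short name: append

print(with_region('api.hood.cloudgenix.com', 'sea'))  # api.sea.cloudgenix.com
print(with_region('api.sea.cloudgenix.com', 'sea'))   # unchanged
print(with_region('controller', 'sea'))               # controller.sea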
CloudGenix/sdk-python
cloudgenix/__init__.py
API.parse_region
def parse_region(self, login_response): """ Return region from a successful login response. **Parameters:** - **login_response:** requests.Response from a successful login. **Returns:** region name. """ auth_token = login_response.cgx_content['x_auth_token'] auth_token_dict = self.parse_auth_token(auth_token) auth_region = auth_token_dict.get('region') return auth_region
python
def parse_region(self, login_response): """ Return region from a successful login response. **Parameters:** - **login_response:** requests.Response from a successful login. **Returns:** region name. """ auth_token = login_response.cgx_content['x_auth_token'] auth_token_dict = self.parse_auth_token(auth_token) auth_region = auth_token_dict.get('region') return auth_region
[ "def", "parse_region", "(", "self", ",", "login_response", ")", ":", "auth_token", "=", "login_response", ".", "cgx_content", "[", "'x_auth_token'", "]", "auth_token_dict", "=", "self", ".", "parse_auth_token", "(", "auth_token", ")", "auth_region", "=", "auth_token_dict", ".", "get", "(", "'region'", ")", "return", "auth_region" ]
Return region from a successful login response. **Parameters:** - **login_response:** requests.Response from a successful login. **Returns:** region name.
[ "Return", "region", "from", "a", "successful", "login", "response", "." ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L999-L1012
train
CloudGenix/sdk-python
cloudgenix/__init__.py
API.reparse_login_cookie_after_region_update
def reparse_login_cookie_after_region_update(self, login_response): """ Sometimes, login cookie gets sent with region info instead of api.cloudgenix.com. This function re-parses the original login request and applies cookies to the session if they now match the new region. **Parameters:** - **login_response:** requests.Response from a non-region login. **Returns:** updates API() object directly, no return. """ login_url = login_response.request.url api_logger.debug("ORIGINAL REQUEST URL = %s", login_url) # replace old controller with new controller. login_url_new = login_url.replace(self.controller_orig, self.controller) api_logger.debug("UPDATED REQUEST URL = %s", login_url_new) # reset login url with new region login_response.request.url = login_url_new # prep cookie jar parsing req = requests.cookies.MockRequest(login_response.request) res = requests.cookies.MockResponse(login_response.raw._original_response.msg) # extract cookies to session cookie jar. self._session.cookies.extract_cookies(res, req) return
python
def reparse_login_cookie_after_region_update(self, login_response): """ Sometimes, login cookie gets sent with region info instead of api.cloudgenix.com. This function re-parses the original login request and applies cookies to the session if they now match the new region. **Parameters:** - **login_response:** requests.Response from a non-region login. **Returns:** updates API() object directly, no return. """ login_url = login_response.request.url api_logger.debug("ORIGINAL REQUEST URL = %s", login_url) # replace old controller with new controller. login_url_new = login_url.replace(self.controller_orig, self.controller) api_logger.debug("UPDATED REQUEST URL = %s", login_url_new) # reset login url with new region login_response.request.url = login_url_new # prep cookie jar parsing req = requests.cookies.MockRequest(login_response.request) res = requests.cookies.MockResponse(login_response.raw._original_response.msg) # extract cookies to session cookie jar. self._session.cookies.extract_cookies(res, req) return
[ "def", "reparse_login_cookie_after_region_update", "(", "self", ",", "login_response", ")", ":", "login_url", "=", "login_response", ".", "request", ".", "url", "api_logger", ".", "debug", "(", "\"ORIGINAL REQUEST URL = %s\"", ",", "login_url", ")", "# replace old controller with new controller.", "login_url_new", "=", "login_url", ".", "replace", "(", "self", ".", "controller_orig", ",", "self", ".", "controller", ")", "api_logger", ".", "debug", "(", "\"UPDATED REQUEST URL = %s\"", ",", "login_url_new", ")", "# reset login url with new region", "login_response", ".", "request", ".", "url", "=", "login_url_new", "# prep cookie jar parsing", "req", "=", "requests", ".", "cookies", ".", "MockRequest", "(", "login_response", ".", "request", ")", "res", "=", "requests", ".", "cookies", ".", "MockResponse", "(", "login_response", ".", "raw", ".", "_original_response", ".", "msg", ")", "# extract cookies to session cookie jar.", "self", ".", "_session", ".", "cookies", ".", "extract_cookies", "(", "res", ",", "req", ")", "return" ]
Sometimes, login cookie gets sent with region info instead of api.cloudgenix.com. This function re-parses the original login request and applies cookies to the session if they now match the new region. **Parameters:** - **login_response:** requests.Response from a non-region login. **Returns:** updates API() object directly, no return.
[ "Sometimes", "login", "cookie", "gets", "sent", "with", "region", "info", "instead", "of", "api", ".", "cloudgenix", ".", "com", ".", "This", "function", "re", "-", "parses", "the", "original", "login", "request", "and", "applies", "cookies", "to", "the", "session", "if", "they", "now", "match", "the", "new", "region", "." ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L1014-L1038
train
CloudGenix/sdk-python
cloudgenix/__init__.py
API._catch_nonjson_streamresponse
def _catch_nonjson_streamresponse(rawresponse): """ Validate a streamed response is JSON. Return a Python dictionary either way. **Parameters:** - **rawresponse:** Streamed Response from Requests. **Returns:** Dictionary """ # attempt to load response for return. try: response = json.loads(rawresponse) except (ValueError, TypeError): if rawresponse: response = { '_error': [ { 'message': 'Response not in JSON format.', 'data': rawresponse, } ] } else: # in case of null response, return empty dict. response = {} return response
python
def _catch_nonjson_streamresponse(rawresponse): """ Validate a streamed response is JSON. Return a Python dictionary either way. **Parameters:** - **rawresponse:** Streamed Response from Requests. **Returns:** Dictionary """ # attempt to load response for return. try: response = json.loads(rawresponse) except (ValueError, TypeError): if rawresponse: response = { '_error': [ { 'message': 'Response not in JSON format.', 'data': rawresponse, } ] } else: # in case of null response, return empty dict. response = {} return response
[ "def", "_catch_nonjson_streamresponse", "(", "rawresponse", ")", ":", "# attempt to load response for return.", "try", ":", "response", "=", "json", ".", "loads", "(", "rawresponse", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "if", "rawresponse", ":", "response", "=", "{", "'_error'", ":", "[", "{", "'message'", ":", "'Response not in JSON format.'", ",", "'data'", ":", "rawresponse", ",", "}", "]", "}", "else", ":", "# in case of null response, return empty dict.", "response", "=", "{", "}", "return", "response" ]
Validate a streamed response is JSON. Return a Python dictionary either way. **Parameters:** - **rawresponse:** Streamed Response from Requests. **Returns:** Dictionary
[ "Validate", "a", "streamed", "response", "is", "JSON", ".", "Return", "a", "Python", "dictionary", "either", "way", "." ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L1041-L1069
train
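The except clause above catches both failure modes: ValueError for non-JSON text and TypeError when the stream yielded None. A quick check of all three paths:

import json

def catch_nonjson(raw):
    try:
        return json.loads(raw)
    except (ValueError, TypeError):
        if raw:
            return {'_error': [{'message': 'Response not in JSON format.',
                                'data': raw}]}
        return {}   # null/empty response becomes an empty dict

print(catch_nonjson('{"ok": true}'))                      # {'ok': True}
print(catch_nonjson('<html>')['_error'][0]['message'])    # Response not in JSON format.
print(catch_nonjson(None), catch_nonjson(''))             # {} {}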
CloudGenix/sdk-python
cloudgenix/__init__.py
API.url_decode
def url_decode(url): """ URL Decode function using REGEX **Parameters:** - **url:** URLENCODED text string **Returns:** Non URLENCODED string """ return re.compile('%([0-9a-fA-F]{2})', re.M).sub(lambda m: chr(int(m.group(1), 16)), url)
python
def url_decode(url): """ URL Decode function using REGEX **Parameters:** - **url:** URLENCODED text string **Returns:** Non URLENCODED string """ return re.compile('%([0-9a-fA-F]{2})', re.M).sub(lambda m: chr(int(m.group(1), 16)), url)
[ "def", "url_decode", "(", "url", ")", ":", "return", "re", ".", "compile", "(", "'%([0-9a-fA-F]{2})'", ",", "re", ".", "M", ")", ".", "sub", "(", "lambda", "m", ":", "chr", "(", "int", "(", "m", ".", "group", "(", "1", ")", ",", "16", ")", ")", ",", "url", ")" ]
URL Decode function using REGEX **Parameters:** - **url:** URLENCODED text string **Returns:** Non URLENCODED string
[ "URL", "Decode", "function", "using", "REGEX" ]
1b2f92582b6a19769134914793bfd00e4caa074b
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L1072-L1082
train
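The regex decoder handles %XX escapes only and does not translate '+' to a space the way form decoding does. For comparison, the stdlib's urllib.parse.unquote gives the same result on ASCII escapes:

import re
from urllib.parse import unquote

def url_decode(url):
    # %XX -> chr(0xXX), same expression as the staticmethod above
    return re.compile('%([0-9a-fA-F]{2})', re.M).sub(
        lambda m: chr(int(m.group(1), 16)), url)

s = 'region%3Dwestus2%26x%3D1'
print(url_decode(s))   # region=westus2&x=1
print(unquote(s))      # region=westus2&x=1 (stdlib equivalent for ASCII escapes)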
deplicate/deplicate
duplicate/utils/fs/nt.py
blksize
def blksize(path): """ Get optimal file system buffer size (in bytes) for I/O calls. """ diskfreespace = win32file.GetDiskFreeSpace dirname = os.path.dirname(fullpath(path)) try: cluster_sectors, sector_size = diskfreespace(dirname)[:2] size = cluster_sectors * sector_size except win32file.error as e: if e.winerror != winerror.ERROR_NOT_READY: raise sleep(3) size = blksize(dirname) return size
python
def blksize(path): """ Get optimal file system buffer size (in bytes) for I/O calls. """ diskfreespace = win32file.GetDiskFreeSpace dirname = os.path.dirname(fullpath(path)) try: cluster_sectors, sector_size = diskfreespace(dirname)[:2] size = cluster_sectors * sector_size except win32file.error as e: if e.winerror != winerror.ERROR_NOT_READY: raise sleep(3) size = blksize(dirname) return size
[ "def", "blksize", "(", "path", ")", ":", "diskfreespace", "=", "win32file", ".", "GetDiskFreeSpace", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "fullpath", "(", "path", ")", ")", "try", ":", "cluster_sectors", ",", "sector_size", "=", "diskfreespace", "(", "dirname", ")", "[", ":", "2", "]", "size", "=", "cluster_sectors", "*", "sector_size", "except", "win32file", ".", "error", "as", "e", ":", "if", "e", ".", "winerror", "!=", "winerror", ".", "ERROR_NOT_READY", ":", "raise", "sleep", "(", "3", ")", "size", "=", "blksize", "(", "dirname", ")", "return", "size" ]
Get optimal file system buffer size (in bytes) for I/O calls.
[ "Get", "optimal", "file", "system", "buffer", "size", "(", "in", "bytes", ")", "for", "I", "/", "O", "calls", "." ]
9975502571d1d024a990f5cb304d01b63c0d7717
https://github.com/deplicate/deplicate/blob/9975502571d1d024a990f5cb304d01b63c0d7717/duplicate/utils/fs/nt.py#L28-L44
train
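blksize() above is the Windows (pywin32) path: cluster size = sectors-per-cluster x bytes-per-sector, retried while the drive reports not-ready. A hedged sketch of the POSIX analogue uses os.statvfs (unavailable on Windows), falling back to io.DEFAULT_BUFFER_SIZE:

import io
import os

def blksize_posix(path, default=io.DEFAULT_BUFFER_SIZE):
    """Preferred I/O block size for the filesystem holding `path` (POSIX)."""
    try:
        return os.statvfs(os.path.dirname(os.path.abspath(path))).f_bsize
    except (AttributeError, OSError):   # no statvfs (Windows) or unreadable path
        return default

print(blksize_posix('.'))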
greyli/flask-avatars
flask_avatars/__init__.py
_Avatars.gravatar
def gravatar(hash, size=100, rating='g', default='identicon', include_extension=False, force_default=False):
    """Pass email hash, return Gravatar URL. You can get email hash like this::

        import hashlib
        avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()

    Visit https://en.gravatar.com/site/implement/images/ for more information.

    :param hash: The email hash used to generate the avatar URL.
    :param size: The size of the avatar, defaults to 100 pixels.
    :param rating: The rating of the avatar, defaults to ``g``.
    :param default: The type of default avatar, defaults to ``identicon``.
    :param include_extension: Append a '.jpg' extension at the end of the URL, defaults to ``False``.
    :param force_default: Force to use the default avatar, defaults to ``False``.
    """
    if include_extension:
        hash += '.jpg'
    default = default or current_app.config['AVATARS_GRAVATAR_DEFAULT']
    query_string = urlencode({'s': int(size), 'r': rating, 'd': default})
    if force_default:
        query_string += '&q=y'
    return 'https://gravatar.com/avatar/' + hash + '?' + query_string
python
def gravatar(hash, size=100, rating='g', default='identicon', include_extension=False, force_default=False):
    """Pass email hash, return Gravatar URL. You can get email hash like this::

        import hashlib
        avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()

    Visit https://en.gravatar.com/site/implement/images/ for more information.

    :param hash: The email hash used to generate the avatar URL.
    :param size: The size of the avatar, defaults to 100 pixels.
    :param rating: The rating of the avatar, defaults to ``g``.
    :param default: The type of default avatar, defaults to ``identicon``.
    :param include_extension: Append a '.jpg' extension at the end of the URL, defaults to ``False``.
    :param force_default: Force to use the default avatar, defaults to ``False``.
    """
    if include_extension:
        hash += '.jpg'
    default = default or current_app.config['AVATARS_GRAVATAR_DEFAULT']
    query_string = urlencode({'s': int(size), 'r': rating, 'd': default})
    if force_default:
        query_string += '&q=y'
    return 'https://gravatar.com/avatar/' + hash + '?' + query_string
[ "def", "gravatar", "(", "hash", ",", "size", "=", "100", ",", "rating", "=", "'g'", ",", "default", "=", "'identicon'", ",", "include_extension", "=", "False", ",", "force_default", "=", "False", ")", ":", "if", "include_extension", ":", "hash", "+=", "'.jpg'", "default", "=", "default", "or", "current_app", ".", "config", "[", "'AVATARS_GRAVATAR_DEFAULT'", "]", "query_string", "=", "urlencode", "(", "{", "'s'", ":", "int", "(", "size", ")", ",", "'r'", ":", "rating", ",", "'d'", ":", "default", "}", ")", "if", "force_default", ":", "query_string", "+=", "'&q=y'", "return", "'https://gravatar.com/avatar/'", "+", "hash", "+", "'?'", "+", "query_string" ]
Pass email hash, return Gravatar URL. You can get email hash like this::

    import hashlib
    avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()

Visit https://en.gravatar.com/site/implement/images/ for more information.

:param hash: The email hash used to generate the avatar URL.
:param size: The size of the avatar, defaults to 100 pixels.
:param rating: The rating of the avatar, defaults to ``g``.
:param default: The type of default avatar, defaults to ``identicon``.
:param include_extension: Append a '.jpg' extension at the end of the URL, defaults to ``False``.
:param force_default: Force to use the default avatar, defaults to ``False``.
[ "Pass", "email", "hash", "return", "Gravatar", "URL", ".", "You", "can", "get", "email", "hash", "like", "this", "::" ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L27-L50
train
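End-to-end, outside Flask: hash the e-mail as the docstring shows, then build the URL the same way. A standalone sketch with a made-up address; no app context, so the config-based default-avatar fallback is skipped.

import hashlib
from urllib.parse import urlencode

email = 'jane@example.com'                     # made-up address
avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()

query = urlencode({'s': 100, 'r': 'g', 'd': 'identicon'})
url = 'https://gravatar.com/avatar/' + avatar_hash + '?' + query
print(url)   # https://gravatar.com/avatar/<32-char-hex>?s=100&r=g&d=identicon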
greyli/flask-avatars
flask_avatars/__init__.py
_Avatars.social_media
def social_media(username, platform='twitter', size='medium'): """Return avatar URL at social media. Visit https://avatars.io for more information. :param username: The username of the social media. :param platform: One of facebook, instagram, twitter, gravatar. :param size: The size of avatar, one of small, medium and large. """ return 'https://avatars.io/{platform}/{username}/{size}'.format( platform=platform, username=username, size=size)
python
def social_media(username, platform='twitter', size='medium'): """Return avatar URL at social media. Visit https://avatars.io for more information. :param username: The username of the social media. :param platform: One of facebook, instagram, twitter, gravatar. :param size: The size of avatar, one of small, medium and large. """ return 'https://avatars.io/{platform}/{username}/{size}'.format( platform=platform, username=username, size=size)
[ "def", "social_media", "(", "username", ",", "platform", "=", "'twitter'", ",", "size", "=", "'medium'", ")", ":", "return", "'https://avatars.io/{platform}/{username}/{size}'", ".", "format", "(", "platform", "=", "platform", ",", "username", "=", "username", ",", "size", "=", "size", ")" ]
Return avatar URL at social media. Visit https://avatars.io for more information. :param username: The username of the social media. :param platform: One of facebook, instagram, twitter, gravatar. :param size: The size of avatar, one of small, medium and large.
[ "Return", "avatar", "URL", "at", "social", "media", ".", "Visit", "https", ":", "//", "avatars", ".", "io", "for", "more", "information", "." ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L63-L72
train
greyli/flask-avatars
flask_avatars/__init__.py
_Avatars.jcrop_css
def jcrop_css(css_url=None): """Load jcrop css file. :param css_url: The custom CSS URL. """ if css_url is None: if current_app.config['AVATARS_SERVE_LOCAL']: css_url = url_for('avatars.static', filename='jcrop/css/jquery.Jcrop.min.css') else: css_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/css/jquery.Jcrop.min.css' return Markup('<link rel="stylesheet" href="%s">' % css_url)
python
def jcrop_css(css_url=None): """Load jcrop css file. :param css_url: The custom CSS URL. """ if css_url is None: if current_app.config['AVATARS_SERVE_LOCAL']: css_url = url_for('avatars.static', filename='jcrop/css/jquery.Jcrop.min.css') else: css_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/css/jquery.Jcrop.min.css' return Markup('<link rel="stylesheet" href="%s">' % css_url)
[ "def", "jcrop_css", "(", "css_url", "=", "None", ")", ":", "if", "css_url", "is", "None", ":", "if", "current_app", ".", "config", "[", "'AVATARS_SERVE_LOCAL'", "]", ":", "css_url", "=", "url_for", "(", "'avatars.static'", ",", "filename", "=", "'jcrop/css/jquery.Jcrop.min.css'", ")", "else", ":", "css_url", "=", "'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/css/jquery.Jcrop.min.css'", "return", "Markup", "(", "'<link rel=\"stylesheet\" href=\"%s\">'", "%", "css_url", ")" ]
Load jcrop css file. :param css_url: The custom CSS URL.
[ "Load", "jcrop", "css", "file", "." ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L84-L94
train
greyli/flask-avatars
flask_avatars/__init__.py
_Avatars.jcrop_js
def jcrop_js(js_url=None, with_jquery=True): """Load jcrop Javascript file. :param js_url: The custom JavaScript URL. :param with_jquery: Include jQuery or not, default to ``True``. """ serve_local = current_app.config['AVATARS_SERVE_LOCAL'] if js_url is None: if serve_local: js_url = url_for('avatars.static', filename='jcrop/js/jquery.Jcrop.min.js') else: js_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.Jcrop.min.js' if with_jquery: if serve_local: jquery = '<script src="%s"></script>' % url_for('avatars.static', filename='jcrop/js/jquery.min.js') else: jquery = '<script src="https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.min.js"></script>' else: jquery = '' return Markup('''%s\n<script src="%s"></script> ''' % (jquery, js_url))
python
def jcrop_js(js_url=None, with_jquery=True): """Load jcrop Javascript file. :param js_url: The custom JavaScript URL. :param with_jquery: Include jQuery or not, default to ``True``. """ serve_local = current_app.config['AVATARS_SERVE_LOCAL'] if js_url is None: if serve_local: js_url = url_for('avatars.static', filename='jcrop/js/jquery.Jcrop.min.js') else: js_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.Jcrop.min.js' if with_jquery: if serve_local: jquery = '<script src="%s"></script>' % url_for('avatars.static', filename='jcrop/js/jquery.min.js') else: jquery = '<script src="https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.min.js"></script>' else: jquery = '' return Markup('''%s\n<script src="%s"></script> ''' % (jquery, js_url))
[ "def", "jcrop_js", "(", "js_url", "=", "None", ",", "with_jquery", "=", "True", ")", ":", "serve_local", "=", "current_app", ".", "config", "[", "'AVATARS_SERVE_LOCAL'", "]", "if", "js_url", "is", "None", ":", "if", "serve_local", ":", "js_url", "=", "url_for", "(", "'avatars.static'", ",", "filename", "=", "'jcrop/js/jquery.Jcrop.min.js'", ")", "else", ":", "js_url", "=", "'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.Jcrop.min.js'", "if", "with_jquery", ":", "if", "serve_local", ":", "jquery", "=", "'<script src=\"%s\"></script>'", "%", "url_for", "(", "'avatars.static'", ",", "filename", "=", "'jcrop/js/jquery.min.js'", ")", "else", ":", "jquery", "=", "'<script src=\"https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.min.js\"></script>'", "else", ":", "jquery", "=", "''", "return", "Markup", "(", "'''%s\\n<script src=\"%s\"></script>\n '''", "%", "(", "jquery", ",", "js_url", ")", ")" ]
Load jcrop Javascript file. :param js_url: The custom JavaScript URL. :param with_jquery: Include jQuery or not, default to ``True``.
[ "Load", "jcrop", "Javascript", "file", "." ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L97-L119
train
greyli/flask-avatars
flask_avatars/__init__.py
_Avatars.crop_box
def crop_box(endpoint=None, filename=None):
    """Create a crop box.

    :param endpoint: The endpoint of the view function that serves the avatar image file.
    :param filename: The filename of the image that needs to be cropped.
    """
    crop_size = current_app.config['AVATARS_CROP_BASE_WIDTH']
    if endpoint is None or filename is None:
        url = url_for('avatars.static', filename='default/default_l.jpg')
    else:
        url = url_for(endpoint, filename=filename)
    return Markup('<img src="%s" id="crop-box" style="max-width: %dpx; display: block;">' % (url, crop_size))
python
def crop_box(endpoint=None, filename=None):
    """Create a crop box.

    :param endpoint: The endpoint of the view function that serves the avatar image file.
    :param filename: The filename of the image that needs to be cropped.
    """
    crop_size = current_app.config['AVATARS_CROP_BASE_WIDTH']
    if endpoint is None or filename is None:
        url = url_for('avatars.static', filename='default/default_l.jpg')
    else:
        url = url_for(endpoint, filename=filename)
    return Markup('<img src="%s" id="crop-box" style="max-width: %dpx; display: block;">' % (url, crop_size))
[ "def", "crop_box", "(", "endpoint", "=", "None", ",", "filename", "=", "None", ")", ":", "crop_size", "=", "current_app", ".", "config", "[", "'AVATARS_CROP_BASE_WIDTH'", "]", "if", "endpoint", "is", "None", "or", "filename", "is", "None", ":", "url", "=", "url_for", "(", "'avatars.static'", ",", "filename", "=", "'default/default_l.jpg'", ")", "else", ":", "url", "=", "url_for", "(", "endpoint", ",", "filename", "=", "filename", ")", "return", "Markup", "(", "'<img src=\"%s\" id=\"crop-box\" style=\"max-width: %dpx; display: block;\">'", "%", "(", "url", ",", "crop_size", ")", ")" ]
Create a crop box.

:param endpoint: The endpoint of the view function that serves the avatar image file.
:param filename: The filename of the image that needs to be cropped.
[ "Create", "a", "crop", "box", "." ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L122-L134
train
greyli/flask-avatars
flask_avatars/__init__.py
_Avatars.preview_box
def preview_box(endpoint=None, filename=None):
    """Create a preview box.

    :param endpoint: The endpoint of the view function that serves the avatar image file.
    :param filename: The filename of the image that needs to be cropped.
    """
    preview_size = current_app.config['AVATARS_CROP_PREVIEW_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
    if endpoint is None or filename is None:
        url = url_for('avatars.static', filename='default/default_l.jpg')
    else:
        url = url_for(endpoint, filename=filename)
    return Markup('''
<div id="preview-box">
    <div class="preview-box" style="width: %dpx; height: %dpx; overflow: hidden;">
        <img src="%s" class="jcrop-preview" alt="Preview"/>
    </div>
</div>''' % (preview_size, preview_size, url))
python
def preview_box(endpoint=None, filename=None):
    """Create a preview box.

    :param endpoint: The endpoint of the view function that serves the avatar image file.
    :param filename: The filename of the image that needs to be cropped.
    """
    preview_size = current_app.config['AVATARS_CROP_PREVIEW_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
    if endpoint is None or filename is None:
        url = url_for('avatars.static', filename='default/default_l.jpg')
    else:
        url = url_for(endpoint, filename=filename)
    return Markup('''
<div id="preview-box">
    <div class="preview-box" style="width: %dpx; height: %dpx; overflow: hidden;">
        <img src="%s" class="jcrop-preview" alt="Preview"/>
    </div>
</div>''' % (preview_size, preview_size, url))
[ "def", "preview_box", "(", "endpoint", "=", "None", ",", "filename", "=", "None", ")", ":", "preview_size", "=", "current_app", ".", "config", "[", "'AVATARS_CROP_PREVIEW_SIZE'", "]", "or", "current_app", ".", "config", "[", "'AVATARS_SIZE_TUPLE'", "]", "[", "2", "]", "if", "endpoint", "is", "None", "or", "filename", "is", "None", ":", "url", "=", "url_for", "(", "'avatars.static'", ",", "filename", "=", "'default/default_l.jpg'", ")", "else", ":", "url", "=", "url_for", "(", "endpoint", ",", "filename", "=", "filename", ")", "return", "Markup", "(", "'''\n <div id=\"preview-box\">\n <div class=\"preview-box\" style=\"width: %dpx; height: %dpx; overflow: hidden;\">\n <img src=\"%s\" class=\"jcrop-preview\" alt=\"Preview\"/>\n </div>\n </div>'''", "%", "(", "preview_size", ",", "preview_size", ",", "url", ")", ")" ]
Create a preview box.

:param endpoint: The endpoint of the view function that serves the avatar image file.
:param filename: The filename of the image that needs to be cropped.
[ "Create", "a", "preview", "box", "." ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L137-L154
train
greyli/flask-avatars
flask_avatars/__init__.py
_Avatars.init_jcrop
def init_jcrop(min_size=None): """Initialize jcrop. :param min_size: The minimal size of crop area. """ init_x = current_app.config['AVATARS_CROP_INIT_POS'][0] init_y = current_app.config['AVATARS_CROP_INIT_POS'][1] init_size = current_app.config['AVATARS_CROP_INIT_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2] if current_app.config['AVATARS_CROP_MIN_SIZE']: min_size = min_size or current_app.config['AVATARS_SIZE_TUPLE'][2] min_size_js = 'jcrop_api.setOptions({minSize: [%d, %d]});' % (min_size, min_size) else: min_size_js = '' return Markup(''' <script type="text/javascript"> jQuery(function ($) { // Create variables (in this scope) to hold the API and image size var jcrop_api, boundx, boundy, // Grab some information about the preview pane $preview = $('#preview-box'), $pcnt = $('#preview-box .preview-box'), $pimg = $('#preview-box .preview-box img'), xsize = $pcnt.width(), ysize = $pcnt.height(); $('#crop-box').Jcrop({ onChange: updatePreview, onSelect: updateCoords, setSelect: [%s, %s, %s, %s], aspectRatio: 1 }, function () { // Use the API to get the real image size var bounds = this.getBounds(); boundx = bounds[0]; boundy = bounds[1]; // Store the API in the jcrop_api variable jcrop_api = this; %s jcrop_api.focus(); // Move the preview into the jcrop container for css positioning $preview.appendTo(jcrop_api.ui.holder); }); function updatePreview(c) { if (parseInt(c.w) > 0) { var rx = xsize / c.w; var ry = ysize / c.h; $pimg.css({ width: Math.round(rx * boundx) + 'px', height: Math.round(ry * boundy) + 'px', marginLeft: '-' + Math.round(rx * c.x) + 'px', marginTop: '-' + Math.round(ry * c.y) + 'px' }); } } }); function updateCoords(c) { $('#x').val(c.x); $('#y').val(c.y); $('#w').val(c.w); $('#h').val(c.h); } </script> ''' % (init_x, init_y, init_size, init_size, min_size_js))
python
def init_jcrop(min_size=None): """Initialize jcrop. :param min_size: The minimal size of crop area. """ init_x = current_app.config['AVATARS_CROP_INIT_POS'][0] init_y = current_app.config['AVATARS_CROP_INIT_POS'][1] init_size = current_app.config['AVATARS_CROP_INIT_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2] if current_app.config['AVATARS_CROP_MIN_SIZE']: min_size = min_size or current_app.config['AVATARS_SIZE_TUPLE'][2] min_size_js = 'jcrop_api.setOptions({minSize: [%d, %d]});' % (min_size, min_size) else: min_size_js = '' return Markup(''' <script type="text/javascript"> jQuery(function ($) { // Create variables (in this scope) to hold the API and image size var jcrop_api, boundx, boundy, // Grab some information about the preview pane $preview = $('#preview-box'), $pcnt = $('#preview-box .preview-box'), $pimg = $('#preview-box .preview-box img'), xsize = $pcnt.width(), ysize = $pcnt.height(); $('#crop-box').Jcrop({ onChange: updatePreview, onSelect: updateCoords, setSelect: [%s, %s, %s, %s], aspectRatio: 1 }, function () { // Use the API to get the real image size var bounds = this.getBounds(); boundx = bounds[0]; boundy = bounds[1]; // Store the API in the jcrop_api variable jcrop_api = this; %s jcrop_api.focus(); // Move the preview into the jcrop container for css positioning $preview.appendTo(jcrop_api.ui.holder); }); function updatePreview(c) { if (parseInt(c.w) > 0) { var rx = xsize / c.w; var ry = ysize / c.h; $pimg.css({ width: Math.round(rx * boundx) + 'px', height: Math.round(ry * boundy) + 'px', marginLeft: '-' + Math.round(rx * c.x) + 'px', marginTop: '-' + Math.round(ry * c.y) + 'px' }); } } }); function updateCoords(c) { $('#x').val(c.x); $('#y').val(c.y); $('#w').val(c.w); $('#h').val(c.h); } </script> ''' % (init_x, init_y, init_size, init_size, min_size_js))
[ "def", "init_jcrop", "(", "min_size", "=", "None", ")", ":", "init_x", "=", "current_app", ".", "config", "[", "'AVATARS_CROP_INIT_POS'", "]", "[", "0", "]", "init_y", "=", "current_app", ".", "config", "[", "'AVATARS_CROP_INIT_POS'", "]", "[", "1", "]", "init_size", "=", "current_app", ".", "config", "[", "'AVATARS_CROP_INIT_SIZE'", "]", "or", "current_app", ".", "config", "[", "'AVATARS_SIZE_TUPLE'", "]", "[", "2", "]", "if", "current_app", ".", "config", "[", "'AVATARS_CROP_MIN_SIZE'", "]", ":", "min_size", "=", "min_size", "or", "current_app", ".", "config", "[", "'AVATARS_SIZE_TUPLE'", "]", "[", "2", "]", "min_size_js", "=", "'jcrop_api.setOptions({minSize: [%d, %d]});'", "%", "(", "min_size", ",", "min_size", ")", "else", ":", "min_size_js", "=", "''", "return", "Markup", "(", "'''\n<script type=\"text/javascript\">\n jQuery(function ($) {\n // Create variables (in this scope) to hold the API and image size\n var jcrop_api,\n boundx,\n boundy,\n\n // Grab some information about the preview pane\n $preview = $('#preview-box'),\n $pcnt = $('#preview-box .preview-box'),\n $pimg = $('#preview-box .preview-box img'),\n\n xsize = $pcnt.width(),\n ysize = $pcnt.height();\n\n $('#crop-box').Jcrop({\n onChange: updatePreview,\n onSelect: updateCoords,\n setSelect: [%s, %s, %s, %s],\n aspectRatio: 1\n }, function () {\n // Use the API to get the real image size\n var bounds = this.getBounds();\n boundx = bounds[0];\n boundy = bounds[1];\n // Store the API in the jcrop_api variable\n jcrop_api = this;\n %s\n jcrop_api.focus();\n // Move the preview into the jcrop container for css positioning\n $preview.appendTo(jcrop_api.ui.holder);\n });\n\n function updatePreview(c) {\n if (parseInt(c.w) > 0) {\n var rx = xsize / c.w;\n var ry = ysize / c.h;\n $pimg.css({\n width: Math.round(rx * boundx) + 'px',\n height: Math.round(ry * boundy) + 'px',\n marginLeft: '-' + Math.round(rx * c.x) + 'px',\n marginTop: '-' + Math.round(ry * c.y) + 'px'\n });\n }\n }\n });\n\n function updateCoords(c) {\n $('#x').val(c.x);\n $('#y').val(c.y);\n $('#w').val(c.w);\n $('#h').val(c.h);\n }\n </script>\n '''", "%", "(", "init_x", ",", "init_y", ",", "init_size", ",", "init_size", ",", "min_size_js", ")", ")" ]
Initialize jcrop. :param min_size: The minimal size of crop area.
[ "Initialize", "jcrop", "." ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L157-L226
train
greyli/flask-avatars
flask_avatars/__init__.py
Avatars.resize_avatar
def resize_avatar(self, img, base_width):
    """Resize an avatar.

    :param img: The image that needs to be resized.
    :param base_width: The width of the output image.
    """
    w_percent = (base_width / float(img.size[0]))
    h_size = int((float(img.size[1]) * float(w_percent)))
    img = img.resize((base_width, h_size), PIL.Image.ANTIALIAS)
    return img
python
def resize_avatar(self, img, base_width):
    """Resize an avatar.

    :param img: The image that needs to be resized.
    :param base_width: The width of the output image.
    """
    w_percent = (base_width / float(img.size[0]))
    h_size = int((float(img.size[1]) * float(w_percent)))
    img = img.resize((base_width, h_size), PIL.Image.ANTIALIAS)
    return img
[ "def", "resize_avatar", "(", "self", ",", "img", ",", "base_width", ")", ":", "w_percent", "=", "(", "base_width", "/", "float", "(", "img", ".", "size", "[", "0", "]", ")", ")", "h_size", "=", "int", "(", "(", "float", "(", "img", ".", "size", "[", "1", "]", ")", "*", "float", "(", "w_percent", ")", ")", ")", "img", "=", "img", ".", "resize", "(", "(", "base_width", ",", "h_size", ")", ",", "PIL", ".", "Image", ".", "ANTIALIAS", ")", "return", "img" ]
Resize an avatar.

:param img: The image that needs to be resized.
:param base_width: The width of the output image.
[ "Resize", "an", "avatar", "." ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L278-L287
train
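The resize keeps aspect ratio by scaling height from the width ratio; the arithmetic alone, without Pillow. (Note that Pillow 10 removed Image.ANTIALIAS as used above; newer code passes Image.Resampling.LANCZOS instead.)

def scaled_size(orig_w, orig_h, base_width):
    # same math as resize_avatar: height follows the width scale factor
    w_percent = base_width / float(orig_w)
    return base_width, int(orig_h * w_percent)

print(scaled_size(800, 600, 200))   # (200, 150)
print(scaled_size(640, 480, 64))    # (64, 48)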
greyli/flask-avatars
flask_avatars/__init__.py
Avatars.save_avatar
def save_avatar(self, image): """Save an avatar as raw image, return new filename. :param image: The image that needs to be saved. """ path = current_app.config['AVATARS_SAVE_PATH'] filename = uuid4().hex + '_raw.png' image.save(os.path.join(path, filename)) return filename
python
def save_avatar(self, image): """Save an avatar as raw image, return new filename. :param image: The image that needs to be saved. """ path = current_app.config['AVATARS_SAVE_PATH'] filename = uuid4().hex + '_raw.png' image.save(os.path.join(path, filename)) return filename
[ "def", "save_avatar", "(", "self", ",", "image", ")", ":", "path", "=", "current_app", ".", "config", "[", "'AVATARS_SAVE_PATH'", "]", "filename", "=", "uuid4", "(", ")", ".", "hex", "+", "'_raw.png'", "image", ".", "save", "(", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", ")", "return", "filename" ]
Save an avatar as raw image, return new filename. :param image: The image that needs to be saved.
[ "Save", "an", "avatar", "as", "raw", "image", "return", "new", "filename", "." ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L289-L297
train
greyli/flask-avatars
flask_avatars/__init__.py
Avatars.crop_avatar
def crop_avatar(self, filename, x, y, w, h): """Crop avatar with given size, return a list of file name: [filename_s, filename_m, filename_l]. :param filename: The raw image's filename. :param x: The x-pos to start crop. :param y: The y-pos to start crop. :param w: The crop width. :param h: The crop height. """ x = int(x) y = int(y) w = int(w) h = int(h) sizes = current_app.config['AVATARS_SIZE_TUPLE'] if not filename: path = os.path.join(self.root_path, 'static/default/default_l.jpg') else: path = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename) print(path) raw_img = Image.open(path) base_width = current_app.config['AVATARS_CROP_BASE_WIDTH'] if raw_img.size[0] >= base_width: raw_img = self.resize_avatar(raw_img, base_width=base_width) cropped_img = raw_img.crop((x, y, x + w, y + h)) filename = uuid4().hex avatar_s = self.resize_avatar(cropped_img, base_width=sizes[0]) avatar_m = self.resize_avatar(cropped_img, base_width=sizes[1]) avatar_l = self.resize_avatar(cropped_img, base_width=sizes[2]) filename_s = filename + '_s.png' filename_m = filename + '_m.png' filename_l = filename + '_l.png' path_s = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_s) path_m = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_m) path_l = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_l) avatar_s.save(path_s, optimize=True, quality=85) avatar_m.save(path_m, optimize=True, quality=85) avatar_l.save(path_l, optimize=True, quality=85) return [filename_s, filename_m, filename_l]
python
def crop_avatar(self, filename, x, y, w, h): """Crop avatar with given size, return a list of file name: [filename_s, filename_m, filename_l]. :param filename: The raw image's filename. :param x: The x-pos to start crop. :param y: The y-pos to start crop. :param w: The crop width. :param h: The crop height. """ x = int(x) y = int(y) w = int(w) h = int(h) sizes = current_app.config['AVATARS_SIZE_TUPLE'] if not filename: path = os.path.join(self.root_path, 'static/default/default_l.jpg') else: path = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename) print(path) raw_img = Image.open(path) base_width = current_app.config['AVATARS_CROP_BASE_WIDTH'] if raw_img.size[0] >= base_width: raw_img = self.resize_avatar(raw_img, base_width=base_width) cropped_img = raw_img.crop((x, y, x + w, y + h)) filename = uuid4().hex avatar_s = self.resize_avatar(cropped_img, base_width=sizes[0]) avatar_m = self.resize_avatar(cropped_img, base_width=sizes[1]) avatar_l = self.resize_avatar(cropped_img, base_width=sizes[2]) filename_s = filename + '_s.png' filename_m = filename + '_m.png' filename_l = filename + '_l.png' path_s = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_s) path_m = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_m) path_l = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_l) avatar_s.save(path_s, optimize=True, quality=85) avatar_m.save(path_m, optimize=True, quality=85) avatar_l.save(path_l, optimize=True, quality=85) return [filename_s, filename_m, filename_l]
[ "def", "crop_avatar", "(", "self", ",", "filename", ",", "x", ",", "y", ",", "w", ",", "h", ")", ":", "x", "=", "int", "(", "x", ")", "y", "=", "int", "(", "y", ")", "w", "=", "int", "(", "w", ")", "h", "=", "int", "(", "h", ")", "sizes", "=", "current_app", ".", "config", "[", "'AVATARS_SIZE_TUPLE'", "]", "if", "not", "filename", ":", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "root_path", ",", "'static/default/default_l.jpg'", ")", "else", ":", "path", "=", "os", ".", "path", ".", "join", "(", "current_app", ".", "config", "[", "'AVATARS_SAVE_PATH'", "]", ",", "filename", ")", "print", "(", "path", ")", "raw_img", "=", "Image", ".", "open", "(", "path", ")", "base_width", "=", "current_app", ".", "config", "[", "'AVATARS_CROP_BASE_WIDTH'", "]", "if", "raw_img", ".", "size", "[", "0", "]", ">=", "base_width", ":", "raw_img", "=", "self", ".", "resize_avatar", "(", "raw_img", ",", "base_width", "=", "base_width", ")", "cropped_img", "=", "raw_img", ".", "crop", "(", "(", "x", ",", "y", ",", "x", "+", "w", ",", "y", "+", "h", ")", ")", "filename", "=", "uuid4", "(", ")", ".", "hex", "avatar_s", "=", "self", ".", "resize_avatar", "(", "cropped_img", ",", "base_width", "=", "sizes", "[", "0", "]", ")", "avatar_m", "=", "self", ".", "resize_avatar", "(", "cropped_img", ",", "base_width", "=", "sizes", "[", "1", "]", ")", "avatar_l", "=", "self", ".", "resize_avatar", "(", "cropped_img", ",", "base_width", "=", "sizes", "[", "2", "]", ")", "filename_s", "=", "filename", "+", "'_s.png'", "filename_m", "=", "filename", "+", "'_m.png'", "filename_l", "=", "filename", "+", "'_l.png'", "path_s", "=", "os", ".", "path", ".", "join", "(", "current_app", ".", "config", "[", "'AVATARS_SAVE_PATH'", "]", ",", "filename_s", ")", "path_m", "=", "os", ".", "path", ".", "join", "(", "current_app", ".", "config", "[", "'AVATARS_SAVE_PATH'", "]", ",", "filename_m", ")", "path_l", "=", "os", ".", "path", ".", "join", "(", "current_app", ".", "config", "[", "'AVATARS_SAVE_PATH'", "]", ",", "filename_l", ")", "avatar_s", ".", "save", "(", "path_s", ",", "optimize", "=", "True", ",", "quality", "=", "85", ")", "avatar_m", ".", "save", "(", "path_m", ",", "optimize", "=", "True", ",", "quality", "=", "85", ")", "avatar_l", ".", "save", "(", "path_l", ",", "optimize", "=", "True", ",", "quality", "=", "85", ")", "return", "[", "filename_s", ",", "filename_m", ",", "filename_l", "]" ]
Crop avatar with given size, return a list of file names: [filename_s, filename_m, filename_l].

:param filename: The raw image's filename.
:param x: The x-pos to start the crop.
:param y: The y-pos to start the crop.
:param w: The crop width.
:param h: The crop height.
[ "Crop", "avatar", "with", "given", "size", "return", "a", "list", "of", "file", "name", ":", "[", "filename_s", "filename_m", "filename_l", "]", "." ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L299-L349
train
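The crop pipeline, reduced to its image operations and run on a generated image so it needs only Pillow and no files on disk. The small/medium/large widths here are arbitrary demo values, not the extension's configured AVATARS_SIZE_TUPLE.

from PIL import Image

raw = Image.new('RGB', (400, 300), 'steelblue')   # stand-in for the uploaded file
x, y, w, h = 50, 20, 200, 200                     # crop square chosen for the demo
cropped = raw.crop((x, y, x + w, y + h))

def resize_to(img, base_width):
    ratio = base_width / float(img.size[0])
    return img.resize((base_width, int(img.size[1] * ratio)))

for width, tag in [(30, 's'), (60, 'm'), (150, 'l')]:
    out = resize_to(cropped, width)
    print(tag, out.size)   # s (30, 30) / m (60, 60) / l (150, 150)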
greyli/flask-avatars
flask_avatars/identicon.py
Identicon.get_image
def get_image(self, string, width, height, pad=0): """ Byte representation of a PNG image """ hex_digest_byte_list = self._string_to_byte_list(string) matrix = self._create_matrix(hex_digest_byte_list) return self._create_image(matrix, width, height, pad)
python
def get_image(self, string, width, height, pad=0): """ Byte representation of a PNG image """ hex_digest_byte_list = self._string_to_byte_list(string) matrix = self._create_matrix(hex_digest_byte_list) return self._create_image(matrix, width, height, pad)
[ "def", "get_image", "(", "self", ",", "string", ",", "width", ",", "height", ",", "pad", "=", "0", ")", ":", "hex_digest_byte_list", "=", "self", ".", "_string_to_byte_list", "(", "string", ")", "matrix", "=", "self", ".", "_create_matrix", "(", "hex_digest_byte_list", ")", "return", "self", ".", "_create_image", "(", "matrix", ",", "width", ",", "height", ",", "pad", ")" ]
Byte representation of a PNG image
[ "Byte", "representation", "of", "a", "PNG", "image" ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L72-L78
train
greyli/flask-avatars
flask_avatars/identicon.py
Identicon._get_pastel_colour
def _get_pastel_colour(self, lighten=127):
    """
    Create a random pastel colour as an RGB tuple
    """
    def r():
        return random.randint(0, 128) + lighten

    return r(), r(), r()
python
def _get_pastel_colour(self, lighten=127):
    """
    Create a random pastel colour as an RGB tuple
    """
    def r():
        return random.randint(0, 128) + lighten

    return r(), r(), r()
[ "def", "_get_pastel_colour", "(", "self", ",", "lighten", "=", "127", ")", ":", "def", "r", "(", ")", ":", "return", "random", ".", "randint", "(", "0", ",", "128", ")", "+", "lighten", "return", "r", "(", ")", ",", "r", "(", ")", ",", "r", "(", ")" ]
Create a random pastel colour as an RGB tuple
[ "Create", "a", "pastel", "colour", "hex", "colour", "string" ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L87-L93
train
greyli/flask-avatars
flask_avatars/identicon.py
Identicon._luminance
def _luminance(self, rgb):
    """
    Determine the luminance of an RGB colour
    """
    a = []
    for v in rgb:
        v = v / float(255)
        if v < 0.03928:
            result = v / 12.92
        else:
            result = math.pow(((v + 0.055) / 1.055), 2.4)
        a.append(result)
    return a[0] * 0.2126 + a[1] * 0.7152 + a[2] * 0.0722
python
def _luminance(self, rgb):
    """
    Determine the luminance of an RGB colour
    """
    a = []
    for v in rgb:
        v = v / float(255)
        if v < 0.03928:
            result = v / 12.92
        else:
            result = math.pow(((v + 0.055) / 1.055), 2.4)
        a.append(result)
    return a[0] * 0.2126 + a[1] * 0.7152 + a[2] * 0.0722
[ "def", "_luminance", "(", "self", ",", "rgb", ")", ":", "a", "=", "[", "]", "for", "v", "in", "rgb", ":", "v", "=", "v", "/", "float", "(", "255", ")", "if", "v", "<", "0.03928", ":", "result", "=", "v", "/", "12.92", "else", ":", "result", "=", "math", ".", "pow", "(", "(", "(", "v", "+", "0.055", ")", "/", "1.055", ")", ",", "2.4", ")", "a", ".", "append", "(", "result", ")", "return", "a", "[", "0", "]", "*", "0.2126", "+", "a", "[", "1", "]", "*", "0.7152", "+", "a", "[", "2", "]", "*", "0.0722" ]
Determine the luminance of an RGB colour
[ "Determine", "the", "liminanace", "of", "an", "RGB", "colour" ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L95-L108
train
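_luminance implements the standard sRGB relative-luminance computation (the same one WCAG uses for contrast checks). With each channel scaled to v in [0, 1] and gamma-expanded, the code corresponds to this formula, written in LaTeX:

$$
c(v) =
\begin{cases}
v / 12.92 & \text{if } v < 0.03928 \\[4pt]
\left( \dfrac{v + 0.055}{1.055} \right)^{2.4} & \text{otherwise}
\end{cases}
\qquad
L = 0.2126\,c(R) + 0.7152\,c(G) + 0.0722\,c(B)
$$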
greyli/flask-avatars
flask_avatars/identicon.py
Identicon._string_to_byte_list
def _string_to_byte_list(self, data):
    """
    Creates a hex digest of the input string given to create the image,
    if it's not already hexadecimal

    Returns:
        Length 16 list of rgb value range integers
        (each representing a byte of the hex digest)
    """
    bytes_length = 16

    m = self.digest()
    m.update(str.encode(data))
    hex_digest = m.hexdigest()

    return list(int(hex_digest[num * 2:num * 2 + 2], bytes_length)
                for num in range(bytes_length))
python
def _string_to_byte_list(self, data):
    """
    Creates a hex digest of the input string given to create the image,
    if it's not already hexadecimal

    Returns:
        Length 16 list of rgb value range integers
        (each representing a byte of the hex digest)
    """
    bytes_length = 16

    m = self.digest()
    m.update(str.encode(data))
    hex_digest = m.hexdigest()

    return list(int(hex_digest[num * 2:num * 2 + 2], bytes_length)
                for num in range(bytes_length))
[ "def", "_string_to_byte_list", "(", "self", ",", "data", ")", ":", "bytes_length", "=", "16", "m", "=", "self", ".", "digest", "(", ")", "m", ".", "update", "(", "str", ".", "encode", "(", "data", ")", ")", "hex_digest", "=", "m", ".", "hexdigest", "(", ")", "return", "list", "(", "int", "(", "hex_digest", "[", "num", "*", "2", ":", "num", "*", "2", "+", "2", "]", ",", "bytes_length", ")", "for", "num", "in", "range", "(", "bytes_length", ")", ")" ]
Creates a hex digest of the input string given to create the image,
if it's not already hexadecimal

Returns:
    Length 16 list of rgb value range integers
    (each representing a byte of the hex digest)
[ "Creates", "a", "hex", "digest", "of", "the", "input", "string", "given", "to", "create", "the", "image", "if", "it", "s", "not", "already", "hexadecimal" ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L110-L126
train
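What _string_to_byte_list produces, assuming the digest factory is hashlib.md5 (a 128-bit digest gives exactly 16 bytes; note the `bytes_length` constant doubles as the base-16 radix passed to int()):

import hashlib

hex_digest = hashlib.md5('alice@example.com'.encode()).hexdigest()  # 32 hex chars
byte_list = [int(hex_digest[n * 2:n * 2 + 2], 16) for n in range(16)]
assert len(byte_list) == 16
assert all(0 <= b <= 255 for b in byte_list)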
greyli/flask-avatars
flask_avatars/identicon.py
Identicon._bit_is_one
def _bit_is_one(self, n, hash_bytes):
    """
    Check if the n (index) of hash_bytes is 1 or 0.
    """
    scale = 16  # hexadecimal
    if not hash_bytes[int(n / (scale / 2))] >> int(
            (scale / 2) - ((n % (scale / 2)) + 1)) & 1 == 1:
        return False
    return True
python
def _bit_is_one(self, n, hash_bytes):
    """
    Check if the n (index) of hash_bytes is 1 or 0.
    """
    scale = 16  # hexadecimal
    if not hash_bytes[int(n / (scale / 2))] >> int(
            (scale / 2) - ((n % (scale / 2)) + 1)) & 1 == 1:
        return False
    return True
[ "def", "_bit_is_one", "(", "self", ",", "n", ",", "hash_bytes", ")", ":", "scale", "=", "16", "# hexadecimal", "if", "not", "hash_bytes", "[", "int", "(", "n", "/", "(", "scale", "/", "2", ")", ")", "]", ">>", "int", "(", "(", "scale", "/", "2", ")", "-", "(", "(", "n", "%", "(", "scale", "/", "2", ")", ")", "+", "1", ")", ")", "&", "1", "==", "1", ":", "return", "False", "return", "True" ]
Check if the n (index) of hash_bytes is 1 or 0.
[ "Check", "if", "the", "n", "(", "index", ")", "of", "hash_bytes", "is", "1", "or", "0", "." ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L128-L138
train
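The index arithmetic in _bit_is_one reduces to "byte n // 8, bit 7 - n % 8 counting from the least-significant end"; a standalone equivalent with a small worked example:

def bit_is_one(n, hash_bytes):
    # Same test as the method above, with scale = 16 folded in.
    return (hash_bytes[n // 8] >> (7 - n % 8)) & 1 == 1

hash_bytes = [0b10110000, 0b00000001]
assert bit_is_one(0, hash_bytes) is True    # MSB of byte 0
assert bit_is_one(1, hash_bytes) is False
assert bit_is_one(15, hash_bytes) is True   # LSB of byte 1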
greyli/flask-avatars
flask_avatars/identicon.py
Identicon._create_image
def _create_image(self, matrix, width, height, pad):
    """
    Generates a PNG byte list
    """
    image = Image.new("RGB", (width + (pad * 2), height + (pad * 2)),
                      self.bg_colour)
    image_draw = ImageDraw.Draw(image)

    # Calculate the block width and height.
    block_width = float(width) / self.cols
    block_height = float(height) / self.rows

    # Loop through blocks in matrix, draw rectangles.
    for row, cols in enumerate(matrix):
        for col, cell in enumerate(cols):
            if cell:
                image_draw.rectangle((
                    pad + col * block_width,            # x1
                    pad + row * block_height,           # y1
                    pad + (col + 1) * block_width - 1,  # x2
                    pad + (row + 1) * block_height - 1  # y2
                ), fill=self.fg_colour)

    stream = BytesIO()
    image.save(stream, format="png", optimize=True)

    # return the image byte data
    return stream.getvalue()
python
def _create_image(self, matrix, width, height, pad):
    """
    Generates a PNG byte list
    """
    image = Image.new("RGB", (width + (pad * 2), height + (pad * 2)),
                      self.bg_colour)
    image_draw = ImageDraw.Draw(image)

    # Calculate the block width and height.
    block_width = float(width) / self.cols
    block_height = float(height) / self.rows

    # Loop through blocks in matrix, draw rectangles.
    for row, cols in enumerate(matrix):
        for col, cell in enumerate(cols):
            if cell:
                image_draw.rectangle((
                    pad + col * block_width,            # x1
                    pad + row * block_height,           # y1
                    pad + (col + 1) * block_width - 1,  # x2
                    pad + (row + 1) * block_height - 1  # y2
                ), fill=self.fg_colour)

    stream = BytesIO()
    image.save(stream, format="png", optimize=True)

    # return the image byte data
    return stream.getvalue()
[ "def", "_create_image", "(", "self", ",", "matrix", ",", "width", ",", "height", ",", "pad", ")", ":", "image", "=", "Image", ".", "new", "(", "\"RGB\"", ",", "(", "width", "+", "(", "pad", "*", "2", ")", ",", "height", "+", "(", "pad", "*", "2", ")", ")", ",", "self", ".", "bg_colour", ")", "image_draw", "=", "ImageDraw", ".", "Draw", "(", "image", ")", "# Calculate the block width and height.", "block_width", "=", "float", "(", "width", ")", "/", "self", ".", "cols", "block_height", "=", "float", "(", "height", ")", "/", "self", ".", "rows", "# Loop through blocks in matrix, draw rectangles.", "for", "row", ",", "cols", "in", "enumerate", "(", "matrix", ")", ":", "for", "col", ",", "cell", "in", "enumerate", "(", "cols", ")", ":", "if", "cell", ":", "image_draw", ".", "rectangle", "(", "(", "pad", "+", "col", "*", "block_width", ",", "# x1", "pad", "+", "row", "*", "block_height", ",", "# y1", "pad", "+", "(", "col", "+", "1", ")", "*", "block_width", "-", "1", ",", "# x2", "pad", "+", "(", "row", "+", "1", ")", "*", "block_height", "-", "1", "# y2", ")", ",", "fill", "=", "self", ".", "fg_colour", ")", "stream", "=", "BytesIO", "(", ")", "image", ".", "save", "(", "stream", ",", "format", "=", "\"png\"", ",", "optimize", "=", "True", ")", "# return the image byte data", "return", "stream", ".", "getvalue", "(", ")" ]
Generates a PNG byte list
[ "Generates", "a", "PNG", "byte", "list" ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L140-L167
train
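The rectangle coordinates in _create_image, traced by hand for one cell (a hypothetical 100x100 image, 5x5 grid, pad of 10):

width = height = 100
pad, rows, cols = 10, 5, 5
block_w, block_h = width / cols, height / rows  # 20.0 x 20.0 blocks

row, col = 2, 3  # cell being drawn
x1, y1 = pad + col * block_w, pad + row * block_h                      # (70.0, 50.0)
x2, y2 = pad + (col + 1) * block_w - 1, pad + (row + 1) * block_h - 1  # (89.0, 69.0)
assert (x1, y1, x2, y2) == (70.0, 50.0, 89.0, 69.0)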
greyli/flask-avatars
flask_avatars/identicon.py
Identicon._create_matrix
def _create_matrix(self, byte_list):
    """
    This matrix decides which blocks should be filled
    fg/bg colour

    True for fg_colour
    False for bg_colour

    hash_bytes - array of hash bytes values.
                 RGB range values in each slot

    Returns:
        List representation of the matrix
        [[True, True, True, True],
         [False, True, True, False],
         [True, True, True, True],
         [False, False, False, False]]
    """
    # Number of rows * cols halved and rounded
    # in order to fill opposite side
    cells = int(self.rows * self.cols / 2 + self.cols % 2)

    matrix = [[False] * self.cols for num in range(self.rows)]

    for cell_number in range(cells):
        # If the bit with index corresponding to this cell is 1
        # mark that cell as fg_colour
        # Skip byte 1, that's used in determining fg_colour
        if self._bit_is_one(cell_number, byte_list[1:]):
            # Find cell coordinates in matrix.
            x_row = cell_number % self.rows
            y_col = int(cell_number / self.cols)
            # Set coord True and its opposite side
            matrix[x_row][self.cols - y_col - 1] = True
            matrix[x_row][y_col] = True

    return matrix
python
def _create_matrix(self, byte_list):
    """
    This matrix decides which blocks should be filled
    fg/bg colour

    True for fg_colour
    False for bg_colour

    hash_bytes - array of hash bytes values.
                 RGB range values in each slot

    Returns:
        List representation of the matrix
        [[True, True, True, True],
         [False, True, True, False],
         [True, True, True, True],
         [False, False, False, False]]
    """
    # Number of rows * cols halved and rounded
    # in order to fill opposite side
    cells = int(self.rows * self.cols / 2 + self.cols % 2)

    matrix = [[False] * self.cols for num in range(self.rows)]

    for cell_number in range(cells):
        # If the bit with index corresponding to this cell is 1
        # mark that cell as fg_colour
        # Skip byte 1, that's used in determining fg_colour
        if self._bit_is_one(cell_number, byte_list[1:]):
            # Find cell coordinates in matrix.
            x_row = cell_number % self.rows
            y_col = int(cell_number / self.cols)
            # Set coord True and its opposite side
            matrix[x_row][self.cols - y_col - 1] = True
            matrix[x_row][y_col] = True

    return matrix
[ "def", "_create_matrix", "(", "self", ",", "byte_list", ")", ":", "# Number of rows * cols halfed and rounded", "# in order to fill opposite side", "cells", "=", "int", "(", "self", ".", "rows", "*", "self", ".", "cols", "/", "2", "+", "self", ".", "cols", "%", "2", ")", "matrix", "=", "[", "[", "False", "]", "*", "self", ".", "cols", "for", "num", "in", "range", "(", "self", ".", "rows", ")", "]", "for", "cell_number", "in", "range", "(", "cells", ")", ":", "# If the bit with index corresponding to this cell is 1", "# mark that cell as fg_colour", "# Skip byte 1, that's used in determining fg_colour", "if", "self", ".", "_bit_is_one", "(", "cell_number", ",", "byte_list", "[", "1", ":", "]", ")", ":", "# Find cell coordinates in matrix.", "x_row", "=", "cell_number", "%", "self", ".", "rows", "y_col", "=", "int", "(", "cell_number", "/", "self", ".", "cols", ")", "# Set coord True and its opposite side", "matrix", "[", "x_row", "]", "[", "self", ".", "cols", "-", "y_col", "-", "1", "]", "=", "True", "matrix", "[", "x_row", "]", "[", "y_col", "]", "=", "True", "return", "matrix" ]
This matrix decides which blocks should be filled
fg/bg colour

True for fg_colour
False for bg_colour

hash_bytes - array of hash bytes values.
             RGB range values in each slot

Returns:
    List representation of the matrix
    [[True, True, True, True],
     [False, True, True, False],
     [True, True, True, True],
     [False, False, False, False]]
[ "This", "matrix", "decides", "which", "blocks", "should", "be", "filled", "fg", "/", "bg", "colour", "True", "for", "fg_colour", "False", "for", "bg_colour" ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L169-L203
train
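The symmetry trick in _create_matrix, isolated: marking column y_col also marks column cols - y_col - 1, which is why the generated identicons come out left/right symmetric. A tiny standalone demonstration with the 5x5 grid used above:

rows = cols = 5
matrix = [[False] * cols for _ in range(rows)]

x_row, y_col = 1, 0
matrix[x_row][cols - y_col - 1] = True  # mirrored cell (1, 4)
matrix[x_row][y_col] = True             # original cell (1, 0)
assert matrix[1][0] and matrix[1][4]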
greyli/flask-avatars
flask_avatars/identicon.py
Identicon.generate
def generate(self, text):
    """Generate and save avatars, return a list of file name:
    [filename_s, filename_m, filename_l].

    :param text: The text used to generate image.
    """
    sizes = current_app.config['AVATARS_SIZE_TUPLE']
    path = current_app.config['AVATARS_SAVE_PATH']
    suffix = {sizes[0]: 's', sizes[1]: 'm', sizes[2]: 'l'}

    for size in sizes:
        image_byte_array = self.get_image(
            string=str(text),
            width=int(size),
            height=int(size),
            pad=int(size * 0.1))
        self.save(image_byte_array,
                  save_location=os.path.join(path, '%s_%s.png' % (text, suffix[size])))
    return [text + '_s.png', text + '_m.png', text + '_l.png']
python
def generate(self, text):
    """Generate and save avatars, return a list of file name:
    [filename_s, filename_m, filename_l].

    :param text: The text used to generate image.
    """
    sizes = current_app.config['AVATARS_SIZE_TUPLE']
    path = current_app.config['AVATARS_SAVE_PATH']
    suffix = {sizes[0]: 's', sizes[1]: 'm', sizes[2]: 'l'}

    for size in sizes:
        image_byte_array = self.get_image(
            string=str(text),
            width=int(size),
            height=int(size),
            pad=int(size * 0.1))
        self.save(image_byte_array,
                  save_location=os.path.join(path, '%s_%s.png' % (text, suffix[size])))
    return [text + '_s.png', text + '_m.png', text + '_l.png']
[ "def", "generate", "(", "self", ",", "text", ")", ":", "sizes", "=", "current_app", ".", "config", "[", "'AVATARS_SIZE_TUPLE'", "]", "path", "=", "current_app", ".", "config", "[", "'AVATARS_SAVE_PATH'", "]", "suffix", "=", "{", "sizes", "[", "0", "]", ":", "'s'", ",", "sizes", "[", "1", "]", ":", "'m'", ",", "sizes", "[", "2", "]", ":", "'l'", "}", "for", "size", "in", "sizes", ":", "image_byte_array", "=", "self", ".", "get_image", "(", "string", "=", "str", "(", "text", ")", ",", "width", "=", "int", "(", "size", ")", ",", "height", "=", "int", "(", "size", ")", ",", "pad", "=", "int", "(", "size", "*", "0.1", ")", ")", "self", ".", "save", "(", "image_byte_array", ",", "save_location", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'%s_%s.png'", "%", "(", "text", ",", "suffix", "[", "size", "]", ")", ")", ")", "return", "[", "text", "+", "'_s.png'", ",", "text", "+", "'_m.png'", ",", "text", "+", "'_l.png'", "]" ]
Generate and save avatars, return a list of file name:
[filename_s, filename_m, filename_l].

:param text: The text used to generate image.
[ "Generate", "and", "save", "avatars", "return", "a", "list", "of", "file", "name", ":", "[", "filename_s", "filename_m", "filename_l", "]", "." ]
13eca90342349c58962fef0ec541edcb1b009c70
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L205-L221
train
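End-to-end use of generate, following the library's documented pattern; the email/md5 naming is illustrative, and the save path is an assumption:

import os
from hashlib import md5
from flask import Flask
from flask_avatars import Avatars, Identicon

app = Flask(__name__)
app.config['AVATARS_SAVE_PATH'] = '/tmp/avatars'  # assumed location
os.makedirs(app.config['AVATARS_SAVE_PATH'], exist_ok=True)
Avatars(app)  # installs the remaining AVATARS_* config defaults

with app.app_context():
    text = md5('alice@example.com'.encode()).hexdigest()
    filenames = Identicon().generate(text=text)
    # e.g. ['<md5>_s.png', '<md5>_m.png', '<md5>_l.png'] under the save path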
rbuffat/pyepw
pyepw/epw.py
Location.read
def read(self, vals):
    """Read values.

    Args:
        vals (list): list of strings representing values

    """
    i = 0
    if len(vals[i]) == 0:
        self.city = None
    else:
        self.city = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.state_province_region = None
    else:
        self.state_province_region = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.country = None
    else:
        self.country = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.source = None
    else:
        self.source = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.wmo = None
    else:
        self.wmo = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.latitude = None
    else:
        self.latitude = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.longitude = None
    else:
        self.longitude = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.timezone = None
    else:
        self.timezone = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.elevation = None
    else:
        self.elevation = vals[i]
    i += 1
python
def read(self, vals):
    """Read values.

    Args:
        vals (list): list of strings representing values

    """
    i = 0
    if len(vals[i]) == 0:
        self.city = None
    else:
        self.city = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.state_province_region = None
    else:
        self.state_province_region = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.country = None
    else:
        self.country = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.source = None
    else:
        self.source = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.wmo = None
    else:
        self.wmo = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.latitude = None
    else:
        self.latitude = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.longitude = None
    else:
        self.longitude = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.timezone = None
    else:
        self.timezone = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.elevation = None
    else:
        self.elevation = vals[i]
    i += 1
[ "def", "read", "(", "self", ",", "vals", ")", ":", "i", "=", "0", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "city", "=", "None", "else", ":", "self", ".", "city", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "state_province_region", "=", "None", "else", ":", "self", ".", "state_province_region", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "country", "=", "None", "else", ":", "self", ".", "country", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "source", "=", "None", "else", ":", "self", ".", "source", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "wmo", "=", "None", "else", ":", "self", ".", "wmo", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "latitude", "=", "None", "else", ":", "self", ".", "latitude", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "longitude", "=", "None", "else", ":", "self", ".", "longitude", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "timezone", "=", "None", "else", ":", "self", ".", "timezone", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "elevation", "=", "None", "else", ":", "self", ".", "elevation", "=", "vals", "[", "i", "]", "i", "+=", "1" ]
Read values.

Args:
    vals (list): list of strings representing values
[ "Read", "values", "." ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L35-L87
train
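A hedged sketch of feeding Location.read the comma-separated fields of an EPW LOCATION header line (everything after the LOCATION keyword); it assumes pyepw is installed, Location() takes no required arguments, and the station values are illustrative:

from pyepw.epw import Location

vals = ['Chicago Ohare Intl Ap', 'IL', 'USA', 'TMY3',
        '725300', '41.98', '-87.92', '-6.0', '201.0']
loc = Location()
loc.read(vals)
assert loc.city == 'Chicago Ohare Intl Ap'
assert loc.latitude == 41.98  # numeric fields are coerced by the property setters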
rbuffat/pyepw
pyepw/epw.py
Location.city
def city(self, value=None):
    """Corresponds to IDD Field `city`

    Args:
        value (str): value for IDD Field `city`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} need to be of type str '
                             'for field `city`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `city`')

    self._city = value
python
def city(self, value=None):
    """Corresponds to IDD Field `city`

    Args:
        value (str): value for IDD Field `city`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} need to be of type str '
                             'for field `city`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `city`')

    self._city = value
[ "def", "city", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "str", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type str '", "'for field `city`'", ".", "format", "(", "value", ")", ")", "if", "','", "in", "value", ":", "raise", "ValueError", "(", "'value should not contain a comma '", "'for field `city`'", ")", "self", ".", "_city", "=", "value" ]
Corresponds to IDD Field `city`

Args:
    value (str): value for IDD Field `city`
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "city" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L100-L122
train
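Behaviour of the validating setter above, which the string fields below (state_province_region, country, source, wmo) repeat verbatim; the comma check matters because export() joins all fields with commas. A short sketch, assuming pyepw is installed:

from pyepw.epw import Location

loc = Location()
loc.city = 42                 # any value is coerced with str()
assert loc.city == '42'
try:
    loc.city = 'Basel, CH'    # a comma would corrupt the CSV-style export
except ValueError as err:
    print(err)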
rbuffat/pyepw
pyepw/epw.py
Location.state_province_region
def state_province_region(self, value=None):
    """Corresponds to IDD Field `state_province_region`

    Args:
        value (str): value for IDD Field `state_province_region`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type str '
                'for field `state_province_region`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `state_province_region`')

    self._state_province_region = value
python
def state_province_region(self, value=None):
    """Corresponds to IDD Field `state_province_region`

    Args:
        value (str): value for IDD Field `state_province_region`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type str '
                'for field `state_province_region`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `state_province_region`')

    self._state_province_region = value
[ "def", "state_province_region", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "str", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type str '", "'for field `state_province_region`'", ".", "format", "(", "value", ")", ")", "if", "','", "in", "value", ":", "raise", "ValueError", "(", "'value should not contain a comma '", "'for field `state_province_region`'", ")", "self", ".", "_state_province_region", "=", "value" ]
Corresponds to IDD Field `state_province_region`

Args:
    value (str): value for IDD Field `state_province_region`
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "state_province_region" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L135-L158
train
rbuffat/pyepw
pyepw/epw.py
Location.country
def country(self, value=None):
    """Corresponds to IDD Field `country`

    Args:
        value (str): value for IDD Field `country`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} need to be of type str '
                             'for field `country`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `country`')

    self._country = value
python
def country(self, value=None):
    """Corresponds to IDD Field `country`

    Args:
        value (str): value for IDD Field `country`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} need to be of type str '
                             'for field `country`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `country`')

    self._country = value
[ "def", "country", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "str", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type str '", "'for field `country`'", ".", "format", "(", "value", ")", ")", "if", "','", "in", "value", ":", "raise", "ValueError", "(", "'value should not contain a comma '", "'for field `country`'", ")", "self", ".", "_country", "=", "value" ]
Corresponds to IDD Field `country`

Args:
    value (str): value for IDD Field `country`
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "country" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L171-L193
train
rbuffat/pyepw
pyepw/epw.py
Location.source
def source(self, value=None):
    """Corresponds to IDD Field `source`

    Args:
        value (str): value for IDD Field `source`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} need to be of type str '
                             'for field `source`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `source`')

    self._source = value
python
def source(self, value=None):
    """Corresponds to IDD Field `source`

    Args:
        value (str): value for IDD Field `source`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} need to be of type str '
                             'for field `source`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `source`')

    self._source = value
[ "def", "source", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "str", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type str '", "'for field `source`'", ".", "format", "(", "value", ")", ")", "if", "','", "in", "value", ":", "raise", "ValueError", "(", "'value should not contain a comma '", "'for field `source`'", ")", "self", ".", "_source", "=", "value" ]
Corresponds to IDD Field `source`

Args:
    value (str): value for IDD Field `source`
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "source" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L206-L228
train
rbuffat/pyepw
pyepw/epw.py
Location.wmo
def wmo(self, value=None):
    """Corresponds to IDD Field `wmo`
    usually a 6 digit field. Used as alpha in EnergyPlus.

    Args:
        value (str): value for IDD Field `wmo`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} need to be of type str '
                             'for field `wmo`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `wmo`')

    self._wmo = value
python
def wmo(self, value=None):
    """Corresponds to IDD Field `wmo`
    usually a 6 digit field. Used as alpha in EnergyPlus.

    Args:
        value (str): value for IDD Field `wmo`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} need to be of type str '
                             'for field `wmo`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `wmo`')

    self._wmo = value
[ "def", "wmo", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "str", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type str '", "'for field `wmo`'", ".", "format", "(", "value", ")", ")", "if", "','", "in", "value", ":", "raise", "ValueError", "(", "'value should not contain a comma '", "'for field `wmo`'", ")", "self", ".", "_wmo", "=", "value" ]
Corresponds to IDD Field `wmo`
usually a 6 digit field. Used as alpha in EnergyPlus.

Args:
    value (str): value for IDD Field `wmo`
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "wmo", "usually", "a", "6", "digit", "field", ".", "Used", "as", "alpha", "in", "EnergyPlus", "." ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L241-L264
train
rbuffat/pyepw
pyepw/epw.py
Location.latitude
def latitude(self, value=0.0):
    """Corresponds to IDD Field `latitude`
    + is North, - is South, degree minutes represented in decimal (i.e. 30 minutes is .5)

    Args:
        value (float): value for IDD Field `latitude`
            Unit: deg
            Default value: 0.0
            value >= -90.0
            value <= 90.0
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `latitude`'.format(value))
        if value < -90.0:
            raise ValueError('value need to be greater or equal -90.0 '
                             'for field `latitude`')
        if value > 90.0:
            raise ValueError('value need to be smaller 90.0 '
                             'for field `latitude`')

    self._latitude = value
python
def latitude(self, value=0.0):
    """Corresponds to IDD Field `latitude`
    + is North, - is South, degree minutes represented in decimal (i.e. 30 minutes is .5)

    Args:
        value (float): value for IDD Field `latitude`
            Unit: deg
            Default value: 0.0
            value >= -90.0
            value <= 90.0
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `latitude`'.format(value))
        if value < -90.0:
            raise ValueError('value need to be greater or equal -90.0 '
                             'for field `latitude`')
        if value > 90.0:
            raise ValueError('value need to be smaller 90.0 '
                             'for field `latitude`')

    self._latitude = value
[ "def", "latitude", "(", "self", ",", "value", "=", "0.0", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `latitude`'", ".", "format", "(", "value", ")", ")", "if", "value", "<", "-", "90.0", ":", "raise", "ValueError", "(", "'value need to be greater or equal -90.0 '", "'for field `latitude`'", ")", "if", "value", ">", "90.0", ":", "raise", "ValueError", "(", "'value need to be smaller 90.0 '", "'for field `latitude`'", ")", "self", ".", "_latitude", "=", "value" ]
Corresponds to IDD Field `latitude`
+ is North, - is South, degree minutes represented in decimal (i.e. 30 minutes is .5)

Args:
    value (float): value for IDD Field `latitude`
        Unit: deg
        Default value: 0.0
        value >= -90.0
        value <= 90.0
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "latitude" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L277-L308
train
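The numeric setters (latitude here, and longitude/timezone/elevation below) share one pattern: coerce with float(), then range-check. A short sketch, assuming pyepw is installed:

from pyepw.epw import Location

loc = Location()
loc.latitude = '41.98'        # strings parse via float()
assert loc.latitude == 41.98
try:
    loc.latitude = 91.0       # outside the allowed [-90.0, 90.0]
except ValueError as err:
    print(err)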
rbuffat/pyepw
pyepw/epw.py
Location.longitude
def longitude(self, value=0.0):
    """Corresponds to IDD Field `longitude`
    - is West, + is East, degree minutes represented in decimal (i.e. 30 minutes is .5)

    Args:
        value (float): value for IDD Field `longitude`
            Unit: deg
            Default value: 0.0
            value >= -180.0
            value <= 180.0
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `longitude`'.format(value))
        if value < -180.0:
            raise ValueError('value need to be greater or equal -180.0 '
                             'for field `longitude`')
        if value > 180.0:
            raise ValueError('value need to be smaller 180.0 '
                             'for field `longitude`')

    self._longitude = value
python
def longitude(self, value=0.0):
    """Corresponds to IDD Field `longitude`
    - is West, + is East, degree minutes represented in decimal (i.e. 30 minutes is .5)

    Args:
        value (float): value for IDD Field `longitude`
            Unit: deg
            Default value: 0.0
            value >= -180.0
            value <= 180.0
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `longitude`'.format(value))
        if value < -180.0:
            raise ValueError('value need to be greater or equal -180.0 '
                             'for field `longitude`')
        if value > 180.0:
            raise ValueError('value need to be smaller 180.0 '
                             'for field `longitude`')

    self._longitude = value
[ "def", "longitude", "(", "self", ",", "value", "=", "0.0", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `longitude`'", ".", "format", "(", "value", ")", ")", "if", "value", "<", "-", "180.0", ":", "raise", "ValueError", "(", "'value need to be greater or equal -180.0 '", "'for field `longitude`'", ")", "if", "value", ">", "180.0", ":", "raise", "ValueError", "(", "'value need to be smaller 180.0 '", "'for field `longitude`'", ")", "self", ".", "_longitude", "=", "value" ]
Corresponds to IDD Field `longitude`
- is West, + is East, degree minutes represented in decimal (i.e. 30 minutes is .5)

Args:
    value (float): value for IDD Field `longitude`
        Unit: deg
        Default value: 0.0
        value >= -180.0
        value <= 180.0
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "longitude" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L321-L352
train
rbuffat/pyepw
pyepw/epw.py
Location.timezone
def timezone(self, value=0.0):
    """Corresponds to IDD Field `timezone`
    Time relative to GMT.

    Args:
        value (float): value for IDD Field `timezone`
            Unit: hr - not on standard units list???
            Default value: 0.0
            value >= -12.0
            value <= 12.0
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `timezone`'.format(value))
        if value < -12.0:
            raise ValueError('value need to be greater or equal -12.0 '
                             'for field `timezone`')
        if value > 12.0:
            raise ValueError('value need to be smaller 12.0 '
                             'for field `timezone`')

    self._timezone = value
python
def timezone(self, value=0.0):
    """Corresponds to IDD Field `timezone`
    Time relative to GMT.

    Args:
        value (float): value for IDD Field `timezone`
            Unit: hr - not on standard units list???
            Default value: 0.0
            value >= -12.0
            value <= 12.0
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `timezone`'.format(value))
        if value < -12.0:
            raise ValueError('value need to be greater or equal -12.0 '
                             'for field `timezone`')
        if value > 12.0:
            raise ValueError('value need to be smaller 12.0 '
                             'for field `timezone`')

    self._timezone = value
[ "def", "timezone", "(", "self", ",", "value", "=", "0.0", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `timezone`'", ".", "format", "(", "value", ")", ")", "if", "value", "<", "-", "12.0", ":", "raise", "ValueError", "(", "'value need to be greater or equal -12.0 '", "'for field `timezone`'", ")", "if", "value", ">", "12.0", ":", "raise", "ValueError", "(", "'value need to be smaller 12.0 '", "'for field `timezone`'", ")", "self", ".", "_timezone", "=", "value" ]
Corresponds to IDD Field `timezone`
Time relative to GMT.

Args:
    value (float): value for IDD Field `timezone`
        Unit: hr - not on standard units list???
        Default value: 0.0
        value >= -12.0
        value <= 12.0
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "timezone", "Time", "relative", "to", "GMT", "." ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L365-L394
train
rbuffat/pyepw
pyepw/epw.py
Location.elevation
def elevation(self, value=0.0):
    """Corresponds to IDD Field `elevation`

    Args:
        value (float): value for IDD Field `elevation`
            Unit: m
            Default value: 0.0
            value >= -1000.0
            value < 9999.9
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `elevation`'.format(value))
        if value < -1000.0:
            raise ValueError('value need to be greater or equal -1000.0 '
                             'for field `elevation`')
        if value >= 9999.9:
            raise ValueError('value need to be smaller 9999.9 '
                             'for field `elevation`')

    self._elevation = value
python
def elevation(self, value=0.0):
    """Corresponds to IDD Field `elevation`

    Args:
        value (float): value for IDD Field `elevation`
            Unit: m
            Default value: 0.0
            value >= -1000.0
            value < 9999.9
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `elevation`'.format(value))
        if value < -1000.0:
            raise ValueError('value need to be greater or equal -1000.0 '
                             'for field `elevation`')
        if value >= 9999.9:
            raise ValueError('value need to be smaller 9999.9 '
                             'for field `elevation`')

    self._elevation = value
[ "def", "elevation", "(", "self", ",", "value", "=", "0.0", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `elevation`'", ".", "format", "(", "value", ")", ")", "if", "value", "<", "-", "1000.0", ":", "raise", "ValueError", "(", "'value need to be greater or equal -1000.0 '", "'for field `elevation`'", ")", "if", "value", ">=", "9999.9", ":", "raise", "ValueError", "(", "'value need to be smaller 9999.9 '", "'for field `elevation`'", ")", "self", ".", "_elevation", "=", "value" ]
Corresponds to IDD Field `elevation`

Args:
    value (float): value for IDD Field `elevation`
        Unit: m
        Default value: 0.0
        value >= -1000.0
        value < 9999.9
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "elevation" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L407-L436
train
rbuffat/pyepw
pyepw/epw.py
Location.export
def export(self, top=True):
    """Exports object to its string representation.

    Args:
        top (bool): if True appends `internal_name` before values.
            All non list objects should be exported with value top=True,
            all list objects, that are embedded in as fields inlist objects
            should be exported with `top`=False

    Returns:
        str: The objects string representation

    """
    out = []
    if top:
        out.append(self._internal_name)
    out.append(self._to_str(self.city))
    out.append(self._to_str(self.state_province_region))
    out.append(self._to_str(self.country))
    out.append(self._to_str(self.source))
    out.append(self._to_str(self.wmo))
    out.append(self._to_str(self.latitude))
    out.append(self._to_str(self.longitude))
    out.append(self._to_str(self.timezone))
    out.append(self._to_str(self.elevation))
    return ",".join(out)
python
def export(self, top=True):
    """Exports object to its string representation.

    Args:
        top (bool): if True appends `internal_name` before values.
            All non list objects should be exported with value top=True,
            all list objects, that are embedded in as fields inlist objects
            should be exported with `top`=False

    Returns:
        str: The objects string representation

    """
    out = []
    if top:
        out.append(self._internal_name)
    out.append(self._to_str(self.city))
    out.append(self._to_str(self.state_province_region))
    out.append(self._to_str(self.country))
    out.append(self._to_str(self.source))
    out.append(self._to_str(self.wmo))
    out.append(self._to_str(self.latitude))
    out.append(self._to_str(self.longitude))
    out.append(self._to_str(self.timezone))
    out.append(self._to_str(self.elevation))
    return ",".join(out)
[ "def", "export", "(", "self", ",", "top", "=", "True", ")", ":", "out", "=", "[", "]", "if", "top", ":", "out", ".", "append", "(", "self", ".", "_internal_name", ")", "out", ".", "append", "(", "self", ".", "_to_str", "(", "self", ".", "city", ")", ")", "out", ".", "append", "(", "self", ".", "_to_str", "(", "self", ".", "state_province_region", ")", ")", "out", ".", "append", "(", "self", ".", "_to_str", "(", "self", ".", "country", ")", ")", "out", ".", "append", "(", "self", ".", "_to_str", "(", "self", ".", "source", ")", ")", "out", ".", "append", "(", "self", ".", "_to_str", "(", "self", ".", "wmo", ")", ")", "out", ".", "append", "(", "self", ".", "_to_str", "(", "self", ".", "latitude", ")", ")", "out", ".", "append", "(", "self", ".", "_to_str", "(", "self", ".", "longitude", ")", ")", "out", ".", "append", "(", "self", ".", "_to_str", "(", "self", ".", "timezone", ")", ")", "out", ".", "append", "(", "self", ".", "_to_str", "(", "self", ".", "elevation", ")", ")", "return", "\",\"", ".", "join", "(", "out", ")" ]
Exports object to its string representation.

Args:
    top (bool): if True appends `internal_name` before values.
        All non list objects should be exported with value top=True,
        all list objects, that are embedded in as fields inlist objects
        should be exported with `top`=False

Returns:
    str: The objects string representation
[ "Exports", "object", "to", "its", "string", "representation", "." ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L451-L476
train
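Read followed by export gives back a LOCATION record; a hedged round-trip sketch (the record keyword and the exact float formatting depend on the class's _internal_name and _to_str helpers, which are not shown here):

from pyepw.epw import Location

loc = Location()
loc.read(['Chicago Ohare Intl Ap', 'IL', 'USA', 'TMY3',
          '725300', '41.98', '-87.92', '-6.0', '201.0'])
line = loc.export(top=True)
# Expected to look something like:
# LOCATION,Chicago Ohare Intl Ap,IL,USA,TMY3,725300,41.98,-87.92,-6.0,201.0
print(line)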
rbuffat/pyepw
pyepw/epw.py
DesignCondition.read
def read(self, vals):
    """Read values.

    Args:
        vals (list): list of strings representing values

    """
    # Field names in the exact order they appear in the data record.
    fields = [
        'title_of_design_condition', 'unkown_field', 'design_stat_heating',
        'coldestmonth', 'db996', 'db990', 'dp996', 'hr_dp996', 'db_dp996',
        'dp990', 'hr_dp990', 'db_dp990', 'ws004c', 'db_ws004c', 'ws010c',
        'db_ws010c', 'ws_db996', 'wd_db996', 'design_stat_cooling',
        'hottestmonth', 'dbr', 'db004', 'wb_db004', 'db010', 'wb_db010',
        'db020', 'wb_db020', 'wb004', 'db_wb004', 'wb010', 'db_wb010',
        'wb020', 'db_wb020', 'ws_db004', 'wd_db004', 'dp004', 'hr_dp004',
        'db_dp004', 'dp010', 'hr_dp010', 'db_dp010', 'dp020', 'hr_dp020',
        'db_dp020', 'en004', 'db_en004', 'en010', 'db_en010', 'en020',
        'db_en020', 'hrs_84_and_db12_8_or_20_6', 'design_stat_extremes',
        'ws010', 'ws025', 'ws050', 'wbmax', 'dbmin_mean', 'dbmax_mean',
        'dbmin_stddev', 'dbmax_stddev', 'dbmin05years', 'dbmax05years',
        'dbmin10years', 'dbmax10years', 'dbmin20years', 'dbmax20years',
        'dbmin50years', 'dbmax50years',
    ]
    # Empty strings become None; setattr routes through the property
    # setters, exactly like assigning each attribute one by one.
    for i, name in enumerate(fields):
        setattr(self, name, vals[i] if len(vals[i]) != 0 else None)
python
def read(self, vals):
    """Read values.

    Args:
        vals (list): list of strings representing values

    """
    # Field names in the exact order they appear in the data record.
    fields = [
        'title_of_design_condition', 'unkown_field', 'design_stat_heating',
        'coldestmonth', 'db996', 'db990', 'dp996', 'hr_dp996', 'db_dp996',
        'dp990', 'hr_dp990', 'db_dp990', 'ws004c', 'db_ws004c', 'ws010c',
        'db_ws010c', 'ws_db996', 'wd_db996', 'design_stat_cooling',
        'hottestmonth', 'dbr', 'db004', 'wb_db004', 'db010', 'wb_db010',
        'db020', 'wb_db020', 'wb004', 'db_wb004', 'wb010', 'db_wb010',
        'wb020', 'db_wb020', 'ws_db004', 'wd_db004', 'dp004', 'hr_dp004',
        'db_dp004', 'dp010', 'hr_dp010', 'db_dp010', 'dp020', 'hr_dp020',
        'db_dp020', 'en004', 'db_en004', 'en010', 'db_en010', 'en020',
        'db_en020', 'hrs_84_and_db12_8_or_20_6', 'design_stat_extremes',
        'ws010', 'ws025', 'ws050', 'wbmax', 'dbmin_mean', 'dbmax_mean',
        'dbmin_stddev', 'dbmax_stddev', 'dbmin05years', 'dbmax05years',
        'dbmin10years', 'dbmax10years', 'dbmin20years', 'dbmax20years',
        'dbmin50years', 'dbmax50years',
    ]
    # Empty strings become None; setattr routes through the property
    # setters, exactly like assigning each attribute one by one.
    for i, name in enumerate(fields):
        setattr(self, name, vals[i] if len(vals[i]) != 0 else None)
[ "def", "read", "(", "self", ",", "vals", ")", ":", "i", "=", "0", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "title_of_design_condition", "=", "None", "else", ":", "self", ".", "title_of_design_condition", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "unkown_field", "=", "None", "else", ":", "self", ".", "unkown_field", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "design_stat_heating", "=", "None", "else", ":", "self", ".", "design_stat_heating", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "coldestmonth", "=", "None", "else", ":", "self", ".", "coldestmonth", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db996", "=", "None", "else", ":", "self", ".", "db996", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db990", "=", "None", "else", ":", "self", ".", "db990", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dp996", "=", "None", "else", ":", "self", ".", "dp996", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "hr_dp996", "=", "None", "else", ":", "self", ".", "hr_dp996", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db_dp996", "=", "None", "else", ":", "self", ".", "db_dp996", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dp990", "=", "None", "else", ":", "self", ".", "dp990", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "hr_dp990", "=", "None", "else", ":", "self", ".", "hr_dp990", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db_dp990", "=", "None", "else", ":", "self", ".", "db_dp990", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "ws004c", "=", "None", "else", ":", "self", ".", "ws004c", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db_ws004c", "=", "None", "else", ":", "self", ".", "db_ws004c", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "ws010c", "=", "None", "else", ":", "self", ".", "ws010c", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db_ws010c", "=", "None", "else", ":", "self", ".", "db_ws010c", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "ws_db996", "=", "None", "else", ":", "self", ".", "ws_db996", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "wd_db996", "=", "None", "else", ":", "self", ".", "wd_db996", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "design_stat_cooling", "=", "None", "else", ":", "self", ".", "design_stat_cooling", "=", 
"vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "hottestmonth", "=", "None", "else", ":", "self", ".", "hottestmonth", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dbr", "=", "None", "else", ":", "self", ".", "dbr", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db004", "=", "None", "else", ":", "self", ".", "db004", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "wb_db004", "=", "None", "else", ":", "self", ".", "wb_db004", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db010", "=", "None", "else", ":", "self", ".", "db010", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "wb_db010", "=", "None", "else", ":", "self", ".", "wb_db010", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db020", "=", "None", "else", ":", "self", ".", "db020", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "wb_db020", "=", "None", "else", ":", "self", ".", "wb_db020", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "wb004", "=", "None", "else", ":", "self", ".", "wb004", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db_wb004", "=", "None", "else", ":", "self", ".", "db_wb004", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "wb010", "=", "None", "else", ":", "self", ".", "wb010", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db_wb010", "=", "None", "else", ":", "self", ".", "db_wb010", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "wb020", "=", "None", "else", ":", "self", ".", "wb020", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db_wb020", "=", "None", "else", ":", "self", ".", "db_wb020", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "ws_db004", "=", "None", "else", ":", "self", ".", "ws_db004", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "wd_db004", "=", "None", "else", ":", "self", ".", "wd_db004", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dp004", "=", "None", "else", ":", "self", ".", "dp004", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "hr_dp004", "=", "None", "else", ":", "self", ".", "hr_dp004", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db_dp004", "=", "None", "else", ":", "self", ".", "db_dp004", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dp010", "=", "None", 
"else", ":", "self", ".", "dp010", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "hr_dp010", "=", "None", "else", ":", "self", ".", "hr_dp010", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db_dp010", "=", "None", "else", ":", "self", ".", "db_dp010", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dp020", "=", "None", "else", ":", "self", ".", "dp020", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "hr_dp020", "=", "None", "else", ":", "self", ".", "hr_dp020", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db_dp020", "=", "None", "else", ":", "self", ".", "db_dp020", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "en004", "=", "None", "else", ":", "self", ".", "en004", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db_en004", "=", "None", "else", ":", "self", ".", "db_en004", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "en010", "=", "None", "else", ":", "self", ".", "en010", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db_en010", "=", "None", "else", ":", "self", ".", "db_en010", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "en020", "=", "None", "else", ":", "self", ".", "en020", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "db_en020", "=", "None", "else", ":", "self", ".", "db_en020", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "hrs_84_and_db12_8_or_20_6", "=", "None", "else", ":", "self", ".", "hrs_84_and_db12_8_or_20_6", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "design_stat_extremes", "=", "None", "else", ":", "self", ".", "design_stat_extremes", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "ws010", "=", "None", "else", ":", "self", ".", "ws010", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "ws025", "=", "None", "else", ":", "self", ".", "ws025", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "ws050", "=", "None", "else", ":", "self", ".", "ws050", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "wbmax", "=", "None", "else", ":", "self", ".", "wbmax", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dbmin_mean", "=", "None", "else", ":", "self", ".", "dbmin_mean", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dbmax_mean", "=", "None", "else", ":", "self", ".", "dbmax_mean", "=", "vals", "[", "i", "]", "i", "+=", "1", 
"if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dbmin_stddev", "=", "None", "else", ":", "self", ".", "dbmin_stddev", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dbmax_stddev", "=", "None", "else", ":", "self", ".", "dbmax_stddev", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dbmin05years", "=", "None", "else", ":", "self", ".", "dbmin05years", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dbmax05years", "=", "None", "else", ":", "self", ".", "dbmax05years", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dbmin10years", "=", "None", "else", ":", "self", ".", "dbmin10years", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dbmax10years", "=", "None", "else", ":", "self", ".", "dbmax10years", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dbmin20years", "=", "None", "else", ":", "self", ".", "dbmin20years", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dbmax20years", "=", "None", "else", ":", "self", ".", "dbmax20years", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dbmin50years", "=", "None", "else", ":", "self", ".", "dbmin50years", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "dbmax50years", "=", "None", "else", ":", "self", ".", "dbmax50years", "=", "vals", "[", "i", "]", "i", "+=", "1" ]
Read values.

Args:
    vals (list): list of strings representing values
[ "Read", "values", "." ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L559-L906
train
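The read() record above walks a single cursor i across a comma-split EPW DESIGN CONDITIONS line, mapping empty strings to None and assigning each value to the next attribute in a fixed order. A minimal sketch of that pattern, assuming an illustrative field list (the real class spells out every assignment explicitly, and type coercion happens later in the property setters, so the values stay strings here):

class DesignConditionSketch(object):
    # Hypothetical subset of the fields read() assigns; the real order is much longer.
    FIELDS = ["title_of_design_condition", "unkown_field", "design_stat_heating",
              "coldestmonth", "db996", "db990"]

    def read(self, vals):
        """Assign vals[i] to the i-th field, mapping empty strings to None."""
        for i, name in enumerate(self.FIELDS):
            setattr(self, name, vals[i] if len(vals[i]) != 0 else None)

dc = DesignConditionSketch()
dc.read("Climate Design Data,,Heating,1,-20.1,".split(","))
print(dc.coldestmonth, dc.db996, dc.db990)  # '1' '-20.1' None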
rbuffat/pyepw
pyepw/epw.py
DesignCondition.title_of_design_condition
def title_of_design_condition(self, value=None):
    """Corresponds to IDD Field `title_of_design_condition`

    Args:
        value (str): value for IDD Field `title_of_design_condition`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type str '
                'for field `title_of_design_condition`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `title_of_design_condition`')
    self._title_of_design_condition = value
python
def title_of_design_condition(self, value=None):
    """Corresponds to IDD Field `title_of_design_condition`

    Args:
        value (str): value for IDD Field `title_of_design_condition`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type str '
                'for field `title_of_design_condition`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `title_of_design_condition`')
    self._title_of_design_condition = value
[ "def", "title_of_design_condition", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "str", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type str '", "'for field `title_of_design_condition`'", ".", "format", "(", "value", ")", ")", "if", "','", "in", "value", ":", "raise", "ValueError", "(", "'value should not contain a comma '", "'for field `title_of_design_condition`'", ")", "self", ".", "_title_of_design_condition", "=", "value" ]
Corresponds to IDD Field `title_of_design_condition`

Args:
    value (str): value for IDD Field `title_of_design_condition`
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "title_of_design_condition" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L919-L942
train
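The setter above shows the validation pattern for free-text header fields: coerce to str, then reject commas, since an embedded comma would corrupt the comma-delimited EPW header on write. A hedged usage sketch (assuming DesignCondition can be instantiated with no constructor arguments; the title string is illustrative):

from pyepw.epw import DesignCondition

dc = DesignCondition()
dc.title_of_design_condition = "Climate Design Data 2009 ASHRAE Handbook"  # illustrative title
try:
    dc.title_of_design_condition = "bad, title"  # embedded comma is rejected
except ValueError as err:
    print(err)  # value should not contain a comma for field `title_of_design_condition`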
rbuffat/pyepw
pyepw/epw.py
DesignCondition.unkown_field
def unkown_field(self, value=None):
    """Corresponds to IDD Field `unkown_field`
    Empty field in data.

    Args:
        value (str): value for IDD Field `unkown_field`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} need to be of type str '
                             'for field `unkown_field`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `unkown_field`')
    self._unkown_field = value
python
def unkown_field(self, value=None):
    """Corresponds to IDD Field `unkown_field`
    Empty field in data.

    Args:
        value (str): value for IDD Field `unkown_field`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} need to be of type str '
                             'for field `unkown_field`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `unkown_field`')
    self._unkown_field = value
[ "def", "unkown_field", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "str", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type str '", "'for field `unkown_field`'", ".", "format", "(", "value", ")", ")", "if", "','", "in", "value", ":", "raise", "ValueError", "(", "'value should not contain a comma '", "'for field `unkown_field`'", ")", "self", ".", "_unkown_field", "=", "value" ]
Corresponds to IDD Field `unkown_field`
Empty field in data.

Args:
    value (str): value for IDD Field `unkown_field`
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "unkown_field", "Empty", "field", "in", "data", "." ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L955-L977
train
rbuffat/pyepw
pyepw/epw.py
DesignCondition.design_stat_heating
def design_stat_heating(self, value="Heating"):
    """Corresponds to IDD Field `design_stat_heating`

    Args:
        value (str): value for IDD Field `design_stat_heating`
            Accepted values are:
                  - Heating
            Default value: Heating
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type str '
                'for field `design_stat_heating`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `design_stat_heating`')
        vals = set()
        vals.add("Heating")
        if value not in vals:
            raise ValueError('value {} is not an accepted value for '
                             'field `design_stat_heating`'.format(value))
    self._design_stat_heating = value
python
def design_stat_heating(self, value="Heating"):
    """Corresponds to IDD Field `design_stat_heating`

    Args:
        value (str): value for IDD Field `design_stat_heating`
            Accepted values are:
                  - Heating
            Default value: Heating
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type str '
                'for field `design_stat_heating`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `design_stat_heating`')
        vals = set()
        vals.add("Heating")
        if value not in vals:
            raise ValueError('value {} is not an accepted value for '
                             'field `design_stat_heating`'.format(value))
    self._design_stat_heating = value
[ "def", "design_stat_heating", "(", "self", ",", "value", "=", "\"Heating\"", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "str", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type str '", "'for field `design_stat_heating`'", ".", "format", "(", "value", ")", ")", "if", "','", "in", "value", ":", "raise", "ValueError", "(", "'value should not contain a comma '", "'for field `design_stat_heating`'", ")", "vals", "=", "set", "(", ")", "vals", ".", "add", "(", "\"Heating\"", ")", "if", "value", "not", "in", "vals", ":", "raise", "ValueError", "(", "'value {} is not an accepted value for '", "'field `design_stat_heating`'", ".", "format", "(", "value", ")", ")", "self", ".", "_design_stat_heating", "=", "value" ]
Corresponds to IDD Field `design_stat_heating`

Args:
    value (str): value for IDD Field `design_stat_heating`
        Accepted values are:
              - Heating
        Default value: Heating
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "design_stat_heating" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L990-L1021
train
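design_stat_heating adds a whitelist check on top of the string validation: after coercion, the value must be a member of an accepted-values set (here a set of one, "Heating"). A standalone sketch of that accepted-values pattern; the helper name is hypothetical, not part of pyepw:

def validate_choice(value, accepted, field):
    # Mirror of the setter's checks: coerce to str, reject commas, then whitelist.
    if value is None:
        return None
    value = str(value)
    if ',' in value:
        raise ValueError('value should not contain a comma '
                         'for field `%s`' % field)
    if value not in accepted:
        raise ValueError('value %s is not an accepted value for '
                         'field `%s`' % (value, field))
    return value

print(validate_choice("Heating", {"Heating"}, "design_stat_heating"))  # Heating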
rbuffat/pyepw
pyepw/epw.py
DesignCondition.coldestmonth
def coldestmonth(self, value=None):
    """Corresponds to IDD Field `coldestmonth`

    Args:
        value (int): value for IDD Field `coldestmonth`
            value >= 1
            value <= 12
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} need to be of type int '
                             'for field `coldestmonth`'.format(value))
        if value < 1:
            raise ValueError('value need to be greater or equal 1 '
                             'for field `coldestmonth`')
        if value > 12:
            raise ValueError('value need to be smaller 12 '
                             'for field `coldestmonth`')
    self._coldestmonth = value
python
def coldestmonth(self, value=None):
    """Corresponds to IDD Field `coldestmonth`

    Args:
        value (int): value for IDD Field `coldestmonth`
            value >= 1
            value <= 12
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} need to be of type int '
                             'for field `coldestmonth`'.format(value))
        if value < 1:
            raise ValueError('value need to be greater or equal 1 '
                             'for field `coldestmonth`')
        if value > 12:
            raise ValueError('value need to be smaller 12 '
                             'for field `coldestmonth`')
    self._coldestmonth = value
[ "def", "coldestmonth", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "int", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type int '", "'for field `coldestmonth`'", ".", "format", "(", "value", ")", ")", "if", "value", "<", "1", ":", "raise", "ValueError", "(", "'value need to be greater or equal 1 '", "'for field `coldestmonth`'", ")", "if", "value", ">", "12", ":", "raise", "ValueError", "(", "'value need to be smaller 12 '", "'for field `coldestmonth`'", ")", "self", ".", "_coldestmonth", "=", "value" ]
Corresponds to IDD Field `coldestmonth`

Args:
    value (int): value for IDD Field `coldestmonth`
        value >= 1
        value <= 12
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "coldestmonth" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L1034-L1061
train
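coldestmonth shows the bounded-integer variant of the pattern: coerce with int(), then range-check against the 1..12 month interval. A hedged usage sketch (same no-argument constructor assumption as above):

from pyepw.epw import DesignCondition

dc = DesignCondition()
dc.coldestmonth = "1"      # string input is coerced to int by the setter
try:
    dc.coldestmonth = 13   # outside 1..12, so the setter raises
except ValueError as err:
    print(err)  # value need to be smaller 12 for field `coldestmonth`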
rbuffat/pyepw
pyepw/epw.py
DesignCondition.db996
def db996(self, value=None):
    """
    Corresponds to IDD Field `db996`
    Dry-bulb temperature corresponding to 99.6% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `db996`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `db996`'.format(value))
    self._db996 = value
python
def db996(self, value=None):
    """
    Corresponds to IDD Field `db996`
    Dry-bulb temperature corresponding to 99.6% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `db996`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `db996`'.format(value))
    self._db996 = value
[ "def", "db996", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `db996`'", ".", "format", "(", "value", ")", ")", "self", ".", "_db996", "=", "value" ]
Corresponds to IDD Field `db996`
Dry-bulb temperature corresponding to 99.6% annual cumulative
frequency of occurrence (cold conditions)

Args:
    value (float): value for IDD Field `db996`
        Unit: C
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "db996", "Dry", "-", "bulb", "temperature", "corresponding", "to", "99", ".", "6%", "annual", "cumulative", "frequency", "of", "occurrence", "(", "cold", "conditions", ")" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L1074-L1095
train
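db996 is the 99.6% heating design dry-bulb: the outdoor temperature exceeded during 99.6% of the hours in an average year, and the usual input to steady-state heating load sizing. A hedged sketch of that use, with purely illustrative numbers (not from any particular weather file):

def design_heat_loss_w(ua_w_per_k, t_indoor_c, db996_c):
    """Steady-state envelope heat loss Q = UA * (T_indoor - T_design), in watts."""
    return ua_w_per_k * (t_indoor_c - db996_c)

# e.g. a building with overall UA = 250 W/K held at 21 C against a -20.1 C design temperature
print(design_heat_loss_w(250.0, 21.0, -20.1))  # 10275.0 W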
rbuffat/pyepw
pyepw/epw.py
DesignCondition.db990
def db990(self, value=None):
    """
    Corresponds to IDD Field `db990`
    Dry-bulb temperature corresponding to 90.0% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `db990`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `db990`'.format(value))
    self._db990 = value
python
def db990(self, value=None):
    """
    Corresponds to IDD Field `db990`
    Dry-bulb temperature corresponding to 90.0% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `db990`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `db990`'.format(value))
    self._db990 = value
[ "def", "db990", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `db990`'", ".", "format", "(", "value", ")", ")", "self", ".", "_db990", "=", "value" ]
Corresponds to IDD Field `db990`
Dry-bulb temperature corresponding to 90.0% annual cumulative
frequency of occurrence (cold conditions)

Args:
    value (float): value for IDD Field `db990`
        Unit: C
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "db990", "Dry", "-", "bulb", "temperature", "corresponding", "to", "90", ".", "0%", "annual", "cumulative", "frequency", "of", "occurrence", "(", "cold", "conditions", ")" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L1108-L1129
train
rbuffat/pyepw
pyepw/epw.py
DesignCondition.dp996
def dp996(self, value=None):
    """
    Corresponds to IDD Field `dp996`
    Dew-point temperature corresponding to 99.6% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `dp996`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `dp996`'.format(value))
    self._dp996 = value
python
def dp996(self, value=None):
    """
    Corresponds to IDD Field `dp996`
    Dew-point temperature corresponding to 99.6% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `dp996`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `dp996`'.format(value))
    self._dp996 = value
[ "def", "dp996", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `dp996`'", ".", "format", "(", "value", ")", ")", "self", ".", "_dp996", "=", "value" ]
Corresponds to IDD Field `dp996`
Dew-point temperature corresponding to 99.6% annual cumulative
frequency of occurrence (cold conditions)

Args:
    value (float): value for IDD Field `dp996`
        Unit: C
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "dp996", "Dew", "-", "point", "temperature", "corresponding", "to", "99", ".", "6%", "annual", "cumulative", "frequency", "of", "occurrence", "(", "cold", "conditions", ")" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L1142-L1163
train
rbuffat/pyepw
pyepw/epw.py
DesignCondition.hr_dp996
def hr_dp996(self, value=None):
    """
    Corresponds to IDD Field `hr_dp996`
    humidity ratio, calculated at standard atmospheric pressure
    at elevation of station, corresponding to
    Dew-point temperature corresponding to 99.6% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `hr_dp996`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `hr_dp996`'.format(value))
    self._hr_dp996 = value
python
def hr_dp996(self, value=None):
    """
    Corresponds to IDD Field `hr_dp996`
    humidity ratio, calculated at standard atmospheric pressure
    at elevation of station, corresponding to
    Dew-point temperature corresponding to 99.6% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `hr_dp996`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `hr_dp996`'.format(value))
    self._hr_dp996 = value
[ "def", "hr_dp996", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `hr_dp996`'", ".", "format", "(", "value", ")", ")", "self", ".", "_hr_dp996", "=", "value" ]
Corresponds to IDD Field `hr_dp996`
humidity ratio, calculated at standard atmospheric pressure
at elevation of station, corresponding to
Dew-point temperature corresponding to 99.6% annual cumulative
frequency of occurrence (cold conditions)

Args:
    value (float): value for IDD Field `hr_dp996`
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "hr_dp996", "humidity", "ratio", "calculated", "at", "standard", "atmospheric", "pressure", "at", "elevation", "of", "station", "corresponding", "to", "Dew", "-", "point", "temperature", "corresponding", "to", "99", ".", "6%", "annual", "cumulative", "frequency", "of", "occurrence", "(", "cold", "conditions", ")" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L1176-L1198
train
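hr_dp996 stores the humidity ratio consistent with the 99.6% design dew point at the station's atmospheric pressure. The underlying psychrometric relation is W = 0.621945 * p_w / (p - p_w), with p_w the saturation vapour pressure at the dew-point temperature. A hedged sketch using the Magnus approximation for p_w (ASHRAE tabulates a different correlation, and a separate over-ice fit applies below 0 C, so values are illustrative only):

import math

def humidity_ratio_from_dewpoint(t_dp_c, p_pa=101325.0):
    """Humidity ratio (kg water / kg dry air) at dew point t_dp_c and pressure p_pa."""
    p_w = 610.94 * math.exp(17.625 * t_dp_c / (t_dp_c + 243.04))  # Magnus, over water, Pa
    return 0.621945 * p_w / (p_pa - p_w)

print(round(humidity_ratio_from_dewpoint(-25.0) * 1000.0, 3))  # ~0.498 g water / kg dry air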
rbuffat/pyepw
pyepw/epw.py
DesignCondition.db_dp996
def db_dp996(self, value=None):
    """
    Corresponds to IDD Field `db_dp996`
    mean coincident drybulb temperature corresponding to
    Dew-point temperature corresponding to 99.6% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `db_dp996`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `db_dp996`'.format(value))
    self._db_dp996 = value
python
def db_dp996(self, value=None):
    """
    Corresponds to IDD Field `db_dp996`
    mean coincident drybulb temperature corresponding to
    Dew-point temperature corresponding to 99.6% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `db_dp996`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `db_dp996`'.format(value))
    self._db_dp996 = value
[ "def", "db_dp996", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `db_dp996`'", ".", "format", "(", "value", ")", ")", "self", ".", "_db_dp996", "=", "value" ]
Corresponds to IDD Field `db_dp996`
mean coincident drybulb temperature corresponding to
Dew-point temperature corresponding to 99.6% annual cumulative
frequency of occurrence (cold conditions)

Args:
    value (float): value for IDD Field `db_dp996`
        Unit: C
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "db_dp996", "mean", "coincident", "drybulb", "temperature", "corresponding", "to", "Dew", "-", "point", "temperature", "corresponding", "to", "99", ".", "6%", "annual", "cumulative", "frequency", "of", "occurrence", "(", "cold", "conditions", ")" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L1211-L1233
train
rbuffat/pyepw
pyepw/epw.py
DesignCondition.dp990
def dp990(self, value=None):
    """
    Corresponds to IDD Field `dp990`
    Dew-point temperature corresponding to 90.0% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `dp990`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `dp990`'.format(value))
    self._dp990 = value
python
def dp990(self, value=None):
    """
    Corresponds to IDD Field `dp990`
    Dew-point temperature corresponding to 90.0% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `dp990`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `dp990`'.format(value))
    self._dp990 = value
[ "def", "dp990", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `dp990`'", ".", "format", "(", "value", ")", ")", "self", ".", "_dp990", "=", "value" ]
Corresponds to IDD Field `dp990`
Dew-point temperature corresponding to 90.0% annual cumulative
frequency of occurrence (cold conditions)

Args:
    value (float): value for IDD Field `dp990`
        Unit: C
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "dp990", "Dew", "-", "point", "temperature", "corresponding", "to", "90", ".", "0%", "annual", "cumulative", "frequency", "of", "occurrence", "(", "cold", "conditions", ")" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L1246-L1267
train
rbuffat/pyepw
pyepw/epw.py
DesignCondition.hr_dp990
def hr_dp990(self, value=None):
    """
    Corresponds to IDD Field `hr_dp990`
    humidity ratio, calculated at standard atmospheric pressure
    at elevation of station, corresponding to
    Dew-point temperature corresponding to 90.0% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `hr_dp990`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `hr_dp990`'.format(value))
    self._hr_dp990 = value
python
def hr_dp990(self, value=None):
    """
    Corresponds to IDD Field `hr_dp990`
    humidity ratio, calculated at standard atmospheric pressure
    at elevation of station, corresponding to
    Dew-point temperature corresponding to 90.0% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `hr_dp990`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `hr_dp990`'.format(value))
    self._hr_dp990 = value
[ "def", "hr_dp990", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `hr_dp990`'", ".", "format", "(", "value", ")", ")", "self", ".", "_hr_dp990", "=", "value" ]
Corresponds to IDD Field `hr_dp990`
humidity ratio, calculated at standard atmospheric pressure
at elevation of station, corresponding to
Dew-point temperature corresponding to 90.0% annual cumulative
frequency of occurrence (cold conditions)

Args:
    value (float): value for IDD Field `hr_dp990`
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "hr_dp990", "humidity", "ratio", "calculated", "at", "standard", "atmospheric", "pressure", "at", "elevation", "of", "station", "corresponding", "to", "Dew", "-", "point", "temperature", "corresponding", "to", "90", ".", "0%", "annual", "cumulative", "frequency", "of", "occurrence", "(", "cold", "conditions", ")" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L1280-L1302
train
rbuffat/pyepw
pyepw/epw.py
DesignCondition.db_dp990
def db_dp990(self, value=None):
    """
    Corresponds to IDD Field `db_dp990`
    mean coincident drybulb temperature corresponding to
    Dew-point temperature corresponding to 90.0% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `db_dp990`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `db_dp990`'.format(value))
    self._db_dp990 = value
python
def db_dp990(self, value=None):
    """
    Corresponds to IDD Field `db_dp990`
    mean coincident drybulb temperature corresponding to
    Dew-point temperature corresponding to 90.0% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `db_dp990`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value

    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `db_dp990`'.format(value))
    self._db_dp990 = value
[ "def", "db_dp990", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `db_dp990`'", ".", "format", "(", "value", ")", ")", "self", ".", "_db_dp990", "=", "value" ]
Corresponds to IDD Field `db_dp990`
mean coincident drybulb temperature corresponding to
Dew-point temperature corresponding to 90.0% annual cumulative
frequency of occurrence (cold conditions)

Args:
    value (float): value for IDD Field `db_dp990`
        Unit: C
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "db_dp990", "mean", "coincident", "drybulb", "temperature", "corresponding", "to", "Dew", "-", "point", "temperature", "corresponding", "to", "90", ".", "0%", "annual", "cumulative", "frequency", "of", "occurrence", "(", "cold", "conditions", ")" ]
373d4d3c8386c8d35789f086ac5f6018c2711745
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L1315-L1337
train
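Taken together, these setters are the building blocks behind pyepw's file-level API: reading a weather file presumably splits each header line and hands the fields to the matching header object's read(), as in the DesignCondition.read() record above. A minimal end-to-end sketch (the EPW and location accessors follow the pyepw README; the weather-file path is a placeholder):

from pyepw.epw import EPW

epw = EPW()
epw.read("weather.epw")   # any EnergyPlus .epw file; path is a placeholder
print(epw.location.city)  # Location header, parsed field-by-field like DesignCondition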