repository_name
stringclasses
316 values
func_path_in_repository
stringlengths
6
223
func_name
stringlengths
1
134
language
stringclasses
1 value
func_code_string
stringlengths
57
65.5k
func_documentation_string
stringlengths
1
46.3k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
called_functions
listlengths
1
156
enclosing_scope
stringlengths
2
1.48M
davisd50/sparc.cache
sparc/cache/splunk/area.py
CacheAreaForSplunkKV.cache
python
def cache(self, CachableItem): _cachedItem = self.get(CachableItem) if not _cachedItem: _cachedItem = self.mapper.get(CachableItem) self._add(_cachedItem) logger.debug("new cachable item added to Splunk KV cache area {id: %s, type: %s}", str(_cachedItem.getId()), str(_cachedItem.__class__)) notify(CacheObjectCreatedEvent(_cachedItem, self)) return _cachedItem else: _newCacheItem = self.mapper.get(CachableItem) if _cachedItem != _newCacheItem: logger.debug("Cachable item modified in Splunk KV cache area {id: %s, type: %s}", str(_newCacheItem.getId()), str(_newCacheItem.__class__)) self._update(_newCacheItem) notify(CacheObjectModifiedEvent(_newCacheItem, self)) return _newCacheItem return None
Updates caches area with latest item information returning ICachedItem if cache updates were required. Issues ICacheObjectCreatedEvent, and ICacheObjectModifiedEvent for ICacheArea/ICachableItem combo.
train
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/splunk/area.py#L117-L138
[ "def _add(self, CachedItem):\n r = self.request('post',\n self.url+\"storage/collections/data/\"+self.collname, \n headers={'Content-Type': 'application/json'}, \n data=json.dumps(self._data(CachedItem)))\n r.raise_for_status()\n", "def _update(self, CachedItem):\n r = self.request('post',\n self.url+\"storage/collections/data/\"+self.collname+'/'+CachedItem.getId(),\n headers={'Content-Type': 'application/json'}, \n data=json.dumps(self._data(CachedItem)))\n r.raise_for_status()\n", "def get(self, CachableItem):\n \"\"\"Returns current ICachedItem for ICachableItem or None if not cached\"\"\"\n cached_item = self.mapper.get(CachableItem)\n r = self.request('get',\n self.url+\"storage/collections/data/\"+self.collname+'/'+cached_item.getId(),\n data={'output_mode': 'json'})\n if r.ok:\n # we need to update the object with the values found in the cache area\n data = r.json()\n for name in self.mapper.mapper:\n setattr(cached_item, name, data[name])\n return cached_item\n return None\n" ]
class CacheAreaForSplunkKV(object): """An area where cached information can be stored persistently.""" implements(ITrimmableCacheArea) adapts(sparc.cache.ICachedItemMapper, sparc.db.splunk.ISplunkKVCollectionSchema, sparc.db.splunk.ISplunkConnectionInfo, sparc.db.splunk.ISPlunkKVCollectionIdentifier, sparc.utils.requests.IRequest) def __init__(self, mapper, schema, sci, kv_id, request): """Object initializer Args: mapper: Object providing sparc.cache.ICachedItemMapper that will convert ICachableItem instances into ICachedItem instances. schema: Object providing sparc.db.splunk.ISplunkKVCollectionSchema. sci: sparc.db.splunk.ISplunkConnectionInfo instance to provide connection information for Splunk indexing server kv_id: Object providing sparc.db.splunk.ISPlunkKVCollectionIdentifier request: Object providing sparc.utils.requests.IRequest """ self.gooble_request_warnings = False self.mapper = mapper self.schema = schema self.sci = sci self.kv_id = kv_id self._request = request self._request.req_kwargs['auth'] = (self.sci['username'], self.sci['password'],) self.collname = kv_id.collection self.appname = kv_id.application self.username = kv_id.username self.url = "".join(['https://',sci['host'],':',sci['port'], '/servicesNS/',self.username,'/', self.appname,'/']) def current_kv_names(self): """Return set of string names of current available Splunk KV collections""" return current_kv_names(self.sci, self.username, self.appname, request=self._request) def request(self, *args, **kwargs): return self._request.request(*args, **kwargs) def _data(self, CachedItem): data = {k:getattr(CachedItem, k) for k in self.mapper.mapper} data['_key'] = CachedItem.getId() return data def _add(self, CachedItem): r = self.request('post', self.url+"storage/collections/data/"+self.collname, headers={'Content-Type': 'application/json'}, data=json.dumps(self._data(CachedItem))) r.raise_for_status() def _update(self, CachedItem): r = self.request('post', 
self.url+"storage/collections/data/"+self.collname+'/'+CachedItem.getId(), headers={'Content-Type': 'application/json'}, data=json.dumps(self._data(CachedItem))) r.raise_for_status() def _delete(self, id_): if not id_: raise ValueError("Expected valid id for deletion") r = self.request('delete', self.url+"storage/collections/data/"+self.collname+'/'+str(id_)) r.raise_for_status() def _all_ids(self): r = self.request('get', self.url+"storage/collections/data/"+self.collname, headers={'Content-Type': 'application/json'}, params={'output_type': 'json', 'fields':'id'}) r.raise_for_status() data = set(map(lambda d: str(d['id']), r.json())) return data #ICacheArea def get(self, CachableItem): """Returns current ICachedItem for ICachableItem or None if not cached""" cached_item = self.mapper.get(CachableItem) r = self.request('get', self.url+"storage/collections/data/"+self.collname+'/'+cached_item.getId(), data={'output_mode': 'json'}) if r.ok: # we need to update the object with the values found in the cache area data = r.json() for name in self.mapper.mapper: setattr(cached_item, name, data[name]) return cached_item return None def isDirty(self, CachableItem): """True if cached information requires update for ICachableItem""" _cachedItem = self.get(CachableItem) if not _cachedItem: return True _newCacheItem = self.mapper.get(CachableItem) return False if _cachedItem == _newCacheItem else True def import_source(self, CachableSource): """Updates cache area and returns number of items updated with all available entries in ICachableSource """ _count = 0 self._import_source_items_id_list = set() # used to help speed up trim() for item in CachableSource.items(): self._import_source_items_id_list.add(item.getId()) if self.cache(item): _count += 1 return _count def reset(self): """Deletes all entries in the cache area""" if self.collname not in self.current_kv_names(): return # nothing to do # we'll simply delete the entire collection and then re-create it. 
r = self.request('delete', self.url+"storage/collections/data/"+self.collname) r.raise_for_status() self.initialize() def initialize(self): """Instantiates the cache area to be ready for updates""" if self.collname not in self.current_kv_names(): r = self.request('post', self.url+"storage/collections/config", headers={'content-type': 'application/json'}, data={'name': self.collname}) r.raise_for_status() # initialize schema re = self.request('post', self.url+"storage/collections/config/"+self.collname, headers = {'content-type': 'application/json'}, data=self.schema) re.raise_for_status() logger.info("initialized Splunk Key Value Collection %s with schema %s"\ % (self.collname, str(self.schema))) if self.collname not in self.current_kv_names(): raise EnvironmentError('expected %s in list of kv collections %s' % (self.collname, str(self.current_kv_names()))) #ITrimmableCacheArea def trim(self, source): if not ICachableSource.providedBy(source): #we'll fake a partial ICachableSource for use with import_source() source_type = type('FakeCachableSource', (object,), {}) _source = source #re-assign due to closure issue with source re-assignment below source_type.items = lambda self: _source source = source_type() updated = self.import_source(source) diff = self._all_ids() - self._import_source_items_id_list map(self._delete, diff) return (updated, len(diff), )
davisd50/sparc.cache
sparc/cache/splunk/area.py
CacheAreaForSplunkKV.import_source
python
def import_source(self, CachableSource): _count = 0 self._import_source_items_id_list = set() # used to help speed up trim() for item in CachableSource.items(): self._import_source_items_id_list.add(item.getId()) if self.cache(item): _count += 1 return _count
Updates cache area and returns number of items updated with all available entries in ICachableSource
train
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/splunk/area.py#L140-L150
[ "def cache(self, CachableItem):\n \"\"\"Updates caches area with latest item information returning\n ICachedItem if cache updates were required.\n\n Issues ICacheObjectCreatedEvent, and ICacheObjectModifiedEvent for\n ICacheArea/ICachableItem combo.\n \"\"\"\n _cachedItem = self.get(CachableItem)\n if not _cachedItem:\n _cachedItem = self.mapper.get(CachableItem)\n self._add(_cachedItem)\n logger.debug(\"new cachable item added to Splunk KV cache area {id: %s, type: %s}\", str(_cachedItem.getId()), str(_cachedItem.__class__))\n notify(CacheObjectCreatedEvent(_cachedItem, self))\n return _cachedItem\n else:\n _newCacheItem = self.mapper.get(CachableItem)\n if _cachedItem != _newCacheItem:\n logger.debug(\"Cachable item modified in Splunk KV cache area {id: %s, type: %s}\", str(_newCacheItem.getId()), str(_newCacheItem.__class__))\n self._update(_newCacheItem)\n notify(CacheObjectModifiedEvent(_newCacheItem, self))\n return _newCacheItem\n return None\n" ]
class CacheAreaForSplunkKV(object): """An area where cached information can be stored persistently.""" implements(ITrimmableCacheArea) adapts(sparc.cache.ICachedItemMapper, sparc.db.splunk.ISplunkKVCollectionSchema, sparc.db.splunk.ISplunkConnectionInfo, sparc.db.splunk.ISPlunkKVCollectionIdentifier, sparc.utils.requests.IRequest) def __init__(self, mapper, schema, sci, kv_id, request): """Object initializer Args: mapper: Object providing sparc.cache.ICachedItemMapper that will convert ICachableItem instances into ICachedItem instances. schema: Object providing sparc.db.splunk.ISplunkKVCollectionSchema. sci: sparc.db.splunk.ISplunkConnectionInfo instance to provide connection information for Splunk indexing server kv_id: Object providing sparc.db.splunk.ISPlunkKVCollectionIdentifier request: Object providing sparc.utils.requests.IRequest """ self.gooble_request_warnings = False self.mapper = mapper self.schema = schema self.sci = sci self.kv_id = kv_id self._request = request self._request.req_kwargs['auth'] = (self.sci['username'], self.sci['password'],) self.collname = kv_id.collection self.appname = kv_id.application self.username = kv_id.username self.url = "".join(['https://',sci['host'],':',sci['port'], '/servicesNS/',self.username,'/', self.appname,'/']) def current_kv_names(self): """Return set of string names of current available Splunk KV collections""" return current_kv_names(self.sci, self.username, self.appname, request=self._request) def request(self, *args, **kwargs): return self._request.request(*args, **kwargs) def _data(self, CachedItem): data = {k:getattr(CachedItem, k) for k in self.mapper.mapper} data['_key'] = CachedItem.getId() return data def _add(self, CachedItem): r = self.request('post', self.url+"storage/collections/data/"+self.collname, headers={'Content-Type': 'application/json'}, data=json.dumps(self._data(CachedItem))) r.raise_for_status() def _update(self, CachedItem): r = self.request('post', 
self.url+"storage/collections/data/"+self.collname+'/'+CachedItem.getId(), headers={'Content-Type': 'application/json'}, data=json.dumps(self._data(CachedItem))) r.raise_for_status() def _delete(self, id_): if not id_: raise ValueError("Expected valid id for deletion") r = self.request('delete', self.url+"storage/collections/data/"+self.collname+'/'+str(id_)) r.raise_for_status() def _all_ids(self): r = self.request('get', self.url+"storage/collections/data/"+self.collname, headers={'Content-Type': 'application/json'}, params={'output_type': 'json', 'fields':'id'}) r.raise_for_status() data = set(map(lambda d: str(d['id']), r.json())) return data #ICacheArea def get(self, CachableItem): """Returns current ICachedItem for ICachableItem or None if not cached""" cached_item = self.mapper.get(CachableItem) r = self.request('get', self.url+"storage/collections/data/"+self.collname+'/'+cached_item.getId(), data={'output_mode': 'json'}) if r.ok: # we need to update the object with the values found in the cache area data = r.json() for name in self.mapper.mapper: setattr(cached_item, name, data[name]) return cached_item return None def isDirty(self, CachableItem): """True if cached information requires update for ICachableItem""" _cachedItem = self.get(CachableItem) if not _cachedItem: return True _newCacheItem = self.mapper.get(CachableItem) return False if _cachedItem == _newCacheItem else True def cache(self, CachableItem): """Updates caches area with latest item information returning ICachedItem if cache updates were required. Issues ICacheObjectCreatedEvent, and ICacheObjectModifiedEvent for ICacheArea/ICachableItem combo. 
""" _cachedItem = self.get(CachableItem) if not _cachedItem: _cachedItem = self.mapper.get(CachableItem) self._add(_cachedItem) logger.debug("new cachable item added to Splunk KV cache area {id: %s, type: %s}", str(_cachedItem.getId()), str(_cachedItem.__class__)) notify(CacheObjectCreatedEvent(_cachedItem, self)) return _cachedItem else: _newCacheItem = self.mapper.get(CachableItem) if _cachedItem != _newCacheItem: logger.debug("Cachable item modified in Splunk KV cache area {id: %s, type: %s}", str(_newCacheItem.getId()), str(_newCacheItem.__class__)) self._update(_newCacheItem) notify(CacheObjectModifiedEvent(_newCacheItem, self)) return _newCacheItem return None def reset(self): """Deletes all entries in the cache area""" if self.collname not in self.current_kv_names(): return # nothing to do # we'll simply delete the entire collection and then re-create it. r = self.request('delete', self.url+"storage/collections/data/"+self.collname) r.raise_for_status() self.initialize() def initialize(self): """Instantiates the cache area to be ready for updates""" if self.collname not in self.current_kv_names(): r = self.request('post', self.url+"storage/collections/config", headers={'content-type': 'application/json'}, data={'name': self.collname}) r.raise_for_status() # initialize schema re = self.request('post', self.url+"storage/collections/config/"+self.collname, headers = {'content-type': 'application/json'}, data=self.schema) re.raise_for_status() logger.info("initialized Splunk Key Value Collection %s with schema %s"\ % (self.collname, str(self.schema))) if self.collname not in self.current_kv_names(): raise EnvironmentError('expected %s in list of kv collections %s' % (self.collname, str(self.current_kv_names()))) #ITrimmableCacheArea def trim(self, source): if not ICachableSource.providedBy(source): #we'll fake a partial ICachableSource for use with import_source() source_type = type('FakeCachableSource', (object,), {}) _source = source #re-assign due to closure 
issue with source re-assignment below source_type.items = lambda self: _source source = source_type() updated = self.import_source(source) diff = self._all_ids() - self._import_source_items_id_list map(self._delete, diff) return (updated, len(diff), )
davisd50/sparc.cache
sparc/cache/splunk/area.py
CacheAreaForSplunkKV.reset
python
def reset(self): if self.collname not in self.current_kv_names(): return # nothing to do # we'll simply delete the entire collection and then re-create it. r = self.request('delete', self.url+"storage/collections/data/"+self.collname) r.raise_for_status() self.initialize()
Deletes all entries in the cache area
train
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/splunk/area.py#L152-L160
[ "def current_kv_names(self):\n \"\"\"Return set of string names of current available Splunk KV collections\"\"\"\n return current_kv_names(self.sci, self.username, self.appname, request=self._request)\n", "def request(self, *args, **kwargs):\n return self._request.request(*args, **kwargs)\n", "def initialize(self):\n \"\"\"Instantiates the cache area to be ready for updates\"\"\"\n if self.collname not in self.current_kv_names():\n r = self.request('post',\n self.url+\"storage/collections/config\",\n headers={'content-type': 'application/json'},\n data={'name': self.collname})\n r.raise_for_status()\n # initialize schema\n re = self.request('post',\n self.url+\"storage/collections/config/\"+self.collname,\n headers = {'content-type': 'application/json'},\n data=self.schema)\n re.raise_for_status()\n logger.info(\"initialized Splunk Key Value Collection %s with schema %s\"\\\n % (self.collname, str(self.schema)))\n if self.collname not in self.current_kv_names():\n raise EnvironmentError('expected %s in list of kv collections %s' % (self.collname, str(self.current_kv_names())))\n" ]
class CacheAreaForSplunkKV(object): """An area where cached information can be stored persistently.""" implements(ITrimmableCacheArea) adapts(sparc.cache.ICachedItemMapper, sparc.db.splunk.ISplunkKVCollectionSchema, sparc.db.splunk.ISplunkConnectionInfo, sparc.db.splunk.ISPlunkKVCollectionIdentifier, sparc.utils.requests.IRequest) def __init__(self, mapper, schema, sci, kv_id, request): """Object initializer Args: mapper: Object providing sparc.cache.ICachedItemMapper that will convert ICachableItem instances into ICachedItem instances. schema: Object providing sparc.db.splunk.ISplunkKVCollectionSchema. sci: sparc.db.splunk.ISplunkConnectionInfo instance to provide connection information for Splunk indexing server kv_id: Object providing sparc.db.splunk.ISPlunkKVCollectionIdentifier request: Object providing sparc.utils.requests.IRequest """ self.gooble_request_warnings = False self.mapper = mapper self.schema = schema self.sci = sci self.kv_id = kv_id self._request = request self._request.req_kwargs['auth'] = (self.sci['username'], self.sci['password'],) self.collname = kv_id.collection self.appname = kv_id.application self.username = kv_id.username self.url = "".join(['https://',sci['host'],':',sci['port'], '/servicesNS/',self.username,'/', self.appname,'/']) def current_kv_names(self): """Return set of string names of current available Splunk KV collections""" return current_kv_names(self.sci, self.username, self.appname, request=self._request) def request(self, *args, **kwargs): return self._request.request(*args, **kwargs) def _data(self, CachedItem): data = {k:getattr(CachedItem, k) for k in self.mapper.mapper} data['_key'] = CachedItem.getId() return data def _add(self, CachedItem): r = self.request('post', self.url+"storage/collections/data/"+self.collname, headers={'Content-Type': 'application/json'}, data=json.dumps(self._data(CachedItem))) r.raise_for_status() def _update(self, CachedItem): r = self.request('post', 
self.url+"storage/collections/data/"+self.collname+'/'+CachedItem.getId(), headers={'Content-Type': 'application/json'}, data=json.dumps(self._data(CachedItem))) r.raise_for_status() def _delete(self, id_): if not id_: raise ValueError("Expected valid id for deletion") r = self.request('delete', self.url+"storage/collections/data/"+self.collname+'/'+str(id_)) r.raise_for_status() def _all_ids(self): r = self.request('get', self.url+"storage/collections/data/"+self.collname, headers={'Content-Type': 'application/json'}, params={'output_type': 'json', 'fields':'id'}) r.raise_for_status() data = set(map(lambda d: str(d['id']), r.json())) return data #ICacheArea def get(self, CachableItem): """Returns current ICachedItem for ICachableItem or None if not cached""" cached_item = self.mapper.get(CachableItem) r = self.request('get', self.url+"storage/collections/data/"+self.collname+'/'+cached_item.getId(), data={'output_mode': 'json'}) if r.ok: # we need to update the object with the values found in the cache area data = r.json() for name in self.mapper.mapper: setattr(cached_item, name, data[name]) return cached_item return None def isDirty(self, CachableItem): """True if cached information requires update for ICachableItem""" _cachedItem = self.get(CachableItem) if not _cachedItem: return True _newCacheItem = self.mapper.get(CachableItem) return False if _cachedItem == _newCacheItem else True def cache(self, CachableItem): """Updates caches area with latest item information returning ICachedItem if cache updates were required. Issues ICacheObjectCreatedEvent, and ICacheObjectModifiedEvent for ICacheArea/ICachableItem combo. 
""" _cachedItem = self.get(CachableItem) if not _cachedItem: _cachedItem = self.mapper.get(CachableItem) self._add(_cachedItem) logger.debug("new cachable item added to Splunk KV cache area {id: %s, type: %s}", str(_cachedItem.getId()), str(_cachedItem.__class__)) notify(CacheObjectCreatedEvent(_cachedItem, self)) return _cachedItem else: _newCacheItem = self.mapper.get(CachableItem) if _cachedItem != _newCacheItem: logger.debug("Cachable item modified in Splunk KV cache area {id: %s, type: %s}", str(_newCacheItem.getId()), str(_newCacheItem.__class__)) self._update(_newCacheItem) notify(CacheObjectModifiedEvent(_newCacheItem, self)) return _newCacheItem return None def import_source(self, CachableSource): """Updates cache area and returns number of items updated with all available entries in ICachableSource """ _count = 0 self._import_source_items_id_list = set() # used to help speed up trim() for item in CachableSource.items(): self._import_source_items_id_list.add(item.getId()) if self.cache(item): _count += 1 return _count def initialize(self): """Instantiates the cache area to be ready for updates""" if self.collname not in self.current_kv_names(): r = self.request('post', self.url+"storage/collections/config", headers={'content-type': 'application/json'}, data={'name': self.collname}) r.raise_for_status() # initialize schema re = self.request('post', self.url+"storage/collections/config/"+self.collname, headers = {'content-type': 'application/json'}, data=self.schema) re.raise_for_status() logger.info("initialized Splunk Key Value Collection %s with schema %s"\ % (self.collname, str(self.schema))) if self.collname not in self.current_kv_names(): raise EnvironmentError('expected %s in list of kv collections %s' % (self.collname, str(self.current_kv_names()))) #ITrimmableCacheArea def trim(self, source): if not ICachableSource.providedBy(source): #we'll fake a partial ICachableSource for use with import_source() source_type = type('FakeCachableSource', 
(object,), {}) _source = source #re-assign due to closure issue with source re-assignment below source_type.items = lambda self: _source source = source_type() updated = self.import_source(source) diff = self._all_ids() - self._import_source_items_id_list map(self._delete, diff) return (updated, len(diff), )
davisd50/sparc.cache
sparc/cache/splunk/area.py
CacheAreaForSplunkKV.initialize
python
def initialize(self): if self.collname not in self.current_kv_names(): r = self.request('post', self.url+"storage/collections/config", headers={'content-type': 'application/json'}, data={'name': self.collname}) r.raise_for_status() # initialize schema re = self.request('post', self.url+"storage/collections/config/"+self.collname, headers = {'content-type': 'application/json'}, data=self.schema) re.raise_for_status() logger.info("initialized Splunk Key Value Collection %s with schema %s"\ % (self.collname, str(self.schema))) if self.collname not in self.current_kv_names(): raise EnvironmentError('expected %s in list of kv collections %s' % (self.collname, str(self.current_kv_names())))
Instantiates the cache area to be ready for updates
train
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/splunk/area.py#L162-L179
[ "def current_kv_names(self):\n \"\"\"Return set of string names of current available Splunk KV collections\"\"\"\n return current_kv_names(self.sci, self.username, self.appname, request=self._request)\n", "def request(self, *args, **kwargs):\n return self._request.request(*args, **kwargs)\n" ]
class CacheAreaForSplunkKV(object): """An area where cached information can be stored persistently.""" implements(ITrimmableCacheArea) adapts(sparc.cache.ICachedItemMapper, sparc.db.splunk.ISplunkKVCollectionSchema, sparc.db.splunk.ISplunkConnectionInfo, sparc.db.splunk.ISPlunkKVCollectionIdentifier, sparc.utils.requests.IRequest) def __init__(self, mapper, schema, sci, kv_id, request): """Object initializer Args: mapper: Object providing sparc.cache.ICachedItemMapper that will convert ICachableItem instances into ICachedItem instances. schema: Object providing sparc.db.splunk.ISplunkKVCollectionSchema. sci: sparc.db.splunk.ISplunkConnectionInfo instance to provide connection information for Splunk indexing server kv_id: Object providing sparc.db.splunk.ISPlunkKVCollectionIdentifier request: Object providing sparc.utils.requests.IRequest """ self.gooble_request_warnings = False self.mapper = mapper self.schema = schema self.sci = sci self.kv_id = kv_id self._request = request self._request.req_kwargs['auth'] = (self.sci['username'], self.sci['password'],) self.collname = kv_id.collection self.appname = kv_id.application self.username = kv_id.username self.url = "".join(['https://',sci['host'],':',sci['port'], '/servicesNS/',self.username,'/', self.appname,'/']) def current_kv_names(self): """Return set of string names of current available Splunk KV collections""" return current_kv_names(self.sci, self.username, self.appname, request=self._request) def request(self, *args, **kwargs): return self._request.request(*args, **kwargs) def _data(self, CachedItem): data = {k:getattr(CachedItem, k) for k in self.mapper.mapper} data['_key'] = CachedItem.getId() return data def _add(self, CachedItem): r = self.request('post', self.url+"storage/collections/data/"+self.collname, headers={'Content-Type': 'application/json'}, data=json.dumps(self._data(CachedItem))) r.raise_for_status() def _update(self, CachedItem): r = self.request('post', 
self.url+"storage/collections/data/"+self.collname+'/'+CachedItem.getId(), headers={'Content-Type': 'application/json'}, data=json.dumps(self._data(CachedItem))) r.raise_for_status() def _delete(self, id_): if not id_: raise ValueError("Expected valid id for deletion") r = self.request('delete', self.url+"storage/collections/data/"+self.collname+'/'+str(id_)) r.raise_for_status() def _all_ids(self): r = self.request('get', self.url+"storage/collections/data/"+self.collname, headers={'Content-Type': 'application/json'}, params={'output_type': 'json', 'fields':'id'}) r.raise_for_status() data = set(map(lambda d: str(d['id']), r.json())) return data #ICacheArea def get(self, CachableItem): """Returns current ICachedItem for ICachableItem or None if not cached""" cached_item = self.mapper.get(CachableItem) r = self.request('get', self.url+"storage/collections/data/"+self.collname+'/'+cached_item.getId(), data={'output_mode': 'json'}) if r.ok: # we need to update the object with the values found in the cache area data = r.json() for name in self.mapper.mapper: setattr(cached_item, name, data[name]) return cached_item return None def isDirty(self, CachableItem): """True if cached information requires update for ICachableItem""" _cachedItem = self.get(CachableItem) if not _cachedItem: return True _newCacheItem = self.mapper.get(CachableItem) return False if _cachedItem == _newCacheItem else True def cache(self, CachableItem): """Updates caches area with latest item information returning ICachedItem if cache updates were required. Issues ICacheObjectCreatedEvent, and ICacheObjectModifiedEvent for ICacheArea/ICachableItem combo. 
""" _cachedItem = self.get(CachableItem) if not _cachedItem: _cachedItem = self.mapper.get(CachableItem) self._add(_cachedItem) logger.debug("new cachable item added to Splunk KV cache area {id: %s, type: %s}", str(_cachedItem.getId()), str(_cachedItem.__class__)) notify(CacheObjectCreatedEvent(_cachedItem, self)) return _cachedItem else: _newCacheItem = self.mapper.get(CachableItem) if _cachedItem != _newCacheItem: logger.debug("Cachable item modified in Splunk KV cache area {id: %s, type: %s}", str(_newCacheItem.getId()), str(_newCacheItem.__class__)) self._update(_newCacheItem) notify(CacheObjectModifiedEvent(_newCacheItem, self)) return _newCacheItem return None def import_source(self, CachableSource): """Updates cache area and returns number of items updated with all available entries in ICachableSource """ _count = 0 self._import_source_items_id_list = set() # used to help speed up trim() for item in CachableSource.items(): self._import_source_items_id_list.add(item.getId()) if self.cache(item): _count += 1 return _count def reset(self): """Deletes all entries in the cache area""" if self.collname not in self.current_kv_names(): return # nothing to do # we'll simply delete the entire collection and then re-create it. r = self.request('delete', self.url+"storage/collections/data/"+self.collname) r.raise_for_status() self.initialize() #ITrimmableCacheArea def trim(self, source): if not ICachableSource.providedBy(source): #we'll fake a partial ICachableSource for use with import_source() source_type = type('FakeCachableSource', (object,), {}) _source = source #re-assign due to closure issue with source re-assignment below source_type.items = lambda self: _source source = source_type() updated = self.import_source(source) diff = self._all_ids() - self._import_source_items_id_list map(self._delete, diff) return (updated, len(diff), )
ScienceLogic/amiuploader
amiimporter/amiupload.py
parse_args
python
def parse_args(): parser = argparse.ArgumentParser(description="Uploads specified VMDK file to AWS s3 bucket, and converts to AMI") parser.add_argument('-r', '--aws_regions', type=str, nargs='+', required=True, help='list of AWS regions where uploaded ami should be copied. Available' ' regions: {}.'.format(AWSUtilities.aws_regions)) parser.add_argument('-a', '--aws_profile', type=str, required=True, help='AWS profile name to use for aws cli commands') parser.add_argument('-b', '--s3_bucket', type=str, required=True, help='The aws_bucket of the profile to upload and save vmdk to') parser.add_argument('-f', '--vmdk_upload_file', type=str, required=True, help="The file to upload if executing ") parser.add_argument('-n', '--ami_name', type=str, required=False, help='The name to give to the uploaded ami. ' 'Defaults to the name of the file') parser.add_argument('-d', '--directory', type=str, default=tempfile.mkdtemp(), help='Directory to save temp aws config upload files') args = parser.parse_args() if not args.ami_name: args.ami_name = os.path.basename(args.vmdk_upload_file) validate_args(args) return args
Argument parser and validator
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/amiupload.py#L17-L41
[ "def validate_args(args):\n \"\"\"\n Perform necessary validation checks\n :param args:\n :return:\n \"\"\"\n # print size of vm to be dl, if dl dir exists, check that file to uplad is a vmdk\n if not os.path.isdir(args.directory):\n print \"Directory {} does not exist\".format(args.directory)\n sys.exit(5)\n\n try:\n args.vmdk_upload_file = args.vmdk_upload_file\n except AttributeError:\n args.vmdk_upload_file = None\n\n if args.vmdk_upload_file and not os.path.isfile(args.vmdk_upload_file):\n print \"Specified file: {} does not exist\".format(args.vmdk_upload_file)\n sys.exit(5)\n\n aws_importer = AWSUtilities.AWSUtils(args.directory, args.aws_profile, args.s3_bucket,\n args.aws_regions, args.ami_name, args.vmdk_upload_file)\n aws_importer.validate()\n" ]
#!/usr/bin/python import argparse import os import sys import tempfile import AWSUtilities # TODO: Add licensing # arg validation # pypi packaging info # requirements # readmes def validate_args(args): """ Perform necessary validation checks :param args: :return: """ # print size of vm to be dl, if dl dir exists, check that file to uplad is a vmdk if not os.path.isdir(args.directory): print "Directory {} does not exist".format(args.directory) sys.exit(5) try: args.vmdk_upload_file = args.vmdk_upload_file except AttributeError: args.vmdk_upload_file = None if args.vmdk_upload_file and not os.path.isfile(args.vmdk_upload_file): print "Specified file: {} does not exist".format(args.vmdk_upload_file) sys.exit(5) aws_importer = AWSUtilities.AWSUtils(args.directory, args.aws_profile, args.s3_bucket, args.aws_regions, args.ami_name, args.vmdk_upload_file) aws_importer.validate() def vmdk_to_ami(args): """ Calls methods to perform vmdk import :param args: :return: """ aws_importer = AWSUtilities.AWSUtils(args.directory, args.aws_profile, args.s3_bucket, args.aws_regions, args.ami_name, args.vmdk_upload_file) aws_importer.import_vmdk() def main(): args = parse_args() vmdk_to_ami(args) if __name__ == "__main__": main()
ScienceLogic/amiuploader
amiimporter/amiupload.py
validate_args
python
def validate_args(args): # print size of vm to be dl, if dl dir exists, check that file to uplad is a vmdk if not os.path.isdir(args.directory): print "Directory {} does not exist".format(args.directory) sys.exit(5) try: args.vmdk_upload_file = args.vmdk_upload_file except AttributeError: args.vmdk_upload_file = None if args.vmdk_upload_file and not os.path.isfile(args.vmdk_upload_file): print "Specified file: {} does not exist".format(args.vmdk_upload_file) sys.exit(5) aws_importer = AWSUtilities.AWSUtils(args.directory, args.aws_profile, args.s3_bucket, args.aws_regions, args.ami_name, args.vmdk_upload_file) aws_importer.validate()
Perform necessary validation checks :param args: :return:
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/amiupload.py#L44-L66
[ "def validate(self):\n \"\"\"\n Call instance validation methods\n :return:\n \"\"\"\n self.validate_regions()\n self.validate_bucket()\n self.validate_ec2_action()\n" ]
#!/usr/bin/python import argparse import os import sys import tempfile import AWSUtilities # TODO: Add licensing # arg validation # pypi packaging info # requirements # readmes def parse_args(): """ Argument parser and validator """ parser = argparse.ArgumentParser(description="Uploads specified VMDK file to AWS s3 bucket, and converts to AMI") parser.add_argument('-r', '--aws_regions', type=str, nargs='+', required=True, help='list of AWS regions where uploaded ami should be copied. Available' ' regions: {}.'.format(AWSUtilities.aws_regions)) parser.add_argument('-a', '--aws_profile', type=str, required=True, help='AWS profile name to use for aws cli commands') parser.add_argument('-b', '--s3_bucket', type=str, required=True, help='The aws_bucket of the profile to upload and save vmdk to') parser.add_argument('-f', '--vmdk_upload_file', type=str, required=True, help="The file to upload if executing ") parser.add_argument('-n', '--ami_name', type=str, required=False, help='The name to give to the uploaded ami. ' 'Defaults to the name of the file') parser.add_argument('-d', '--directory', type=str, default=tempfile.mkdtemp(), help='Directory to save temp aws config upload files') args = parser.parse_args() if not args.ami_name: args.ami_name = os.path.basename(args.vmdk_upload_file) validate_args(args) return args def vmdk_to_ami(args): """ Calls methods to perform vmdk import :param args: :return: """ aws_importer = AWSUtilities.AWSUtils(args.directory, args.aws_profile, args.s3_bucket, args.aws_regions, args.ami_name, args.vmdk_upload_file) aws_importer.import_vmdk() def main(): args = parse_args() vmdk_to_ami(args) if __name__ == "__main__": main()
ScienceLogic/amiuploader
amiimporter/amiupload.py
vmdk_to_ami
python
def vmdk_to_ami(args): aws_importer = AWSUtilities.AWSUtils(args.directory, args.aws_profile, args.s3_bucket, args.aws_regions, args.ami_name, args.vmdk_upload_file) aws_importer.import_vmdk()
Calls methods to perform vmdk import :param args: :return:
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/amiupload.py#L69-L77
[ "def import_vmdk(self):\n \"\"\"\n All actions necessary to import vmdk (calls s3 upload, and import to aws ec2)\n :param vmdk_location: location of vmdk to import. Can be provided as a string, or the result output of fabric\n execution\n :return:\n \"\"\"\n # Set the inital upload to be the first region in the list\n first_upload_region = self.aws_regions[0]\n\n print \"Initial AMI will be created in: {}\".format(first_upload_region)\n self.upload_to_s3(region=first_upload_region)\n # If the upload was successful, the name to reference for import is now the basename\n description = \"AMI upload of: {}\".format(os.path.basename(self.upload_file))\n temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description)\n import_id = self.run_ec2_import(file_location, description, first_upload_region)\n self.wait_for_import_to_complete(import_id)\n self.rename_image(import_id, self.ami_name, source_region=first_upload_region)\n return import_id\n" ]
#!/usr/bin/python import argparse import os import sys import tempfile import AWSUtilities # TODO: Add licensing # arg validation # pypi packaging info # requirements # readmes def parse_args(): """ Argument parser and validator """ parser = argparse.ArgumentParser(description="Uploads specified VMDK file to AWS s3 bucket, and converts to AMI") parser.add_argument('-r', '--aws_regions', type=str, nargs='+', required=True, help='list of AWS regions where uploaded ami should be copied. Available' ' regions: {}.'.format(AWSUtilities.aws_regions)) parser.add_argument('-a', '--aws_profile', type=str, required=True, help='AWS profile name to use for aws cli commands') parser.add_argument('-b', '--s3_bucket', type=str, required=True, help='The aws_bucket of the profile to upload and save vmdk to') parser.add_argument('-f', '--vmdk_upload_file', type=str, required=True, help="The file to upload if executing ") parser.add_argument('-n', '--ami_name', type=str, required=False, help='The name to give to the uploaded ami. 
' 'Defaults to the name of the file') parser.add_argument('-d', '--directory', type=str, default=tempfile.mkdtemp(), help='Directory to save temp aws config upload files') args = parser.parse_args() if not args.ami_name: args.ami_name = os.path.basename(args.vmdk_upload_file) validate_args(args) return args def validate_args(args): """ Perform necessary validation checks :param args: :return: """ # print size of vm to be dl, if dl dir exists, check that file to uplad is a vmdk if not os.path.isdir(args.directory): print "Directory {} does not exist".format(args.directory) sys.exit(5) try: args.vmdk_upload_file = args.vmdk_upload_file except AttributeError: args.vmdk_upload_file = None if args.vmdk_upload_file and not os.path.isfile(args.vmdk_upload_file): print "Specified file: {} does not exist".format(args.vmdk_upload_file) sys.exit(5) aws_importer = AWSUtilities.AWSUtils(args.directory, args.aws_profile, args.s3_bucket, args.aws_regions, args.ami_name, args.vmdk_upload_file) aws_importer.validate() def main(): args = parse_args() vmdk_to_ami(args) if __name__ == "__main__": main()
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
parse_image_json
python
def parse_image_json(text): image_details = json.loads(text) if image_details.get('Images') is not None: try: image_details = image_details.get('Images')[0] except IndexError: image_details = None return image_details
parses response output of AWS describe commands and returns the first (and only) item in array :param text: describe output :return: image json
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L12-L24
null
import sys import tempfile import os import shlex import subprocess import json import time aws_regions = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'ca-central-1', 'eu-west-1', 'eu-central-1', 'eu-west-2', 'ap-northeast-1', 'ap-northeast-2', 'ap-southeast-2', 'ap-south-1', 'sa-east-1'] class AWSUtils: """ Methods necessary to perform VM imports """ def __init__(self, config_save_dir, aws_profile, bucket, regions, ami_name, upload_file): """ Instantiate with common properties for all VM imports to AWS :param config_save_dir: where to save aws config files :param aws_project: which aws_project to upload to :param profile: which aws credential profile to use :param region: which aws region to impot AMI into """ self.aws_project = aws_profile self.aws_regions = regions self.config_save_dir = config_save_dir self.bucket_name = bucket self.ami_name = ami_name self.upload_file = upload_file def validate(self): """ Call instance validation methods :return: """ self.validate_regions() self.validate_bucket() self.validate_ec2_action() def validate_regions(self): """ Validate the user specified regions are valid :return: """ for region in self.aws_regions: if region not in aws_regions: print "Error: Specified region: {} is not a valid aws_region".format(region) print "Valid regions are: {}".format(aws_regions) def validate_ec2_action(self): """ Attempt to validate that the provided user has permissions to import an AMI :return: """ import_cmd = 'aws ec2 import-image --dry-run --profile {} --region {}'\ .format(self.aws_project, self.aws_regions[0]) print "Attempting ec2 import dry run: {}".format(import_cmd) try: subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: if "(DryRunOperation)" in e.output: # If it failed because of a dry run (what we asked for) then this except can be ignored print "Dry run operation successful!" 
return print "Error: {}".format(e.output) print "It doesn't seem like your user has the required permissions to import an ami image from s3" sys.exit(5) def validate_bucket(self): """ Do a quick check to see if the s3 bucket is valid :return: """ s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project, self.aws_regions[0]) print "Checking for s3 bucket" try: subprocess.check_output(shlex.split(s3_check_cmd)) except subprocess.CalledProcessError as e: print "Error: {}".format(e) print "Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"\ .format(self.bucket_name) sys.exit(5) def get_image_id_by_name(self, ami_name, region='us-east-1'): """ Locate an AMI image id by name in a particular region :param ami_name: ami name you need the id for :param region: the region the image exists in :return: id of the image """ image_details = None detail_query_attempts = 0 while image_details is None: describe_cmd = "aws ec2 describe-images --filters 'Name=name,Values={}' --profile '{}' --region {}"\ .format(ami_name, self.aws_project, region) res = subprocess.check_output(shlex.split(describe_cmd)) print "describe command returned: {}".format(res) image_details = parse_image_json(res) if not image_details: if detail_query_attempts > 5: print "Tried to get image details 5 times and failed, exiting" raise Exception("Unable to get AMI image id from AWS using the image name") time.sleep(10) print "No images defined returned yet, will try another query" detail_query_attempts += 1 image_id = image_details['ImageId'] print "located image id: {}".format(image_id) return image_id def copy_ami_to_new_name(self, ami_id, new_name, source_region='us-east-1'): """ Copies an AMI from the default region and name to the desired name and region :param ami_id: ami id to copy :param new_name: name of the new ami to create :param source_region: the source region of the ami to copy """ new_image_ids = [] for 
region in self.aws_regions: copy_img_cmd = "aws ec2 copy-image --source-image-id {} --profile {} --source-region {} --region {} --name {}"\ .format(ami_id, self.aws_project, source_region, region, new_name) res = subprocess.check_output(shlex.split(copy_img_cmd)) print "Copy cmd returned: {}".format(res) new_image_id = json.loads(res).get('ImageId') new_image_ids.append((new_image_id, region)) print "new image Id is: {}".format(new_image_id) print "monitoring the copies for the following regions/id : {}".format(new_image_ids) for tupp in new_image_ids: image_id = tupp[0] image_region = tupp[1] self.wait_for_copy_available(image_id, image_region) def deregister_image(self, ami_id, region='us-east-1'): """ Deregister an AMI by id :param ami_id: :param region: region to deregister from :return: """ deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}"\ .format(self.aws_project, region, ami_id) print "De-registering old image, now that the new one exists." print "De-registering cmd: {}".format(deregister_cmd) res = subprocess.check_output(shlex.split(deregister_cmd)) print "Response: {}".format(res) print "Not monitoring de-register command" def wait_for_copy_available(self, image_id, region): """ Wait for the newly copied ami to become available :param image_id: image id to monitor :param region: region to monitor copy """ waiting = True describe_image_cmd = "aws ec2 --profile {} --region {} --output json describe-images --image-id {}"\ .format(self.aws_project, region, image_id) while waiting: res = subprocess.check_output(shlex.split(describe_image_cmd)) print "described image returned: {}".format(res) image_json = parse_image_json(res) image_state = image_json['State'] if image_state == 'available': print "Copied AMI is renamed and ready to use!" return elif image_state == 'failed': print "Copied AMI failed for some reason..." sys.exit(5) else: print "image state is currently: {}".format(image_state) print "Sleeping for 30 seconds..." 
time.sleep(30) def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'): """ Method which renames an ami by copying to a new ami with a new name (only way this is possible in AWS) :param ami_name: :param new_ami_name: :return: """ print "Re-naming/moving AMI to desired name and region" image_id = self.get_image_id_by_name(ami_name, source_region) self.copy_ami_to_new_name(image_id, new_ami_name, source_region) self.deregister_image(image_id, source_region) def create_config_file(self, vmdk_location, description): """ Create the aws import config file :param vmdk_location: location of downloaded VMDK :param description: description to use for config_file creation :return: config file descriptor, config file full path """ description = description format = "vmdk" user_bucket = { "S3Bucket": self.bucket_name, "S3Key": vmdk_location } parent_obj = {'Description': description, 'Format': format, 'UserBucket': user_bucket} obj_list = [parent_obj] temp_fd, temp_file = tempfile.mkstemp() print 'creating tmp file for {} at {}'.format(vmdk_location, temp_file) with os.fdopen(temp_fd, 'w') as f: json.dump(obj_list, f) return temp_fd, temp_file def run_ec2_import(self, config_file_location, description, region='us-east-1'): """ Runs the command to import an uploaded vmdk to aws ec2 :param config_file_location: config file of import param location :param description: description to attach to the import task :return: the import task id for the given ami """ import_cmd = "aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'" \ " --disk-containers file://{}"\ .format(description, self.aws_project, region, config_file_location) try: res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print "Error importing to ec2" print "output: {}".format(e.output) sys.exit(5) print "got res: {}".format(res) res_json = json.loads(res) task_running, import_id = 
self.check_task_status_and_id(res_json) return import_id def upload_to_s3(self, region='us-east-1'): """ Uploads the vmdk file to aws s3 :param file_location: location of vmdk :return: """ s3_import_cmd = "aws s3 cp {} s3://{} --profile '{}' --region {}".format(self.upload_file, self.bucket_name, self.aws_project, region) print "Uploading to bucket {} in s3 with the cmd: {}".format(self.bucket_name, s3_import_cmd) # s3 upload puts DL progress to stderr s3_upload = subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE) while True: progress = s3_upload.stderr.readline() if progress == '' and s3_upload.poll() is not None: break if progress: print (progress) rc = s3_upload.poll() if rc != 0: raise subprocess.CalledProcessError(rc) print "Upload completed successfully" def wait_for_import_to_complete(self, import_id, region='us-east-1'): """ Monitors the status of aws import, waiting for it to complete, or error out :param import_id: id of import task to monitor """ task_running = True while task_running: import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id) res = subprocess.check_output(shlex.split(import_status_cmd)) print "Current status: {}".format(res) res_json = json.loads(res) task_running, image_id = self.check_task_status_and_id(res_json) @staticmethod def check_task_status_and_id(task_json): """ Read status of import json and parse :param task_json: status json to parse :return: (stillRunning, imageId) """ if task_json.get('ImportImageTasks') is not None: task = task_json['ImportImageTasks'][0] else: task = task_json current_status = task['Status'] image_id = task['ImportTaskId'] if current_status == 'completed': print "The import has completed succesfully as ID: {}".format(image_id) return False, image_id elif current_status == 'deleting': print "The import job has been cancelled for some reason" return False, None elif current_status 
== 'deleted': print "The import job was cancelled" return False, None else: print "The current import job for id {} status is: {}".format(image_id, current_status) print "sleeping for 30 seconds" time.sleep(30) return True, image_id def import_vmdk(self): """ All actions necessary to import vmdk (calls s3 upload, and import to aws ec2) :param vmdk_location: location of vmdk to import. Can be provided as a string, or the result output of fabric execution :return: """ # Set the inital upload to be the first region in the list first_upload_region = self.aws_regions[0] print "Initial AMI will be created in: {}".format(first_upload_region) self.upload_to_s3(region=first_upload_region) # If the upload was successful, the name to reference for import is now the basename description = "AMI upload of: {}".format(os.path.basename(self.upload_file)) temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description) import_id = self.run_ec2_import(file_location, description, first_upload_region) self.wait_for_import_to_complete(import_id) self.rename_image(import_id, self.ami_name, source_region=first_upload_region) return import_id
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
AWSUtils.validate_regions
python
def validate_regions(self): for region in self.aws_regions: if region not in aws_regions: print "Error: Specified region: {} is not a valid aws_region".format(region) print "Valid regions are: {}".format(aws_regions)
Validate the user specified regions are valid :return:
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L54-L62
null
class AWSUtils: """ Methods necessary to perform VM imports """ def __init__(self, config_save_dir, aws_profile, bucket, regions, ami_name, upload_file): """ Instantiate with common properties for all VM imports to AWS :param config_save_dir: where to save aws config files :param aws_project: which aws_project to upload to :param profile: which aws credential profile to use :param region: which aws region to impot AMI into """ self.aws_project = aws_profile self.aws_regions = regions self.config_save_dir = config_save_dir self.bucket_name = bucket self.ami_name = ami_name self.upload_file = upload_file def validate(self): """ Call instance validation methods :return: """ self.validate_regions() self.validate_bucket() self.validate_ec2_action() def validate_ec2_action(self): """ Attempt to validate that the provided user has permissions to import an AMI :return: """ import_cmd = 'aws ec2 import-image --dry-run --profile {} --region {}'\ .format(self.aws_project, self.aws_regions[0]) print "Attempting ec2 import dry run: {}".format(import_cmd) try: subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: if "(DryRunOperation)" in e.output: # If it failed because of a dry run (what we asked for) then this except can be ignored print "Dry run operation successful!" return print "Error: {}".format(e.output) print "It doesn't seem like your user has the required permissions to import an ami image from s3" sys.exit(5) def validate_bucket(self): """ Do a quick check to see if the s3 bucket is valid :return: """ s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project, self.aws_regions[0]) print "Checking for s3 bucket" try: subprocess.check_output(shlex.split(s3_check_cmd)) except subprocess.CalledProcessError as e: print "Error: {}".format(e) print "Unable to query s3 bucket: {}. 
Validate that it exists, and your user has sufficient permissions"\ .format(self.bucket_name) sys.exit(5) def get_image_id_by_name(self, ami_name, region='us-east-1'): """ Locate an AMI image id by name in a particular region :param ami_name: ami name you need the id for :param region: the region the image exists in :return: id of the image """ image_details = None detail_query_attempts = 0 while image_details is None: describe_cmd = "aws ec2 describe-images --filters 'Name=name,Values={}' --profile '{}' --region {}"\ .format(ami_name, self.aws_project, region) res = subprocess.check_output(shlex.split(describe_cmd)) print "describe command returned: {}".format(res) image_details = parse_image_json(res) if not image_details: if detail_query_attempts > 5: print "Tried to get image details 5 times and failed, exiting" raise Exception("Unable to get AMI image id from AWS using the image name") time.sleep(10) print "No images defined returned yet, will try another query" detail_query_attempts += 1 image_id = image_details['ImageId'] print "located image id: {}".format(image_id) return image_id def copy_ami_to_new_name(self, ami_id, new_name, source_region='us-east-1'): """ Copies an AMI from the default region and name to the desired name and region :param ami_id: ami id to copy :param new_name: name of the new ami to create :param source_region: the source region of the ami to copy """ new_image_ids = [] for region in self.aws_regions: copy_img_cmd = "aws ec2 copy-image --source-image-id {} --profile {} --source-region {} --region {} --name {}"\ .format(ami_id, self.aws_project, source_region, region, new_name) res = subprocess.check_output(shlex.split(copy_img_cmd)) print "Copy cmd returned: {}".format(res) new_image_id = json.loads(res).get('ImageId') new_image_ids.append((new_image_id, region)) print "new image Id is: {}".format(new_image_id) print "monitoring the copies for the following regions/id : {}".format(new_image_ids) for tupp in new_image_ids: image_id = 
tupp[0] image_region = tupp[1] self.wait_for_copy_available(image_id, image_region) def deregister_image(self, ami_id, region='us-east-1'): """ Deregister an AMI by id :param ami_id: :param region: region to deregister from :return: """ deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}"\ .format(self.aws_project, region, ami_id) print "De-registering old image, now that the new one exists." print "De-registering cmd: {}".format(deregister_cmd) res = subprocess.check_output(shlex.split(deregister_cmd)) print "Response: {}".format(res) print "Not monitoring de-register command" def wait_for_copy_available(self, image_id, region): """ Wait for the newly copied ami to become available :param image_id: image id to monitor :param region: region to monitor copy """ waiting = True describe_image_cmd = "aws ec2 --profile {} --region {} --output json describe-images --image-id {}"\ .format(self.aws_project, region, image_id) while waiting: res = subprocess.check_output(shlex.split(describe_image_cmd)) print "described image returned: {}".format(res) image_json = parse_image_json(res) image_state = image_json['State'] if image_state == 'available': print "Copied AMI is renamed and ready to use!" return elif image_state == 'failed': print "Copied AMI failed for some reason..." sys.exit(5) else: print "image state is currently: {}".format(image_state) print "Sleeping for 30 seconds..." 
time.sleep(30) def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'): """ Method which renames an ami by copying to a new ami with a new name (only way this is possible in AWS) :param ami_name: :param new_ami_name: :return: """ print "Re-naming/moving AMI to desired name and region" image_id = self.get_image_id_by_name(ami_name, source_region) self.copy_ami_to_new_name(image_id, new_ami_name, source_region) self.deregister_image(image_id, source_region) def create_config_file(self, vmdk_location, description): """ Create the aws import config file :param vmdk_location: location of downloaded VMDK :param description: description to use for config_file creation :return: config file descriptor, config file full path """ description = description format = "vmdk" user_bucket = { "S3Bucket": self.bucket_name, "S3Key": vmdk_location } parent_obj = {'Description': description, 'Format': format, 'UserBucket': user_bucket} obj_list = [parent_obj] temp_fd, temp_file = tempfile.mkstemp() print 'creating tmp file for {} at {}'.format(vmdk_location, temp_file) with os.fdopen(temp_fd, 'w') as f: json.dump(obj_list, f) return temp_fd, temp_file def run_ec2_import(self, config_file_location, description, region='us-east-1'): """ Runs the command to import an uploaded vmdk to aws ec2 :param config_file_location: config file of import param location :param description: description to attach to the import task :return: the import task id for the given ami """ import_cmd = "aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'" \ " --disk-containers file://{}"\ .format(description, self.aws_project, region, config_file_location) try: res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print "Error importing to ec2" print "output: {}".format(e.output) sys.exit(5) print "got res: {}".format(res) res_json = json.loads(res) task_running, import_id = 
self.check_task_status_and_id(res_json) return import_id def upload_to_s3(self, region='us-east-1'): """ Uploads the vmdk file to aws s3 :param file_location: location of vmdk :return: """ s3_import_cmd = "aws s3 cp {} s3://{} --profile '{}' --region {}".format(self.upload_file, self.bucket_name, self.aws_project, region) print "Uploading to bucket {} in s3 with the cmd: {}".format(self.bucket_name, s3_import_cmd) # s3 upload puts DL progress to stderr s3_upload = subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE) while True: progress = s3_upload.stderr.readline() if progress == '' and s3_upload.poll() is not None: break if progress: print (progress) rc = s3_upload.poll() if rc != 0: raise subprocess.CalledProcessError(rc) print "Upload completed successfully" def wait_for_import_to_complete(self, import_id, region='us-east-1'): """ Monitors the status of aws import, waiting for it to complete, or error out :param import_id: id of import task to monitor """ task_running = True while task_running: import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id) res = subprocess.check_output(shlex.split(import_status_cmd)) print "Current status: {}".format(res) res_json = json.loads(res) task_running, image_id = self.check_task_status_and_id(res_json) @staticmethod def check_task_status_and_id(task_json): """ Read status of import json and parse :param task_json: status json to parse :return: (stillRunning, imageId) """ if task_json.get('ImportImageTasks') is not None: task = task_json['ImportImageTasks'][0] else: task = task_json current_status = task['Status'] image_id = task['ImportTaskId'] if current_status == 'completed': print "The import has completed succesfully as ID: {}".format(image_id) return False, image_id elif current_status == 'deleting': print "The import job has been cancelled for some reason" return False, None elif current_status 
== 'deleted': print "The import job was cancelled" return False, None else: print "The current import job for id {} status is: {}".format(image_id, current_status) print "sleeping for 30 seconds" time.sleep(30) return True, image_id def import_vmdk(self): """ All actions necessary to import vmdk (calls s3 upload, and import to aws ec2) :param vmdk_location: location of vmdk to import. Can be provided as a string, or the result output of fabric execution :return: """ # Set the inital upload to be the first region in the list first_upload_region = self.aws_regions[0] print "Initial AMI will be created in: {}".format(first_upload_region) self.upload_to_s3(region=first_upload_region) # If the upload was successful, the name to reference for import is now the basename description = "AMI upload of: {}".format(os.path.basename(self.upload_file)) temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description) import_id = self.run_ec2_import(file_location, description, first_upload_region) self.wait_for_import_to_complete(import_id) self.rename_image(import_id, self.ami_name, source_region=first_upload_region) return import_id
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
AWSUtils.validate_ec2_action
python
def validate_ec2_action(self): import_cmd = 'aws ec2 import-image --dry-run --profile {} --region {}'\ .format(self.aws_project, self.aws_regions[0]) print "Attempting ec2 import dry run: {}".format(import_cmd) try: subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: if "(DryRunOperation)" in e.output: # If it failed because of a dry run (what we asked for) then this except can be ignored print "Dry run operation successful!" return print "Error: {}".format(e.output) print "It doesn't seem like your user has the required permissions to import an ami image from s3" sys.exit(5)
Attempt to validate that the provided user has permissions to import an AMI :return:
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L64-L81
null
class AWSUtils: """ Methods necessary to perform VM imports """ def __init__(self, config_save_dir, aws_profile, bucket, regions, ami_name, upload_file): """ Instantiate with common properties for all VM imports to AWS :param config_save_dir: where to save aws config files :param aws_project: which aws_project to upload to :param profile: which aws credential profile to use :param region: which aws region to impot AMI into """ self.aws_project = aws_profile self.aws_regions = regions self.config_save_dir = config_save_dir self.bucket_name = bucket self.ami_name = ami_name self.upload_file = upload_file def validate(self): """ Call instance validation methods :return: """ self.validate_regions() self.validate_bucket() self.validate_ec2_action() def validate_regions(self): """ Validate the user specified regions are valid :return: """ for region in self.aws_regions: if region not in aws_regions: print "Error: Specified region: {} is not a valid aws_region".format(region) print "Valid regions are: {}".format(aws_regions) def validate_bucket(self): """ Do a quick check to see if the s3 bucket is valid :return: """ s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project, self.aws_regions[0]) print "Checking for s3 bucket" try: subprocess.check_output(shlex.split(s3_check_cmd)) except subprocess.CalledProcessError as e: print "Error: {}".format(e) print "Unable to query s3 bucket: {}. 
Validate that it exists, and your user has sufficient permissions"\ .format(self.bucket_name) sys.exit(5) def get_image_id_by_name(self, ami_name, region='us-east-1'): """ Locate an AMI image id by name in a particular region :param ami_name: ami name you need the id for :param region: the region the image exists in :return: id of the image """ image_details = None detail_query_attempts = 0 while image_details is None: describe_cmd = "aws ec2 describe-images --filters 'Name=name,Values={}' --profile '{}' --region {}"\ .format(ami_name, self.aws_project, region) res = subprocess.check_output(shlex.split(describe_cmd)) print "describe command returned: {}".format(res) image_details = parse_image_json(res) if not image_details: if detail_query_attempts > 5: print "Tried to get image details 5 times and failed, exiting" raise Exception("Unable to get AMI image id from AWS using the image name") time.sleep(10) print "No images defined returned yet, will try another query" detail_query_attempts += 1 image_id = image_details['ImageId'] print "located image id: {}".format(image_id) return image_id def copy_ami_to_new_name(self, ami_id, new_name, source_region='us-east-1'): """ Copies an AMI from the default region and name to the desired name and region :param ami_id: ami id to copy :param new_name: name of the new ami to create :param source_region: the source region of the ami to copy """ new_image_ids = [] for region in self.aws_regions: copy_img_cmd = "aws ec2 copy-image --source-image-id {} --profile {} --source-region {} --region {} --name {}"\ .format(ami_id, self.aws_project, source_region, region, new_name) res = subprocess.check_output(shlex.split(copy_img_cmd)) print "Copy cmd returned: {}".format(res) new_image_id = json.loads(res).get('ImageId') new_image_ids.append((new_image_id, region)) print "new image Id is: {}".format(new_image_id) print "monitoring the copies for the following regions/id : {}".format(new_image_ids) for tupp in new_image_ids: image_id = 
tupp[0] image_region = tupp[1] self.wait_for_copy_available(image_id, image_region) def deregister_image(self, ami_id, region='us-east-1'): """ Deregister an AMI by id :param ami_id: :param region: region to deregister from :return: """ deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}"\ .format(self.aws_project, region, ami_id) print "De-registering old image, now that the new one exists." print "De-registering cmd: {}".format(deregister_cmd) res = subprocess.check_output(shlex.split(deregister_cmd)) print "Response: {}".format(res) print "Not monitoring de-register command" def wait_for_copy_available(self, image_id, region): """ Wait for the newly copied ami to become available :param image_id: image id to monitor :param region: region to monitor copy """ waiting = True describe_image_cmd = "aws ec2 --profile {} --region {} --output json describe-images --image-id {}"\ .format(self.aws_project, region, image_id) while waiting: res = subprocess.check_output(shlex.split(describe_image_cmd)) print "described image returned: {}".format(res) image_json = parse_image_json(res) image_state = image_json['State'] if image_state == 'available': print "Copied AMI is renamed and ready to use!" return elif image_state == 'failed': print "Copied AMI failed for some reason..." sys.exit(5) else: print "image state is currently: {}".format(image_state) print "Sleeping for 30 seconds..." 
time.sleep(30) def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'): """ Method which renames an ami by copying to a new ami with a new name (only way this is possible in AWS) :param ami_name: :param new_ami_name: :return: """ print "Re-naming/moving AMI to desired name and region" image_id = self.get_image_id_by_name(ami_name, source_region) self.copy_ami_to_new_name(image_id, new_ami_name, source_region) self.deregister_image(image_id, source_region) def create_config_file(self, vmdk_location, description): """ Create the aws import config file :param vmdk_location: location of downloaded VMDK :param description: description to use for config_file creation :return: config file descriptor, config file full path """ description = description format = "vmdk" user_bucket = { "S3Bucket": self.bucket_name, "S3Key": vmdk_location } parent_obj = {'Description': description, 'Format': format, 'UserBucket': user_bucket} obj_list = [parent_obj] temp_fd, temp_file = tempfile.mkstemp() print 'creating tmp file for {} at {}'.format(vmdk_location, temp_file) with os.fdopen(temp_fd, 'w') as f: json.dump(obj_list, f) return temp_fd, temp_file def run_ec2_import(self, config_file_location, description, region='us-east-1'): """ Runs the command to import an uploaded vmdk to aws ec2 :param config_file_location: config file of import param location :param description: description to attach to the import task :return: the import task id for the given ami """ import_cmd = "aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'" \ " --disk-containers file://{}"\ .format(description, self.aws_project, region, config_file_location) try: res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print "Error importing to ec2" print "output: {}".format(e.output) sys.exit(5) print "got res: {}".format(res) res_json = json.loads(res) task_running, import_id = 
self.check_task_status_and_id(res_json) return import_id def upload_to_s3(self, region='us-east-1'): """ Uploads the vmdk file to aws s3 :param file_location: location of vmdk :return: """ s3_import_cmd = "aws s3 cp {} s3://{} --profile '{}' --region {}".format(self.upload_file, self.bucket_name, self.aws_project, region) print "Uploading to bucket {} in s3 with the cmd: {}".format(self.bucket_name, s3_import_cmd) # s3 upload puts DL progress to stderr s3_upload = subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE) while True: progress = s3_upload.stderr.readline() if progress == '' and s3_upload.poll() is not None: break if progress: print (progress) rc = s3_upload.poll() if rc != 0: raise subprocess.CalledProcessError(rc) print "Upload completed successfully" def wait_for_import_to_complete(self, import_id, region='us-east-1'): """ Monitors the status of aws import, waiting for it to complete, or error out :param import_id: id of import task to monitor """ task_running = True while task_running: import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id) res = subprocess.check_output(shlex.split(import_status_cmd)) print "Current status: {}".format(res) res_json = json.loads(res) task_running, image_id = self.check_task_status_and_id(res_json) @staticmethod def check_task_status_and_id(task_json): """ Read status of import json and parse :param task_json: status json to parse :return: (stillRunning, imageId) """ if task_json.get('ImportImageTasks') is not None: task = task_json['ImportImageTasks'][0] else: task = task_json current_status = task['Status'] image_id = task['ImportTaskId'] if current_status == 'completed': print "The import has completed succesfully as ID: {}".format(image_id) return False, image_id elif current_status == 'deleting': print "The import job has been cancelled for some reason" return False, None elif current_status 
== 'deleted': print "The import job was cancelled" return False, None else: print "The current import job for id {} status is: {}".format(image_id, current_status) print "sleeping for 30 seconds" time.sleep(30) return True, image_id def import_vmdk(self): """ All actions necessary to import vmdk (calls s3 upload, and import to aws ec2) :param vmdk_location: location of vmdk to import. Can be provided as a string, or the result output of fabric execution :return: """ # Set the inital upload to be the first region in the list first_upload_region = self.aws_regions[0] print "Initial AMI will be created in: {}".format(first_upload_region) self.upload_to_s3(region=first_upload_region) # If the upload was successful, the name to reference for import is now the basename description = "AMI upload of: {}".format(os.path.basename(self.upload_file)) temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description) import_id = self.run_ec2_import(file_location, description, first_upload_region) self.wait_for_import_to_complete(import_id) self.rename_image(import_id, self.ami_name, source_region=first_upload_region) return import_id
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
AWSUtils.validate_bucket
python
def validate_bucket(self): s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project, self.aws_regions[0]) print "Checking for s3 bucket" try: subprocess.check_output(shlex.split(s3_check_cmd)) except subprocess.CalledProcessError as e: print "Error: {}".format(e) print "Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"\ .format(self.bucket_name) sys.exit(5)
Do a quick check to see if the s3 bucket is valid :return:
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L83-L97
null
class AWSUtils: """ Methods necessary to perform VM imports """ def __init__(self, config_save_dir, aws_profile, bucket, regions, ami_name, upload_file): """ Instantiate with common properties for all VM imports to AWS :param config_save_dir: where to save aws config files :param aws_project: which aws_project to upload to :param profile: which aws credential profile to use :param region: which aws region to impot AMI into """ self.aws_project = aws_profile self.aws_regions = regions self.config_save_dir = config_save_dir self.bucket_name = bucket self.ami_name = ami_name self.upload_file = upload_file def validate(self): """ Call instance validation methods :return: """ self.validate_regions() self.validate_bucket() self.validate_ec2_action() def validate_regions(self): """ Validate the user specified regions are valid :return: """ for region in self.aws_regions: if region not in aws_regions: print "Error: Specified region: {} is not a valid aws_region".format(region) print "Valid regions are: {}".format(aws_regions) def validate_ec2_action(self): """ Attempt to validate that the provided user has permissions to import an AMI :return: """ import_cmd = 'aws ec2 import-image --dry-run --profile {} --region {}'\ .format(self.aws_project, self.aws_regions[0]) print "Attempting ec2 import dry run: {}".format(import_cmd) try: subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: if "(DryRunOperation)" in e.output: # If it failed because of a dry run (what we asked for) then this except can be ignored print "Dry run operation successful!" 
return print "Error: {}".format(e.output) print "It doesn't seem like your user has the required permissions to import an ami image from s3" sys.exit(5) def get_image_id_by_name(self, ami_name, region='us-east-1'): """ Locate an AMI image id by name in a particular region :param ami_name: ami name you need the id for :param region: the region the image exists in :return: id of the image """ image_details = None detail_query_attempts = 0 while image_details is None: describe_cmd = "aws ec2 describe-images --filters 'Name=name,Values={}' --profile '{}' --region {}"\ .format(ami_name, self.aws_project, region) res = subprocess.check_output(shlex.split(describe_cmd)) print "describe command returned: {}".format(res) image_details = parse_image_json(res) if not image_details: if detail_query_attempts > 5: print "Tried to get image details 5 times and failed, exiting" raise Exception("Unable to get AMI image id from AWS using the image name") time.sleep(10) print "No images defined returned yet, will try another query" detail_query_attempts += 1 image_id = image_details['ImageId'] print "located image id: {}".format(image_id) return image_id def copy_ami_to_new_name(self, ami_id, new_name, source_region='us-east-1'): """ Copies an AMI from the default region and name to the desired name and region :param ami_id: ami id to copy :param new_name: name of the new ami to create :param source_region: the source region of the ami to copy """ new_image_ids = [] for region in self.aws_regions: copy_img_cmd = "aws ec2 copy-image --source-image-id {} --profile {} --source-region {} --region {} --name {}"\ .format(ami_id, self.aws_project, source_region, region, new_name) res = subprocess.check_output(shlex.split(copy_img_cmd)) print "Copy cmd returned: {}".format(res) new_image_id = json.loads(res).get('ImageId') new_image_ids.append((new_image_id, region)) print "new image Id is: {}".format(new_image_id) print "monitoring the copies for the following regions/id : 
{}".format(new_image_ids) for tupp in new_image_ids: image_id = tupp[0] image_region = tupp[1] self.wait_for_copy_available(image_id, image_region) def deregister_image(self, ami_id, region='us-east-1'): """ Deregister an AMI by id :param ami_id: :param region: region to deregister from :return: """ deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}"\ .format(self.aws_project, region, ami_id) print "De-registering old image, now that the new one exists." print "De-registering cmd: {}".format(deregister_cmd) res = subprocess.check_output(shlex.split(deregister_cmd)) print "Response: {}".format(res) print "Not monitoring de-register command" def wait_for_copy_available(self, image_id, region): """ Wait for the newly copied ami to become available :param image_id: image id to monitor :param region: region to monitor copy """ waiting = True describe_image_cmd = "aws ec2 --profile {} --region {} --output json describe-images --image-id {}"\ .format(self.aws_project, region, image_id) while waiting: res = subprocess.check_output(shlex.split(describe_image_cmd)) print "described image returned: {}".format(res) image_json = parse_image_json(res) image_state = image_json['State'] if image_state == 'available': print "Copied AMI is renamed and ready to use!" return elif image_state == 'failed': print "Copied AMI failed for some reason..." sys.exit(5) else: print "image state is currently: {}".format(image_state) print "Sleeping for 30 seconds..." 
time.sleep(30) def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'): """ Method which renames an ami by copying to a new ami with a new name (only way this is possible in AWS) :param ami_name: :param new_ami_name: :return: """ print "Re-naming/moving AMI to desired name and region" image_id = self.get_image_id_by_name(ami_name, source_region) self.copy_ami_to_new_name(image_id, new_ami_name, source_region) self.deregister_image(image_id, source_region) def create_config_file(self, vmdk_location, description): """ Create the aws import config file :param vmdk_location: location of downloaded VMDK :param description: description to use for config_file creation :return: config file descriptor, config file full path """ description = description format = "vmdk" user_bucket = { "S3Bucket": self.bucket_name, "S3Key": vmdk_location } parent_obj = {'Description': description, 'Format': format, 'UserBucket': user_bucket} obj_list = [parent_obj] temp_fd, temp_file = tempfile.mkstemp() print 'creating tmp file for {} at {}'.format(vmdk_location, temp_file) with os.fdopen(temp_fd, 'w') as f: json.dump(obj_list, f) return temp_fd, temp_file def run_ec2_import(self, config_file_location, description, region='us-east-1'): """ Runs the command to import an uploaded vmdk to aws ec2 :param config_file_location: config file of import param location :param description: description to attach to the import task :return: the import task id for the given ami """ import_cmd = "aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'" \ " --disk-containers file://{}"\ .format(description, self.aws_project, region, config_file_location) try: res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print "Error importing to ec2" print "output: {}".format(e.output) sys.exit(5) print "got res: {}".format(res) res_json = json.loads(res) task_running, import_id = 
self.check_task_status_and_id(res_json) return import_id def upload_to_s3(self, region='us-east-1'): """ Uploads the vmdk file to aws s3 :param file_location: location of vmdk :return: """ s3_import_cmd = "aws s3 cp {} s3://{} --profile '{}' --region {}".format(self.upload_file, self.bucket_name, self.aws_project, region) print "Uploading to bucket {} in s3 with the cmd: {}".format(self.bucket_name, s3_import_cmd) # s3 upload puts DL progress to stderr s3_upload = subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE) while True: progress = s3_upload.stderr.readline() if progress == '' and s3_upload.poll() is not None: break if progress: print (progress) rc = s3_upload.poll() if rc != 0: raise subprocess.CalledProcessError(rc) print "Upload completed successfully" def wait_for_import_to_complete(self, import_id, region='us-east-1'): """ Monitors the status of aws import, waiting for it to complete, or error out :param import_id: id of import task to monitor """ task_running = True while task_running: import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id) res = subprocess.check_output(shlex.split(import_status_cmd)) print "Current status: {}".format(res) res_json = json.loads(res) task_running, image_id = self.check_task_status_and_id(res_json) @staticmethod def check_task_status_and_id(task_json): """ Read status of import json and parse :param task_json: status json to parse :return: (stillRunning, imageId) """ if task_json.get('ImportImageTasks') is not None: task = task_json['ImportImageTasks'][0] else: task = task_json current_status = task['Status'] image_id = task['ImportTaskId'] if current_status == 'completed': print "The import has completed succesfully as ID: {}".format(image_id) return False, image_id elif current_status == 'deleting': print "The import job has been cancelled for some reason" return False, None elif current_status 
== 'deleted': print "The import job was cancelled" return False, None else: print "The current import job for id {} status is: {}".format(image_id, current_status) print "sleeping for 30 seconds" time.sleep(30) return True, image_id def import_vmdk(self): """ All actions necessary to import vmdk (calls s3 upload, and import to aws ec2) :param vmdk_location: location of vmdk to import. Can be provided as a string, or the result output of fabric execution :return: """ # Set the inital upload to be the first region in the list first_upload_region = self.aws_regions[0] print "Initial AMI will be created in: {}".format(first_upload_region) self.upload_to_s3(region=first_upload_region) # If the upload was successful, the name to reference for import is now the basename description = "AMI upload of: {}".format(os.path.basename(self.upload_file)) temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description) import_id = self.run_ec2_import(file_location, description, first_upload_region) self.wait_for_import_to_complete(import_id) self.rename_image(import_id, self.ami_name, source_region=first_upload_region) return import_id
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
AWSUtils.get_image_id_by_name
python
def get_image_id_by_name(self, ami_name, region='us-east-1'): image_details = None detail_query_attempts = 0 while image_details is None: describe_cmd = "aws ec2 describe-images --filters 'Name=name,Values={}' --profile '{}' --region {}"\ .format(ami_name, self.aws_project, region) res = subprocess.check_output(shlex.split(describe_cmd)) print "describe command returned: {}".format(res) image_details = parse_image_json(res) if not image_details: if detail_query_attempts > 5: print "Tried to get image details 5 times and failed, exiting" raise Exception("Unable to get AMI image id from AWS using the image name") time.sleep(10) print "No images defined returned yet, will try another query" detail_query_attempts += 1 image_id = image_details['ImageId'] print "located image id: {}".format(image_id) return image_id
Locate an AMI image id by name in a particular region :param ami_name: ami name you need the id for :param region: the region the image exists in :return: id of the image
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L99-L127
[ "def parse_image_json(text):\n \"\"\"\n parses response output of AWS describe commands and returns the first (and only) item in array\n :param text: describe output\n :return: image json\n \"\"\"\n image_details = json.loads(text)\n if image_details.get('Images') is not None:\n try:\n image_details = image_details.get('Images')[0]\n except IndexError:\n image_details = None\n return image_details\n" ]
class AWSUtils: """ Methods necessary to perform VM imports """ def __init__(self, config_save_dir, aws_profile, bucket, regions, ami_name, upload_file): """ Instantiate with common properties for all VM imports to AWS :param config_save_dir: where to save aws config files :param aws_project: which aws_project to upload to :param profile: which aws credential profile to use :param region: which aws region to impot AMI into """ self.aws_project = aws_profile self.aws_regions = regions self.config_save_dir = config_save_dir self.bucket_name = bucket self.ami_name = ami_name self.upload_file = upload_file def validate(self): """ Call instance validation methods :return: """ self.validate_regions() self.validate_bucket() self.validate_ec2_action() def validate_regions(self): """ Validate the user specified regions are valid :return: """ for region in self.aws_regions: if region not in aws_regions: print "Error: Specified region: {} is not a valid aws_region".format(region) print "Valid regions are: {}".format(aws_regions) def validate_ec2_action(self): """ Attempt to validate that the provided user has permissions to import an AMI :return: """ import_cmd = 'aws ec2 import-image --dry-run --profile {} --region {}'\ .format(self.aws_project, self.aws_regions[0]) print "Attempting ec2 import dry run: {}".format(import_cmd) try: subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: if "(DryRunOperation)" in e.output: # If it failed because of a dry run (what we asked for) then this except can be ignored print "Dry run operation successful!" 
return print "Error: {}".format(e.output) print "It doesn't seem like your user has the required permissions to import an ami image from s3" sys.exit(5) def validate_bucket(self): """ Do a quick check to see if the s3 bucket is valid :return: """ s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project, self.aws_regions[0]) print "Checking for s3 bucket" try: subprocess.check_output(shlex.split(s3_check_cmd)) except subprocess.CalledProcessError as e: print "Error: {}".format(e) print "Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"\ .format(self.bucket_name) sys.exit(5) def copy_ami_to_new_name(self, ami_id, new_name, source_region='us-east-1'): """ Copies an AMI from the default region and name to the desired name and region :param ami_id: ami id to copy :param new_name: name of the new ami to create :param source_region: the source region of the ami to copy """ new_image_ids = [] for region in self.aws_regions: copy_img_cmd = "aws ec2 copy-image --source-image-id {} --profile {} --source-region {} --region {} --name {}"\ .format(ami_id, self.aws_project, source_region, region, new_name) res = subprocess.check_output(shlex.split(copy_img_cmd)) print "Copy cmd returned: {}".format(res) new_image_id = json.loads(res).get('ImageId') new_image_ids.append((new_image_id, region)) print "new image Id is: {}".format(new_image_id) print "monitoring the copies for the following regions/id : {}".format(new_image_ids) for tupp in new_image_ids: image_id = tupp[0] image_region = tupp[1] self.wait_for_copy_available(image_id, image_region) def deregister_image(self, ami_id, region='us-east-1'): """ Deregister an AMI by id :param ami_id: :param region: region to deregister from :return: """ deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}"\ .format(self.aws_project, region, ami_id) print "De-registering old image, now that the new one exists." 
print "De-registering cmd: {}".format(deregister_cmd) res = subprocess.check_output(shlex.split(deregister_cmd)) print "Response: {}".format(res) print "Not monitoring de-register command" def wait_for_copy_available(self, image_id, region): """ Wait for the newly copied ami to become available :param image_id: image id to monitor :param region: region to monitor copy """ waiting = True describe_image_cmd = "aws ec2 --profile {} --region {} --output json describe-images --image-id {}"\ .format(self.aws_project, region, image_id) while waiting: res = subprocess.check_output(shlex.split(describe_image_cmd)) print "described image returned: {}".format(res) image_json = parse_image_json(res) image_state = image_json['State'] if image_state == 'available': print "Copied AMI is renamed and ready to use!" return elif image_state == 'failed': print "Copied AMI failed for some reason..." sys.exit(5) else: print "image state is currently: {}".format(image_state) print "Sleeping for 30 seconds..." time.sleep(30) def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'): """ Method which renames an ami by copying to a new ami with a new name (only way this is possible in AWS) :param ami_name: :param new_ami_name: :return: """ print "Re-naming/moving AMI to desired name and region" image_id = self.get_image_id_by_name(ami_name, source_region) self.copy_ami_to_new_name(image_id, new_ami_name, source_region) self.deregister_image(image_id, source_region) def create_config_file(self, vmdk_location, description): """ Create the aws import config file :param vmdk_location: location of downloaded VMDK :param description: description to use for config_file creation :return: config file descriptor, config file full path """ description = description format = "vmdk" user_bucket = { "S3Bucket": self.bucket_name, "S3Key": vmdk_location } parent_obj = {'Description': description, 'Format': format, 'UserBucket': user_bucket} obj_list = [parent_obj] temp_fd, temp_file = 
tempfile.mkstemp() print 'creating tmp file for {} at {}'.format(vmdk_location, temp_file) with os.fdopen(temp_fd, 'w') as f: json.dump(obj_list, f) return temp_fd, temp_file def run_ec2_import(self, config_file_location, description, region='us-east-1'): """ Runs the command to import an uploaded vmdk to aws ec2 :param config_file_location: config file of import param location :param description: description to attach to the import task :return: the import task id for the given ami """ import_cmd = "aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'" \ " --disk-containers file://{}"\ .format(description, self.aws_project, region, config_file_location) try: res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print "Error importing to ec2" print "output: {}".format(e.output) sys.exit(5) print "got res: {}".format(res) res_json = json.loads(res) task_running, import_id = self.check_task_status_and_id(res_json) return import_id def upload_to_s3(self, region='us-east-1'): """ Uploads the vmdk file to aws s3 :param file_location: location of vmdk :return: """ s3_import_cmd = "aws s3 cp {} s3://{} --profile '{}' --region {}".format(self.upload_file, self.bucket_name, self.aws_project, region) print "Uploading to bucket {} in s3 with the cmd: {}".format(self.bucket_name, s3_import_cmd) # s3 upload puts DL progress to stderr s3_upload = subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE) while True: progress = s3_upload.stderr.readline() if progress == '' and s3_upload.poll() is not None: break if progress: print (progress) rc = s3_upload.poll() if rc != 0: raise subprocess.CalledProcessError(rc) print "Upload completed successfully" def wait_for_import_to_complete(self, import_id, region='us-east-1'): """ Monitors the status of aws import, waiting for it to complete, or error out :param import_id: id of import task to monitor """ task_running = True 
while task_running: import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id) res = subprocess.check_output(shlex.split(import_status_cmd)) print "Current status: {}".format(res) res_json = json.loads(res) task_running, image_id = self.check_task_status_and_id(res_json) @staticmethod def check_task_status_and_id(task_json): """ Read status of import json and parse :param task_json: status json to parse :return: (stillRunning, imageId) """ if task_json.get('ImportImageTasks') is not None: task = task_json['ImportImageTasks'][0] else: task = task_json current_status = task['Status'] image_id = task['ImportTaskId'] if current_status == 'completed': print "The import has completed succesfully as ID: {}".format(image_id) return False, image_id elif current_status == 'deleting': print "The import job has been cancelled for some reason" return False, None elif current_status == 'deleted': print "The import job was cancelled" return False, None else: print "The current import job for id {} status is: {}".format(image_id, current_status) print "sleeping for 30 seconds" time.sleep(30) return True, image_id def import_vmdk(self): """ All actions necessary to import vmdk (calls s3 upload, and import to aws ec2) :param vmdk_location: location of vmdk to import. 
Can be provided as a string, or the result output of fabric execution :return: """ # Set the inital upload to be the first region in the list first_upload_region = self.aws_regions[0] print "Initial AMI will be created in: {}".format(first_upload_region) self.upload_to_s3(region=first_upload_region) # If the upload was successful, the name to reference for import is now the basename description = "AMI upload of: {}".format(os.path.basename(self.upload_file)) temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description) import_id = self.run_ec2_import(file_location, description, first_upload_region) self.wait_for_import_to_complete(import_id) self.rename_image(import_id, self.ami_name, source_region=first_upload_region) return import_id
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
AWSUtils.copy_ami_to_new_name
python
def copy_ami_to_new_name(self, ami_id, new_name, source_region='us-east-1'): new_image_ids = [] for region in self.aws_regions: copy_img_cmd = "aws ec2 copy-image --source-image-id {} --profile {} --source-region {} --region {} --name {}"\ .format(ami_id, self.aws_project, source_region, region, new_name) res = subprocess.check_output(shlex.split(copy_img_cmd)) print "Copy cmd returned: {}".format(res) new_image_id = json.loads(res).get('ImageId') new_image_ids.append((new_image_id, region)) print "new image Id is: {}".format(new_image_id) print "monitoring the copies for the following regions/id : {}".format(new_image_ids) for tupp in new_image_ids: image_id = tupp[0] image_region = tupp[1] self.wait_for_copy_available(image_id, image_region)
Copies an AMI from the default region and name to the desired name and region :param ami_id: ami id to copy :param new_name: name of the new ami to create :param source_region: the source region of the ami to copy
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L129-L155
[ "def wait_for_copy_available(self, image_id, region):\n \"\"\"\n Wait for the newly copied ami to become available\n :param image_id: image id to monitor\n :param region: region to monitor copy\n \"\"\"\n waiting = True\n\n describe_image_cmd = \"aws ec2 --profile {} --region {} --output json describe-images --image-id {}\"\\\n .format(self.aws_project, region, image_id)\n while waiting:\n res = subprocess.check_output(shlex.split(describe_image_cmd))\n print \"described image returned: {}\".format(res)\n image_json = parse_image_json(res)\n image_state = image_json['State']\n if image_state == 'available':\n print \"Copied AMI is renamed and ready to use!\"\n return\n elif image_state == 'failed':\n print \"Copied AMI failed for some reason...\"\n sys.exit(5)\n else:\n print \"image state is currently: {}\".format(image_state)\n print \"Sleeping for 30 seconds...\"\n time.sleep(30)\n" ]
class AWSUtils: """ Methods necessary to perform VM imports """ def __init__(self, config_save_dir, aws_profile, bucket, regions, ami_name, upload_file): """ Instantiate with common properties for all VM imports to AWS :param config_save_dir: where to save aws config files :param aws_project: which aws_project to upload to :param profile: which aws credential profile to use :param region: which aws region to impot AMI into """ self.aws_project = aws_profile self.aws_regions = regions self.config_save_dir = config_save_dir self.bucket_name = bucket self.ami_name = ami_name self.upload_file = upload_file def validate(self): """ Call instance validation methods :return: """ self.validate_regions() self.validate_bucket() self.validate_ec2_action() def validate_regions(self): """ Validate the user specified regions are valid :return: """ for region in self.aws_regions: if region not in aws_regions: print "Error: Specified region: {} is not a valid aws_region".format(region) print "Valid regions are: {}".format(aws_regions) def validate_ec2_action(self): """ Attempt to validate that the provided user has permissions to import an AMI :return: """ import_cmd = 'aws ec2 import-image --dry-run --profile {} --region {}'\ .format(self.aws_project, self.aws_regions[0]) print "Attempting ec2 import dry run: {}".format(import_cmd) try: subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: if "(DryRunOperation)" in e.output: # If it failed because of a dry run (what we asked for) then this except can be ignored print "Dry run operation successful!" 
return print "Error: {}".format(e.output) print "It doesn't seem like your user has the required permissions to import an ami image from s3" sys.exit(5) def validate_bucket(self): """ Do a quick check to see if the s3 bucket is valid :return: """ s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project, self.aws_regions[0]) print "Checking for s3 bucket" try: subprocess.check_output(shlex.split(s3_check_cmd)) except subprocess.CalledProcessError as e: print "Error: {}".format(e) print "Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"\ .format(self.bucket_name) sys.exit(5) def get_image_id_by_name(self, ami_name, region='us-east-1'): """ Locate an AMI image id by name in a particular region :param ami_name: ami name you need the id for :param region: the region the image exists in :return: id of the image """ image_details = None detail_query_attempts = 0 while image_details is None: describe_cmd = "aws ec2 describe-images --filters 'Name=name,Values={}' --profile '{}' --region {}"\ .format(ami_name, self.aws_project, region) res = subprocess.check_output(shlex.split(describe_cmd)) print "describe command returned: {}".format(res) image_details = parse_image_json(res) if not image_details: if detail_query_attempts > 5: print "Tried to get image details 5 times and failed, exiting" raise Exception("Unable to get AMI image id from AWS using the image name") time.sleep(10) print "No images defined returned yet, will try another query" detail_query_attempts += 1 image_id = image_details['ImageId'] print "located image id: {}".format(image_id) return image_id def deregister_image(self, ami_id, region='us-east-1'): """ Deregister an AMI by id :param ami_id: :param region: region to deregister from :return: """ deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}"\ .format(self.aws_project, region, ami_id) print "De-registering old image, now that 
the new one exists." print "De-registering cmd: {}".format(deregister_cmd) res = subprocess.check_output(shlex.split(deregister_cmd)) print "Response: {}".format(res) print "Not monitoring de-register command" def wait_for_copy_available(self, image_id, region): """ Wait for the newly copied ami to become available :param image_id: image id to monitor :param region: region to monitor copy """ waiting = True describe_image_cmd = "aws ec2 --profile {} --region {} --output json describe-images --image-id {}"\ .format(self.aws_project, region, image_id) while waiting: res = subprocess.check_output(shlex.split(describe_image_cmd)) print "described image returned: {}".format(res) image_json = parse_image_json(res) image_state = image_json['State'] if image_state == 'available': print "Copied AMI is renamed and ready to use!" return elif image_state == 'failed': print "Copied AMI failed for some reason..." sys.exit(5) else: print "image state is currently: {}".format(image_state) print "Sleeping for 30 seconds..." 
time.sleep(30) def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'): """ Method which renames an ami by copying to a new ami with a new name (only way this is possible in AWS) :param ami_name: :param new_ami_name: :return: """ print "Re-naming/moving AMI to desired name and region" image_id = self.get_image_id_by_name(ami_name, source_region) self.copy_ami_to_new_name(image_id, new_ami_name, source_region) self.deregister_image(image_id, source_region) def create_config_file(self, vmdk_location, description): """ Create the aws import config file :param vmdk_location: location of downloaded VMDK :param description: description to use for config_file creation :return: config file descriptor, config file full path """ description = description format = "vmdk" user_bucket = { "S3Bucket": self.bucket_name, "S3Key": vmdk_location } parent_obj = {'Description': description, 'Format': format, 'UserBucket': user_bucket} obj_list = [parent_obj] temp_fd, temp_file = tempfile.mkstemp() print 'creating tmp file for {} at {}'.format(vmdk_location, temp_file) with os.fdopen(temp_fd, 'w') as f: json.dump(obj_list, f) return temp_fd, temp_file def run_ec2_import(self, config_file_location, description, region='us-east-1'): """ Runs the command to import an uploaded vmdk to aws ec2 :param config_file_location: config file of import param location :param description: description to attach to the import task :return: the import task id for the given ami """ import_cmd = "aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'" \ " --disk-containers file://{}"\ .format(description, self.aws_project, region, config_file_location) try: res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print "Error importing to ec2" print "output: {}".format(e.output) sys.exit(5) print "got res: {}".format(res) res_json = json.loads(res) task_running, import_id = 
self.check_task_status_and_id(res_json) return import_id def upload_to_s3(self, region='us-east-1'): """ Uploads the vmdk file to aws s3 :param file_location: location of vmdk :return: """ s3_import_cmd = "aws s3 cp {} s3://{} --profile '{}' --region {}".format(self.upload_file, self.bucket_name, self.aws_project, region) print "Uploading to bucket {} in s3 with the cmd: {}".format(self.bucket_name, s3_import_cmd) # s3 upload puts DL progress to stderr s3_upload = subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE) while True: progress = s3_upload.stderr.readline() if progress == '' and s3_upload.poll() is not None: break if progress: print (progress) rc = s3_upload.poll() if rc != 0: raise subprocess.CalledProcessError(rc) print "Upload completed successfully" def wait_for_import_to_complete(self, import_id, region='us-east-1'): """ Monitors the status of aws import, waiting for it to complete, or error out :param import_id: id of import task to monitor """ task_running = True while task_running: import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id) res = subprocess.check_output(shlex.split(import_status_cmd)) print "Current status: {}".format(res) res_json = json.loads(res) task_running, image_id = self.check_task_status_and_id(res_json) @staticmethod def check_task_status_and_id(task_json): """ Read status of import json and parse :param task_json: status json to parse :return: (stillRunning, imageId) """ if task_json.get('ImportImageTasks') is not None: task = task_json['ImportImageTasks'][0] else: task = task_json current_status = task['Status'] image_id = task['ImportTaskId'] if current_status == 'completed': print "The import has completed succesfully as ID: {}".format(image_id) return False, image_id elif current_status == 'deleting': print "The import job has been cancelled for some reason" return False, None elif current_status 
== 'deleted': print "The import job was cancelled" return False, None else: print "The current import job for id {} status is: {}".format(image_id, current_status) print "sleeping for 30 seconds" time.sleep(30) return True, image_id def import_vmdk(self): """ All actions necessary to import vmdk (calls s3 upload, and import to aws ec2) :param vmdk_location: location of vmdk to import. Can be provided as a string, or the result output of fabric execution :return: """ # Set the inital upload to be the first region in the list first_upload_region = self.aws_regions[0] print "Initial AMI will be created in: {}".format(first_upload_region) self.upload_to_s3(region=first_upload_region) # If the upload was successful, the name to reference for import is now the basename description = "AMI upload of: {}".format(os.path.basename(self.upload_file)) temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description) import_id = self.run_ec2_import(file_location, description, first_upload_region) self.wait_for_import_to_complete(import_id) self.rename_image(import_id, self.ami_name, source_region=first_upload_region) return import_id
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
AWSUtils.deregister_image
python
def deregister_image(self, ami_id, region='us-east-1'): deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}"\ .format(self.aws_project, region, ami_id) print "De-registering old image, now that the new one exists." print "De-registering cmd: {}".format(deregister_cmd) res = subprocess.check_output(shlex.split(deregister_cmd)) print "Response: {}".format(res) print "Not monitoring de-register command"
Deregister an AMI by id :param ami_id: :param region: region to deregister from :return:
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L157-L170
null
class AWSUtils: """ Methods necessary to perform VM imports """ def __init__(self, config_save_dir, aws_profile, bucket, regions, ami_name, upload_file): """ Instantiate with common properties for all VM imports to AWS :param config_save_dir: where to save aws config files :param aws_project: which aws_project to upload to :param profile: which aws credential profile to use :param region: which aws region to impot AMI into """ self.aws_project = aws_profile self.aws_regions = regions self.config_save_dir = config_save_dir self.bucket_name = bucket self.ami_name = ami_name self.upload_file = upload_file def validate(self): """ Call instance validation methods :return: """ self.validate_regions() self.validate_bucket() self.validate_ec2_action() def validate_regions(self): """ Validate the user specified regions are valid :return: """ for region in self.aws_regions: if region not in aws_regions: print "Error: Specified region: {} is not a valid aws_region".format(region) print "Valid regions are: {}".format(aws_regions) def validate_ec2_action(self): """ Attempt to validate that the provided user has permissions to import an AMI :return: """ import_cmd = 'aws ec2 import-image --dry-run --profile {} --region {}'\ .format(self.aws_project, self.aws_regions[0]) print "Attempting ec2 import dry run: {}".format(import_cmd) try: subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: if "(DryRunOperation)" in e.output: # If it failed because of a dry run (what we asked for) then this except can be ignored print "Dry run operation successful!" 
return print "Error: {}".format(e.output) print "It doesn't seem like your user has the required permissions to import an ami image from s3" sys.exit(5) def validate_bucket(self): """ Do a quick check to see if the s3 bucket is valid :return: """ s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project, self.aws_regions[0]) print "Checking for s3 bucket" try: subprocess.check_output(shlex.split(s3_check_cmd)) except subprocess.CalledProcessError as e: print "Error: {}".format(e) print "Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"\ .format(self.bucket_name) sys.exit(5) def get_image_id_by_name(self, ami_name, region='us-east-1'): """ Locate an AMI image id by name in a particular region :param ami_name: ami name you need the id for :param region: the region the image exists in :return: id of the image """ image_details = None detail_query_attempts = 0 while image_details is None: describe_cmd = "aws ec2 describe-images --filters 'Name=name,Values={}' --profile '{}' --region {}"\ .format(ami_name, self.aws_project, region) res = subprocess.check_output(shlex.split(describe_cmd)) print "describe command returned: {}".format(res) image_details = parse_image_json(res) if not image_details: if detail_query_attempts > 5: print "Tried to get image details 5 times and failed, exiting" raise Exception("Unable to get AMI image id from AWS using the image name") time.sleep(10) print "No images defined returned yet, will try another query" detail_query_attempts += 1 image_id = image_details['ImageId'] print "located image id: {}".format(image_id) return image_id def copy_ami_to_new_name(self, ami_id, new_name, source_region='us-east-1'): """ Copies an AMI from the default region and name to the desired name and region :param ami_id: ami id to copy :param new_name: name of the new ami to create :param source_region: the source region of the ami to copy """ new_image_ids = [] for 
region in self.aws_regions: copy_img_cmd = "aws ec2 copy-image --source-image-id {} --profile {} --source-region {} --region {} --name {}"\ .format(ami_id, self.aws_project, source_region, region, new_name) res = subprocess.check_output(shlex.split(copy_img_cmd)) print "Copy cmd returned: {}".format(res) new_image_id = json.loads(res).get('ImageId') new_image_ids.append((new_image_id, region)) print "new image Id is: {}".format(new_image_id) print "monitoring the copies for the following regions/id : {}".format(new_image_ids) for tupp in new_image_ids: image_id = tupp[0] image_region = tupp[1] self.wait_for_copy_available(image_id, image_region) def wait_for_copy_available(self, image_id, region): """ Wait for the newly copied ami to become available :param image_id: image id to monitor :param region: region to monitor copy """ waiting = True describe_image_cmd = "aws ec2 --profile {} --region {} --output json describe-images --image-id {}"\ .format(self.aws_project, region, image_id) while waiting: res = subprocess.check_output(shlex.split(describe_image_cmd)) print "described image returned: {}".format(res) image_json = parse_image_json(res) image_state = image_json['State'] if image_state == 'available': print "Copied AMI is renamed and ready to use!" return elif image_state == 'failed': print "Copied AMI failed for some reason..." sys.exit(5) else: print "image state is currently: {}".format(image_state) print "Sleeping for 30 seconds..." 
time.sleep(30) def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'): """ Method which renames an ami by copying to a new ami with a new name (only way this is possible in AWS) :param ami_name: :param new_ami_name: :return: """ print "Re-naming/moving AMI to desired name and region" image_id = self.get_image_id_by_name(ami_name, source_region) self.copy_ami_to_new_name(image_id, new_ami_name, source_region) self.deregister_image(image_id, source_region) def create_config_file(self, vmdk_location, description): """ Create the aws import config file :param vmdk_location: location of downloaded VMDK :param description: description to use for config_file creation :return: config file descriptor, config file full path """ description = description format = "vmdk" user_bucket = { "S3Bucket": self.bucket_name, "S3Key": vmdk_location } parent_obj = {'Description': description, 'Format': format, 'UserBucket': user_bucket} obj_list = [parent_obj] temp_fd, temp_file = tempfile.mkstemp() print 'creating tmp file for {} at {}'.format(vmdk_location, temp_file) with os.fdopen(temp_fd, 'w') as f: json.dump(obj_list, f) return temp_fd, temp_file def run_ec2_import(self, config_file_location, description, region='us-east-1'): """ Runs the command to import an uploaded vmdk to aws ec2 :param config_file_location: config file of import param location :param description: description to attach to the import task :return: the import task id for the given ami """ import_cmd = "aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'" \ " --disk-containers file://{}"\ .format(description, self.aws_project, region, config_file_location) try: res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print "Error importing to ec2" print "output: {}".format(e.output) sys.exit(5) print "got res: {}".format(res) res_json = json.loads(res) task_running, import_id = 
self.check_task_status_and_id(res_json) return import_id def upload_to_s3(self, region='us-east-1'): """ Uploads the vmdk file to aws s3 :param file_location: location of vmdk :return: """ s3_import_cmd = "aws s3 cp {} s3://{} --profile '{}' --region {}".format(self.upload_file, self.bucket_name, self.aws_project, region) print "Uploading to bucket {} in s3 with the cmd: {}".format(self.bucket_name, s3_import_cmd) # s3 upload puts DL progress to stderr s3_upload = subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE) while True: progress = s3_upload.stderr.readline() if progress == '' and s3_upload.poll() is not None: break if progress: print (progress) rc = s3_upload.poll() if rc != 0: raise subprocess.CalledProcessError(rc) print "Upload completed successfully" def wait_for_import_to_complete(self, import_id, region='us-east-1'): """ Monitors the status of aws import, waiting for it to complete, or error out :param import_id: id of import task to monitor """ task_running = True while task_running: import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id) res = subprocess.check_output(shlex.split(import_status_cmd)) print "Current status: {}".format(res) res_json = json.loads(res) task_running, image_id = self.check_task_status_and_id(res_json) @staticmethod def check_task_status_and_id(task_json): """ Read status of import json and parse :param task_json: status json to parse :return: (stillRunning, imageId) """ if task_json.get('ImportImageTasks') is not None: task = task_json['ImportImageTasks'][0] else: task = task_json current_status = task['Status'] image_id = task['ImportTaskId'] if current_status == 'completed': print "The import has completed succesfully as ID: {}".format(image_id) return False, image_id elif current_status == 'deleting': print "The import job has been cancelled for some reason" return False, None elif current_status 
== 'deleted': print "The import job was cancelled" return False, None else: print "The current import job for id {} status is: {}".format(image_id, current_status) print "sleeping for 30 seconds" time.sleep(30) return True, image_id def import_vmdk(self): """ All actions necessary to import vmdk (calls s3 upload, and import to aws ec2) :param vmdk_location: location of vmdk to import. Can be provided as a string, or the result output of fabric execution :return: """ # Set the inital upload to be the first region in the list first_upload_region = self.aws_regions[0] print "Initial AMI will be created in: {}".format(first_upload_region) self.upload_to_s3(region=first_upload_region) # If the upload was successful, the name to reference for import is now the basename description = "AMI upload of: {}".format(os.path.basename(self.upload_file)) temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description) import_id = self.run_ec2_import(file_location, description, first_upload_region) self.wait_for_import_to_complete(import_id) self.rename_image(import_id, self.ami_name, source_region=first_upload_region) return import_id
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
AWSUtils.wait_for_copy_available
python
def wait_for_copy_available(self, image_id, region): waiting = True describe_image_cmd = "aws ec2 --profile {} --region {} --output json describe-images --image-id {}"\ .format(self.aws_project, region, image_id) while waiting: res = subprocess.check_output(shlex.split(describe_image_cmd)) print "described image returned: {}".format(res) image_json = parse_image_json(res) image_state = image_json['State'] if image_state == 'available': print "Copied AMI is renamed and ready to use!" return elif image_state == 'failed': print "Copied AMI failed for some reason..." sys.exit(5) else: print "image state is currently: {}".format(image_state) print "Sleeping for 30 seconds..." time.sleep(30)
Wait for the newly copied ami to become available :param image_id: image id to monitor :param region: region to monitor copy
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L172-L196
[ "def parse_image_json(text):\n \"\"\"\n parses response output of AWS describe commands and returns the first (and only) item in array\n :param text: describe output\n :return: image json\n \"\"\"\n image_details = json.loads(text)\n if image_details.get('Images') is not None:\n try:\n image_details = image_details.get('Images')[0]\n except IndexError:\n image_details = None\n return image_details\n" ]
class AWSUtils: """ Methods necessary to perform VM imports """ def __init__(self, config_save_dir, aws_profile, bucket, regions, ami_name, upload_file): """ Instantiate with common properties for all VM imports to AWS :param config_save_dir: where to save aws config files :param aws_project: which aws_project to upload to :param profile: which aws credential profile to use :param region: which aws region to impot AMI into """ self.aws_project = aws_profile self.aws_regions = regions self.config_save_dir = config_save_dir self.bucket_name = bucket self.ami_name = ami_name self.upload_file = upload_file def validate(self): """ Call instance validation methods :return: """ self.validate_regions() self.validate_bucket() self.validate_ec2_action() def validate_regions(self): """ Validate the user specified regions are valid :return: """ for region in self.aws_regions: if region not in aws_regions: print "Error: Specified region: {} is not a valid aws_region".format(region) print "Valid regions are: {}".format(aws_regions) def validate_ec2_action(self): """ Attempt to validate that the provided user has permissions to import an AMI :return: """ import_cmd = 'aws ec2 import-image --dry-run --profile {} --region {}'\ .format(self.aws_project, self.aws_regions[0]) print "Attempting ec2 import dry run: {}".format(import_cmd) try: subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: if "(DryRunOperation)" in e.output: # If it failed because of a dry run (what we asked for) then this except can be ignored print "Dry run operation successful!" 
return print "Error: {}".format(e.output) print "It doesn't seem like your user has the required permissions to import an ami image from s3" sys.exit(5) def validate_bucket(self): """ Do a quick check to see if the s3 bucket is valid :return: """ s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project, self.aws_regions[0]) print "Checking for s3 bucket" try: subprocess.check_output(shlex.split(s3_check_cmd)) except subprocess.CalledProcessError as e: print "Error: {}".format(e) print "Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"\ .format(self.bucket_name) sys.exit(5) def get_image_id_by_name(self, ami_name, region='us-east-1'): """ Locate an AMI image id by name in a particular region :param ami_name: ami name you need the id for :param region: the region the image exists in :return: id of the image """ image_details = None detail_query_attempts = 0 while image_details is None: describe_cmd = "aws ec2 describe-images --filters 'Name=name,Values={}' --profile '{}' --region {}"\ .format(ami_name, self.aws_project, region) res = subprocess.check_output(shlex.split(describe_cmd)) print "describe command returned: {}".format(res) image_details = parse_image_json(res) if not image_details: if detail_query_attempts > 5: print "Tried to get image details 5 times and failed, exiting" raise Exception("Unable to get AMI image id from AWS using the image name") time.sleep(10) print "No images defined returned yet, will try another query" detail_query_attempts += 1 image_id = image_details['ImageId'] print "located image id: {}".format(image_id) return image_id def copy_ami_to_new_name(self, ami_id, new_name, source_region='us-east-1'): """ Copies an AMI from the default region and name to the desired name and region :param ami_id: ami id to copy :param new_name: name of the new ami to create :param source_region: the source region of the ami to copy """ new_image_ids = [] for 
region in self.aws_regions: copy_img_cmd = "aws ec2 copy-image --source-image-id {} --profile {} --source-region {} --region {} --name {}"\ .format(ami_id, self.aws_project, source_region, region, new_name) res = subprocess.check_output(shlex.split(copy_img_cmd)) print "Copy cmd returned: {}".format(res) new_image_id = json.loads(res).get('ImageId') new_image_ids.append((new_image_id, region)) print "new image Id is: {}".format(new_image_id) print "monitoring the copies for the following regions/id : {}".format(new_image_ids) for tupp in new_image_ids: image_id = tupp[0] image_region = tupp[1] self.wait_for_copy_available(image_id, image_region) def deregister_image(self, ami_id, region='us-east-1'): """ Deregister an AMI by id :param ami_id: :param region: region to deregister from :return: """ deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}"\ .format(self.aws_project, region, ami_id) print "De-registering old image, now that the new one exists." print "De-registering cmd: {}".format(deregister_cmd) res = subprocess.check_output(shlex.split(deregister_cmd)) print "Response: {}".format(res) print "Not monitoring de-register command" def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'): """ Method which renames an ami by copying to a new ami with a new name (only way this is possible in AWS) :param ami_name: :param new_ami_name: :return: """ print "Re-naming/moving AMI to desired name and region" image_id = self.get_image_id_by_name(ami_name, source_region) self.copy_ami_to_new_name(image_id, new_ami_name, source_region) self.deregister_image(image_id, source_region) def create_config_file(self, vmdk_location, description): """ Create the aws import config file :param vmdk_location: location of downloaded VMDK :param description: description to use for config_file creation :return: config file descriptor, config file full path """ description = description format = "vmdk" user_bucket = { "S3Bucket": 
self.bucket_name, "S3Key": vmdk_location } parent_obj = {'Description': description, 'Format': format, 'UserBucket': user_bucket} obj_list = [parent_obj] temp_fd, temp_file = tempfile.mkstemp() print 'creating tmp file for {} at {}'.format(vmdk_location, temp_file) with os.fdopen(temp_fd, 'w') as f: json.dump(obj_list, f) return temp_fd, temp_file def run_ec2_import(self, config_file_location, description, region='us-east-1'): """ Runs the command to import an uploaded vmdk to aws ec2 :param config_file_location: config file of import param location :param description: description to attach to the import task :return: the import task id for the given ami """ import_cmd = "aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'" \ " --disk-containers file://{}"\ .format(description, self.aws_project, region, config_file_location) try: res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print "Error importing to ec2" print "output: {}".format(e.output) sys.exit(5) print "got res: {}".format(res) res_json = json.loads(res) task_running, import_id = self.check_task_status_and_id(res_json) return import_id def upload_to_s3(self, region='us-east-1'): """ Uploads the vmdk file to aws s3 :param file_location: location of vmdk :return: """ s3_import_cmd = "aws s3 cp {} s3://{} --profile '{}' --region {}".format(self.upload_file, self.bucket_name, self.aws_project, region) print "Uploading to bucket {} in s3 with the cmd: {}".format(self.bucket_name, s3_import_cmd) # s3 upload puts DL progress to stderr s3_upload = subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE) while True: progress = s3_upload.stderr.readline() if progress == '' and s3_upload.poll() is not None: break if progress: print (progress) rc = s3_upload.poll() if rc != 0: raise subprocess.CalledProcessError(rc) print "Upload completed successfully" def wait_for_import_to_complete(self, 
import_id, region='us-east-1'): """ Monitors the status of aws import, waiting for it to complete, or error out :param import_id: id of import task to monitor """ task_running = True while task_running: import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id) res = subprocess.check_output(shlex.split(import_status_cmd)) print "Current status: {}".format(res) res_json = json.loads(res) task_running, image_id = self.check_task_status_and_id(res_json) @staticmethod def check_task_status_and_id(task_json): """ Read status of import json and parse :param task_json: status json to parse :return: (stillRunning, imageId) """ if task_json.get('ImportImageTasks') is not None: task = task_json['ImportImageTasks'][0] else: task = task_json current_status = task['Status'] image_id = task['ImportTaskId'] if current_status == 'completed': print "The import has completed succesfully as ID: {}".format(image_id) return False, image_id elif current_status == 'deleting': print "The import job has been cancelled for some reason" return False, None elif current_status == 'deleted': print "The import job was cancelled" return False, None else: print "The current import job for id {} status is: {}".format(image_id, current_status) print "sleeping for 30 seconds" time.sleep(30) return True, image_id def import_vmdk(self): """ All actions necessary to import vmdk (calls s3 upload, and import to aws ec2) :param vmdk_location: location of vmdk to import. 
Can be provided as a string, or the result output of fabric execution :return: """ # Set the inital upload to be the first region in the list first_upload_region = self.aws_regions[0] print "Initial AMI will be created in: {}".format(first_upload_region) self.upload_to_s3(region=first_upload_region) # If the upload was successful, the name to reference for import is now the basename description = "AMI upload of: {}".format(os.path.basename(self.upload_file)) temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description) import_id = self.run_ec2_import(file_location, description, first_upload_region) self.wait_for_import_to_complete(import_id) self.rename_image(import_id, self.ami_name, source_region=first_upload_region) return import_id
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
AWSUtils.rename_image
python
def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'): print "Re-naming/moving AMI to desired name and region" image_id = self.get_image_id_by_name(ami_name, source_region) self.copy_ami_to_new_name(image_id, new_ami_name, source_region) self.deregister_image(image_id, source_region)
Method which renames an ami by copying to a new ami with a new name (only way this is possible in AWS) :param ami_name: :param new_ami_name: :return:
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L198-L208
[ "def get_image_id_by_name(self, ami_name, region='us-east-1'):\n \"\"\"\n Locate an AMI image id by name in a particular region\n :param ami_name: ami name you need the id for\n :param region: the region the image exists in\n :return: id of the image\n \"\"\"\n image_details = None\n detail_query_attempts = 0\n\n while image_details is None:\n describe_cmd = \"aws ec2 describe-images --filters 'Name=name,Values={}' --profile '{}' --region {}\"\\\n .format(ami_name, self.aws_project, region)\n res = subprocess.check_output(shlex.split(describe_cmd))\n\n print \"describe command returned: {}\".format(res)\n\n image_details = parse_image_json(res)\n if not image_details:\n if detail_query_attempts > 5:\n print \"Tried to get image details 5 times and failed, exiting\"\n raise Exception(\"Unable to get AMI image id from AWS using the image name\")\n time.sleep(10)\n print \"No images defined returned yet, will try another query\"\n detail_query_attempts += 1\n\n image_id = image_details['ImageId']\n print \"located image id: {}\".format(image_id)\n return image_id\n", "def copy_ami_to_new_name(self, ami_id, new_name, source_region='us-east-1'):\n \"\"\"\n Copies an AMI from the default region and name to the desired name and region\n :param ami_id: ami id to copy\n :param new_name: name of the new ami to create\n :param source_region: the source region of the ami to copy\n \"\"\"\n\n new_image_ids = []\n\n for region in self.aws_regions:\n copy_img_cmd = \"aws ec2 copy-image --source-image-id {} --profile {} --source-region {} --region {} --name {}\"\\\n .format(ami_id, self.aws_project, source_region, region, new_name)\n res = subprocess.check_output(shlex.split(copy_img_cmd))\n\n print \"Copy cmd returned: {}\".format(res)\n\n new_image_id = json.loads(res).get('ImageId')\n new_image_ids.append((new_image_id, region))\n\n print \"new image Id is: {}\".format(new_image_id)\n\n print \"monitoring the copies for the following regions/id : {}\".format(new_image_ids)\n 
for tupp in new_image_ids:\n image_id = tupp[0]\n image_region = tupp[1]\n self.wait_for_copy_available(image_id, image_region)\n", "def deregister_image(self, ami_id, region='us-east-1'):\n \"\"\"\n Deregister an AMI by id\n :param ami_id:\n :param region: region to deregister from\n :return:\n \"\"\"\n deregister_cmd = \"aws ec2 --profile {} --region {} deregister-image --image-id {}\"\\\n .format(self.aws_project, region, ami_id)\n print \"De-registering old image, now that the new one exists.\"\n print \"De-registering cmd: {}\".format(deregister_cmd)\n res = subprocess.check_output(shlex.split(deregister_cmd))\n print \"Response: {}\".format(res)\n print \"Not monitoring de-register command\"\n" ]
class AWSUtils: """ Methods necessary to perform VM imports """ def __init__(self, config_save_dir, aws_profile, bucket, regions, ami_name, upload_file): """ Instantiate with common properties for all VM imports to AWS :param config_save_dir: where to save aws config files :param aws_project: which aws_project to upload to :param profile: which aws credential profile to use :param region: which aws region to impot AMI into """ self.aws_project = aws_profile self.aws_regions = regions self.config_save_dir = config_save_dir self.bucket_name = bucket self.ami_name = ami_name self.upload_file = upload_file def validate(self): """ Call instance validation methods :return: """ self.validate_regions() self.validate_bucket() self.validate_ec2_action() def validate_regions(self): """ Validate the user specified regions are valid :return: """ for region in self.aws_regions: if region not in aws_regions: print "Error: Specified region: {} is not a valid aws_region".format(region) print "Valid regions are: {}".format(aws_regions) def validate_ec2_action(self): """ Attempt to validate that the provided user has permissions to import an AMI :return: """ import_cmd = 'aws ec2 import-image --dry-run --profile {} --region {}'\ .format(self.aws_project, self.aws_regions[0]) print "Attempting ec2 import dry run: {}".format(import_cmd) try: subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: if "(DryRunOperation)" in e.output: # If it failed because of a dry run (what we asked for) then this except can be ignored print "Dry run operation successful!" 
return print "Error: {}".format(e.output) print "It doesn't seem like your user has the required permissions to import an ami image from s3" sys.exit(5) def validate_bucket(self): """ Do a quick check to see if the s3 bucket is valid :return: """ s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project, self.aws_regions[0]) print "Checking for s3 bucket" try: subprocess.check_output(shlex.split(s3_check_cmd)) except subprocess.CalledProcessError as e: print "Error: {}".format(e) print "Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"\ .format(self.bucket_name) sys.exit(5) def get_image_id_by_name(self, ami_name, region='us-east-1'): """ Locate an AMI image id by name in a particular region :param ami_name: ami name you need the id for :param region: the region the image exists in :return: id of the image """ image_details = None detail_query_attempts = 0 while image_details is None: describe_cmd = "aws ec2 describe-images --filters 'Name=name,Values={}' --profile '{}' --region {}"\ .format(ami_name, self.aws_project, region) res = subprocess.check_output(shlex.split(describe_cmd)) print "describe command returned: {}".format(res) image_details = parse_image_json(res) if not image_details: if detail_query_attempts > 5: print "Tried to get image details 5 times and failed, exiting" raise Exception("Unable to get AMI image id from AWS using the image name") time.sleep(10) print "No images defined returned yet, will try another query" detail_query_attempts += 1 image_id = image_details['ImageId'] print "located image id: {}".format(image_id) return image_id def copy_ami_to_new_name(self, ami_id, new_name, source_region='us-east-1'): """ Copies an AMI from the default region and name to the desired name and region :param ami_id: ami id to copy :param new_name: name of the new ami to create :param source_region: the source region of the ami to copy """ new_image_ids = [] for 
region in self.aws_regions: copy_img_cmd = "aws ec2 copy-image --source-image-id {} --profile {} --source-region {} --region {} --name {}"\ .format(ami_id, self.aws_project, source_region, region, new_name) res = subprocess.check_output(shlex.split(copy_img_cmd)) print "Copy cmd returned: {}".format(res) new_image_id = json.loads(res).get('ImageId') new_image_ids.append((new_image_id, region)) print "new image Id is: {}".format(new_image_id) print "monitoring the copies for the following regions/id : {}".format(new_image_ids) for tupp in new_image_ids: image_id = tupp[0] image_region = tupp[1] self.wait_for_copy_available(image_id, image_region) def deregister_image(self, ami_id, region='us-east-1'): """ Deregister an AMI by id :param ami_id: :param region: region to deregister from :return: """ deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}"\ .format(self.aws_project, region, ami_id) print "De-registering old image, now that the new one exists." print "De-registering cmd: {}".format(deregister_cmd) res = subprocess.check_output(shlex.split(deregister_cmd)) print "Response: {}".format(res) print "Not monitoring de-register command" def wait_for_copy_available(self, image_id, region): """ Wait for the newly copied ami to become available :param image_id: image id to monitor :param region: region to monitor copy """ waiting = True describe_image_cmd = "aws ec2 --profile {} --region {} --output json describe-images --image-id {}"\ .format(self.aws_project, region, image_id) while waiting: res = subprocess.check_output(shlex.split(describe_image_cmd)) print "described image returned: {}".format(res) image_json = parse_image_json(res) image_state = image_json['State'] if image_state == 'available': print "Copied AMI is renamed and ready to use!" return elif image_state == 'failed': print "Copied AMI failed for some reason..." sys.exit(5) else: print "image state is currently: {}".format(image_state) print "Sleeping for 30 seconds..." 
time.sleep(30) def create_config_file(self, vmdk_location, description): """ Create the aws import config file :param vmdk_location: location of downloaded VMDK :param description: description to use for config_file creation :return: config file descriptor, config file full path """ description = description format = "vmdk" user_bucket = { "S3Bucket": self.bucket_name, "S3Key": vmdk_location } parent_obj = {'Description': description, 'Format': format, 'UserBucket': user_bucket} obj_list = [parent_obj] temp_fd, temp_file = tempfile.mkstemp() print 'creating tmp file for {} at {}'.format(vmdk_location, temp_file) with os.fdopen(temp_fd, 'w') as f: json.dump(obj_list, f) return temp_fd, temp_file def run_ec2_import(self, config_file_location, description, region='us-east-1'): """ Runs the command to import an uploaded vmdk to aws ec2 :param config_file_location: config file of import param location :param description: description to attach to the import task :return: the import task id for the given ami """ import_cmd = "aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'" \ " --disk-containers file://{}"\ .format(description, self.aws_project, region, config_file_location) try: res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print "Error importing to ec2" print "output: {}".format(e.output) sys.exit(5) print "got res: {}".format(res) res_json = json.loads(res) task_running, import_id = self.check_task_status_and_id(res_json) return import_id def upload_to_s3(self, region='us-east-1'): """ Uploads the vmdk file to aws s3 :param file_location: location of vmdk :return: """ s3_import_cmd = "aws s3 cp {} s3://{} --profile '{}' --region {}".format(self.upload_file, self.bucket_name, self.aws_project, region) print "Uploading to bucket {} in s3 with the cmd: {}".format(self.bucket_name, s3_import_cmd) # s3 upload puts DL progress to stderr s3_upload = 
subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE) while True: progress = s3_upload.stderr.readline() if progress == '' and s3_upload.poll() is not None: break if progress: print (progress) rc = s3_upload.poll() if rc != 0: raise subprocess.CalledProcessError(rc) print "Upload completed successfully" def wait_for_import_to_complete(self, import_id, region='us-east-1'): """ Monitors the status of aws import, waiting for it to complete, or error out :param import_id: id of import task to monitor """ task_running = True while task_running: import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id) res = subprocess.check_output(shlex.split(import_status_cmd)) print "Current status: {}".format(res) res_json = json.loads(res) task_running, image_id = self.check_task_status_and_id(res_json) @staticmethod def check_task_status_and_id(task_json): """ Read status of import json and parse :param task_json: status json to parse :return: (stillRunning, imageId) """ if task_json.get('ImportImageTasks') is not None: task = task_json['ImportImageTasks'][0] else: task = task_json current_status = task['Status'] image_id = task['ImportTaskId'] if current_status == 'completed': print "The import has completed succesfully as ID: {}".format(image_id) return False, image_id elif current_status == 'deleting': print "The import job has been cancelled for some reason" return False, None elif current_status == 'deleted': print "The import job was cancelled" return False, None else: print "The current import job for id {} status is: {}".format(image_id, current_status) print "sleeping for 30 seconds" time.sleep(30) return True, image_id def import_vmdk(self): """ All actions necessary to import vmdk (calls s3 upload, and import to aws ec2) :param vmdk_location: location of vmdk to import. 
Can be provided as a string, or the result output of fabric execution :return: """ # Set the inital upload to be the first region in the list first_upload_region = self.aws_regions[0] print "Initial AMI will be created in: {}".format(first_upload_region) self.upload_to_s3(region=first_upload_region) # If the upload was successful, the name to reference for import is now the basename description = "AMI upload of: {}".format(os.path.basename(self.upload_file)) temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description) import_id = self.run_ec2_import(file_location, description, first_upload_region) self.wait_for_import_to_complete(import_id) self.rename_image(import_id, self.ami_name, source_region=first_upload_region) return import_id
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
AWSUtils.create_config_file
python
def create_config_file(self, vmdk_location, description): description = description format = "vmdk" user_bucket = { "S3Bucket": self.bucket_name, "S3Key": vmdk_location } parent_obj = {'Description': description, 'Format': format, 'UserBucket': user_bucket} obj_list = [parent_obj] temp_fd, temp_file = tempfile.mkstemp() print 'creating tmp file for {} at {}'.format(vmdk_location, temp_file) with os.fdopen(temp_fd, 'w') as f: json.dump(obj_list, f) return temp_fd, temp_file
Create the aws import config file :param vmdk_location: location of downloaded VMDK :param description: description to use for config_file creation :return: config file descriptor, config file full path
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L210-L231
null
class AWSUtils: """ Methods necessary to perform VM imports """ def __init__(self, config_save_dir, aws_profile, bucket, regions, ami_name, upload_file): """ Instantiate with common properties for all VM imports to AWS :param config_save_dir: where to save aws config files :param aws_project: which aws_project to upload to :param profile: which aws credential profile to use :param region: which aws region to impot AMI into """ self.aws_project = aws_profile self.aws_regions = regions self.config_save_dir = config_save_dir self.bucket_name = bucket self.ami_name = ami_name self.upload_file = upload_file def validate(self): """ Call instance validation methods :return: """ self.validate_regions() self.validate_bucket() self.validate_ec2_action() def validate_regions(self): """ Validate the user specified regions are valid :return: """ for region in self.aws_regions: if region not in aws_regions: print "Error: Specified region: {} is not a valid aws_region".format(region) print "Valid regions are: {}".format(aws_regions) def validate_ec2_action(self): """ Attempt to validate that the provided user has permissions to import an AMI :return: """ import_cmd = 'aws ec2 import-image --dry-run --profile {} --region {}'\ .format(self.aws_project, self.aws_regions[0]) print "Attempting ec2 import dry run: {}".format(import_cmd) try: subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: if "(DryRunOperation)" in e.output: # If it failed because of a dry run (what we asked for) then this except can be ignored print "Dry run operation successful!" 
return print "Error: {}".format(e.output) print "It doesn't seem like your user has the required permissions to import an ami image from s3" sys.exit(5) def validate_bucket(self): """ Do a quick check to see if the s3 bucket is valid :return: """ s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project, self.aws_regions[0]) print "Checking for s3 bucket" try: subprocess.check_output(shlex.split(s3_check_cmd)) except subprocess.CalledProcessError as e: print "Error: {}".format(e) print "Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"\ .format(self.bucket_name) sys.exit(5) def get_image_id_by_name(self, ami_name, region='us-east-1'): """ Locate an AMI image id by name in a particular region :param ami_name: ami name you need the id for :param region: the region the image exists in :return: id of the image """ image_details = None detail_query_attempts = 0 while image_details is None: describe_cmd = "aws ec2 describe-images --filters 'Name=name,Values={}' --profile '{}' --region {}"\ .format(ami_name, self.aws_project, region) res = subprocess.check_output(shlex.split(describe_cmd)) print "describe command returned: {}".format(res) image_details = parse_image_json(res) if not image_details: if detail_query_attempts > 5: print "Tried to get image details 5 times and failed, exiting" raise Exception("Unable to get AMI image id from AWS using the image name") time.sleep(10) print "No images defined returned yet, will try another query" detail_query_attempts += 1 image_id = image_details['ImageId'] print "located image id: {}".format(image_id) return image_id def copy_ami_to_new_name(self, ami_id, new_name, source_region='us-east-1'): """ Copies an AMI from the default region and name to the desired name and region :param ami_id: ami id to copy :param new_name: name of the new ami to create :param source_region: the source region of the ami to copy """ new_image_ids = [] for 
region in self.aws_regions: copy_img_cmd = "aws ec2 copy-image --source-image-id {} --profile {} --source-region {} --region {} --name {}"\ .format(ami_id, self.aws_project, source_region, region, new_name) res = subprocess.check_output(shlex.split(copy_img_cmd)) print "Copy cmd returned: {}".format(res) new_image_id = json.loads(res).get('ImageId') new_image_ids.append((new_image_id, region)) print "new image Id is: {}".format(new_image_id) print "monitoring the copies for the following regions/id : {}".format(new_image_ids) for tupp in new_image_ids: image_id = tupp[0] image_region = tupp[1] self.wait_for_copy_available(image_id, image_region) def deregister_image(self, ami_id, region='us-east-1'): """ Deregister an AMI by id :param ami_id: :param region: region to deregister from :return: """ deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}"\ .format(self.aws_project, region, ami_id) print "De-registering old image, now that the new one exists." print "De-registering cmd: {}".format(deregister_cmd) res = subprocess.check_output(shlex.split(deregister_cmd)) print "Response: {}".format(res) print "Not monitoring de-register command" def wait_for_copy_available(self, image_id, region): """ Wait for the newly copied ami to become available :param image_id: image id to monitor :param region: region to monitor copy """ waiting = True describe_image_cmd = "aws ec2 --profile {} --region {} --output json describe-images --image-id {}"\ .format(self.aws_project, region, image_id) while waiting: res = subprocess.check_output(shlex.split(describe_image_cmd)) print "described image returned: {}".format(res) image_json = parse_image_json(res) image_state = image_json['State'] if image_state == 'available': print "Copied AMI is renamed and ready to use!" return elif image_state == 'failed': print "Copied AMI failed for some reason..." sys.exit(5) else: print "image state is currently: {}".format(image_state) print "Sleeping for 30 seconds..." 
time.sleep(30) def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'): """ Method which renames an ami by copying to a new ami with a new name (only way this is possible in AWS) :param ami_name: :param new_ami_name: :return: """ print "Re-naming/moving AMI to desired name and region" image_id = self.get_image_id_by_name(ami_name, source_region) self.copy_ami_to_new_name(image_id, new_ami_name, source_region) self.deregister_image(image_id, source_region) def run_ec2_import(self, config_file_location, description, region='us-east-1'): """ Runs the command to import an uploaded vmdk to aws ec2 :param config_file_location: config file of import param location :param description: description to attach to the import task :return: the import task id for the given ami """ import_cmd = "aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'" \ " --disk-containers file://{}"\ .format(description, self.aws_project, region, config_file_location) try: res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print "Error importing to ec2" print "output: {}".format(e.output) sys.exit(5) print "got res: {}".format(res) res_json = json.loads(res) task_running, import_id = self.check_task_status_and_id(res_json) return import_id def upload_to_s3(self, region='us-east-1'): """ Uploads the vmdk file to aws s3 :param file_location: location of vmdk :return: """ s3_import_cmd = "aws s3 cp {} s3://{} --profile '{}' --region {}".format(self.upload_file, self.bucket_name, self.aws_project, region) print "Uploading to bucket {} in s3 with the cmd: {}".format(self.bucket_name, s3_import_cmd) # s3 upload puts DL progress to stderr s3_upload = subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE) while True: progress = s3_upload.stderr.readline() if progress == '' and s3_upload.poll() is not None: break if progress: print (progress) rc = s3_upload.poll() if rc != 
0: raise subprocess.CalledProcessError(rc) print "Upload completed successfully" def wait_for_import_to_complete(self, import_id, region='us-east-1'): """ Monitors the status of aws import, waiting for it to complete, or error out :param import_id: id of import task to monitor """ task_running = True while task_running: import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id) res = subprocess.check_output(shlex.split(import_status_cmd)) print "Current status: {}".format(res) res_json = json.loads(res) task_running, image_id = self.check_task_status_and_id(res_json) @staticmethod def check_task_status_and_id(task_json): """ Read status of import json and parse :param task_json: status json to parse :return: (stillRunning, imageId) """ if task_json.get('ImportImageTasks') is not None: task = task_json['ImportImageTasks'][0] else: task = task_json current_status = task['Status'] image_id = task['ImportTaskId'] if current_status == 'completed': print "The import has completed succesfully as ID: {}".format(image_id) return False, image_id elif current_status == 'deleting': print "The import job has been cancelled for some reason" return False, None elif current_status == 'deleted': print "The import job was cancelled" return False, None else: print "The current import job for id {} status is: {}".format(image_id, current_status) print "sleeping for 30 seconds" time.sleep(30) return True, image_id def import_vmdk(self): """ All actions necessary to import vmdk (calls s3 upload, and import to aws ec2) :param vmdk_location: location of vmdk to import. 
Can be provided as a string, or the result output of fabric execution :return: """ # Set the inital upload to be the first region in the list first_upload_region = self.aws_regions[0] print "Initial AMI will be created in: {}".format(first_upload_region) self.upload_to_s3(region=first_upload_region) # If the upload was successful, the name to reference for import is now the basename description = "AMI upload of: {}".format(os.path.basename(self.upload_file)) temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description) import_id = self.run_ec2_import(file_location, description, first_upload_region) self.wait_for_import_to_complete(import_id) self.rename_image(import_id, self.ami_name, source_region=first_upload_region) return import_id
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
AWSUtils.run_ec2_import
python
def run_ec2_import(self, config_file_location, description, region='us-east-1'): import_cmd = "aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'" \ " --disk-containers file://{}"\ .format(description, self.aws_project, region, config_file_location) try: res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print "Error importing to ec2" print "output: {}".format(e.output) sys.exit(5) print "got res: {}".format(res) res_json = json.loads(res) task_running, import_id = self.check_task_status_and_id(res_json) return import_id
Runs the command to import an uploaded vmdk to aws ec2 :param config_file_location: config file of import param location :param description: description to attach to the import task :return: the import task id for the given ami
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L233-L253
[ "def check_task_status_and_id(task_json):\n \"\"\"\n Read status of import json and parse\n :param task_json: status json to parse\n :return: (stillRunning, imageId)\n \"\"\"\n if task_json.get('ImportImageTasks') is not None:\n task = task_json['ImportImageTasks'][0]\n else:\n task = task_json\n\n current_status = task['Status']\n image_id = task['ImportTaskId']\n if current_status == 'completed':\n print \"The import has completed succesfully as ID: {}\".format(image_id)\n return False, image_id\n elif current_status == 'deleting':\n print \"The import job has been cancelled for some reason\"\n return False, None\n elif current_status == 'deleted':\n print \"The import job was cancelled\"\n return False, None\n else:\n print \"The current import job for id {} status is: {}\".format(image_id, current_status)\n print \"sleeping for 30 seconds\"\n time.sleep(30)\n return True, image_id\n" ]
class AWSUtils: """ Methods necessary to perform VM imports """ def __init__(self, config_save_dir, aws_profile, bucket, regions, ami_name, upload_file): """ Instantiate with common properties for all VM imports to AWS :param config_save_dir: where to save aws config files :param aws_project: which aws_project to upload to :param profile: which aws credential profile to use :param region: which aws region to impot AMI into """ self.aws_project = aws_profile self.aws_regions = regions self.config_save_dir = config_save_dir self.bucket_name = bucket self.ami_name = ami_name self.upload_file = upload_file def validate(self): """ Call instance validation methods :return: """ self.validate_regions() self.validate_bucket() self.validate_ec2_action() def validate_regions(self): """ Validate the user specified regions are valid :return: """ for region in self.aws_regions: if region not in aws_regions: print "Error: Specified region: {} is not a valid aws_region".format(region) print "Valid regions are: {}".format(aws_regions) def validate_ec2_action(self): """ Attempt to validate that the provided user has permissions to import an AMI :return: """ import_cmd = 'aws ec2 import-image --dry-run --profile {} --region {}'\ .format(self.aws_project, self.aws_regions[0]) print "Attempting ec2 import dry run: {}".format(import_cmd) try: subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: if "(DryRunOperation)" in e.output: # If it failed because of a dry run (what we asked for) then this except can be ignored print "Dry run operation successful!" 
return print "Error: {}".format(e.output) print "It doesn't seem like your user has the required permissions to import an ami image from s3" sys.exit(5) def validate_bucket(self): """ Do a quick check to see if the s3 bucket is valid :return: """ s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project, self.aws_regions[0]) print "Checking for s3 bucket" try: subprocess.check_output(shlex.split(s3_check_cmd)) except subprocess.CalledProcessError as e: print "Error: {}".format(e) print "Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"\ .format(self.bucket_name) sys.exit(5) def get_image_id_by_name(self, ami_name, region='us-east-1'): """ Locate an AMI image id by name in a particular region :param ami_name: ami name you need the id for :param region: the region the image exists in :return: id of the image """ image_details = None detail_query_attempts = 0 while image_details is None: describe_cmd = "aws ec2 describe-images --filters 'Name=name,Values={}' --profile '{}' --region {}"\ .format(ami_name, self.aws_project, region) res = subprocess.check_output(shlex.split(describe_cmd)) print "describe command returned: {}".format(res) image_details = parse_image_json(res) if not image_details: if detail_query_attempts > 5: print "Tried to get image details 5 times and failed, exiting" raise Exception("Unable to get AMI image id from AWS using the image name") time.sleep(10) print "No images defined returned yet, will try another query" detail_query_attempts += 1 image_id = image_details['ImageId'] print "located image id: {}".format(image_id) return image_id def copy_ami_to_new_name(self, ami_id, new_name, source_region='us-east-1'): """ Copies an AMI from the default region and name to the desired name and region :param ami_id: ami id to copy :param new_name: name of the new ami to create :param source_region: the source region of the ami to copy """ new_image_ids = [] for 
region in self.aws_regions: copy_img_cmd = "aws ec2 copy-image --source-image-id {} --profile {} --source-region {} --region {} --name {}"\ .format(ami_id, self.aws_project, source_region, region, new_name) res = subprocess.check_output(shlex.split(copy_img_cmd)) print "Copy cmd returned: {}".format(res) new_image_id = json.loads(res).get('ImageId') new_image_ids.append((new_image_id, region)) print "new image Id is: {}".format(new_image_id) print "monitoring the copies for the following regions/id : {}".format(new_image_ids) for tupp in new_image_ids: image_id = tupp[0] image_region = tupp[1] self.wait_for_copy_available(image_id, image_region) def deregister_image(self, ami_id, region='us-east-1'): """ Deregister an AMI by id :param ami_id: :param region: region to deregister from :return: """ deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}"\ .format(self.aws_project, region, ami_id) print "De-registering old image, now that the new one exists." print "De-registering cmd: {}".format(deregister_cmd) res = subprocess.check_output(shlex.split(deregister_cmd)) print "Response: {}".format(res) print "Not monitoring de-register command" def wait_for_copy_available(self, image_id, region): """ Wait for the newly copied ami to become available :param image_id: image id to monitor :param region: region to monitor copy """ waiting = True describe_image_cmd = "aws ec2 --profile {} --region {} --output json describe-images --image-id {}"\ .format(self.aws_project, region, image_id) while waiting: res = subprocess.check_output(shlex.split(describe_image_cmd)) print "described image returned: {}".format(res) image_json = parse_image_json(res) image_state = image_json['State'] if image_state == 'available': print "Copied AMI is renamed and ready to use!" return elif image_state == 'failed': print "Copied AMI failed for some reason..." sys.exit(5) else: print "image state is currently: {}".format(image_state) print "Sleeping for 30 seconds..." 
time.sleep(30) def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'): """ Method which renames an ami by copying to a new ami with a new name (only way this is possible in AWS) :param ami_name: :param new_ami_name: :return: """ print "Re-naming/moving AMI to desired name and region" image_id = self.get_image_id_by_name(ami_name, source_region) self.copy_ami_to_new_name(image_id, new_ami_name, source_region) self.deregister_image(image_id, source_region) def create_config_file(self, vmdk_location, description): """ Create the aws import config file :param vmdk_location: location of downloaded VMDK :param description: description to use for config_file creation :return: config file descriptor, config file full path """ description = description format = "vmdk" user_bucket = { "S3Bucket": self.bucket_name, "S3Key": vmdk_location } parent_obj = {'Description': description, 'Format': format, 'UserBucket': user_bucket} obj_list = [parent_obj] temp_fd, temp_file = tempfile.mkstemp() print 'creating tmp file for {} at {}'.format(vmdk_location, temp_file) with os.fdopen(temp_fd, 'w') as f: json.dump(obj_list, f) return temp_fd, temp_file def upload_to_s3(self, region='us-east-1'): """ Uploads the vmdk file to aws s3 :param file_location: location of vmdk :return: """ s3_import_cmd = "aws s3 cp {} s3://{} --profile '{}' --region {}".format(self.upload_file, self.bucket_name, self.aws_project, region) print "Uploading to bucket {} in s3 with the cmd: {}".format(self.bucket_name, s3_import_cmd) # s3 upload puts DL progress to stderr s3_upload = subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE) while True: progress = s3_upload.stderr.readline() if progress == '' and s3_upload.poll() is not None: break if progress: print (progress) rc = s3_upload.poll() if rc != 0: raise subprocess.CalledProcessError(rc) print "Upload completed successfully" def wait_for_import_to_complete(self, import_id, region='us-east-1'): """ Monitors the status of aws 
import, waiting for it to complete, or error out :param import_id: id of import task to monitor """ task_running = True while task_running: import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id) res = subprocess.check_output(shlex.split(import_status_cmd)) print "Current status: {}".format(res) res_json = json.loads(res) task_running, image_id = self.check_task_status_and_id(res_json) @staticmethod def check_task_status_and_id(task_json): """ Read status of import json and parse :param task_json: status json to parse :return: (stillRunning, imageId) """ if task_json.get('ImportImageTasks') is not None: task = task_json['ImportImageTasks'][0] else: task = task_json current_status = task['Status'] image_id = task['ImportTaskId'] if current_status == 'completed': print "The import has completed succesfully as ID: {}".format(image_id) return False, image_id elif current_status == 'deleting': print "The import job has been cancelled for some reason" return False, None elif current_status == 'deleted': print "The import job was cancelled" return False, None else: print "The current import job for id {} status is: {}".format(image_id, current_status) print "sleeping for 30 seconds" time.sleep(30) return True, image_id def import_vmdk(self): """ All actions necessary to import vmdk (calls s3 upload, and import to aws ec2) :param vmdk_location: location of vmdk to import. 
Can be provided as a string, or the result output of fabric execution :return: """ # Set the inital upload to be the first region in the list first_upload_region = self.aws_regions[0] print "Initial AMI will be created in: {}".format(first_upload_region) self.upload_to_s3(region=first_upload_region) # If the upload was successful, the name to reference for import is now the basename description = "AMI upload of: {}".format(os.path.basename(self.upload_file)) temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description) import_id = self.run_ec2_import(file_location, description, first_upload_region) self.wait_for_import_to_complete(import_id) self.rename_image(import_id, self.ami_name, source_region=first_upload_region) return import_id
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
AWSUtils.upload_to_s3
python
def upload_to_s3(self, region='us-east-1'): s3_import_cmd = "aws s3 cp {} s3://{} --profile '{}' --region {}".format(self.upload_file, self.bucket_name, self.aws_project, region) print "Uploading to bucket {} in s3 with the cmd: {}".format(self.bucket_name, s3_import_cmd) # s3 upload puts DL progress to stderr s3_upload = subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE) while True: progress = s3_upload.stderr.readline() if progress == '' and s3_upload.poll() is not None: break if progress: print (progress) rc = s3_upload.poll() if rc != 0: raise subprocess.CalledProcessError(rc) print "Upload completed successfully"
Uploads the vmdk file to aws s3 :param file_location: location of vmdk :return:
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L255-L275
null
class AWSUtils: """ Methods necessary to perform VM imports """ def __init__(self, config_save_dir, aws_profile, bucket, regions, ami_name, upload_file): """ Instantiate with common properties for all VM imports to AWS :param config_save_dir: where to save aws config files :param aws_project: which aws_project to upload to :param profile: which aws credential profile to use :param region: which aws region to impot AMI into """ self.aws_project = aws_profile self.aws_regions = regions self.config_save_dir = config_save_dir self.bucket_name = bucket self.ami_name = ami_name self.upload_file = upload_file def validate(self): """ Call instance validation methods :return: """ self.validate_regions() self.validate_bucket() self.validate_ec2_action() def validate_regions(self): """ Validate the user specified regions are valid :return: """ for region in self.aws_regions: if region not in aws_regions: print "Error: Specified region: {} is not a valid aws_region".format(region) print "Valid regions are: {}".format(aws_regions) def validate_ec2_action(self): """ Attempt to validate that the provided user has permissions to import an AMI :return: """ import_cmd = 'aws ec2 import-image --dry-run --profile {} --region {}'\ .format(self.aws_project, self.aws_regions[0]) print "Attempting ec2 import dry run: {}".format(import_cmd) try: subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: if "(DryRunOperation)" in e.output: # If it failed because of a dry run (what we asked for) then this except can be ignored print "Dry run operation successful!" 
return print "Error: {}".format(e.output) print "It doesn't seem like your user has the required permissions to import an ami image from s3" sys.exit(5) def validate_bucket(self): """ Do a quick check to see if the s3 bucket is valid :return: """ s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project, self.aws_regions[0]) print "Checking for s3 bucket" try: subprocess.check_output(shlex.split(s3_check_cmd)) except subprocess.CalledProcessError as e: print "Error: {}".format(e) print "Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"\ .format(self.bucket_name) sys.exit(5) def get_image_id_by_name(self, ami_name, region='us-east-1'): """ Locate an AMI image id by name in a particular region :param ami_name: ami name you need the id for :param region: the region the image exists in :return: id of the image """ image_details = None detail_query_attempts = 0 while image_details is None: describe_cmd = "aws ec2 describe-images --filters 'Name=name,Values={}' --profile '{}' --region {}"\ .format(ami_name, self.aws_project, region) res = subprocess.check_output(shlex.split(describe_cmd)) print "describe command returned: {}".format(res) image_details = parse_image_json(res) if not image_details: if detail_query_attempts > 5: print "Tried to get image details 5 times and failed, exiting" raise Exception("Unable to get AMI image id from AWS using the image name") time.sleep(10) print "No images defined returned yet, will try another query" detail_query_attempts += 1 image_id = image_details['ImageId'] print "located image id: {}".format(image_id) return image_id def copy_ami_to_new_name(self, ami_id, new_name, source_region='us-east-1'): """ Copies an AMI from the default region and name to the desired name and region :param ami_id: ami id to copy :param new_name: name of the new ami to create :param source_region: the source region of the ami to copy """ new_image_ids = [] for 
region in self.aws_regions: copy_img_cmd = "aws ec2 copy-image --source-image-id {} --profile {} --source-region {} --region {} --name {}"\ .format(ami_id, self.aws_project, source_region, region, new_name) res = subprocess.check_output(shlex.split(copy_img_cmd)) print "Copy cmd returned: {}".format(res) new_image_id = json.loads(res).get('ImageId') new_image_ids.append((new_image_id, region)) print "new image Id is: {}".format(new_image_id) print "monitoring the copies for the following regions/id : {}".format(new_image_ids) for tupp in new_image_ids: image_id = tupp[0] image_region = tupp[1] self.wait_for_copy_available(image_id, image_region) def deregister_image(self, ami_id, region='us-east-1'): """ Deregister an AMI by id :param ami_id: :param region: region to deregister from :return: """ deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}"\ .format(self.aws_project, region, ami_id) print "De-registering old image, now that the new one exists." print "De-registering cmd: {}".format(deregister_cmd) res = subprocess.check_output(shlex.split(deregister_cmd)) print "Response: {}".format(res) print "Not monitoring de-register command" def wait_for_copy_available(self, image_id, region): """ Wait for the newly copied ami to become available :param image_id: image id to monitor :param region: region to monitor copy """ waiting = True describe_image_cmd = "aws ec2 --profile {} --region {} --output json describe-images --image-id {}"\ .format(self.aws_project, region, image_id) while waiting: res = subprocess.check_output(shlex.split(describe_image_cmd)) print "described image returned: {}".format(res) image_json = parse_image_json(res) image_state = image_json['State'] if image_state == 'available': print "Copied AMI is renamed and ready to use!" return elif image_state == 'failed': print "Copied AMI failed for some reason..." sys.exit(5) else: print "image state is currently: {}".format(image_state) print "Sleeping for 30 seconds..." 
time.sleep(30) def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'): """ Method which renames an ami by copying to a new ami with a new name (only way this is possible in AWS) :param ami_name: :param new_ami_name: :return: """ print "Re-naming/moving AMI to desired name and region" image_id = self.get_image_id_by_name(ami_name, source_region) self.copy_ami_to_new_name(image_id, new_ami_name, source_region) self.deregister_image(image_id, source_region) def create_config_file(self, vmdk_location, description): """ Create the aws import config file :param vmdk_location: location of downloaded VMDK :param description: description to use for config_file creation :return: config file descriptor, config file full path """ description = description format = "vmdk" user_bucket = { "S3Bucket": self.bucket_name, "S3Key": vmdk_location } parent_obj = {'Description': description, 'Format': format, 'UserBucket': user_bucket} obj_list = [parent_obj] temp_fd, temp_file = tempfile.mkstemp() print 'creating tmp file for {} at {}'.format(vmdk_location, temp_file) with os.fdopen(temp_fd, 'w') as f: json.dump(obj_list, f) return temp_fd, temp_file def run_ec2_import(self, config_file_location, description, region='us-east-1'): """ Runs the command to import an uploaded vmdk to aws ec2 :param config_file_location: config file of import param location :param description: description to attach to the import task :return: the import task id for the given ami """ import_cmd = "aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'" \ " --disk-containers file://{}"\ .format(description, self.aws_project, region, config_file_location) try: res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print "Error importing to ec2" print "output: {}".format(e.output) sys.exit(5) print "got res: {}".format(res) res_json = json.loads(res) task_running, import_id = 
self.check_task_status_and_id(res_json) return import_id def wait_for_import_to_complete(self, import_id, region='us-east-1'): """ Monitors the status of aws import, waiting for it to complete, or error out :param import_id: id of import task to monitor """ task_running = True while task_running: import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id) res = subprocess.check_output(shlex.split(import_status_cmd)) print "Current status: {}".format(res) res_json = json.loads(res) task_running, image_id = self.check_task_status_and_id(res_json) @staticmethod def check_task_status_and_id(task_json): """ Read status of import json and parse :param task_json: status json to parse :return: (stillRunning, imageId) """ if task_json.get('ImportImageTasks') is not None: task = task_json['ImportImageTasks'][0] else: task = task_json current_status = task['Status'] image_id = task['ImportTaskId'] if current_status == 'completed': print "The import has completed succesfully as ID: {}".format(image_id) return False, image_id elif current_status == 'deleting': print "The import job has been cancelled for some reason" return False, None elif current_status == 'deleted': print "The import job was cancelled" return False, None else: print "The current import job for id {} status is: {}".format(image_id, current_status) print "sleeping for 30 seconds" time.sleep(30) return True, image_id def import_vmdk(self): """ All actions necessary to import vmdk (calls s3 upload, and import to aws ec2) :param vmdk_location: location of vmdk to import. 
Can be provided as a string, or the result output of fabric execution :return: """ # Set the inital upload to be the first region in the list first_upload_region = self.aws_regions[0] print "Initial AMI will be created in: {}".format(first_upload_region) self.upload_to_s3(region=first_upload_region) # If the upload was successful, the name to reference for import is now the basename description = "AMI upload of: {}".format(os.path.basename(self.upload_file)) temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description) import_id = self.run_ec2_import(file_location, description, first_upload_region) self.wait_for_import_to_complete(import_id) self.rename_image(import_id, self.ami_name, source_region=first_upload_region) return import_id
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
AWSUtils.wait_for_import_to_complete
python
def wait_for_import_to_complete(self, import_id, region='us-east-1'): task_running = True while task_running: import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id) res = subprocess.check_output(shlex.split(import_status_cmd)) print "Current status: {}".format(res) res_json = json.loads(res) task_running, image_id = self.check_task_status_and_id(res_json)
Monitors the status of aws import, waiting for it to complete, or error out :param import_id: id of import task to monitor
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L277-L288
[ "def check_task_status_and_id(task_json):\n \"\"\"\n Read status of import json and parse\n :param task_json: status json to parse\n :return: (stillRunning, imageId)\n \"\"\"\n if task_json.get('ImportImageTasks') is not None:\n task = task_json['ImportImageTasks'][0]\n else:\n task = task_json\n\n current_status = task['Status']\n image_id = task['ImportTaskId']\n if current_status == 'completed':\n print \"The import has completed succesfully as ID: {}\".format(image_id)\n return False, image_id\n elif current_status == 'deleting':\n print \"The import job has been cancelled for some reason\"\n return False, None\n elif current_status == 'deleted':\n print \"The import job was cancelled\"\n return False, None\n else:\n print \"The current import job for id {} status is: {}\".format(image_id, current_status)\n print \"sleeping for 30 seconds\"\n time.sleep(30)\n return True, image_id\n" ]
class AWSUtils: """ Methods necessary to perform VM imports """ def __init__(self, config_save_dir, aws_profile, bucket, regions, ami_name, upload_file): """ Instantiate with common properties for all VM imports to AWS :param config_save_dir: where to save aws config files :param aws_project: which aws_project to upload to :param profile: which aws credential profile to use :param region: which aws region to impot AMI into """ self.aws_project = aws_profile self.aws_regions = regions self.config_save_dir = config_save_dir self.bucket_name = bucket self.ami_name = ami_name self.upload_file = upload_file def validate(self): """ Call instance validation methods :return: """ self.validate_regions() self.validate_bucket() self.validate_ec2_action() def validate_regions(self): """ Validate the user specified regions are valid :return: """ for region in self.aws_regions: if region not in aws_regions: print "Error: Specified region: {} is not a valid aws_region".format(region) print "Valid regions are: {}".format(aws_regions) def validate_ec2_action(self): """ Attempt to validate that the provided user has permissions to import an AMI :return: """ import_cmd = 'aws ec2 import-image --dry-run --profile {} --region {}'\ .format(self.aws_project, self.aws_regions[0]) print "Attempting ec2 import dry run: {}".format(import_cmd) try: subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: if "(DryRunOperation)" in e.output: # If it failed because of a dry run (what we asked for) then this except can be ignored print "Dry run operation successful!" 
return print "Error: {}".format(e.output) print "It doesn't seem like your user has the required permissions to import an ami image from s3" sys.exit(5) def validate_bucket(self): """ Do a quick check to see if the s3 bucket is valid :return: """ s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project, self.aws_regions[0]) print "Checking for s3 bucket" try: subprocess.check_output(shlex.split(s3_check_cmd)) except subprocess.CalledProcessError as e: print "Error: {}".format(e) print "Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"\ .format(self.bucket_name) sys.exit(5) def get_image_id_by_name(self, ami_name, region='us-east-1'): """ Locate an AMI image id by name in a particular region :param ami_name: ami name you need the id for :param region: the region the image exists in :return: id of the image """ image_details = None detail_query_attempts = 0 while image_details is None: describe_cmd = "aws ec2 describe-images --filters 'Name=name,Values={}' --profile '{}' --region {}"\ .format(ami_name, self.aws_project, region) res = subprocess.check_output(shlex.split(describe_cmd)) print "describe command returned: {}".format(res) image_details = parse_image_json(res) if not image_details: if detail_query_attempts > 5: print "Tried to get image details 5 times and failed, exiting" raise Exception("Unable to get AMI image id from AWS using the image name") time.sleep(10) print "No images defined returned yet, will try another query" detail_query_attempts += 1 image_id = image_details['ImageId'] print "located image id: {}".format(image_id) return image_id def copy_ami_to_new_name(self, ami_id, new_name, source_region='us-east-1'): """ Copies an AMI from the default region and name to the desired name and region :param ami_id: ami id to copy :param new_name: name of the new ami to create :param source_region: the source region of the ami to copy """ new_image_ids = [] for 
region in self.aws_regions: copy_img_cmd = "aws ec2 copy-image --source-image-id {} --profile {} --source-region {} --region {} --name {}"\ .format(ami_id, self.aws_project, source_region, region, new_name) res = subprocess.check_output(shlex.split(copy_img_cmd)) print "Copy cmd returned: {}".format(res) new_image_id = json.loads(res).get('ImageId') new_image_ids.append((new_image_id, region)) print "new image Id is: {}".format(new_image_id) print "monitoring the copies for the following regions/id : {}".format(new_image_ids) for tupp in new_image_ids: image_id = tupp[0] image_region = tupp[1] self.wait_for_copy_available(image_id, image_region) def deregister_image(self, ami_id, region='us-east-1'): """ Deregister an AMI by id :param ami_id: :param region: region to deregister from :return: """ deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}"\ .format(self.aws_project, region, ami_id) print "De-registering old image, now that the new one exists." print "De-registering cmd: {}".format(deregister_cmd) res = subprocess.check_output(shlex.split(deregister_cmd)) print "Response: {}".format(res) print "Not monitoring de-register command" def wait_for_copy_available(self, image_id, region): """ Wait for the newly copied ami to become available :param image_id: image id to monitor :param region: region to monitor copy """ waiting = True describe_image_cmd = "aws ec2 --profile {} --region {} --output json describe-images --image-id {}"\ .format(self.aws_project, region, image_id) while waiting: res = subprocess.check_output(shlex.split(describe_image_cmd)) print "described image returned: {}".format(res) image_json = parse_image_json(res) image_state = image_json['State'] if image_state == 'available': print "Copied AMI is renamed and ready to use!" return elif image_state == 'failed': print "Copied AMI failed for some reason..." sys.exit(5) else: print "image state is currently: {}".format(image_state) print "Sleeping for 30 seconds..." 
time.sleep(30) def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'): """ Method which renames an ami by copying to a new ami with a new name (only way this is possible in AWS) :param ami_name: :param new_ami_name: :return: """ print "Re-naming/moving AMI to desired name and region" image_id = self.get_image_id_by_name(ami_name, source_region) self.copy_ami_to_new_name(image_id, new_ami_name, source_region) self.deregister_image(image_id, source_region) def create_config_file(self, vmdk_location, description): """ Create the aws import config file :param vmdk_location: location of downloaded VMDK :param description: description to use for config_file creation :return: config file descriptor, config file full path """ description = description format = "vmdk" user_bucket = { "S3Bucket": self.bucket_name, "S3Key": vmdk_location } parent_obj = {'Description': description, 'Format': format, 'UserBucket': user_bucket} obj_list = [parent_obj] temp_fd, temp_file = tempfile.mkstemp() print 'creating tmp file for {} at {}'.format(vmdk_location, temp_file) with os.fdopen(temp_fd, 'w') as f: json.dump(obj_list, f) return temp_fd, temp_file def run_ec2_import(self, config_file_location, description, region='us-east-1'): """ Runs the command to import an uploaded vmdk to aws ec2 :param config_file_location: config file of import param location :param description: description to attach to the import task :return: the import task id for the given ami """ import_cmd = "aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'" \ " --disk-containers file://{}"\ .format(description, self.aws_project, region, config_file_location) try: res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print "Error importing to ec2" print "output: {}".format(e.output) sys.exit(5) print "got res: {}".format(res) res_json = json.loads(res) task_running, import_id = 
self.check_task_status_and_id(res_json) return import_id def upload_to_s3(self, region='us-east-1'): """ Uploads the vmdk file to aws s3 :param file_location: location of vmdk :return: """ s3_import_cmd = "aws s3 cp {} s3://{} --profile '{}' --region {}".format(self.upload_file, self.bucket_name, self.aws_project, region) print "Uploading to bucket {} in s3 with the cmd: {}".format(self.bucket_name, s3_import_cmd) # s3 upload puts DL progress to stderr s3_upload = subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE) while True: progress = s3_upload.stderr.readline() if progress == '' and s3_upload.poll() is not None: break if progress: print (progress) rc = s3_upload.poll() if rc != 0: raise subprocess.CalledProcessError(rc) print "Upload completed successfully" @staticmethod def check_task_status_and_id(task_json): """ Read status of import json and parse :param task_json: status json to parse :return: (stillRunning, imageId) """ if task_json.get('ImportImageTasks') is not None: task = task_json['ImportImageTasks'][0] else: task = task_json current_status = task['Status'] image_id = task['ImportTaskId'] if current_status == 'completed': print "The import has completed succesfully as ID: {}".format(image_id) return False, image_id elif current_status == 'deleting': print "The import job has been cancelled for some reason" return False, None elif current_status == 'deleted': print "The import job was cancelled" return False, None else: print "The current import job for id {} status is: {}".format(image_id, current_status) print "sleeping for 30 seconds" time.sleep(30) return True, image_id def import_vmdk(self): """ All actions necessary to import vmdk (calls s3 upload, and import to aws ec2) :param vmdk_location: location of vmdk to import. 
Can be provided as a string, or the result output of fabric execution :return: """ # Set the inital upload to be the first region in the list first_upload_region = self.aws_regions[0] print "Initial AMI will be created in: {}".format(first_upload_region) self.upload_to_s3(region=first_upload_region) # If the upload was successful, the name to reference for import is now the basename description = "AMI upload of: {}".format(os.path.basename(self.upload_file)) temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description) import_id = self.run_ec2_import(file_location, description, first_upload_region) self.wait_for_import_to_complete(import_id) self.rename_image(import_id, self.ami_name, source_region=first_upload_region) return import_id
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
AWSUtils.check_task_status_and_id
python
def check_task_status_and_id(task_json): if task_json.get('ImportImageTasks') is not None: task = task_json['ImportImageTasks'][0] else: task = task_json current_status = task['Status'] image_id = task['ImportTaskId'] if current_status == 'completed': print "The import has completed succesfully as ID: {}".format(image_id) return False, image_id elif current_status == 'deleting': print "The import job has been cancelled for some reason" return False, None elif current_status == 'deleted': print "The import job was cancelled" return False, None else: print "The current import job for id {} status is: {}".format(image_id, current_status) print "sleeping for 30 seconds" time.sleep(30) return True, image_id
Read status of import json and parse :param task_json: status json to parse :return: (stillRunning, imageId)
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L291-L317
null
class AWSUtils: """ Methods necessary to perform VM imports """ def __init__(self, config_save_dir, aws_profile, bucket, regions, ami_name, upload_file): """ Instantiate with common properties for all VM imports to AWS :param config_save_dir: where to save aws config files :param aws_project: which aws_project to upload to :param profile: which aws credential profile to use :param region: which aws region to impot AMI into """ self.aws_project = aws_profile self.aws_regions = regions self.config_save_dir = config_save_dir self.bucket_name = bucket self.ami_name = ami_name self.upload_file = upload_file def validate(self): """ Call instance validation methods :return: """ self.validate_regions() self.validate_bucket() self.validate_ec2_action() def validate_regions(self): """ Validate the user specified regions are valid :return: """ for region in self.aws_regions: if region not in aws_regions: print "Error: Specified region: {} is not a valid aws_region".format(region) print "Valid regions are: {}".format(aws_regions) def validate_ec2_action(self): """ Attempt to validate that the provided user has permissions to import an AMI :return: """ import_cmd = 'aws ec2 import-image --dry-run --profile {} --region {}'\ .format(self.aws_project, self.aws_regions[0]) print "Attempting ec2 import dry run: {}".format(import_cmd) try: subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: if "(DryRunOperation)" in e.output: # If it failed because of a dry run (what we asked for) then this except can be ignored print "Dry run operation successful!" 
return print "Error: {}".format(e.output) print "It doesn't seem like your user has the required permissions to import an ami image from s3" sys.exit(5) def validate_bucket(self): """ Do a quick check to see if the s3 bucket is valid :return: """ s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project, self.aws_regions[0]) print "Checking for s3 bucket" try: subprocess.check_output(shlex.split(s3_check_cmd)) except subprocess.CalledProcessError as e: print "Error: {}".format(e) print "Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"\ .format(self.bucket_name) sys.exit(5) def get_image_id_by_name(self, ami_name, region='us-east-1'): """ Locate an AMI image id by name in a particular region :param ami_name: ami name you need the id for :param region: the region the image exists in :return: id of the image """ image_details = None detail_query_attempts = 0 while image_details is None: describe_cmd = "aws ec2 describe-images --filters 'Name=name,Values={}' --profile '{}' --region {}"\ .format(ami_name, self.aws_project, region) res = subprocess.check_output(shlex.split(describe_cmd)) print "describe command returned: {}".format(res) image_details = parse_image_json(res) if not image_details: if detail_query_attempts > 5: print "Tried to get image details 5 times and failed, exiting" raise Exception("Unable to get AMI image id from AWS using the image name") time.sleep(10) print "No images defined returned yet, will try another query" detail_query_attempts += 1 image_id = image_details['ImageId'] print "located image id: {}".format(image_id) return image_id def copy_ami_to_new_name(self, ami_id, new_name, source_region='us-east-1'): """ Copies an AMI from the default region and name to the desired name and region :param ami_id: ami id to copy :param new_name: name of the new ami to create :param source_region: the source region of the ami to copy """ new_image_ids = [] for 
region in self.aws_regions: copy_img_cmd = "aws ec2 copy-image --source-image-id {} --profile {} --source-region {} --region {} --name {}"\ .format(ami_id, self.aws_project, source_region, region, new_name) res = subprocess.check_output(shlex.split(copy_img_cmd)) print "Copy cmd returned: {}".format(res) new_image_id = json.loads(res).get('ImageId') new_image_ids.append((new_image_id, region)) print "new image Id is: {}".format(new_image_id) print "monitoring the copies for the following regions/id : {}".format(new_image_ids) for tupp in new_image_ids: image_id = tupp[0] image_region = tupp[1] self.wait_for_copy_available(image_id, image_region) def deregister_image(self, ami_id, region='us-east-1'): """ Deregister an AMI by id :param ami_id: :param region: region to deregister from :return: """ deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}"\ .format(self.aws_project, region, ami_id) print "De-registering old image, now that the new one exists." print "De-registering cmd: {}".format(deregister_cmd) res = subprocess.check_output(shlex.split(deregister_cmd)) print "Response: {}".format(res) print "Not monitoring de-register command" def wait_for_copy_available(self, image_id, region): """ Wait for the newly copied ami to become available :param image_id: image id to monitor :param region: region to monitor copy """ waiting = True describe_image_cmd = "aws ec2 --profile {} --region {} --output json describe-images --image-id {}"\ .format(self.aws_project, region, image_id) while waiting: res = subprocess.check_output(shlex.split(describe_image_cmd)) print "described image returned: {}".format(res) image_json = parse_image_json(res) image_state = image_json['State'] if image_state == 'available': print "Copied AMI is renamed and ready to use!" return elif image_state == 'failed': print "Copied AMI failed for some reason..." sys.exit(5) else: print "image state is currently: {}".format(image_state) print "Sleeping for 30 seconds..." 
time.sleep(30) def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'): """ Method which renames an ami by copying to a new ami with a new name (only way this is possible in AWS) :param ami_name: :param new_ami_name: :return: """ print "Re-naming/moving AMI to desired name and region" image_id = self.get_image_id_by_name(ami_name, source_region) self.copy_ami_to_new_name(image_id, new_ami_name, source_region) self.deregister_image(image_id, source_region) def create_config_file(self, vmdk_location, description): """ Create the aws import config file :param vmdk_location: location of downloaded VMDK :param description: description to use for config_file creation :return: config file descriptor, config file full path """ description = description format = "vmdk" user_bucket = { "S3Bucket": self.bucket_name, "S3Key": vmdk_location } parent_obj = {'Description': description, 'Format': format, 'UserBucket': user_bucket} obj_list = [parent_obj] temp_fd, temp_file = tempfile.mkstemp() print 'creating tmp file for {} at {}'.format(vmdk_location, temp_file) with os.fdopen(temp_fd, 'w') as f: json.dump(obj_list, f) return temp_fd, temp_file def run_ec2_import(self, config_file_location, description, region='us-east-1'): """ Runs the command to import an uploaded vmdk to aws ec2 :param config_file_location: config file of import param location :param description: description to attach to the import task :return: the import task id for the given ami """ import_cmd = "aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'" \ " --disk-containers file://{}"\ .format(description, self.aws_project, region, config_file_location) try: res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print "Error importing to ec2" print "output: {}".format(e.output) sys.exit(5) print "got res: {}".format(res) res_json = json.loads(res) task_running, import_id = 
self.check_task_status_and_id(res_json) return import_id def upload_to_s3(self, region='us-east-1'): """ Uploads the vmdk file to aws s3 :param file_location: location of vmdk :return: """ s3_import_cmd = "aws s3 cp {} s3://{} --profile '{}' --region {}".format(self.upload_file, self.bucket_name, self.aws_project, region) print "Uploading to bucket {} in s3 with the cmd: {}".format(self.bucket_name, s3_import_cmd) # s3 upload puts DL progress to stderr s3_upload = subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE) while True: progress = s3_upload.stderr.readline() if progress == '' and s3_upload.poll() is not None: break if progress: print (progress) rc = s3_upload.poll() if rc != 0: raise subprocess.CalledProcessError(rc) print "Upload completed successfully" def wait_for_import_to_complete(self, import_id, region='us-east-1'): """ Monitors the status of aws import, waiting for it to complete, or error out :param import_id: id of import task to monitor """ task_running = True while task_running: import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id) res = subprocess.check_output(shlex.split(import_status_cmd)) print "Current status: {}".format(res) res_json = json.loads(res) task_running, image_id = self.check_task_status_and_id(res_json) @staticmethod def import_vmdk(self): """ All actions necessary to import vmdk (calls s3 upload, and import to aws ec2) :param vmdk_location: location of vmdk to import. 
Can be provided as a string, or the result output of fabric execution :return: """ # Set the inital upload to be the first region in the list first_upload_region = self.aws_regions[0] print "Initial AMI will be created in: {}".format(first_upload_region) self.upload_to_s3(region=first_upload_region) # If the upload was successful, the name to reference for import is now the basename description = "AMI upload of: {}".format(os.path.basename(self.upload_file)) temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description) import_id = self.run_ec2_import(file_location, description, first_upload_region) self.wait_for_import_to_complete(import_id) self.rename_image(import_id, self.ami_name, source_region=first_upload_region) return import_id
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
AWSUtils.import_vmdk
python
def import_vmdk(self): # Set the inital upload to be the first region in the list first_upload_region = self.aws_regions[0] print "Initial AMI will be created in: {}".format(first_upload_region) self.upload_to_s3(region=first_upload_region) # If the upload was successful, the name to reference for import is now the basename description = "AMI upload of: {}".format(os.path.basename(self.upload_file)) temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description) import_id = self.run_ec2_import(file_location, description, first_upload_region) self.wait_for_import_to_complete(import_id) self.rename_image(import_id, self.ami_name, source_region=first_upload_region) return import_id
All actions necessary to import vmdk (calls s3 upload, and import to aws ec2) :param vmdk_location: location of vmdk to import. Can be provided as a string, or the result output of fabric execution :return:
train
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L319-L337
[ "def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'):\n \"\"\"\n Method which renames an ami by copying to a new ami with a new name (only way this is possible in AWS)\n :param ami_name:\n :param new_ami_name:\n :return:\n \"\"\"\n print \"Re-naming/moving AMI to desired name and region\"\n image_id = self.get_image_id_by_name(ami_name, source_region)\n self.copy_ami_to_new_name(image_id, new_ami_name, source_region)\n self.deregister_image(image_id, source_region)\n", "def create_config_file(self, vmdk_location, description):\n \"\"\"\n Create the aws import config file\n :param vmdk_location: location of downloaded VMDK\n :param description: description to use for config_file creation\n :return: config file descriptor, config file full path\n \"\"\"\n description = description\n format = \"vmdk\"\n user_bucket = {\n \"S3Bucket\": self.bucket_name,\n \"S3Key\": vmdk_location\n }\n parent_obj = {'Description': description, 'Format': format, 'UserBucket': user_bucket}\n obj_list = [parent_obj]\n\n temp_fd, temp_file = tempfile.mkstemp()\n print 'creating tmp file for {} at {}'.format(vmdk_location, temp_file)\n\n with os.fdopen(temp_fd, 'w') as f:\n json.dump(obj_list, f)\n return temp_fd, temp_file\n", "def run_ec2_import(self, config_file_location, description, region='us-east-1'):\n \"\"\"\n Runs the command to import an uploaded vmdk to aws ec2\n :param config_file_location: config file of import param location\n :param description: description to attach to the import task\n :return: the import task id for the given ami\n \"\"\"\n import_cmd = \"aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'\" \\\n \" --disk-containers file://{}\"\\\n .format(description, self.aws_project, region, config_file_location)\n try:\n res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n print \"Error importing to ec2\"\n print \"output: 
{}\".format(e.output)\n sys.exit(5)\n\n print \"got res: {}\".format(res)\n res_json = json.loads(res)\n task_running, import_id = self.check_task_status_and_id(res_json)\n return import_id\n", "def upload_to_s3(self, region='us-east-1'):\n \"\"\"\n Uploads the vmdk file to aws s3\n :param file_location: location of vmdk\n :return:\n \"\"\"\n s3_import_cmd = \"aws s3 cp {} s3://{} --profile '{}' --region {}\".format(self.upload_file, self.bucket_name,\n self.aws_project, region)\n print \"Uploading to bucket {} in s3 with the cmd: {}\".format(self.bucket_name, s3_import_cmd)\n # s3 upload puts DL progress to stderr\n s3_upload = subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE)\n while True:\n progress = s3_upload.stderr.readline()\n if progress == '' and s3_upload.poll() is not None:\n break\n if progress:\n print (progress)\n rc = s3_upload.poll()\n if rc != 0:\n raise subprocess.CalledProcessError(rc)\n print \"Upload completed successfully\"\n", "def wait_for_import_to_complete(self, import_id, region='us-east-1'):\n \"\"\"\n Monitors the status of aws import, waiting for it to complete, or error out\n :param import_id: id of import task to monitor\n \"\"\"\n task_running = True\n while task_running:\n import_status_cmd = \"aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}\".format(self.aws_project, region, import_id)\n res = subprocess.check_output(shlex.split(import_status_cmd))\n print \"Current status: {}\".format(res)\n res_json = json.loads(res)\n task_running, image_id = self.check_task_status_and_id(res_json)\n" ]
class AWSUtils: """ Methods necessary to perform VM imports """ def __init__(self, config_save_dir, aws_profile, bucket, regions, ami_name, upload_file): """ Instantiate with common properties for all VM imports to AWS :param config_save_dir: where to save aws config files :param aws_project: which aws_project to upload to :param profile: which aws credential profile to use :param region: which aws region to impot AMI into """ self.aws_project = aws_profile self.aws_regions = regions self.config_save_dir = config_save_dir self.bucket_name = bucket self.ami_name = ami_name self.upload_file = upload_file def validate(self): """ Call instance validation methods :return: """ self.validate_regions() self.validate_bucket() self.validate_ec2_action() def validate_regions(self): """ Validate the user specified regions are valid :return: """ for region in self.aws_regions: if region not in aws_regions: print "Error: Specified region: {} is not a valid aws_region".format(region) print "Valid regions are: {}".format(aws_regions) def validate_ec2_action(self): """ Attempt to validate that the provided user has permissions to import an AMI :return: """ import_cmd = 'aws ec2 import-image --dry-run --profile {} --region {}'\ .format(self.aws_project, self.aws_regions[0]) print "Attempting ec2 import dry run: {}".format(import_cmd) try: subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: if "(DryRunOperation)" in e.output: # If it failed because of a dry run (what we asked for) then this except can be ignored print "Dry run operation successful!" 
return print "Error: {}".format(e.output) print "It doesn't seem like your user has the required permissions to import an ami image from s3" sys.exit(5) def validate_bucket(self): """ Do a quick check to see if the s3 bucket is valid :return: """ s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project, self.aws_regions[0]) print "Checking for s3 bucket" try: subprocess.check_output(shlex.split(s3_check_cmd)) except subprocess.CalledProcessError as e: print "Error: {}".format(e) print "Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"\ .format(self.bucket_name) sys.exit(5) def get_image_id_by_name(self, ami_name, region='us-east-1'): """ Locate an AMI image id by name in a particular region :param ami_name: ami name you need the id for :param region: the region the image exists in :return: id of the image """ image_details = None detail_query_attempts = 0 while image_details is None: describe_cmd = "aws ec2 describe-images --filters 'Name=name,Values={}' --profile '{}' --region {}"\ .format(ami_name, self.aws_project, region) res = subprocess.check_output(shlex.split(describe_cmd)) print "describe command returned: {}".format(res) image_details = parse_image_json(res) if not image_details: if detail_query_attempts > 5: print "Tried to get image details 5 times and failed, exiting" raise Exception("Unable to get AMI image id from AWS using the image name") time.sleep(10) print "No images defined returned yet, will try another query" detail_query_attempts += 1 image_id = image_details['ImageId'] print "located image id: {}".format(image_id) return image_id def copy_ami_to_new_name(self, ami_id, new_name, source_region='us-east-1'): """ Copies an AMI from the default region and name to the desired name and region :param ami_id: ami id to copy :param new_name: name of the new ami to create :param source_region: the source region of the ami to copy """ new_image_ids = [] for 
region in self.aws_regions: copy_img_cmd = "aws ec2 copy-image --source-image-id {} --profile {} --source-region {} --region {} --name {}"\ .format(ami_id, self.aws_project, source_region, region, new_name) res = subprocess.check_output(shlex.split(copy_img_cmd)) print "Copy cmd returned: {}".format(res) new_image_id = json.loads(res).get('ImageId') new_image_ids.append((new_image_id, region)) print "new image Id is: {}".format(new_image_id) print "monitoring the copies for the following regions/id : {}".format(new_image_ids) for tupp in new_image_ids: image_id = tupp[0] image_region = tupp[1] self.wait_for_copy_available(image_id, image_region) def deregister_image(self, ami_id, region='us-east-1'): """ Deregister an AMI by id :param ami_id: :param region: region to deregister from :return: """ deregister_cmd = "aws ec2 --profile {} --region {} deregister-image --image-id {}"\ .format(self.aws_project, region, ami_id) print "De-registering old image, now that the new one exists." print "De-registering cmd: {}".format(deregister_cmd) res = subprocess.check_output(shlex.split(deregister_cmd)) print "Response: {}".format(res) print "Not monitoring de-register command" def wait_for_copy_available(self, image_id, region): """ Wait for the newly copied ami to become available :param image_id: image id to monitor :param region: region to monitor copy """ waiting = True describe_image_cmd = "aws ec2 --profile {} --region {} --output json describe-images --image-id {}"\ .format(self.aws_project, region, image_id) while waiting: res = subprocess.check_output(shlex.split(describe_image_cmd)) print "described image returned: {}".format(res) image_json = parse_image_json(res) image_state = image_json['State'] if image_state == 'available': print "Copied AMI is renamed and ready to use!" return elif image_state == 'failed': print "Copied AMI failed for some reason..." sys.exit(5) else: print "image state is currently: {}".format(image_state) print "Sleeping for 30 seconds..." 
time.sleep(30) def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'): """ Method which renames an ami by copying to a new ami with a new name (only way this is possible in AWS) :param ami_name: :param new_ami_name: :return: """ print "Re-naming/moving AMI to desired name and region" image_id = self.get_image_id_by_name(ami_name, source_region) self.copy_ami_to_new_name(image_id, new_ami_name, source_region) self.deregister_image(image_id, source_region) def create_config_file(self, vmdk_location, description): """ Create the aws import config file :param vmdk_location: location of downloaded VMDK :param description: description to use for config_file creation :return: config file descriptor, config file full path """ description = description format = "vmdk" user_bucket = { "S3Bucket": self.bucket_name, "S3Key": vmdk_location } parent_obj = {'Description': description, 'Format': format, 'UserBucket': user_bucket} obj_list = [parent_obj] temp_fd, temp_file = tempfile.mkstemp() print 'creating tmp file for {} at {}'.format(vmdk_location, temp_file) with os.fdopen(temp_fd, 'w') as f: json.dump(obj_list, f) return temp_fd, temp_file def run_ec2_import(self, config_file_location, description, region='us-east-1'): """ Runs the command to import an uploaded vmdk to aws ec2 :param config_file_location: config file of import param location :param description: description to attach to the import task :return: the import task id for the given ami """ import_cmd = "aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'" \ " --disk-containers file://{}"\ .format(description, self.aws_project, region, config_file_location) try: res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print "Error importing to ec2" print "output: {}".format(e.output) sys.exit(5) print "got res: {}".format(res) res_json = json.loads(res) task_running, import_id = 
self.check_task_status_and_id(res_json) return import_id def upload_to_s3(self, region='us-east-1'): """ Uploads the vmdk file to aws s3 :param file_location: location of vmdk :return: """ s3_import_cmd = "aws s3 cp {} s3://{} --profile '{}' --region {}".format(self.upload_file, self.bucket_name, self.aws_project, region) print "Uploading to bucket {} in s3 with the cmd: {}".format(self.bucket_name, s3_import_cmd) # s3 upload puts DL progress to stderr s3_upload = subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE) while True: progress = s3_upload.stderr.readline() if progress == '' and s3_upload.poll() is not None: break if progress: print (progress) rc = s3_upload.poll() if rc != 0: raise subprocess.CalledProcessError(rc) print "Upload completed successfully" def wait_for_import_to_complete(self, import_id, region='us-east-1'): """ Monitors the status of aws import, waiting for it to complete, or error out :param import_id: id of import task to monitor """ task_running = True while task_running: import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id) res = subprocess.check_output(shlex.split(import_status_cmd)) print "Current status: {}".format(res) res_json = json.loads(res) task_running, image_id = self.check_task_status_and_id(res_json) @staticmethod def check_task_status_and_id(task_json): """ Read status of import json and parse :param task_json: status json to parse :return: (stillRunning, imageId) """ if task_json.get('ImportImageTasks') is not None: task = task_json['ImportImageTasks'][0] else: task = task_json current_status = task['Status'] image_id = task['ImportTaskId'] if current_status == 'completed': print "The import has completed succesfully as ID: {}".format(image_id) return False, image_id elif current_status == 'deleting': print "The import job has been cancelled for some reason" return False, None elif current_status 
== 'deleted': print "The import job was cancelled" return False, None else: print "The current import job for id {} status is: {}".format(image_id, current_status) print "sleeping for 30 seconds" time.sleep(30) return True, image_id
rgmining/ria
ria/one.py
BipartiteGraph.update
python
def update(self): if self.updated: return 0 res = super(BipartiteGraph, self).update() self.updated = True return res
Update reviewers' anomalous scores and products' summaries. Returns: maximum absolute difference between old summary and new one, and old anomalous score and new one.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/one.py#L43-L55
[ "def update(self):\n \"\"\"Update reviewers' anomalous scores and products' summaries.\n\n Returns:\n maximum absolute difference between old summary and new one, and\n old anomalous score and new one.\n \"\"\"\n w = self._weight_generator(self.reviewers)\n diff_p = max(p.update_summary(w) for p in self.products)\n diff_a = max(r.update_anomalous_score() for r in self.reviewers)\n return max(diff_p, diff_a)\n" ]
class BipartiteGraph(bipartite.BipartiteGraph): """Bipartite graph implementing One algorithm. Attributes: updated: Whether :meth:`update` has been called. If True, that method does nothing. """ def __init__(self, **kwargs): super(BipartiteGraph, self).__init__(**kwargs) self.updated = False
rgmining/ria
ria/bipartite.py
Reviewer.anomalous_score
python
def anomalous_score(self): return self._anomalous if self._anomalous else 1. / len(self._graph.reviewers)
Anomalous score of this reviewer. Initial anomalous score is :math:`1 / |R|` where :math:`R` is a set of reviewers.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L151-L157
null
class Reviewer(_Node): """A node class representing Reviewer. Args: graph: an instance of BipartiteGraph representing the parent graph. credibility: an instance of credibility.Credibility to be used to update scores. name: name of this node. (default: None) anomalous: initial anomalous score. (default: None) """ __slots__ = ("_anomalous", "_credibility") def __init__(self, graph, credibility, name=None, anomalous=None): super(Reviewer, self).__init__(graph, name) self._anomalous = anomalous self._credibility = credibility @property @anomalous_score.setter def anomalous_score(self, v): """Set an anomalous score. Args: v: the new anomalous score. """ self._anomalous = float(v) def update_anomalous_score(self): """Update anomalous score. New anomalous score is a weighted average of differences between current summary and reviews. The weights come from credibilities. Therefore, the new anomalous score of reviewer :math:`p` is as .. math:: {\\rm anomalous}(r) = \\frac{ \\sum_{p \\in P} {\\rm credibility}(p)| {\\rm review}(r, p)-{\\rm summary}(p)| }{ \\sum_{p \\in P} {\\rm credibility}(p) } where :math:`P` is a set of products reviewed by reviewer :math:`p`, review(:math:`r`, :math:`p`) is the rating reviewer :math:`r` posted to product :math:`p`, summary(:math:`p`) and credibility(:math:`p`) are summary and credibility of product :math:`p`, respectively. Returns: absolute difference between old anomalous score and updated one. """ products = self._graph.retrieve_products(self) diffs = [ p.summary.difference(self._graph.retrieve_review(self, p)) for p in products ] old = self.anomalous_score try: self.anomalous_score = np.average( diffs, weights=list(map(self._credibility, products))) except ZeroDivisionError: self.anomalous_score = np.average(diffs) return abs(self.anomalous_score - old)
rgmining/ria
ria/bipartite.py
Reviewer.update_anomalous_score
python
def update_anomalous_score(self): products = self._graph.retrieve_products(self) diffs = [ p.summary.difference(self._graph.retrieve_review(self, p)) for p in products ] old = self.anomalous_score try: self.anomalous_score = np.average( diffs, weights=list(map(self._credibility, products))) except ZeroDivisionError: self.anomalous_score = np.average(diffs) return abs(self.anomalous_score - old)
Update anomalous score. New anomalous score is a weighted average of differences between current summary and reviews. The weights come from credibilities. Therefore, the new anomalous score of reviewer :math:`p` is as .. math:: {\\rm anomalous}(r) = \\frac{ \\sum_{p \\in P} {\\rm credibility}(p)| {\\rm review}(r, p)-{\\rm summary}(p)| }{ \\sum_{p \\in P} {\\rm credibility}(p) } where :math:`P` is a set of products reviewed by reviewer :math:`p`, review(:math:`r`, :math:`p`) is the rating reviewer :math:`r` posted to product :math:`p`, summary(:math:`p`) and credibility(:math:`p`) are summary and credibility of product :math:`p`, respectively. Returns: absolute difference between old anomalous score and updated one.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L168-L206
null
class Reviewer(_Node): """A node class representing Reviewer. Args: graph: an instance of BipartiteGraph representing the parent graph. credibility: an instance of credibility.Credibility to be used to update scores. name: name of this node. (default: None) anomalous: initial anomalous score. (default: None) """ __slots__ = ("_anomalous", "_credibility") def __init__(self, graph, credibility, name=None, anomalous=None): super(Reviewer, self).__init__(graph, name) self._anomalous = anomalous self._credibility = credibility @property def anomalous_score(self): """Anomalous score of this reviewer. Initial anomalous score is :math:`1 / |R|` where :math:`R` is a set of reviewers. """ return self._anomalous if self._anomalous else 1. / len(self._graph.reviewers) @anomalous_score.setter def anomalous_score(self, v): """Set an anomalous score. Args: v: the new anomalous score. """ self._anomalous = float(v)
rgmining/ria
ria/bipartite.py
Product.summary
python
def summary(self): if self._summary: return self._summary reviewers = self._graph.retrieve_reviewers(self) return self._summary_cls( [self._graph.retrieve_review(r, self) for r in reviewers])
Summary of reviews for this product. Initial summary is computed by .. math:: \\frac{1}{|R|} \\sum_{r \\in R} \\mbox{review}(r), where :math:`\\mbox{review}(r)` means review from reviewer :math:`r`.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L226-L242
null
class Product(_Node): """A node class representing Product. Args: graph: An instance of BipartiteGraph representing the parent graph. name: Name of this node. (default: None) summary_cls: Specify summary type. (default: AverageSummary) """ __slots__ = ("_summary", "_summary_cls") def __init__(self, graph, name=None, summary_cls=AverageSummary): super(Product, self).__init__(graph, name) self._summary = None self._summary_cls = summary_cls @property @summary.setter def summary(self, v): """Set summary. Args: v: A new summary. It could be a single number or lists. """ if hasattr(v, "__iter__"): self._summary = self._summary_cls(v) else: self._summary = self._summary_cls(float(v)) def update_summary(self, w): """Update summary. The new summary is a weighted average of reviews i.e. .. math:: \\frac{\\sum_{r \\in R} \\mbox{weight}(r) \\times \\mbox{review}(r)} {\\sum_{r \\in R} \\mbox{weight}(r)}, where :math:`R` is a set of reviewers reviewing this product, :math:`\\mbox{review}(r)` and :math:`\\mbox{weight}(r)` are the review and weight of the reviewer :math:`r`, respectively. Args: w: A weight function. Returns: absolute difference between old summary and updated one. """ old = self.summary.v # pylint: disable=no-member reviewers = self._graph.retrieve_reviewers(self) reviews = [self._graph.retrieve_review( r, self).score for r in reviewers] weights = [w(r.anomalous_score) for r in reviewers] if sum(weights) == 0: self.summary = np.mean(reviews) else: self.summary = np.average(reviews, weights=weights) return abs(self.summary.v - old) # pylint: disable=no-member
rgmining/ria
ria/bipartite.py
Product.summary
python
def summary(self, v): if hasattr(v, "__iter__"): self._summary = self._summary_cls(v) else: self._summary = self._summary_cls(float(v))
Set summary. Args: v: A new summary. It could be a single number or lists.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L245-L254
null
class Product(_Node): """A node class representing Product. Args: graph: An instance of BipartiteGraph representing the parent graph. name: Name of this node. (default: None) summary_cls: Specify summary type. (default: AverageSummary) """ __slots__ = ("_summary", "_summary_cls") def __init__(self, graph, name=None, summary_cls=AverageSummary): super(Product, self).__init__(graph, name) self._summary = None self._summary_cls = summary_cls @property def summary(self): """Summary of reviews for this product. Initial summary is computed by .. math:: \\frac{1}{|R|} \\sum_{r \\in R} \\mbox{review}(r), where :math:`\\mbox{review}(r)` means review from reviewer :math:`r`. """ if self._summary: return self._summary reviewers = self._graph.retrieve_reviewers(self) return self._summary_cls( [self._graph.retrieve_review(r, self) for r in reviewers]) @summary.setter def update_summary(self, w): """Update summary. The new summary is a weighted average of reviews i.e. .. math:: \\frac{\\sum_{r \\in R} \\mbox{weight}(r) \\times \\mbox{review}(r)} {\\sum_{r \\in R} \\mbox{weight}(r)}, where :math:`R` is a set of reviewers reviewing this product, :math:`\\mbox{review}(r)` and :math:`\\mbox{weight}(r)` are the review and weight of the reviewer :math:`r`, respectively. Args: w: A weight function. Returns: absolute difference between old summary and updated one. """ old = self.summary.v # pylint: disable=no-member reviewers = self._graph.retrieve_reviewers(self) reviews = [self._graph.retrieve_review( r, self).score for r in reviewers] weights = [w(r.anomalous_score) for r in reviewers] if sum(weights) == 0: self.summary = np.mean(reviews) else: self.summary = np.average(reviews, weights=weights) return abs(self.summary.v - old) # pylint: disable=no-member
rgmining/ria
ria/bipartite.py
Product.update_summary
python
def update_summary(self, w): old = self.summary.v # pylint: disable=no-member reviewers = self._graph.retrieve_reviewers(self) reviews = [self._graph.retrieve_review( r, self).score for r in reviewers] weights = [w(r.anomalous_score) for r in reviewers] if sum(weights) == 0: self.summary = np.mean(reviews) else: self.summary = np.average(reviews, weights=weights) return abs(self.summary.v - old)
Update summary. The new summary is a weighted average of reviews i.e. .. math:: \\frac{\\sum_{r \\in R} \\mbox{weight}(r) \\times \\mbox{review}(r)} {\\sum_{r \\in R} \\mbox{weight}(r)}, where :math:`R` is a set of reviewers reviewing this product, :math:`\\mbox{review}(r)` and :math:`\\mbox{weight}(r)` are the review and weight of the reviewer :math:`r`, respectively. Args: w: A weight function. Returns: absolute difference between old summary and updated one.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L256-L286
null
class Product(_Node): """A node class representing Product. Args: graph: An instance of BipartiteGraph representing the parent graph. name: Name of this node. (default: None) summary_cls: Specify summary type. (default: AverageSummary) """ __slots__ = ("_summary", "_summary_cls") def __init__(self, graph, name=None, summary_cls=AverageSummary): super(Product, self).__init__(graph, name) self._summary = None self._summary_cls = summary_cls @property def summary(self): """Summary of reviews for this product. Initial summary is computed by .. math:: \\frac{1}{|R|} \\sum_{r \\in R} \\mbox{review}(r), where :math:`\\mbox{review}(r)` means review from reviewer :math:`r`. """ if self._summary: return self._summary reviewers = self._graph.retrieve_reviewers(self) return self._summary_cls( [self._graph.retrieve_review(r, self) for r in reviewers]) @summary.setter def summary(self, v): """Set summary. Args: v: A new summary. It could be a single number or lists. """ if hasattr(v, "__iter__"): self._summary = self._summary_cls(v) else: self._summary = self._summary_cls(float(v)) # pylint: disable=no-member
rgmining/ria
ria/bipartite.py
BipartiteGraph.new_reviewer
python
def new_reviewer(self, name, anomalous=None): n = self._reviewer_cls( self, name=name, credibility=self.credibility, anomalous=anomalous) self.graph.add_node(n) self.reviewers.append(n) return n
Create a new reviewer. Args: name: name of the new reviewer. anomalous: initial anomalous score. (default: None) Returns: A new reviewer instance.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L333-L347
null
class BipartiteGraph(object): """Bipartite graph model for review data mining. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: :class:`ria.credibility.WeightedCredibility`) reviewer: Class of reviewers. product: Class of products. Attributes: alpha: Parameter. graph: Graph object of networkx. reviewers: Collection of reviewers. products: Collection of products. credibility: Credibility object. """ def __init__( self, summary=AverageSummary, alpha=1, credibility=WeightedCredibility, reviewer=Reviewer, product=Product): """Construct bipartite graph. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: WeightedCredibility) reviewer: Class of reviewers. product: Class of products. """ self.alpha = alpha self.graph = nx.DiGraph() self.reviewers = [] self.products = [] self._summary_cls = summary self._review_cls = summary.review_class() self.credibility = credibility(self) self._reviewer_cls = reviewer self._product_cls = product def new_product(self, name): """Create a new product. Args: name: name of the new product. Returns: A new product instance. """ n = self._product_cls(self, name, summary_cls=self._summary_cls) self.graph.add_node(n) self.products.append(n) return n def add_review(self, reviewer, product, review, date=None): """Add a new review from a given reviewer to a given product. Args: reviewer: an instance of Reviewer. product: an instance of Product. review: a float value. date: date the review issued. Returns: the added new review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. 
""" if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) r = self._review_cls(review, date=date) self.graph.add_edge(reviewer, product, review=r) return r @memoized def retrieve_products(self, reviewer): """Retrieve products reviewed by a given reviewer. Args: reviewer: A reviewer. Returns: A list of products which the reviewer reviews. Raises: TypeError: when given reviewer isn't instance of specified reviewer class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) return list(self.graph.successors(reviewer)) @memoized def retrieve_reviewers(self, product): """Retrieve reviewers who reviewed a given product. Args: product: A product specifying reviewers. Returns: A list of reviewers who review the product. Raises: TypeError: when given product isn't instance of specified product class when this graph is constructed. """ if not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) return list(self.graph.predecessors(product)) @memoized def retrieve_review(self, reviewer, product): """Retrieve review that the given reviewer put the given product. Args: reviewer: An instance of Reviewer. product: An instance of Product. Returns: A review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. KeyError: When the reviewer does not review the product. 
""" if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) try: return self.graph[reviewer][product]["review"] except TypeError: raise KeyError( "{0} does not review {1}.".format(reviewer, product)) def update(self): """Update reviewers' anomalous scores and products' summaries. Returns: maximum absolute difference between old summary and new one, and old anomalous score and new one. """ w = self._weight_generator(self.reviewers) diff_p = max(p.update_summary(w) for p in self.products) diff_a = max(r.update_anomalous_score() for r in self.reviewers) return max(diff_p, diff_a) def _weight_generator(self, reviewers): """Compute a weight function for the given reviewers. Args: reviewers: a set of reviewers to compute weight function. Returns: a function computing a weight for a reviewer. """ scores = [r.anomalous_score for r in reviewers] mu = np.average(scores) sigma = np.std(scores) if sigma: def w(v): """Compute a weight for the given reviewer. Args: v: anomalous score of a reviewer. Returns: weight of the given anomalous score. """ try: exp = math.exp(self.alpha * (v - mu) / sigma) return 1. / (1. + exp) except OverflowError: return 0. return w else: # Sigma = 0 means all reviews have same anomalous scores. # In this case, all reviews should be treated as same. return lambda v: 1. def dump_credibilities(self, output): """Dump credibilities of all products. Args: output: a writable object. """ for p in self.products: json.dump({ "product_id": p.name, "credibility": self.credibility(p) }, output) output.write("\n") def to_pydot(self): """Convert this graph to PyDot object. Returns: PyDot object representing this graph. """ return nx.nx_pydot.to_pydot(self.graph)
rgmining/ria
ria/bipartite.py
BipartiteGraph.new_product
python
def new_product(self, name): n = self._product_cls(self, name, summary_cls=self._summary_cls) self.graph.add_node(n) self.products.append(n) return n
Create a new product. Args: name: name of the new product. Returns: A new product instance.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L349-L361
null
class BipartiteGraph(object): """Bipartite graph model for review data mining. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: :class:`ria.credibility.WeightedCredibility`) reviewer: Class of reviewers. product: Class of products. Attributes: alpha: Parameter. graph: Graph object of networkx. reviewers: Collection of reviewers. products: Collection of products. credibility: Credibility object. """ def __init__( self, summary=AverageSummary, alpha=1, credibility=WeightedCredibility, reviewer=Reviewer, product=Product): """Construct bipartite graph. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: WeightedCredibility) reviewer: Class of reviewers. product: Class of products. """ self.alpha = alpha self.graph = nx.DiGraph() self.reviewers = [] self.products = [] self._summary_cls = summary self._review_cls = summary.review_class() self.credibility = credibility(self) self._reviewer_cls = reviewer self._product_cls = product def new_reviewer(self, name, anomalous=None): """Create a new reviewer. Args: name: name of the new reviewer. anomalous: initial anomalous score. (default: None) Returns: A new reviewer instance. """ n = self._reviewer_cls( self, name=name, credibility=self.credibility, anomalous=anomalous) self.graph.add_node(n) self.reviewers.append(n) return n def add_review(self, reviewer, product, review, date=None): """Add a new review from a given reviewer to a given product. Args: reviewer: an instance of Reviewer. product: an instance of Product. review: a float value. date: date the review issued. Returns: the added new review object. 
Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) r = self._review_cls(review, date=date) self.graph.add_edge(reviewer, product, review=r) return r @memoized def retrieve_products(self, reviewer): """Retrieve products reviewed by a given reviewer. Args: reviewer: A reviewer. Returns: A list of products which the reviewer reviews. Raises: TypeError: when given reviewer isn't instance of specified reviewer class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) return list(self.graph.successors(reviewer)) @memoized def retrieve_reviewers(self, product): """Retrieve reviewers who reviewed a given product. Args: product: A product specifying reviewers. Returns: A list of reviewers who review the product. Raises: TypeError: when given product isn't instance of specified product class when this graph is constructed. """ if not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) return list(self.graph.predecessors(product)) @memoized def retrieve_review(self, reviewer, product): """Retrieve review that the given reviewer put the given product. Args: reviewer: An instance of Reviewer. product: An instance of Product. Returns: A review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. KeyError: When the reviewer does not review the product. 
""" if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) try: return self.graph[reviewer][product]["review"] except TypeError: raise KeyError( "{0} does not review {1}.".format(reviewer, product)) def update(self): """Update reviewers' anomalous scores and products' summaries. Returns: maximum absolute difference between old summary and new one, and old anomalous score and new one. """ w = self._weight_generator(self.reviewers) diff_p = max(p.update_summary(w) for p in self.products) diff_a = max(r.update_anomalous_score() for r in self.reviewers) return max(diff_p, diff_a) def _weight_generator(self, reviewers): """Compute a weight function for the given reviewers. Args: reviewers: a set of reviewers to compute weight function. Returns: a function computing a weight for a reviewer. """ scores = [r.anomalous_score for r in reviewers] mu = np.average(scores) sigma = np.std(scores) if sigma: def w(v): """Compute a weight for the given reviewer. Args: v: anomalous score of a reviewer. Returns: weight of the given anomalous score. """ try: exp = math.exp(self.alpha * (v - mu) / sigma) return 1. / (1. + exp) except OverflowError: return 0. return w else: # Sigma = 0 means all reviews have same anomalous scores. # In this case, all reviews should be treated as same. return lambda v: 1. def dump_credibilities(self, output): """Dump credibilities of all products. Args: output: a writable object. """ for p in self.products: json.dump({ "product_id": p.name, "credibility": self.credibility(p) }, output) output.write("\n") def to_pydot(self): """Convert this graph to PyDot object. Returns: PyDot object representing this graph. """ return nx.nx_pydot.to_pydot(self.graph)
rgmining/ria
ria/bipartite.py
BipartiteGraph.add_review
python
def add_review(self, reviewer, product, review, date=None): if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) r = self._review_cls(review, date=date) self.graph.add_edge(reviewer, product, review=r) return r
Add a new review from a given reviewer to a given product. Args: reviewer: an instance of Reviewer. product: an instance of Product. review: a float value. date: date the review issued. Returns: the added new review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L363-L389
null
class BipartiteGraph(object): """Bipartite graph model for review data mining. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: :class:`ria.credibility.WeightedCredibility`) reviewer: Class of reviewers. product: Class of products. Attributes: alpha: Parameter. graph: Graph object of networkx. reviewers: Collection of reviewers. products: Collection of products. credibility: Credibility object. """ def __init__( self, summary=AverageSummary, alpha=1, credibility=WeightedCredibility, reviewer=Reviewer, product=Product): """Construct bipartite graph. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: WeightedCredibility) reviewer: Class of reviewers. product: Class of products. """ self.alpha = alpha self.graph = nx.DiGraph() self.reviewers = [] self.products = [] self._summary_cls = summary self._review_cls = summary.review_class() self.credibility = credibility(self) self._reviewer_cls = reviewer self._product_cls = product def new_reviewer(self, name, anomalous=None): """Create a new reviewer. Args: name: name of the new reviewer. anomalous: initial anomalous score. (default: None) Returns: A new reviewer instance. """ n = self._reviewer_cls( self, name=name, credibility=self.credibility, anomalous=anomalous) self.graph.add_node(n) self.reviewers.append(n) return n def new_product(self, name): """Create a new product. Args: name: name of the new product. Returns: A new product instance. """ n = self._product_cls(self, name, summary_cls=self._summary_cls) self.graph.add_node(n) self.products.append(n) return n @memoized def retrieve_products(self, reviewer): """Retrieve products reviewed by a given reviewer. 
Args: reviewer: A reviewer. Returns: A list of products which the reviewer reviews. Raises: TypeError: when given reviewer isn't instance of specified reviewer class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) return list(self.graph.successors(reviewer)) @memoized def retrieve_reviewers(self, product): """Retrieve reviewers who reviewed a given product. Args: product: A product specifying reviewers. Returns: A list of reviewers who review the product. Raises: TypeError: when given product isn't instance of specified product class when this graph is constructed. """ if not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) return list(self.graph.predecessors(product)) @memoized def retrieve_review(self, reviewer, product): """Retrieve review that the given reviewer put the given product. Args: reviewer: An instance of Reviewer. product: An instance of Product. Returns: A review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. KeyError: When the reviewer does not review the product. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) try: return self.graph[reviewer][product]["review"] except TypeError: raise KeyError( "{0} does not review {1}.".format(reviewer, product)) def update(self): """Update reviewers' anomalous scores and products' summaries. Returns: maximum absolute difference between old summary and new one, and old anomalous score and new one. 
""" w = self._weight_generator(self.reviewers) diff_p = max(p.update_summary(w) for p in self.products) diff_a = max(r.update_anomalous_score() for r in self.reviewers) return max(diff_p, diff_a) def _weight_generator(self, reviewers): """Compute a weight function for the given reviewers. Args: reviewers: a set of reviewers to compute weight function. Returns: a function computing a weight for a reviewer. """ scores = [r.anomalous_score for r in reviewers] mu = np.average(scores) sigma = np.std(scores) if sigma: def w(v): """Compute a weight for the given reviewer. Args: v: anomalous score of a reviewer. Returns: weight of the given anomalous score. """ try: exp = math.exp(self.alpha * (v - mu) / sigma) return 1. / (1. + exp) except OverflowError: return 0. return w else: # Sigma = 0 means all reviews have same anomalous scores. # In this case, all reviews should be treated as same. return lambda v: 1. def dump_credibilities(self, output): """Dump credibilities of all products. Args: output: a writable object. """ for p in self.products: json.dump({ "product_id": p.name, "credibility": self.credibility(p) }, output) output.write("\n") def to_pydot(self): """Convert this graph to PyDot object. Returns: PyDot object representing this graph. """ return nx.nx_pydot.to_pydot(self.graph)
rgmining/ria
ria/bipartite.py
BipartiteGraph.retrieve_products
python
def retrieve_products(self, reviewer): if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) return list(self.graph.successors(reviewer))
Retrieve products reviewed by a given reviewer. Args: reviewer: A reviewer. Returns: A list of products which the reviewer reviews. Raises: TypeError: when given reviewer isn't instance of specified reviewer class when this graph is constructed.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L392-L409
null
class BipartiteGraph(object): """Bipartite graph model for review data mining. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: :class:`ria.credibility.WeightedCredibility`) reviewer: Class of reviewers. product: Class of products. Attributes: alpha: Parameter. graph: Graph object of networkx. reviewers: Collection of reviewers. products: Collection of products. credibility: Credibility object. """ def __init__( self, summary=AverageSummary, alpha=1, credibility=WeightedCredibility, reviewer=Reviewer, product=Product): """Construct bipartite graph. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: WeightedCredibility) reviewer: Class of reviewers. product: Class of products. """ self.alpha = alpha self.graph = nx.DiGraph() self.reviewers = [] self.products = [] self._summary_cls = summary self._review_cls = summary.review_class() self.credibility = credibility(self) self._reviewer_cls = reviewer self._product_cls = product def new_reviewer(self, name, anomalous=None): """Create a new reviewer. Args: name: name of the new reviewer. anomalous: initial anomalous score. (default: None) Returns: A new reviewer instance. """ n = self._reviewer_cls( self, name=name, credibility=self.credibility, anomalous=anomalous) self.graph.add_node(n) self.reviewers.append(n) return n def new_product(self, name): """Create a new product. Args: name: name of the new product. Returns: A new product instance. """ n = self._product_cls(self, name, summary_cls=self._summary_cls) self.graph.add_node(n) self.products.append(n) return n def add_review(self, reviewer, product, review, date=None): """Add a new review from a given reviewer to a given product. 
Args: reviewer: an instance of Reviewer. product: an instance of Product. review: a float value. date: date the review issued. Returns: the added new review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) r = self._review_cls(review, date=date) self.graph.add_edge(reviewer, product, review=r) return r @memoized @memoized def retrieve_reviewers(self, product): """Retrieve reviewers who reviewed a given product. Args: product: A product specifying reviewers. Returns: A list of reviewers who review the product. Raises: TypeError: when given product isn't instance of specified product class when this graph is constructed. """ if not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) return list(self.graph.predecessors(product)) @memoized def retrieve_review(self, reviewer, product): """Retrieve review that the given reviewer put the given product. Args: reviewer: An instance of Reviewer. product: An instance of Product. Returns: A review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. KeyError: When the reviewer does not review the product. 
""" if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) try: return self.graph[reviewer][product]["review"] except TypeError: raise KeyError( "{0} does not review {1}.".format(reviewer, product)) def update(self): """Update reviewers' anomalous scores and products' summaries. Returns: maximum absolute difference between old summary and new one, and old anomalous score and new one. """ w = self._weight_generator(self.reviewers) diff_p = max(p.update_summary(w) for p in self.products) diff_a = max(r.update_anomalous_score() for r in self.reviewers) return max(diff_p, diff_a) def _weight_generator(self, reviewers): """Compute a weight function for the given reviewers. Args: reviewers: a set of reviewers to compute weight function. Returns: a function computing a weight for a reviewer. """ scores = [r.anomalous_score for r in reviewers] mu = np.average(scores) sigma = np.std(scores) if sigma: def w(v): """Compute a weight for the given reviewer. Args: v: anomalous score of a reviewer. Returns: weight of the given anomalous score. """ try: exp = math.exp(self.alpha * (v - mu) / sigma) return 1. / (1. + exp) except OverflowError: return 0. return w else: # Sigma = 0 means all reviews have same anomalous scores. # In this case, all reviews should be treated as same. return lambda v: 1. def dump_credibilities(self, output): """Dump credibilities of all products. Args: output: a writable object. """ for p in self.products: json.dump({ "product_id": p.name, "credibility": self.credibility(p) }, output) output.write("\n") def to_pydot(self): """Convert this graph to PyDot object. Returns: PyDot object representing this graph. """ return nx.nx_pydot.to_pydot(self.graph)
rgmining/ria
ria/bipartite.py
BipartiteGraph.retrieve_reviewers
python
def retrieve_reviewers(self, product): if not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) return list(self.graph.predecessors(product))
Retrieve reviewers who reviewed a given product. Args: product: A product specifying reviewers. Returns: A list of reviewers who review the product. Raises: TypeError: when given product isn't instance of specified product class when this graph is constructed.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L412-L429
null
class BipartiteGraph(object): """Bipartite graph model for review data mining. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: :class:`ria.credibility.WeightedCredibility`) reviewer: Class of reviewers. product: Class of products. Attributes: alpha: Parameter. graph: Graph object of networkx. reviewers: Collection of reviewers. products: Collection of products. credibility: Credibility object. """ def __init__( self, summary=AverageSummary, alpha=1, credibility=WeightedCredibility, reviewer=Reviewer, product=Product): """Construct bipartite graph. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: WeightedCredibility) reviewer: Class of reviewers. product: Class of products. """ self.alpha = alpha self.graph = nx.DiGraph() self.reviewers = [] self.products = [] self._summary_cls = summary self._review_cls = summary.review_class() self.credibility = credibility(self) self._reviewer_cls = reviewer self._product_cls = product def new_reviewer(self, name, anomalous=None): """Create a new reviewer. Args: name: name of the new reviewer. anomalous: initial anomalous score. (default: None) Returns: A new reviewer instance. """ n = self._reviewer_cls( self, name=name, credibility=self.credibility, anomalous=anomalous) self.graph.add_node(n) self.reviewers.append(n) return n def new_product(self, name): """Create a new product. Args: name: name of the new product. Returns: A new product instance. """ n = self._product_cls(self, name, summary_cls=self._summary_cls) self.graph.add_node(n) self.products.append(n) return n def add_review(self, reviewer, product, review, date=None): """Add a new review from a given reviewer to a given product. 
Args: reviewer: an instance of Reviewer. product: an instance of Product. review: a float value. date: date the review issued. Returns: the added new review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) r = self._review_cls(review, date=date) self.graph.add_edge(reviewer, product, review=r) return r @memoized def retrieve_products(self, reviewer): """Retrieve products reviewed by a given reviewer. Args: reviewer: A reviewer. Returns: A list of products which the reviewer reviews. Raises: TypeError: when given reviewer isn't instance of specified reviewer class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) return list(self.graph.successors(reviewer)) @memoized @memoized def retrieve_review(self, reviewer, product): """Retrieve review that the given reviewer put the given product. Args: reviewer: An instance of Reviewer. product: An instance of Product. Returns: A review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. KeyError: When the reviewer does not review the product. 
""" if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) try: return self.graph[reviewer][product]["review"] except TypeError: raise KeyError( "{0} does not review {1}.".format(reviewer, product)) def update(self): """Update reviewers' anomalous scores and products' summaries. Returns: maximum absolute difference between old summary and new one, and old anomalous score and new one. """ w = self._weight_generator(self.reviewers) diff_p = max(p.update_summary(w) for p in self.products) diff_a = max(r.update_anomalous_score() for r in self.reviewers) return max(diff_p, diff_a) def _weight_generator(self, reviewers): """Compute a weight function for the given reviewers. Args: reviewers: a set of reviewers to compute weight function. Returns: a function computing a weight for a reviewer. """ scores = [r.anomalous_score for r in reviewers] mu = np.average(scores) sigma = np.std(scores) if sigma: def w(v): """Compute a weight for the given reviewer. Args: v: anomalous score of a reviewer. Returns: weight of the given anomalous score. """ try: exp = math.exp(self.alpha * (v - mu) / sigma) return 1. / (1. + exp) except OverflowError: return 0. return w else: # Sigma = 0 means all reviews have same anomalous scores. # In this case, all reviews should be treated as same. return lambda v: 1. def dump_credibilities(self, output): """Dump credibilities of all products. Args: output: a writable object. """ for p in self.products: json.dump({ "product_id": p.name, "credibility": self.credibility(p) }, output) output.write("\n") def to_pydot(self): """Convert this graph to PyDot object. Returns: PyDot object representing this graph. """ return nx.nx_pydot.to_pydot(self.graph)
rgmining/ria
ria/bipartite.py
BipartiteGraph.retrieve_review
python
def retrieve_review(self, reviewer, product): if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) try: return self.graph[reviewer][product]["review"] except TypeError: raise KeyError( "{0} does not review {1}.".format(reviewer, product))
Retrieve review that the given reviewer put the given product. Args: reviewer: An instance of Reviewer. product: An instance of Product. Returns: A review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. KeyError: When the reviewer does not review the product.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L432-L460
null
class BipartiteGraph(object): """Bipartite graph model for review data mining. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: :class:`ria.credibility.WeightedCredibility`) reviewer: Class of reviewers. product: Class of products. Attributes: alpha: Parameter. graph: Graph object of networkx. reviewers: Collection of reviewers. products: Collection of products. credibility: Credibility object. """ def __init__( self, summary=AverageSummary, alpha=1, credibility=WeightedCredibility, reviewer=Reviewer, product=Product): """Construct bipartite graph. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: WeightedCredibility) reviewer: Class of reviewers. product: Class of products. """ self.alpha = alpha self.graph = nx.DiGraph() self.reviewers = [] self.products = [] self._summary_cls = summary self._review_cls = summary.review_class() self.credibility = credibility(self) self._reviewer_cls = reviewer self._product_cls = product def new_reviewer(self, name, anomalous=None): """Create a new reviewer. Args: name: name of the new reviewer. anomalous: initial anomalous score. (default: None) Returns: A new reviewer instance. """ n = self._reviewer_cls( self, name=name, credibility=self.credibility, anomalous=anomalous) self.graph.add_node(n) self.reviewers.append(n) return n def new_product(self, name): """Create a new product. Args: name: name of the new product. Returns: A new product instance. """ n = self._product_cls(self, name, summary_cls=self._summary_cls) self.graph.add_node(n) self.products.append(n) return n def add_review(self, reviewer, product, review, date=None): """Add a new review from a given reviewer to a given product. 
Args: reviewer: an instance of Reviewer. product: an instance of Product. review: a float value. date: date the review issued. Returns: the added new review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) r = self._review_cls(review, date=date) self.graph.add_edge(reviewer, product, review=r) return r @memoized def retrieve_products(self, reviewer): """Retrieve products reviewed by a given reviewer. Args: reviewer: A reviewer. Returns: A list of products which the reviewer reviews. Raises: TypeError: when given reviewer isn't instance of specified reviewer class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) return list(self.graph.successors(reviewer)) @memoized def retrieve_reviewers(self, product): """Retrieve reviewers who reviewed a given product. Args: product: A product specifying reviewers. Returns: A list of reviewers who review the product. Raises: TypeError: when given product isn't instance of specified product class when this graph is constructed. """ if not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) return list(self.graph.predecessors(product)) @memoized def update(self): """Update reviewers' anomalous scores and products' summaries. Returns: maximum absolute difference between old summary and new one, and old anomalous score and new one. 
""" w = self._weight_generator(self.reviewers) diff_p = max(p.update_summary(w) for p in self.products) diff_a = max(r.update_anomalous_score() for r in self.reviewers) return max(diff_p, diff_a) def _weight_generator(self, reviewers): """Compute a weight function for the given reviewers. Args: reviewers: a set of reviewers to compute weight function. Returns: a function computing a weight for a reviewer. """ scores = [r.anomalous_score for r in reviewers] mu = np.average(scores) sigma = np.std(scores) if sigma: def w(v): """Compute a weight for the given reviewer. Args: v: anomalous score of a reviewer. Returns: weight of the given anomalous score. """ try: exp = math.exp(self.alpha * (v - mu) / sigma) return 1. / (1. + exp) except OverflowError: return 0. return w else: # Sigma = 0 means all reviews have same anomalous scores. # In this case, all reviews should be treated as same. return lambda v: 1. def dump_credibilities(self, output): """Dump credibilities of all products. Args: output: a writable object. """ for p in self.products: json.dump({ "product_id": p.name, "credibility": self.credibility(p) }, output) output.write("\n") def to_pydot(self): """Convert this graph to PyDot object. Returns: PyDot object representing this graph. """ return nx.nx_pydot.to_pydot(self.graph)
rgmining/ria
ria/bipartite.py
BipartiteGraph.update
python
def update(self): w = self._weight_generator(self.reviewers) diff_p = max(p.update_summary(w) for p in self.products) diff_a = max(r.update_anomalous_score() for r in self.reviewers) return max(diff_p, diff_a)
Update reviewers' anomalous scores and products' summaries. Returns: maximum absolute difference between old summary and new one, and old anomalous score and new one.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L462-L472
[ "def _weight_generator(self, reviewers):\n \"\"\"Compute a weight function for the given reviewers.\n\n Args:\n reviewers: a set of reviewers to compute weight function.\n\n Returns:\n a function computing a weight for a reviewer.\n \"\"\"\n scores = [r.anomalous_score for r in reviewers]\n mu = np.average(scores)\n sigma = np.std(scores)\n\n if sigma:\n def w(v):\n \"\"\"Compute a weight for the given reviewer.\n\n Args:\n v: anomalous score of a reviewer.\n Returns:\n weight of the given anomalous score.\n \"\"\"\n try:\n exp = math.exp(self.alpha * (v - mu) / sigma)\n return 1. / (1. + exp)\n except OverflowError:\n return 0.\n\n return w\n\n else:\n # Sigma = 0 means all reviews have same anomalous scores.\n # In this case, all reviews should be treated as same.\n return lambda v: 1.\n" ]
class BipartiteGraph(object): """Bipartite graph model for review data mining. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: :class:`ria.credibility.WeightedCredibility`) reviewer: Class of reviewers. product: Class of products. Attributes: alpha: Parameter. graph: Graph object of networkx. reviewers: Collection of reviewers. products: Collection of products. credibility: Credibility object. """ def __init__( self, summary=AverageSummary, alpha=1, credibility=WeightedCredibility, reviewer=Reviewer, product=Product): """Construct bipartite graph. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: WeightedCredibility) reviewer: Class of reviewers. product: Class of products. """ self.alpha = alpha self.graph = nx.DiGraph() self.reviewers = [] self.products = [] self._summary_cls = summary self._review_cls = summary.review_class() self.credibility = credibility(self) self._reviewer_cls = reviewer self._product_cls = product def new_reviewer(self, name, anomalous=None): """Create a new reviewer. Args: name: name of the new reviewer. anomalous: initial anomalous score. (default: None) Returns: A new reviewer instance. """ n = self._reviewer_cls( self, name=name, credibility=self.credibility, anomalous=anomalous) self.graph.add_node(n) self.reviewers.append(n) return n def new_product(self, name): """Create a new product. Args: name: name of the new product. Returns: A new product instance. """ n = self._product_cls(self, name, summary_cls=self._summary_cls) self.graph.add_node(n) self.products.append(n) return n def add_review(self, reviewer, product, review, date=None): """Add a new review from a given reviewer to a given product. 
Args: reviewer: an instance of Reviewer. product: an instance of Product. review: a float value. date: date the review issued. Returns: the added new review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) r = self._review_cls(review, date=date) self.graph.add_edge(reviewer, product, review=r) return r @memoized def retrieve_products(self, reviewer): """Retrieve products reviewed by a given reviewer. Args: reviewer: A reviewer. Returns: A list of products which the reviewer reviews. Raises: TypeError: when given reviewer isn't instance of specified reviewer class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) return list(self.graph.successors(reviewer)) @memoized def retrieve_reviewers(self, product): """Retrieve reviewers who reviewed a given product. Args: product: A product specifying reviewers. Returns: A list of reviewers who review the product. Raises: TypeError: when given product isn't instance of specified product class when this graph is constructed. """ if not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) return list(self.graph.predecessors(product)) @memoized def retrieve_review(self, reviewer, product): """Retrieve review that the given reviewer put the given product. Args: reviewer: An instance of Reviewer. product: An instance of Product. Returns: A review object. 
Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. KeyError: When the reviewer does not review the product. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) try: return self.graph[reviewer][product]["review"] except TypeError: raise KeyError( "{0} does not review {1}.".format(reviewer, product)) def _weight_generator(self, reviewers): """Compute a weight function for the given reviewers. Args: reviewers: a set of reviewers to compute weight function. Returns: a function computing a weight for a reviewer. """ scores = [r.anomalous_score for r in reviewers] mu = np.average(scores) sigma = np.std(scores) if sigma: def w(v): """Compute a weight for the given reviewer. Args: v: anomalous score of a reviewer. Returns: weight of the given anomalous score. """ try: exp = math.exp(self.alpha * (v - mu) / sigma) return 1. / (1. + exp) except OverflowError: return 0. return w else: # Sigma = 0 means all reviews have same anomalous scores. # In this case, all reviews should be treated as same. return lambda v: 1. def dump_credibilities(self, output): """Dump credibilities of all products. Args: output: a writable object. """ for p in self.products: json.dump({ "product_id": p.name, "credibility": self.credibility(p) }, output) output.write("\n") def to_pydot(self): """Convert this graph to PyDot object. Returns: PyDot object representing this graph. """ return nx.nx_pydot.to_pydot(self.graph)
rgmining/ria
ria/bipartite.py
BipartiteGraph._weight_generator
python
def _weight_generator(self, reviewers): scores = [r.anomalous_score for r in reviewers] mu = np.average(scores) sigma = np.std(scores) if sigma: def w(v): """Compute a weight for the given reviewer. Args: v: anomalous score of a reviewer. Returns: weight of the given anomalous score. """ try: exp = math.exp(self.alpha * (v - mu) / sigma) return 1. / (1. + exp) except OverflowError: return 0. return w else: # Sigma = 0 means all reviews have same anomalous scores. # In this case, all reviews should be treated as same. return lambda v: 1.
Compute a weight function for the given reviewers. Args: reviewers: a set of reviewers to compute weight function. Returns: a function computing a weight for a reviewer.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L474-L507
null
class BipartiteGraph(object): """Bipartite graph model for review data mining. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: :class:`ria.credibility.WeightedCredibility`) reviewer: Class of reviewers. product: Class of products. Attributes: alpha: Parameter. graph: Graph object of networkx. reviewers: Collection of reviewers. products: Collection of products. credibility: Credibility object. """ def __init__( self, summary=AverageSummary, alpha=1, credibility=WeightedCredibility, reviewer=Reviewer, product=Product): """Construct bipartite graph. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: WeightedCredibility) reviewer: Class of reviewers. product: Class of products. """ self.alpha = alpha self.graph = nx.DiGraph() self.reviewers = [] self.products = [] self._summary_cls = summary self._review_cls = summary.review_class() self.credibility = credibility(self) self._reviewer_cls = reviewer self._product_cls = product def new_reviewer(self, name, anomalous=None): """Create a new reviewer. Args: name: name of the new reviewer. anomalous: initial anomalous score. (default: None) Returns: A new reviewer instance. """ n = self._reviewer_cls( self, name=name, credibility=self.credibility, anomalous=anomalous) self.graph.add_node(n) self.reviewers.append(n) return n def new_product(self, name): """Create a new product. Args: name: name of the new product. Returns: A new product instance. """ n = self._product_cls(self, name, summary_cls=self._summary_cls) self.graph.add_node(n) self.products.append(n) return n def add_review(self, reviewer, product, review, date=None): """Add a new review from a given reviewer to a given product. 
Args: reviewer: an instance of Reviewer. product: an instance of Product. review: a float value. date: date the review issued. Returns: the added new review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) r = self._review_cls(review, date=date) self.graph.add_edge(reviewer, product, review=r) return r @memoized def retrieve_products(self, reviewer): """Retrieve products reviewed by a given reviewer. Args: reviewer: A reviewer. Returns: A list of products which the reviewer reviews. Raises: TypeError: when given reviewer isn't instance of specified reviewer class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) return list(self.graph.successors(reviewer)) @memoized def retrieve_reviewers(self, product): """Retrieve reviewers who reviewed a given product. Args: product: A product specifying reviewers. Returns: A list of reviewers who review the product. Raises: TypeError: when given product isn't instance of specified product class when this graph is constructed. """ if not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) return list(self.graph.predecessors(product)) @memoized def retrieve_review(self, reviewer, product): """Retrieve review that the given reviewer put the given product. Args: reviewer: An instance of Reviewer. product: An instance of Product. Returns: A review object. 
Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. KeyError: When the reviewer does not review the product. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) try: return self.graph[reviewer][product]["review"] except TypeError: raise KeyError( "{0} does not review {1}.".format(reviewer, product)) def update(self): """Update reviewers' anomalous scores and products' summaries. Returns: maximum absolute difference between old summary and new one, and old anomalous score and new one. """ w = self._weight_generator(self.reviewers) diff_p = max(p.update_summary(w) for p in self.products) diff_a = max(r.update_anomalous_score() for r in self.reviewers) return max(diff_p, diff_a) def dump_credibilities(self, output): """Dump credibilities of all products. Args: output: a writable object. """ for p in self.products: json.dump({ "product_id": p.name, "credibility": self.credibility(p) }, output) output.write("\n") def to_pydot(self): """Convert this graph to PyDot object. Returns: PyDot object representing this graph. """ return nx.nx_pydot.to_pydot(self.graph)
rgmining/ria
ria/bipartite.py
BipartiteGraph.dump_credibilities
python
def dump_credibilities(self, output): for p in self.products: json.dump({ "product_id": p.name, "credibility": self.credibility(p) }, output) output.write("\n")
Dump credibilities of all products. Args: output: a writable object.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L509-L520
null
class BipartiteGraph(object): """Bipartite graph model for review data mining. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: :class:`ria.credibility.WeightedCredibility`) reviewer: Class of reviewers. product: Class of products. Attributes: alpha: Parameter. graph: Graph object of networkx. reviewers: Collection of reviewers. products: Collection of products. credibility: Credibility object. """ def __init__( self, summary=AverageSummary, alpha=1, credibility=WeightedCredibility, reviewer=Reviewer, product=Product): """Construct bipartite graph. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: WeightedCredibility) reviewer: Class of reviewers. product: Class of products. """ self.alpha = alpha self.graph = nx.DiGraph() self.reviewers = [] self.products = [] self._summary_cls = summary self._review_cls = summary.review_class() self.credibility = credibility(self) self._reviewer_cls = reviewer self._product_cls = product def new_reviewer(self, name, anomalous=None): """Create a new reviewer. Args: name: name of the new reviewer. anomalous: initial anomalous score. (default: None) Returns: A new reviewer instance. """ n = self._reviewer_cls( self, name=name, credibility=self.credibility, anomalous=anomalous) self.graph.add_node(n) self.reviewers.append(n) return n def new_product(self, name): """Create a new product. Args: name: name of the new product. Returns: A new product instance. """ n = self._product_cls(self, name, summary_cls=self._summary_cls) self.graph.add_node(n) self.products.append(n) return n def add_review(self, reviewer, product, review, date=None): """Add a new review from a given reviewer to a given product. 
Args: reviewer: an instance of Reviewer. product: an instance of Product. review: a float value. date: date the review issued. Returns: the added new review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) r = self._review_cls(review, date=date) self.graph.add_edge(reviewer, product, review=r) return r @memoized def retrieve_products(self, reviewer): """Retrieve products reviewed by a given reviewer. Args: reviewer: A reviewer. Returns: A list of products which the reviewer reviews. Raises: TypeError: when given reviewer isn't instance of specified reviewer class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) return list(self.graph.successors(reviewer)) @memoized def retrieve_reviewers(self, product): """Retrieve reviewers who reviewed a given product. Args: product: A product specifying reviewers. Returns: A list of reviewers who review the product. Raises: TypeError: when given product isn't instance of specified product class when this graph is constructed. """ if not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) return list(self.graph.predecessors(product)) @memoized def retrieve_review(self, reviewer, product): """Retrieve review that the given reviewer put the given product. Args: reviewer: An instance of Reviewer. product: An instance of Product. Returns: A review object. 
Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. KeyError: When the reviewer does not review the product. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) try: return self.graph[reviewer][product]["review"] except TypeError: raise KeyError( "{0} does not review {1}.".format(reviewer, product)) def update(self): """Update reviewers' anomalous scores and products' summaries. Returns: maximum absolute difference between old summary and new one, and old anomalous score and new one. """ w = self._weight_generator(self.reviewers) diff_p = max(p.update_summary(w) for p in self.products) diff_a = max(r.update_anomalous_score() for r in self.reviewers) return max(diff_p, diff_a) def _weight_generator(self, reviewers): """Compute a weight function for the given reviewers. Args: reviewers: a set of reviewers to compute weight function. Returns: a function computing a weight for a reviewer. """ scores = [r.anomalous_score for r in reviewers] mu = np.average(scores) sigma = np.std(scores) if sigma: def w(v): """Compute a weight for the given reviewer. Args: v: anomalous score of a reviewer. Returns: weight of the given anomalous score. """ try: exp = math.exp(self.alpha * (v - mu) / sigma) return 1. / (1. + exp) except OverflowError: return 0. return w else: # Sigma = 0 means all reviews have same anomalous scores. # In this case, all reviews should be treated as same. return lambda v: 1. def to_pydot(self): """Convert this graph to PyDot object. Returns: PyDot object representing this graph. """ return nx.nx_pydot.to_pydot(self.graph)
rgmining/ria
ria/credibility.py
GraphBasedCredibility.review_score
python
def review_score(self, reviewer, product): return self._g.retrieve_review(reviewer, product).score
Find a review score from a given reviewer to a product. Args: reviewer: Reviewer i.e. an instance of :class:`ria.bipartite.Reviewer`. product: Product i.e. an instance of :class:`ria.bipartite.Product`. Returns: A review object representing the review from the reviewer to the product.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/credibility.py#L109-L119
null
class GraphBasedCredibility(object): """Abstract class of credibility using a Bipartite graph. Args: g: A bipartite graph instance. This class provides two helper methods; :meth:`reviewers` and :meth:`review_score`. """ __slots__ = ("_g") def __init__(self, g): """Construct a GraphBasedCredibility with a given graph instance g. Args: g: A bipartite graph instance. """ self._g = g def __call__(self, product): """Compute credibility of a given product. Args: product: An instance of :class:`ria.bipartite.Product`. """ raise NotImplementedError def reviewers(self, product): """Find reviewers who have reviewed a given product. Args: product: An instance of :class:`ria.bipartite.Product`. Returns: A list of reviewers who have reviewed the product. """ return self._g.retrieve_reviewers(product)
rgmining/ria
ria/bipartite_sum.py
Reviewer.update_anomalous_score
python
def update_anomalous_score(self): old = self.anomalous_score products = self._graph.retrieve_products(self) self.anomalous_score = sum( p.summary.difference( self._graph.retrieve_review(self, p)) * self._credibility(p) - 0.5 for p in products ) return abs(self.anomalous_score - old)
Update anomalous score. New anomalous score is the summation of weighted differences between current summary and reviews. The weights come from credibilities. Therefore, the new anomalous score is defined as .. math:: {\\rm anomalous}(r) = \\sum_{p \\in P} \\mbox{review}(p) \\times \\mbox{credibility}(p) - 0.5 where :math:`P` is a set of products reviewed by this reviewer, review(:math:`p`) and credibility(:math:`p`) are review and credibility of product :math:`p`, respectively. Returns: absolute difference between old anomalous score and updated one.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite_sum.py#L38-L67
null
class Reviewer(bipartite.Reviewer): """Reviewer which uses normalized summations for updated anomalous scores. This reviewer will update its anomalous score by computing summation of partial anomalous scores instead of using a weighted average. """ __slots__ = ()
rgmining/ria
ria/bipartite_sum.py
BipartiteGraph.update
python
def update(self): res = super(BipartiteGraph, self).update() max_v = None min_v = float("inf") for r in self.reviewers: max_v = max(max_v, r.anomalous_score) min_v = min(min_v, r.anomalous_score) width = max_v - min_v if width: for r in self.reviewers: r.anomalous_score = (r.anomalous_score - min_v) / width return res
Update reviewers' anomalous scores and products' summaries. The update consists of 2 steps; Step1 (updating summaries): Update summaries of products with anomalous scores of reviewers and weight function. The weight is calculated by the manner in :class:`ria.bipartite.BipartiteGraph`. Step2 (updating anomalous scores): Update its anomalous score of each reviewer by computing the summation of deviation times credibility. See :meth:`Reviewer.update_anomalous_score` for more details. After that those updated anomalous scores are normalized so that every value is in :math:`[0, 1]`. Returns: maximum absolute difference between old summary and new one, and old anomalous score and new one. This value is not normalized and thus it may be grater than actual normalized difference.
train
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite_sum.py#L85-L120
[ "def update(self):\n \"\"\"Update reviewers' anomalous scores and products' summaries.\n\n Returns:\n maximum absolute difference between old summary and new one, and\n old anomalous score and new one.\n \"\"\"\n w = self._weight_generator(self.reviewers)\n diff_p = max(p.update_summary(w) for p in self.products)\n diff_a = max(r.update_anomalous_score() for r in self.reviewers)\n return max(diff_p, diff_a)\n" ]
class BipartiteGraph(bipartite.BipartiteGraph): """Bipartite Graph implementing OneSum algorithm. This graph employs a normalized summation of deviation times credibility as the undated anomalous scores for each reviewer. Constructor receives as same arguments as :class:`ria.bipartite.BipartiteGraph` but `reviewer` argument is ignored since this graph uses :class:`ria.bipartite_sum.Reviewer` instead. """ def __init__(self, **kwargs): kwargs["reviewer"] = Reviewer super(BipartiteGraph, self).__init__(**kwargs)
mattbierner/blotre-py
blotre.py
create_disposable
python
def create_disposable(clientInfo, config = {}): response = requests.put( _format_url( _extend(DEFAULT_CONFIG, config), OAUTH2_ROOT + 'disposable'), json = clientInfo) if response.status_code != 200: return None else: body = response.json() return Blotre({ 'client_id': body['id'], 'client_secret': body['secret'], 'code': body['code'] }, config = config)
Create a new disposable client.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L328-L346
[ "def _extend(dict1, dict2):\n extended = dict1.copy()\n extended.update(dict2)\n return extended\n", "def _format_url(config, relPath, query={}):\n return urlunparse((\n config.get('protocol'),\n config.get('host'),\n relPath,\n '',\n urlencode(query),\n ''))\n" ]
import json import os import re import requests try: from urllib import urlencode from urlparse import urlunparse except ImportError: from urllib.parse import urlencode from urllib.parse import urlunparse _JSON_HEADERS = { 'accepts': 'application/json', 'content-type': 'application/json' } ROOT = '/v0/' API_ROOT = ROOT + 'api/' OAUTH2_ROOT = ROOT + 'oauth2/' DEFAULT_CONFIG = { 'protocol': 'https', 'host': 'blot.re', } def _extend(dict1, dict2): extended = dict1.copy() extended.update(dict2) return extended class TokenEndpointError(Exception): """ Error communicating with the token endpoint. """ def __init__(self, error, error_description): self.error = error self.error_description = error_description super(TokenEndpointError, self).__init__( "[%s] %s" % (self.error, self.error_description)) def _token_error_from_data(data): return TokenEndpointError( data.get('error', ''), data.get('error_description', '')) class RestError(Exception): """ Error response from one of the REST APIS. """ def __init__(self, status_code, error_description, details): self.status_code = status_code self.error_description = error_description self.details = details super(RestError, self).__init__( "[%s] %s" % (self.status_code, self.error_description)) def _is_error_response(body): return body.get('type', '') == 'Error' or 'error' in body def _rest_error_from_response(response): body = response.json() return RestError( response.status_code, body['error'], body.get('details', None)) def _format_url(config, relPath, query={}): return urlunparse(( config.get('protocol'), config.get('host'), relPath, '', urlencode(query), '')) class Blotre: """ Main Blot're flow object. 
""" def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def set_creds(self, newCreds): """Manually update the current creds.""" self.creds = newCreds self.on_creds_changed(newCreds) return self def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def normalize_uri(self, uri): """Convert a stream path into it's normalized form.""" return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'') def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_authorization_url(self): """Get the authorization Url for the current client.""" return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') }) def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. 
""" response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) def get_token_info(self): """ Get information about the current access token. """ response = requests.get( self._format_url(OAUTH2_ROOT + 'token_info', { 'token': self.creds['access_token'] })) data = response.json() if response.status_code != 200: raise _token_error_from_data(data) else: return data # Requests def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base def _is_expired_response(self, response): """ Check if the response failed because of an expired access token. 
""" if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. """ response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def get(self, path, query={}): """GET request.""" return self._make_request('get', self._format_url(API_ROOT + path, query=query), {}) def post(self, path, body): """POST request.""" return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body }) def put(self, path, body): """PUT request.""" return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): 
"""Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, options={}): """Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def get_child(self, streamId, childId, options={}): """Get the child of a stream.""" return self.get('stream/' + streamId + '/children/' + childId, options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag) # Basic Disposable Client def create_disposable(clientInfo, config = {}): """ Create a new disposable client. 
""" response = requests.put( _format_url( _extend(DEFAULT_CONFIG, config), OAUTH2_ROOT + 'disposable'), json = clientInfo) if response.status_code != 200: return None else: body = response.json() return Blotre({ 'client_id': body['id'], 'client_secret': body['secret'], 'code': body['code'] }, config = config) # Disposable App class _BlotreDisposableApp(Blotre): def __init__(self, file, client, **kwargs): Blotre.__init__(self, client, **kwargs) self.file = file self._persist() def on_creds_changed(self, newCreds): self._persist() def _persist(self): """Persist client data.""" with open(self.file, 'w') as f: json.dump({ 'client': self.client, 'creds': self.creds, 'config': self.config }, f) def _get_disposable_app_filename(clientInfo): """ Get name of file used to store creds. """ return clientInfo.get('file', clientInfo['name'] + '.client_data.json') def _get_existing_disposable_app(file, clientInfo, conf): """ Attempt to load an existing """ if not os.path.isfile(file): return None else: data = None with open(file, 'r') as f: data = json.load(f) if not 'client' in data or not 'creds' in data: return None return _BlotreDisposableApp(file, data['client'], creds = data['creds'], config = conf) def _try_redeem_disposable_app(file, client): """ Attempt to redeem a one time code registred on the client. """ redeemedClient = client.redeem_onetime_code(None) if redeemedClient is None: return None else: return _BlotreDisposableApp(file, redeemedClient.client, creds = redeemedClient.creds, config = redeemedClient.config) def _create_new_disposable_app(file, clientInfo, config): client = create_disposable(clientInfo, config = config) if client is None: return None code = client.client['code'] userInput = raw_input("Please redeem disposable code: " + code + '\n') return _try_redeem_disposable_app(file, client) def _check_app_is_valid(client): """ Check to see if the app has valid creds. 
""" try: if 'refresh_token' in client.creds: client.exchange_refresh_token() else: existing.get_token_info() return True except TokenEndpointError as e: return False def create_disposable_app(clientInfo, config={}): """ Use an existing disposable app if data exists or create a new one and persist the data. """ file = _get_disposable_app_filename(clientInfo) existing = _get_existing_disposable_app(file, clientInfo, config) if existing: if _check_app_is_valid(existing): return existing else: print("Existing client has expired, must recreate.") return _create_new_disposable_app(file, clientInfo, config)
mattbierner/blotre-py
blotre.py
_get_existing_disposable_app
python
def _get_existing_disposable_app(file, clientInfo, conf): if not os.path.isfile(file): return None else: data = None with open(file, 'r') as f: data = json.load(f) if not 'client' in data or not 'creds' in data: return None return _BlotreDisposableApp(file, data['client'], creds = data['creds'], config = conf)
Attempt to load an existing
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L373-L388
null
import json import os import re import requests try: from urllib import urlencode from urlparse import urlunparse except ImportError: from urllib.parse import urlencode from urllib.parse import urlunparse _JSON_HEADERS = { 'accepts': 'application/json', 'content-type': 'application/json' } ROOT = '/v0/' API_ROOT = ROOT + 'api/' OAUTH2_ROOT = ROOT + 'oauth2/' DEFAULT_CONFIG = { 'protocol': 'https', 'host': 'blot.re', } def _extend(dict1, dict2): extended = dict1.copy() extended.update(dict2) return extended class TokenEndpointError(Exception): """ Error communicating with the token endpoint. """ def __init__(self, error, error_description): self.error = error self.error_description = error_description super(TokenEndpointError, self).__init__( "[%s] %s" % (self.error, self.error_description)) def _token_error_from_data(data): return TokenEndpointError( data.get('error', ''), data.get('error_description', '')) class RestError(Exception): """ Error response from one of the REST APIS. """ def __init__(self, status_code, error_description, details): self.status_code = status_code self.error_description = error_description self.details = details super(RestError, self).__init__( "[%s] %s" % (self.status_code, self.error_description)) def _is_error_response(body): return body.get('type', '') == 'Error' or 'error' in body def _rest_error_from_response(response): body = response.json() return RestError( response.status_code, body['error'], body.get('details', None)) def _format_url(config, relPath, query={}): return urlunparse(( config.get('protocol'), config.get('host'), relPath, '', urlencode(query), '')) class Blotre: """ Main Blot're flow object. 
""" def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def set_creds(self, newCreds): """Manually update the current creds.""" self.creds = newCreds self.on_creds_changed(newCreds) return self def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def normalize_uri(self, uri): """Convert a stream path into it's normalized form.""" return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'') def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_authorization_url(self): """Get the authorization Url for the current client.""" return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') }) def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. 
""" response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) def get_token_info(self): """ Get information about the current access token. """ response = requests.get( self._format_url(OAUTH2_ROOT + 'token_info', { 'token': self.creds['access_token'] })) data = response.json() if response.status_code != 200: raise _token_error_from_data(data) else: return data # Requests def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base def _is_expired_response(self, response): """ Check if the response failed because of an expired access token. 
""" if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. """ response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def get(self, path, query={}): """GET request.""" return self._make_request('get', self._format_url(API_ROOT + path, query=query), {}) def post(self, path, body): """POST request.""" return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body }) def put(self, path, body): """PUT request.""" return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): 
"""Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, options={}): """Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def get_child(self, streamId, childId, options={}): """Get the child of a stream.""" return self.get('stream/' + streamId + '/children/' + childId, options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag) # Basic Disposable Client def create_disposable(clientInfo, config = {}): """ Create a new disposable client. 
""" response = requests.put( _format_url( _extend(DEFAULT_CONFIG, config), OAUTH2_ROOT + 'disposable'), json = clientInfo) if response.status_code != 200: return None else: body = response.json() return Blotre({ 'client_id': body['id'], 'client_secret': body['secret'], 'code': body['code'] }, config = config) # Disposable App class _BlotreDisposableApp(Blotre): def __init__(self, file, client, **kwargs): Blotre.__init__(self, client, **kwargs) self.file = file self._persist() def on_creds_changed(self, newCreds): self._persist() def _persist(self): """Persist client data.""" with open(self.file, 'w') as f: json.dump({ 'client': self.client, 'creds': self.creds, 'config': self.config }, f) def _get_disposable_app_filename(clientInfo): """ Get name of file used to store creds. """ return clientInfo.get('file', clientInfo['name'] + '.client_data.json') def _try_redeem_disposable_app(file, client): """ Attempt to redeem a one time code registred on the client. """ redeemedClient = client.redeem_onetime_code(None) if redeemedClient is None: return None else: return _BlotreDisposableApp(file, redeemedClient.client, creds = redeemedClient.creds, config = redeemedClient.config) def _create_new_disposable_app(file, clientInfo, config): client = create_disposable(clientInfo, config = config) if client is None: return None code = client.client['code'] userInput = raw_input("Please redeem disposable code: " + code + '\n') return _try_redeem_disposable_app(file, client) def _check_app_is_valid(client): """ Check to see if the app has valid creds. """ try: if 'refresh_token' in client.creds: client.exchange_refresh_token() else: existing.get_token_info() return True except TokenEndpointError as e: return False def create_disposable_app(clientInfo, config={}): """ Use an existing disposable app if data exists or create a new one and persist the data. 
""" file = _get_disposable_app_filename(clientInfo) existing = _get_existing_disposable_app(file, clientInfo, config) if existing: if _check_app_is_valid(existing): return existing else: print("Existing client has expired, must recreate.") return _create_new_disposable_app(file, clientInfo, config)
mattbierner/blotre-py
blotre.py
_try_redeem_disposable_app
python
def _try_redeem_disposable_app(file, client): redeemedClient = client.redeem_onetime_code(None) if redeemedClient is None: return None else: return _BlotreDisposableApp(file, redeemedClient.client, creds = redeemedClient.creds, config = redeemedClient.config)
Attempt to redeem a one time code registred on the client.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L390-L401
[ "def redeem_onetime_code(self, code):\n \"\"\"\n Attempt to exchange a onetime token for a new access token.\n If successful, update client to use these credentials.\n \"\"\"\n return self._access_token_endpoint(\n 'https://oauth2grant.blot.re/onetime_code', {\n 'code': code if code else self.client['code']\n })\n" ]
import json import os import re import requests try: from urllib import urlencode from urlparse import urlunparse except ImportError: from urllib.parse import urlencode from urllib.parse import urlunparse _JSON_HEADERS = { 'accepts': 'application/json', 'content-type': 'application/json' } ROOT = '/v0/' API_ROOT = ROOT + 'api/' OAUTH2_ROOT = ROOT + 'oauth2/' DEFAULT_CONFIG = { 'protocol': 'https', 'host': 'blot.re', } def _extend(dict1, dict2): extended = dict1.copy() extended.update(dict2) return extended class TokenEndpointError(Exception): """ Error communicating with the token endpoint. """ def __init__(self, error, error_description): self.error = error self.error_description = error_description super(TokenEndpointError, self).__init__( "[%s] %s" % (self.error, self.error_description)) def _token_error_from_data(data): return TokenEndpointError( data.get('error', ''), data.get('error_description', '')) class RestError(Exception): """ Error response from one of the REST APIS. """ def __init__(self, status_code, error_description, details): self.status_code = status_code self.error_description = error_description self.details = details super(RestError, self).__init__( "[%s] %s" % (self.status_code, self.error_description)) def _is_error_response(body): return body.get('type', '') == 'Error' or 'error' in body def _rest_error_from_response(response): body = response.json() return RestError( response.status_code, body['error'], body.get('details', None)) def _format_url(config, relPath, query={}): return urlunparse(( config.get('protocol'), config.get('host'), relPath, '', urlencode(query), '')) class Blotre: """ Main Blot're flow object. 
""" def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def set_creds(self, newCreds): """Manually update the current creds.""" self.creds = newCreds self.on_creds_changed(newCreds) return self def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def normalize_uri(self, uri): """Convert a stream path into it's normalized form.""" return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'') def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_authorization_url(self): """Get the authorization Url for the current client.""" return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') }) def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. 
""" response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) def get_token_info(self): """ Get information about the current access token. """ response = requests.get( self._format_url(OAUTH2_ROOT + 'token_info', { 'token': self.creds['access_token'] })) data = response.json() if response.status_code != 200: raise _token_error_from_data(data) else: return data # Requests def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base def _is_expired_response(self, response): """ Check if the response failed because of an expired access token. 
""" if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. """ response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def get(self, path, query={}): """GET request.""" return self._make_request('get', self._format_url(API_ROOT + path, query=query), {}) def post(self, path, body): """POST request.""" return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body }) def put(self, path, body): """PUT request.""" return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): 
"""Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, options={}): """Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def get_child(self, streamId, childId, options={}): """Get the child of a stream.""" return self.get('stream/' + streamId + '/children/' + childId, options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag) # Basic Disposable Client def create_disposable(clientInfo, config = {}): """ Create a new disposable client. 
""" response = requests.put( _format_url( _extend(DEFAULT_CONFIG, config), OAUTH2_ROOT + 'disposable'), json = clientInfo) if response.status_code != 200: return None else: body = response.json() return Blotre({ 'client_id': body['id'], 'client_secret': body['secret'], 'code': body['code'] }, config = config) # Disposable App class _BlotreDisposableApp(Blotre): def __init__(self, file, client, **kwargs): Blotre.__init__(self, client, **kwargs) self.file = file self._persist() def on_creds_changed(self, newCreds): self._persist() def _persist(self): """Persist client data.""" with open(self.file, 'w') as f: json.dump({ 'client': self.client, 'creds': self.creds, 'config': self.config }, f) def _get_disposable_app_filename(clientInfo): """ Get name of file used to store creds. """ return clientInfo.get('file', clientInfo['name'] + '.client_data.json') def _get_existing_disposable_app(file, clientInfo, conf): """ Attempt to load an existing """ if not os.path.isfile(file): return None else: data = None with open(file, 'r') as f: data = json.load(f) if not 'client' in data or not 'creds' in data: return None return _BlotreDisposableApp(file, data['client'], creds = data['creds'], config = conf) def _create_new_disposable_app(file, clientInfo, config): client = create_disposable(clientInfo, config = config) if client is None: return None code = client.client['code'] userInput = raw_input("Please redeem disposable code: " + code + '\n') return _try_redeem_disposable_app(file, client) def _check_app_is_valid(client): """ Check to see if the app has valid creds. """ try: if 'refresh_token' in client.creds: client.exchange_refresh_token() else: existing.get_token_info() return True except TokenEndpointError as e: return False def create_disposable_app(clientInfo, config={}): """ Use an existing disposable app if data exists or create a new one and persist the data. 
""" file = _get_disposable_app_filename(clientInfo) existing = _get_existing_disposable_app(file, clientInfo, config) if existing: if _check_app_is_valid(existing): return existing else: print("Existing client has expired, must recreate.") return _create_new_disposable_app(file, clientInfo, config)
mattbierner/blotre-py
blotre.py
_check_app_is_valid
python
def _check_app_is_valid(client): try: if 'refresh_token' in client.creds: client.exchange_refresh_token() else: existing.get_token_info() return True except TokenEndpointError as e: return False
Check to see if the app has valid creds.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L411-L422
[ "def exchange_refresh_token(self):\n \"\"\"\n Attempt to exchange a refresh token for a new access token.\n If successful, update client to use these credentials.\n \"\"\"\n return self._access_token_endpoint('refresh_token', {\n 'refresh_token': self.creds['refresh_token']\n })\n" ]
import json import os import re import requests try: from urllib import urlencode from urlparse import urlunparse except ImportError: from urllib.parse import urlencode from urllib.parse import urlunparse _JSON_HEADERS = { 'accepts': 'application/json', 'content-type': 'application/json' } ROOT = '/v0/' API_ROOT = ROOT + 'api/' OAUTH2_ROOT = ROOT + 'oauth2/' DEFAULT_CONFIG = { 'protocol': 'https', 'host': 'blot.re', } def _extend(dict1, dict2): extended = dict1.copy() extended.update(dict2) return extended class TokenEndpointError(Exception): """ Error communicating with the token endpoint. """ def __init__(self, error, error_description): self.error = error self.error_description = error_description super(TokenEndpointError, self).__init__( "[%s] %s" % (self.error, self.error_description)) def _token_error_from_data(data): return TokenEndpointError( data.get('error', ''), data.get('error_description', '')) class RestError(Exception): """ Error response from one of the REST APIS. """ def __init__(self, status_code, error_description, details): self.status_code = status_code self.error_description = error_description self.details = details super(RestError, self).__init__( "[%s] %s" % (self.status_code, self.error_description)) def _is_error_response(body): return body.get('type', '') == 'Error' or 'error' in body def _rest_error_from_response(response): body = response.json() return RestError( response.status_code, body['error'], body.get('details', None)) def _format_url(config, relPath, query={}): return urlunparse(( config.get('protocol'), config.get('host'), relPath, '', urlencode(query), '')) class Blotre: """ Main Blot're flow object. 
""" def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def set_creds(self, newCreds): """Manually update the current creds.""" self.creds = newCreds self.on_creds_changed(newCreds) return self def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def normalize_uri(self, uri): """Convert a stream path into it's normalized form.""" return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'') def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_authorization_url(self): """Get the authorization Url for the current client.""" return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') }) def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. 
""" response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) def get_token_info(self): """ Get information about the current access token. """ response = requests.get( self._format_url(OAUTH2_ROOT + 'token_info', { 'token': self.creds['access_token'] })) data = response.json() if response.status_code != 200: raise _token_error_from_data(data) else: return data # Requests def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base def _is_expired_response(self, response): """ Check if the response failed because of an expired access token. 
""" if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. """ response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def get(self, path, query={}): """GET request.""" return self._make_request('get', self._format_url(API_ROOT + path, query=query), {}) def post(self, path, body): """POST request.""" return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body }) def put(self, path, body): """PUT request.""" return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): 
"""Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, options={}): """Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def get_child(self, streamId, childId, options={}): """Get the child of a stream.""" return self.get('stream/' + streamId + '/children/' + childId, options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag) # Basic Disposable Client def create_disposable(clientInfo, config = {}): """ Create a new disposable client. 
""" response = requests.put( _format_url( _extend(DEFAULT_CONFIG, config), OAUTH2_ROOT + 'disposable'), json = clientInfo) if response.status_code != 200: return None else: body = response.json() return Blotre({ 'client_id': body['id'], 'client_secret': body['secret'], 'code': body['code'] }, config = config) # Disposable App class _BlotreDisposableApp(Blotre): def __init__(self, file, client, **kwargs): Blotre.__init__(self, client, **kwargs) self.file = file self._persist() def on_creds_changed(self, newCreds): self._persist() def _persist(self): """Persist client data.""" with open(self.file, 'w') as f: json.dump({ 'client': self.client, 'creds': self.creds, 'config': self.config }, f) def _get_disposable_app_filename(clientInfo): """ Get name of file used to store creds. """ return clientInfo.get('file', clientInfo['name'] + '.client_data.json') def _get_existing_disposable_app(file, clientInfo, conf): """ Attempt to load an existing """ if not os.path.isfile(file): return None else: data = None with open(file, 'r') as f: data = json.load(f) if not 'client' in data or not 'creds' in data: return None return _BlotreDisposableApp(file, data['client'], creds = data['creds'], config = conf) def _try_redeem_disposable_app(file, client): """ Attempt to redeem a one time code registred on the client. """ redeemedClient = client.redeem_onetime_code(None) if redeemedClient is None: return None else: return _BlotreDisposableApp(file, redeemedClient.client, creds = redeemedClient.creds, config = redeemedClient.config) def _create_new_disposable_app(file, clientInfo, config): client = create_disposable(clientInfo, config = config) if client is None: return None code = client.client['code'] userInput = raw_input("Please redeem disposable code: " + code + '\n') return _try_redeem_disposable_app(file, client) def create_disposable_app(clientInfo, config={}): """ Use an existing disposable app if data exists or create a new one and persist the data. 
""" file = _get_disposable_app_filename(clientInfo) existing = _get_existing_disposable_app(file, clientInfo, config) if existing: if _check_app_is_valid(existing): return existing else: print("Existing client has expired, must recreate.") return _create_new_disposable_app(file, clientInfo, config)
mattbierner/blotre-py
blotre.py
create_disposable_app
python
def create_disposable_app(clientInfo, config={}): file = _get_disposable_app_filename(clientInfo) existing = _get_existing_disposable_app(file, clientInfo, config) if existing: if _check_app_is_valid(existing): return existing else: print("Existing client has expired, must recreate.") return _create_new_disposable_app(file, clientInfo, config)
Use an existing disposable app if data exists or create a new one and persist the data.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L424-L437
[ "def _get_disposable_app_filename(clientInfo):\n \"\"\"\n Get name of file used to store creds.\n \"\"\"\n return clientInfo.get('file', clientInfo['name'] + '.client_data.json')\n", "def _get_existing_disposable_app(file, clientInfo, conf):\n \"\"\"\n Attempt to load an existing \n \"\"\"\n if not os.path.isfile(file):\n return None\n else:\n data = None\n with open(file, 'r') as f:\n data = json.load(f)\n if not 'client' in data or not 'creds' in data:\n return None\n return _BlotreDisposableApp(file,\n data['client'],\n creds = data['creds'],\n config = conf)\n", "def _create_new_disposable_app(file, clientInfo, config):\n client = create_disposable(clientInfo, config = config)\n if client is None:\n return None\n code = client.client['code']\n userInput = raw_input(\"Please redeem disposable code: \" + code + '\\n')\n return _try_redeem_disposable_app(file, client)\n", "def _check_app_is_valid(client):\n \"\"\"\n Check to see if the app has valid creds.\n \"\"\"\n try:\n if 'refresh_token' in client.creds:\n client.exchange_refresh_token()\n else:\n existing.get_token_info()\n return True\n except TokenEndpointError as e:\n return False\n" ]
import json import os import re import requests try: from urllib import urlencode from urlparse import urlunparse except ImportError: from urllib.parse import urlencode from urllib.parse import urlunparse _JSON_HEADERS = { 'accepts': 'application/json', 'content-type': 'application/json' } ROOT = '/v0/' API_ROOT = ROOT + 'api/' OAUTH2_ROOT = ROOT + 'oauth2/' DEFAULT_CONFIG = { 'protocol': 'https', 'host': 'blot.re', } def _extend(dict1, dict2): extended = dict1.copy() extended.update(dict2) return extended class TokenEndpointError(Exception): """ Error communicating with the token endpoint. """ def __init__(self, error, error_description): self.error = error self.error_description = error_description super(TokenEndpointError, self).__init__( "[%s] %s" % (self.error, self.error_description)) def _token_error_from_data(data): return TokenEndpointError( data.get('error', ''), data.get('error_description', '')) class RestError(Exception): """ Error response from one of the REST APIS. """ def __init__(self, status_code, error_description, details): self.status_code = status_code self.error_description = error_description self.details = details super(RestError, self).__init__( "[%s] %s" % (self.status_code, self.error_description)) def _is_error_response(body): return body.get('type', '') == 'Error' or 'error' in body def _rest_error_from_response(response): body = response.json() return RestError( response.status_code, body['error'], body.get('details', None)) def _format_url(config, relPath, query={}): return urlunparse(( config.get('protocol'), config.get('host'), relPath, '', urlencode(query), '')) class Blotre: """ Main Blot're flow object. 
""" def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def set_creds(self, newCreds): """Manually update the current creds.""" self.creds = newCreds self.on_creds_changed(newCreds) return self def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def normalize_uri(self, uri): """Convert a stream path into it's normalized form.""" return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'') def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_authorization_url(self): """Get the authorization Url for the current client.""" return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') }) def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. 
""" response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) def get_token_info(self): """ Get information about the current access token. """ response = requests.get( self._format_url(OAUTH2_ROOT + 'token_info', { 'token': self.creds['access_token'] })) data = response.json() if response.status_code != 200: raise _token_error_from_data(data) else: return data # Requests def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base def _is_expired_response(self, response): """ Check if the response failed because of an expired access token. 
""" if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. """ response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def get(self, path, query={}): """GET request.""" return self._make_request('get', self._format_url(API_ROOT + path, query=query), {}) def post(self, path, body): """POST request.""" return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body }) def put(self, path, body): """PUT request.""" return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): 
"""Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, options={}): """Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def get_child(self, streamId, childId, options={}): """Get the child of a stream.""" return self.get('stream/' + streamId + '/children/' + childId, options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag) # Basic Disposable Client def create_disposable(clientInfo, config = {}): """ Create a new disposable client. 
""" response = requests.put( _format_url( _extend(DEFAULT_CONFIG, config), OAUTH2_ROOT + 'disposable'), json = clientInfo) if response.status_code != 200: return None else: body = response.json() return Blotre({ 'client_id': body['id'], 'client_secret': body['secret'], 'code': body['code'] }, config = config) # Disposable App class _BlotreDisposableApp(Blotre): def __init__(self, file, client, **kwargs): Blotre.__init__(self, client, **kwargs) self.file = file self._persist() def on_creds_changed(self, newCreds): self._persist() def _persist(self): """Persist client data.""" with open(self.file, 'w') as f: json.dump({ 'client': self.client, 'creds': self.creds, 'config': self.config }, f) def _get_disposable_app_filename(clientInfo): """ Get name of file used to store creds. """ return clientInfo.get('file', clientInfo['name'] + '.client_data.json') def _get_existing_disposable_app(file, clientInfo, conf): """ Attempt to load an existing """ if not os.path.isfile(file): return None else: data = None with open(file, 'r') as f: data = json.load(f) if not 'client' in data or not 'creds' in data: return None return _BlotreDisposableApp(file, data['client'], creds = data['creds'], config = conf) def _try_redeem_disposable_app(file, client): """ Attempt to redeem a one time code registred on the client. """ redeemedClient = client.redeem_onetime_code(None) if redeemedClient is None: return None else: return _BlotreDisposableApp(file, redeemedClient.client, creds = redeemedClient.creds, config = redeemedClient.config) def _create_new_disposable_app(file, clientInfo, config): client = create_disposable(clientInfo, config = config) if client is None: return None code = client.client['code'] userInput = raw_input("Please redeem disposable code: " + code + '\n') return _try_redeem_disposable_app(file, client) def _check_app_is_valid(client): """ Check to see if the app has valid creds. 
""" try: if 'refresh_token' in client.creds: client.exchange_refresh_token() else: existing.get_token_info() return True except TokenEndpointError as e: return False def create_disposable_app(clientInfo, config={}): """ Use an existing disposable app if data exists or create a new one and persist the data. """ file = _get_disposable_app_filename(clientInfo) existing = _get_existing_disposable_app(file, clientInfo, config) if existing: if _check_app_is_valid(existing): return existing else: print("Existing client has expired, must recreate.") return _create_new_disposable_app(file, clientInfo, config)
mattbierner/blotre-py
blotre.py
Blotre.set_creds
python
def set_creds(self, newCreds): self.creds = newCreds self.on_creds_changed(newCreds) return self
Manually update the current creds.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L86-L90
[ "def on_creds_changed(self, newCreds):\n \"\"\"\n Overridable function called when the creds change\n \"\"\"\n pass\n" ]
class Blotre: """ Main Blot're flow object. """ def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def normalize_uri(self, uri): """Convert a stream path into it's normalized form.""" return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'') def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_authorization_url(self): """Get the authorization Url for the current client.""" return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') }) def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. 
""" response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) def get_token_info(self): """ Get information about the current access token. """ response = requests.get( self._format_url(OAUTH2_ROOT + 'token_info', { 'token': self.creds['access_token'] })) data = response.json() if response.status_code != 200: raise _token_error_from_data(data) else: return data # Requests def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base def _is_expired_response(self, response): """ Check if the response failed because of an expired access token. 
""" if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. """ response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def get(self, path, query={}): """GET request.""" return self._make_request('get', self._format_url(API_ROOT + path, query=query), {}) def post(self, path, body): """POST request.""" return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body }) def put(self, path, body): """PUT request.""" return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): 
"""Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, options={}): """Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def get_child(self, streamId, childId, options={}): """Get the child of a stream.""" return self.get('stream/' + streamId + '/children/' + childId, options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag)
mattbierner/blotre-py
blotre.py
Blotre.normalize_uri
python
def normalize_uri(self, uri): return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'')
Convert a stream path into it's normalized form.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L98-L102
null
class Blotre: """ Main Blot're flow object. """ def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def set_creds(self, newCreds): """Manually update the current creds.""" self.creds = newCreds self.on_creds_changed(newCreds) return self def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_authorization_url(self): """Get the authorization Url for the current client.""" return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') }) def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. """ response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. 
If successful, update client to use these credentials. """ return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) def get_token_info(self): """ Get information about the current access token. """ response = requests.get( self._format_url(OAUTH2_ROOT + 'token_info', { 'token': self.creds['access_token'] })) data = response.json() if response.status_code != 200: raise _token_error_from_data(data) else: return data # Requests def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base def _is_expired_response(self, response): """ Check if the response failed because of an expired access token. """ if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. 
""" response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def get(self, path, query={}): """GET request.""" return self._make_request('get', self._format_url(API_ROOT + path, query=query), {}) def post(self, path, body): """POST request.""" return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body }) def put(self, path, body): """PUT request.""" return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): """Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, options={}): """Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def get_child(self, streamId, childId, options={}): """Get the child of a stream.""" 
return self.get('stream/' + streamId + '/children/' + childId, options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag)
mattbierner/blotre-py
blotre.py
Blotre.get_authorization_url
python
def get_authorization_url(self): return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') })
Get the authorization Url for the current client.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L121-L129
[ "def _format_url(self, relPath, query={}):\n return _format_url(self.config, relPath, query)\n" ]
class Blotre: """ Main Blot're flow object. """ def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def set_creds(self, newCreds): """Manually update the current creds.""" self.creds = newCreds self.on_creds_changed(newCreds) return self def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def normalize_uri(self, uri): """Convert a stream path into it's normalized form.""" return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'') def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. """ response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. If successful, update client to use these credentials. 
""" return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) def get_token_info(self): """ Get information about the current access token. """ response = requests.get( self._format_url(OAUTH2_ROOT + 'token_info', { 'token': self.creds['access_token'] })) data = response.json() if response.status_code != 200: raise _token_error_from_data(data) else: return data # Requests def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base def _is_expired_response(self, response): """ Check if the response failed because of an expired access token. """ if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. 
""" response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def get(self, path, query={}): """GET request.""" return self._make_request('get', self._format_url(API_ROOT + path, query=query), {}) def post(self, path, body): """POST request.""" return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body }) def put(self, path, body): """PUT request.""" return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): """Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, options={}): """Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def get_child(self, streamId, childId, options={}): """Get the child of a stream.""" 
return self.get('stream/' + streamId + '/children/' + childId, options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag)
mattbierner/blotre-py
blotre.py
Blotre._access_token_endpoint
python
def _access_token_endpoint(self, grantType, extraParams={}):
    """Exchange grant data for an access_token at the OAuth2 token endpoint.

    Posts the client credentials merged with ``extraParams`` and, on
    success, stores the returned credentials via ``set_creds``.  Raises
    ``TokenEndpointError`` when the endpoint reports an error in its body.
    """
    payload = {
        'grant_type': grantType,
        'client_id': self.client.get('client_id', ''),
        'client_secret': self.client.get('client_secret', ''),
        'redirect_uri': self.client.get('redirect_uri', '')
    }
    payload.update(extraParams)
    data = requests.post(
        self._format_url(OAUTH2_ROOT + 'access_token'),
        data = payload).json()
    # The endpoint signals failure in the JSON body, not the status code.
    if 'error' in data or 'error_description' in data:
        raise _token_error_from_data(data)
    return self.set_creds(data)
Base exchange of data for an access_token.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L138-L155
[ "def _extend(dict1, dict2):\n extended = dict1.copy()\n extended.update(dict2)\n return extended\n", "def _token_error_from_data(data):\n return TokenEndpointError(\n data.get('error', ''),\n data.get('error_description', ''))\n", "def set_creds(self, newCreds):\n \"\"\"Manually update the current creds.\"\"\"\n self.creds = newCreds\n self.on_creds_changed(newCreds)\n return self\n", "def _format_url(self, relPath, query={}):\n return _format_url(self.config, relPath, query)\n" ]
class Blotre: """ Main Blot're flow object. """ def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def set_creds(self, newCreds): """Manually update the current creds.""" self.creds = newCreds self.on_creds_changed(newCreds) return self def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def normalize_uri(self, uri): """Convert a stream path into it's normalized form.""" return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'') def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_authorization_url(self): """Get the authorization Url for the current client.""" return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') }) def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. 
""" response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) def get_token_info(self): """ Get information about the current access token. """ response = requests.get( self._format_url(OAUTH2_ROOT + 'token_info', { 'token': self.creds['access_token'] })) data = response.json() if response.status_code != 200: raise _token_error_from_data(data) else: return data # Requests def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base def _is_expired_response(self, response): """ Check if the response failed because of an expired access token. 
""" if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. """ response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def get(self, path, query={}): """GET request.""" return self._make_request('get', self._format_url(API_ROOT + path, query=query), {}) def post(self, path, body): """POST request.""" return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body }) def put(self, path, body): """PUT request.""" return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): 
"""Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, options={}): """Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def get_child(self, streamId, childId, options={}): """Get the child of a stream.""" return self.get('stream/' + streamId + '/children/' + childId, options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag)
mattbierner/blotre-py
blotre.py
Blotre.get_token_info
python
def get_token_info(self):
    """Return metadata about the current access token.

    Queries the OAuth2 ``token_info`` endpoint; raises
    ``TokenEndpointError`` when the lookup does not succeed.
    """
    response = requests.get(
        self._format_url(OAUTH2_ROOT + 'token_info', {
            'token': self.creds['access_token']
        }))
    payload = response.json()
    if response.status_code == 200:
        return payload
    raise _token_error_from_data(payload)
Get information about the current access token.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L185-L197
[ "def _token_error_from_data(data):\n return TokenEndpointError(\n data.get('error', ''),\n data.get('error_description', ''))\n", "def _format_url(self, relPath, query={}):\n return _format_url(self.config, relPath, query)\n" ]
class Blotre: """ Main Blot're flow object. """ def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def set_creds(self, newCreds): """Manually update the current creds.""" self.creds = newCreds self.on_creds_changed(newCreds) return self def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def normalize_uri(self, uri): """Convert a stream path into it's normalized form.""" return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'') def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_authorization_url(self): """Get the authorization Url for the current client.""" return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') }) def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. 
""" response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) # Requests def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base def _is_expired_response(self, response): """ Check if the response failed because of an expired access token. """ if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. 
""" response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def get(self, path, query={}): """GET request.""" return self._make_request('get', self._format_url(API_ROOT + path, query=query), {}) def post(self, path, body): """POST request.""" return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body }) def put(self, path, body): """PUT request.""" return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): """Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, options={}): """Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def get_child(self, streamId, childId, options={}): """Get the child of a stream.""" 
return self.get('stream/' + streamId + '/children/' + childId, options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag)
mattbierner/blotre-py
blotre.py
Blotre._add_auth_headers
python
def _add_auth_headers(self, base): if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base
Attach the access_token to a request.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L200-L206
null
class Blotre: """ Main Blot're flow object. """ def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def set_creds(self, newCreds): """Manually update the current creds.""" self.creds = newCreds self.on_creds_changed(newCreds) return self def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def normalize_uri(self, uri): """Convert a stream path into it's normalized form.""" return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'') def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_authorization_url(self): """Get the authorization Url for the current client.""" return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') }) def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. 
""" response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) def get_token_info(self): """ Get information about the current access token. """ response = requests.get( self._format_url(OAUTH2_ROOT + 'token_info', { 'token': self.creds['access_token'] })) data = response.json() if response.status_code != 200: raise _token_error_from_data(data) else: return data # Requests def _is_expired_response(self, response): """ Check if the response failed because of an expired access token. """ if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. 
""" response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def get(self, path, query={}): """GET request.""" return self._make_request('get', self._format_url(API_ROOT + path, query=query), {}) def post(self, path, body): """POST request.""" return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body }) def put(self, path, body): """PUT request.""" return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): """Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, options={}): """Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def get_child(self, streamId, childId, options={}): """Get the child of a stream.""" 
return self.get('stream/' + streamId + '/children/' + childId, options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag)
mattbierner/blotre-py
blotre.py
Blotre._is_expired_response
python
def _is_expired_response(self, response): if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge
Check if the response failed because of an expired access token.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L208-L215
null
class Blotre: """ Main Blot're flow object. """ def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def set_creds(self, newCreds): """Manually update the current creds.""" self.creds = newCreds self.on_creds_changed(newCreds) return self def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def normalize_uri(self, uri): """Convert a stream path into it's normalized form.""" return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'') def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_authorization_url(self): """Get the authorization Url for the current client.""" return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') }) def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. 
""" response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) def get_token_info(self): """ Get information about the current access token. """ response = requests.get( self._format_url(OAUTH2_ROOT + 'token_info', { 'token': self.creds['access_token'] })) data = response.json() if response.status_code != 200: raise _token_error_from_data(data) else: return data # Requests def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. 
""" response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def get(self, path, query={}): """GET request.""" return self._make_request('get', self._format_url(API_ROOT + path, query=query), {}) def post(self, path, body): """POST request.""" return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body }) def put(self, path, body): """PUT request.""" return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): """Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, options={}): """Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def get_child(self, streamId, childId, options={}): """Get the child of a stream.""" 
return self.get('stream/' + streamId + '/children/' + childId, options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag)
mattbierner/blotre-py
blotre.py
Blotre._make_request
python
def _make_request(self, type, path, args, noRetry=False):
    """Issue an HTTP request against Blot're and decode the JSON reply.

    When the request fails because the access token expired and a
    refresh_token is available, the token is refreshed and the request
    is replayed exactly once.  Raises ``RestError`` on any other
    failure, or when the refresh itself fails.
    """
    method = getattr(requests, type)
    response = method(path,
        headers = self._add_auth_headers(_JSON_HEADERS),
        **args)
    if response.status_code in (200, 201):
        return response.json()
    can_retry = (not noRetry
        and 'refresh_token' in self.creds
        and self._is_expired_response(response))
    if not can_retry:
        raise _rest_error_from_response(response)
    try:
        self.exchange_refresh_token()
    except TokenEndpointError:
        raise _rest_error_from_response(response)
    # Replay once with a fresh token; noRetry guards against loops.
    return self._make_request(type, path, args, noRetry = True)
Make a request to Blot're. Attempts to retry the request if it fails due to an expired access token.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L217-L234
[ "def _rest_error_from_response(response):\n body = response.json()\n return RestError(\n response.status_code,\n body['error'],\n body.get('details', None))\n", "def _add_auth_headers(self, base):\n \"\"\"Attach the acces_token to a request.\"\"\"\n if 'access_token' in self.creds:\n return _extend(base, {\n 'authorization': 'Bearer ' + self.creds['access_token']\n })\n return base\n", "def _is_expired_response(self, response):\n \"\"\"\n Check if the response failed because of an expired access token.\n \"\"\"\n if response.status_code != 401:\n return False\n challenge = response.headers.get('www-authenticate', '')\n return 'error=\"invalid_token\"' in challenge\n" ]
class Blotre: """ Main Blot're flow object. """ def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def set_creds(self, newCreds): """Manually update the current creds.""" self.creds = newCreds self.on_creds_changed(newCreds) return self def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def normalize_uri(self, uri): """Convert a stream path into it's normalized form.""" return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'') def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_authorization_url(self): """Get the authorization Url for the current client.""" return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') }) def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. 
""" response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) def get_token_info(self): """ Get information about the current access token. """ response = requests.get( self._format_url(OAUTH2_ROOT + 'token_info', { 'token': self.creds['access_token'] })) data = response.json() if response.status_code != 200: raise _token_error_from_data(data) else: return data # Requests def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base def _is_expired_response(self, response): """ Check if the response failed because of an expired access token. 
""" if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. """ response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def get(self, path, query={}): """GET request.""" return self._make_request('get', self._format_url(API_ROOT + path, query=query), {}) def post(self, path, body): """POST request.""" return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body }) def put(self, path, body): """PUT request.""" return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): 
"""Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, options={}): """Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def get_child(self, streamId, childId, options={}): """Get the child of a stream.""" return self.get('stream/' + streamId + '/children/' + childId, options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag)
mattbierner/blotre-py
blotre.py
Blotre.get
python
def get(self, path, query={}): return self._make_request('get', self._format_url(API_ROOT + path, query=query), {})
GET request.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L236-L239
[ "def _format_url(self, relPath, query={}):\n return _format_url(self.config, relPath, query)\n", "def _make_request(self, type, path, args, noRetry=False):\n \"\"\"\n Make a request to Blot're.\n\n Attempts to reply the request if it fails due to an expired\n access token.\n \"\"\"\n response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args)\n if response.status_code == 200 or response.status_code == 201:\n return response.json()\n elif not noRetry and self._is_expired_response(response) \\\n and 'refresh_token' in self.creds:\n try:\n self.exchange_refresh_token()\n except TokenEndpointError:\n raise _rest_error_from_response(response)\n return self._make_request(type, path, args, noRetry = True)\n raise _rest_error_from_response(response)\n" ]
class Blotre: """ Main Blot're flow object. """ def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def set_creds(self, newCreds): """Manually update the current creds.""" self.creds = newCreds self.on_creds_changed(newCreds) return self def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def normalize_uri(self, uri): """Convert a stream path into it's normalized form.""" return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'') def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_authorization_url(self): """Get the authorization Url for the current client.""" return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') }) def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. 
""" response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) def get_token_info(self): """ Get information about the current access token. """ response = requests.get( self._format_url(OAUTH2_ROOT + 'token_info', { 'token': self.creds['access_token'] })) data = response.json() if response.status_code != 200: raise _token_error_from_data(data) else: return data # Requests def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base def _is_expired_response(self, response): """ Check if the response failed because of an expired access token. 
""" if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. """ response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def post(self, path, body): """POST request.""" return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body }) def put(self, path, body): """PUT request.""" return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): """Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, options={}): 
"""Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def get_child(self, streamId, childId, options={}): """Get the child of a stream.""" return self.get('stream/' + streamId + '/children/' + childId, options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag)
mattbierner/blotre-py
blotre.py
Blotre.post
python
def post(self, path, body): return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body })
POST request.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L241-L246
[ "def _format_url(self, relPath, query={}):\n return _format_url(self.config, relPath, query)\n", "def _make_request(self, type, path, args, noRetry=False):\n \"\"\"\n Make a request to Blot're.\n\n Attempts to reply the request if it fails due to an expired\n access token.\n \"\"\"\n response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args)\n if response.status_code == 200 or response.status_code == 201:\n return response.json()\n elif not noRetry and self._is_expired_response(response) \\\n and 'refresh_token' in self.creds:\n try:\n self.exchange_refresh_token()\n except TokenEndpointError:\n raise _rest_error_from_response(response)\n return self._make_request(type, path, args, noRetry = True)\n raise _rest_error_from_response(response)\n" ]
class Blotre: """ Main Blot're flow object. """ def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def set_creds(self, newCreds): """Manually update the current creds.""" self.creds = newCreds self.on_creds_changed(newCreds) return self def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def normalize_uri(self, uri): """Convert a stream path into it's normalized form.""" return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'') def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_authorization_url(self): """Get the authorization Url for the current client.""" return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') }) def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. 
""" response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) def get_token_info(self): """ Get information about the current access token. """ response = requests.get( self._format_url(OAUTH2_ROOT + 'token_info', { 'token': self.creds['access_token'] })) data = response.json() if response.status_code != 200: raise _token_error_from_data(data) else: return data # Requests def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base def _is_expired_response(self, response): """ Check if the response failed because of an expired access token. 
""" if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. """ response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def get(self, path, query={}): """GET request.""" return self._make_request('get', self._format_url(API_ROOT + path, query=query), {}) def put(self, path, body): """PUT request.""" return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): """Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, options={}): 
"""Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def get_child(self, streamId, childId, options={}): """Get the child of a stream.""" return self.get('stream/' + streamId + '/children/' + childId, options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag)
mattbierner/blotre-py
blotre.py
Blotre.put
python
def put(self, path, body): return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body })
PUT request.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L248-L253
[ "def _format_url(self, relPath, query={}):\n return _format_url(self.config, relPath, query)\n", "def _make_request(self, type, path, args, noRetry=False):\n \"\"\"\n Make a request to Blot're.\n\n Attempts to reply the request if it fails due to an expired\n access token.\n \"\"\"\n response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args)\n if response.status_code == 200 or response.status_code == 201:\n return response.json()\n elif not noRetry and self._is_expired_response(response) \\\n and 'refresh_token' in self.creds:\n try:\n self.exchange_refresh_token()\n except TokenEndpointError:\n raise _rest_error_from_response(response)\n return self._make_request(type, path, args, noRetry = True)\n raise _rest_error_from_response(response)\n" ]
class Blotre: """ Main Blot're flow object. """ def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def set_creds(self, newCreds): """Manually update the current creds.""" self.creds = newCreds self.on_creds_changed(newCreds) return self def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def normalize_uri(self, uri): """Convert a stream path into it's normalized form.""" return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'') def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_authorization_url(self): """Get the authorization Url for the current client.""" return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') }) def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. 
""" response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) def get_token_info(self): """ Get information about the current access token. """ response = requests.get( self._format_url(OAUTH2_ROOT + 'token_info', { 'token': self.creds['access_token'] })) data = response.json() if response.status_code != 200: raise _token_error_from_data(data) else: return data # Requests def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base def _is_expired_response(self, response): """ Check if the response failed because of an expired access token. 
""" if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. """ response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def get(self, path, query={}): """GET request.""" return self._make_request('get', self._format_url(API_ROOT + path, query=query), {}) def post(self, path, body): """POST request.""" return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): """Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, 
options={}): """Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def get_child(self, streamId, childId, options={}): """Get the child of a stream.""" return self.get('stream/' + streamId + '/children/' + childId, options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag)
mattbierner/blotre-py
blotre.py
Blotre.get_child
python
def get_child(self, streamId, childId, options={}): return self.get('stream/' + streamId + '/children/' + childId, options)
Get the child of a stream.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L294-L296
[ "def get(self, path, query={}):\n \"\"\"GET request.\"\"\"\n return self._make_request('get',\n self._format_url(API_ROOT + path, query=query), {})\n" ]
class Blotre: """ Main Blot're flow object. """ def __init__(self, client, creds={}, config={}): self.client = client self.config = _extend(DEFAULT_CONFIG, config) self.creds = creds def set_creds(self, newCreds): """Manually update the current creds.""" self.creds = newCreds self.on_creds_changed(newCreds) return self def on_creds_changed(self, newCreds): """ Overridable function called when the creds change """ pass def normalize_uri(self, uri): """Convert a stream path into it's normalized form.""" return urllib.quote( re.sub(r"\s", '+', uri.strip().lower()), safe = '~@#$&()*!+=:),.?/\'') def join_uri(self, *paths): return '/'.join(self.normalize_uri(x) for x in paths) def _get_websocket_protocol(self): return 'ws' if self.config.protocol == 'http' else 'wss' def get_websocket_url(self): """ Get the url for using the websocked APIs, used for both subscription and send/receive. """ return self._get_websocket_protocol() + '://' + config.host + '/v0/ws' def _format_url(self, relPath, query={}): return _format_url(self.config, relPath, query) # Authorization def get_authorization_url(self): """Get the authorization Url for the current client.""" return self._format_url( OAUTH2_ROOT + 'authorize', query = { 'response_type': 'code', 'client_id': self.client.get('client_id', ''), 'redirect_uri': self.client.get('redirect_uri', '') }) def get_redeem_url(self): """ Get the Url where a user can redeem a onetime code for a disposable client. """ return self._format_url(OAUTH2_ROOT + 'redeem') def _access_token_endpoint(self, grantType, extraParams={}): """ Base exchange of data for an access_token. 
""" response = requests.post( self._format_url(OAUTH2_ROOT + 'access_token'), data = _extend({ 'grant_type': grantType, 'client_id': self.client.get('client_id', ''), 'client_secret': self.client.get('client_secret', ''), 'redirect_uri': self.client.get('redirect_uri', '') }, extraParams)) data = response.json() if 'error' in data or 'error_description' in data: raise _token_error_from_data(data) else: return self.set_creds(data) def redeem_authorization_code(self, code): """ Exchange an authorization code for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('authorization_code', { 'code': code }) def exchange_refresh_token(self): """ Attempt to exchange a refresh token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint('refresh_token', { 'refresh_token': self.creds['refresh_token'] }) def redeem_onetime_code(self, code): """ Attempt to exchange a onetime token for a new access token. If successful, update client to use these credentials. """ return self._access_token_endpoint( 'https://oauth2grant.blot.re/onetime_code', { 'code': code if code else self.client['code'] }) def get_token_info(self): """ Get information about the current access token. """ response = requests.get( self._format_url(OAUTH2_ROOT + 'token_info', { 'token': self.creds['access_token'] })) data = response.json() if response.status_code != 200: raise _token_error_from_data(data) else: return data # Requests def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base def _is_expired_response(self, response): """ Check if the response failed because of an expired access token. 
""" if response.status_code != 401: return False challenge = response.headers.get('www-authenticate', '') return 'error="invalid_token"' in challenge def _make_request(self, type, path, args, noRetry=False): """ Make a request to Blot're. Attempts to reply the request if it fails due to an expired access token. """ response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args) if response.status_code == 200 or response.status_code == 201: return response.json() elif not noRetry and self._is_expired_response(response) \ and 'refresh_token' in self.creds: try: self.exchange_refresh_token() except TokenEndpointError: raise _rest_error_from_response(response) return self._make_request(type, path, args, noRetry = True) raise _rest_error_from_response(response) def get(self, path, query={}): """GET request.""" return self._make_request('get', self._format_url(API_ROOT + path, query=query), {}) def post(self, path, body): """POST request.""" return self._make_request('post', self._format_url(API_ROOT + path), { 'json': body }) def put(self, path, body): """PUT request.""" return self._make_request('put', self._format_url(API_ROOT + path), { 'json': body }) def delete(self, path): """DELETE request.""" return self._make_request('get', self._format_url(API_ROOT + path), {}) # User Operations def get_user(self, userId, options={}): """Get a user by id.""" return self.get('user/' + userId, options) # Stream Operations def get_streams(self, options={}): """Stream lookup.""" return self.get('stream', options) def create_stream(self, body): """Create a new stream.""" return self.put('stream', body) def get_stream(self, id, options={}): """Get a stream.""" return self.get('stream/' + id, options) def delete_stream(self, id): """Delete an existing stream.""" return self.delete('stream', id) def get_stream_status(self, id, options={}): """Get the status of a stream.""" return self.get('stream/' + id, options) def set_stream_status(self, id, body): 
"""Update the status of a stream.""" return self.post('stream/' + id + '/status', body) def get_stream_children(self, id, options={}): """Get the children of a stream.""" return self.get('stream/' + id + '/children', options) def create_child(self, streamId, childId): """Add a new child to a stream.""" return self.put('stream/' + streamId + '/children/' + childId) def delete_child(self, streamId, childId): """Remove a child from a stream.""" return self.delete('stream/' + streamId + '/children/' + childId) def get_tags(streamId): """Get the tags of a stream.""" self.get('stream/' + streamId + '/tags') def set_tags(streamId): """Set the tags of a stream.""" self.post('stream/' + streamId + '/tags') def get_tag(streamId, tag): """Get a tag on a stream.""" self.get('stream/' + streamId + '/tags/' + tag) def set_tag(streamId, tag): """Create a tag on a stream.""" self.put('stream/' + streamId + '/tags/' + tag) def delete_tag(streamId, tag): """Remove a tag on a stream.""" self.delete('stream/' + streamId + '/tags/' + tag)
mattbierner/blotre-py
blotre.py
_BlotreDisposableApp._persist
python
def _persist(self): with open(self.file, 'w') as f: json.dump({ 'client': self.client, 'creds': self.creds, 'config': self.config }, f)
Persist client data.
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L358-L365
null
class _BlotreDisposableApp(Blotre): def __init__(self, file, client, **kwargs): Blotre.__init__(self, client, **kwargs) self.file = file self._persist() def on_creds_changed(self, newCreds): self._persist()
arraylabs/pymyq
pymyq/api.py
login
python
async def login( username: str, password: str, brand: str, websession: ClientSession = None) -> API: api = API(brand, websession) await api.authenticate(username, password) return api
Log in to the API.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L286-L292
[ "async def authenticate(self, username: str, password: str) -> None:\n \"\"\"Authenticate against the API.\"\"\"\n self._credentials = {\n 'username': username,\n 'password': password,\n }\n\n await self._get_security_token()\n" ]
"""Define the MyQ API.""" import asyncio import logging from datetime import datetime, timedelta from typing import Optional from aiohttp import ClientSession from aiohttp.client_exceptions import ClientError from .errors import MyQError, RequestError, UnsupportedBrandError _LOGGER = logging.getLogger(__name__) API_BASE = 'https://myqexternal.myqdevice.com' LOGIN_ENDPOINT = "api/v4/User/Validate" DEVICE_LIST_ENDPOINT = "api/v4/UserDeviceDetails/Get" DEFAULT_TIMEOUT = 1 DEFAULT_REQUEST_RETRIES = 3 MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=5) DEFAULT_USER_AGENT = "Chamberlain/3773 (iPhone; iOS 11.0.3; Scale/2.00)" BRAND_MAPPINGS = { 'liftmaster': { 'app_id': 'Vj8pQggXLhLy0WHahglCD4N1nAkkXQtGYpq2HrHD7H1nvmbT55KqtN6RSF4ILB/i' }, 'chamberlain': { 'app_id': 'OA9I/hgmPHFp9RYKJqCKfwnhh28uqLJzZ9KOJf1DXoo8N2XAaVX6A1wcLYyWsnnv' }, 'craftsman': { 'app_id': 'YmiMRRS1juXdSd0KWsuKtHmQvh5RftEp5iewHdCvsNB77FnQbY+vjCVn2nMdIeN8' }, 'merlin': { 'app_id': '3004cac4e920426c823fa6c2ecf0cc28ef7d4a7b74b6470f8f0d94d6c39eb718' } } SUPPORTED_DEVICE_TYPE_NAMES = [ 'Garage Door Opener WGDO', 'GarageDoorOpener', 'Gate', 'VGDO', ] class API: """Define a class for interacting with the MyQ iOS App API.""" def __init__(self, brand: str, websession: ClientSession = None) -> None: """Initialize the API object.""" if brand not in BRAND_MAPPINGS: raise UnsupportedBrandError('Unknown brand: {0}'.format(brand)) self._brand = brand self._websession = websession self._supplied_websession = True self._credentials = None self._security_token = None self._devices = [] self._last_update = None self.online = False self._update_lock = asyncio.Lock() self._security_token_lock = asyncio.Lock() def _create_websession(self): """Create a web session.""" from socket import AF_INET from aiohttp import ClientTimeout, TCPConnector _LOGGER.debug('Creating web session') conn = TCPConnector( family=AF_INET, limit_per_host=5, enable_cleanup_closed=True, ) # Create session object. 
session_timeout = ClientTimeout(connect=10) self._websession = ClientSession(connector=conn, timeout=session_timeout) self._supplied_websession = False async def close_websession(self): """Close web session if not already closed and created by us.""" # We do not close the web session if it was provided. if self._supplied_websession or self._websession is None: return _LOGGER.debug('Closing connections') # Need to set _websession to none first to prevent any other task # from closing it as well. temp_websession = self._websession self._websession = None await temp_websession.close() await asyncio.sleep(0) _LOGGER.debug('Connections closed') async def _request( self, method: str, endpoint: str, *, headers: dict = None, params: dict = None, data: dict = None, json: dict = None, login_request: bool = False, **kwargs) -> Optional[dict]: # Get a security token if we do not have one AND this request # is not to get a security token. if self._security_token is None and not login_request: await self._get_security_token() if self._security_token is None: return None url = '{0}/{1}'.format(API_BASE, endpoint) if not headers: headers = {} if self._security_token: headers['SecurityToken'] = self._security_token headers.update({ 'MyQApplicationId': BRAND_MAPPINGS[self._brand]['app_id'], 'User-Agent': DEFAULT_USER_AGENT, }) # Create the web session if none exist. if self._websession is None: self._create_websession() start_request_time = datetime.time(datetime.now()) _LOGGER.debug('%s Initiating request to %s', start_request_time, url) timeout = DEFAULT_TIMEOUT # Repeat twice amount of max requests retries for timeout errors. for attempt in range(0, (DEFAULT_REQUEST_RETRIES * 2) - 1): try: async with self._websession.request( method, url, headers=headers, params=params, data=data, json=json, timeout=timeout, **kwargs) as resp: resp.raise_for_status() return await resp.json(content_type=None) except asyncio.TimeoutError: # Start increasing timeout if already tried twice.. 
if attempt > 1: timeout = timeout * 2 _LOGGER.debug('%s Timeout requesting from %s', start_request_time, endpoint) except ClientError as err: if attempt == DEFAULT_REQUEST_RETRIES - 1: raise RequestError('{} Client Error while requesting ' 'data from {}: {}'.format( start_request_time, endpoint, err)) _LOGGER.warning('%s Error requesting from %s; retrying: ' '%s', start_request_time, endpoint, err) await asyncio.sleep(5) raise RequestError('{} Constant timeouts while requesting data ' 'from {}'.format(start_request_time, endpoint)) async def _update_device_state(self) -> None: async with self._update_lock: if datetime.utcnow() - self._last_update >\ MIN_TIME_BETWEEN_UPDATES: self.online = await self._get_device_states() async def _get_device_states(self) -> bool: _LOGGER.debug('Retrieving new device states') try: devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT) except RequestError as err: _LOGGER.error('Getting device states failed: %s', err) return False if devices_resp is None: return False return_code = int(devices_resp.get('ReturnCode', 1)) if return_code != 0: if return_code == -3333: # Login error, need to retrieve a new token next time. 
self._security_token = None _LOGGER.debug('Security token expired') else: _LOGGER.error( 'Error %s while retrieving states: %s', devices_resp.get('ReturnCode'), devices_resp.get('ErrorMessage', 'Unknown Error')) return False self._store_device_states(devices_resp.get('Devices', [])) _LOGGER.debug('New device states retrieved') return True def _store_device_states(self, devices: dict) -> None: for device in self._devices: myq_device = next( (element for element in devices if element.get('MyQDeviceId') == device['device_id']), None) if myq_device is not None: device['device_info'] = myq_device continue self._last_update = datetime.utcnow() async def authenticate(self, username: str, password: str) -> None: """Authenticate against the API.""" self._credentials = { 'username': username, 'password': password, } await self._get_security_token() async def _get_security_token(self) -> None: """Request a security token.""" _LOGGER.debug('Requesting security token.') if self._credentials is None: return # Make sure only 1 request can be sent at a time. async with self._security_token_lock: # Confirm there is still no security token. if self._security_token is None: login_resp = await self._request( 'post', LOGIN_ENDPOINT, json=self._credentials, login_request=True, ) return_code = int(login_resp.get('ReturnCode', 1)) if return_code != 0: if return_code == 203: # Invalid username or password. 
_LOGGER.debug('Invalid username or password') self._credentials = None raise MyQError(login_resp['ErrorMessage']) self._security_token = login_resp['SecurityToken'] async def get_devices(self, covers_only: bool = True) -> list: """Get a list of all devices associated with the account.""" from .device import MyQDevice _LOGGER.debug('Retrieving list of devices') devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT) # print(json.dumps(devices_resp, indent=4)) device_list = [] if devices_resp is None: return device_list for device in devices_resp['Devices']: if not covers_only or \ device['MyQDeviceTypeName'] in SUPPORTED_DEVICE_TYPE_NAMES: self._devices.append({ 'device_id': device['MyQDeviceId'], 'device_info': device }) myq_device = MyQDevice( self._devices[-1], self._brand, self) device_list.append(myq_device) # Store current device states. self._store_device_states(devices_resp.get('Devices', [])) _LOGGER.debug('List of devices retrieved') return device_list
arraylabs/pymyq
pymyq/api.py
API._create_websession
python
def _create_websession(self): from socket import AF_INET from aiohttp import ClientTimeout, TCPConnector _LOGGER.debug('Creating web session') conn = TCPConnector( family=AF_INET, limit_per_host=5, enable_cleanup_closed=True, ) # Create session object. session_timeout = ClientTimeout(connect=10) self._websession = ClientSession(connector=conn, timeout=session_timeout) self._supplied_websession = False
Create a web session.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L73-L89
null
class API: """Define a class for interacting with the MyQ iOS App API.""" def __init__(self, brand: str, websession: ClientSession = None) -> None: """Initialize the API object.""" if brand not in BRAND_MAPPINGS: raise UnsupportedBrandError('Unknown brand: {0}'.format(brand)) self._brand = brand self._websession = websession self._supplied_websession = True self._credentials = None self._security_token = None self._devices = [] self._last_update = None self.online = False self._update_lock = asyncio.Lock() self._security_token_lock = asyncio.Lock() async def close_websession(self): """Close web session if not already closed and created by us.""" # We do not close the web session if it was provided. if self._supplied_websession or self._websession is None: return _LOGGER.debug('Closing connections') # Need to set _websession to none first to prevent any other task # from closing it as well. temp_websession = self._websession self._websession = None await temp_websession.close() await asyncio.sleep(0) _LOGGER.debug('Connections closed') async def _request( self, method: str, endpoint: str, *, headers: dict = None, params: dict = None, data: dict = None, json: dict = None, login_request: bool = False, **kwargs) -> Optional[dict]: # Get a security token if we do not have one AND this request # is not to get a security token. if self._security_token is None and not login_request: await self._get_security_token() if self._security_token is None: return None url = '{0}/{1}'.format(API_BASE, endpoint) if not headers: headers = {} if self._security_token: headers['SecurityToken'] = self._security_token headers.update({ 'MyQApplicationId': BRAND_MAPPINGS[self._brand]['app_id'], 'User-Agent': DEFAULT_USER_AGENT, }) # Create the web session if none exist. 
if self._websession is None: self._create_websession() start_request_time = datetime.time(datetime.now()) _LOGGER.debug('%s Initiating request to %s', start_request_time, url) timeout = DEFAULT_TIMEOUT # Repeat twice amount of max requests retries for timeout errors. for attempt in range(0, (DEFAULT_REQUEST_RETRIES * 2) - 1): try: async with self._websession.request( method, url, headers=headers, params=params, data=data, json=json, timeout=timeout, **kwargs) as resp: resp.raise_for_status() return await resp.json(content_type=None) except asyncio.TimeoutError: # Start increasing timeout if already tried twice.. if attempt > 1: timeout = timeout * 2 _LOGGER.debug('%s Timeout requesting from %s', start_request_time, endpoint) except ClientError as err: if attempt == DEFAULT_REQUEST_RETRIES - 1: raise RequestError('{} Client Error while requesting ' 'data from {}: {}'.format( start_request_time, endpoint, err)) _LOGGER.warning('%s Error requesting from %s; retrying: ' '%s', start_request_time, endpoint, err) await asyncio.sleep(5) raise RequestError('{} Constant timeouts while requesting data ' 'from {}'.format(start_request_time, endpoint)) async def _update_device_state(self) -> None: async with self._update_lock: if datetime.utcnow() - self._last_update >\ MIN_TIME_BETWEEN_UPDATES: self.online = await self._get_device_states() async def _get_device_states(self) -> bool: _LOGGER.debug('Retrieving new device states') try: devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT) except RequestError as err: _LOGGER.error('Getting device states failed: %s', err) return False if devices_resp is None: return False return_code = int(devices_resp.get('ReturnCode', 1)) if return_code != 0: if return_code == -3333: # Login error, need to retrieve a new token next time. 
self._security_token = None _LOGGER.debug('Security token expired') else: _LOGGER.error( 'Error %s while retrieving states: %s', devices_resp.get('ReturnCode'), devices_resp.get('ErrorMessage', 'Unknown Error')) return False self._store_device_states(devices_resp.get('Devices', [])) _LOGGER.debug('New device states retrieved') return True def _store_device_states(self, devices: dict) -> None: for device in self._devices: myq_device = next( (element for element in devices if element.get('MyQDeviceId') == device['device_id']), None) if myq_device is not None: device['device_info'] = myq_device continue self._last_update = datetime.utcnow() async def authenticate(self, username: str, password: str) -> None: """Authenticate against the API.""" self._credentials = { 'username': username, 'password': password, } await self._get_security_token() async def _get_security_token(self) -> None: """Request a security token.""" _LOGGER.debug('Requesting security token.') if self._credentials is None: return # Make sure only 1 request can be sent at a time. async with self._security_token_lock: # Confirm there is still no security token. if self._security_token is None: login_resp = await self._request( 'post', LOGIN_ENDPOINT, json=self._credentials, login_request=True, ) return_code = int(login_resp.get('ReturnCode', 1)) if return_code != 0: if return_code == 203: # Invalid username or password. 
_LOGGER.debug('Invalid username or password') self._credentials = None raise MyQError(login_resp['ErrorMessage']) self._security_token = login_resp['SecurityToken'] async def get_devices(self, covers_only: bool = True) -> list: """Get a list of all devices associated with the account.""" from .device import MyQDevice _LOGGER.debug('Retrieving list of devices') devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT) # print(json.dumps(devices_resp, indent=4)) device_list = [] if devices_resp is None: return device_list for device in devices_resp['Devices']: if not covers_only or \ device['MyQDeviceTypeName'] in SUPPORTED_DEVICE_TYPE_NAMES: self._devices.append({ 'device_id': device['MyQDeviceId'], 'device_info': device }) myq_device = MyQDevice( self._devices[-1], self._brand, self) device_list.append(myq_device) # Store current device states. self._store_device_states(devices_resp.get('Devices', [])) _LOGGER.debug('List of devices retrieved') return device_list
arraylabs/pymyq
pymyq/api.py
API.close_websession
python
async def close_websession(self): # We do not close the web session if it was provided. if self._supplied_websession or self._websession is None: return _LOGGER.debug('Closing connections') # Need to set _websession to none first to prevent any other task # from closing it as well. temp_websession = self._websession self._websession = None await temp_websession.close() await asyncio.sleep(0) _LOGGER.debug('Connections closed')
Close web session if not already closed and created by us.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L91-L104
null
class API: """Define a class for interacting with the MyQ iOS App API.""" def __init__(self, brand: str, websession: ClientSession = None) -> None: """Initialize the API object.""" if brand not in BRAND_MAPPINGS: raise UnsupportedBrandError('Unknown brand: {0}'.format(brand)) self._brand = brand self._websession = websession self._supplied_websession = True self._credentials = None self._security_token = None self._devices = [] self._last_update = None self.online = False self._update_lock = asyncio.Lock() self._security_token_lock = asyncio.Lock() def _create_websession(self): """Create a web session.""" from socket import AF_INET from aiohttp import ClientTimeout, TCPConnector _LOGGER.debug('Creating web session') conn = TCPConnector( family=AF_INET, limit_per_host=5, enable_cleanup_closed=True, ) # Create session object. session_timeout = ClientTimeout(connect=10) self._websession = ClientSession(connector=conn, timeout=session_timeout) self._supplied_websession = False async def _request( self, method: str, endpoint: str, *, headers: dict = None, params: dict = None, data: dict = None, json: dict = None, login_request: bool = False, **kwargs) -> Optional[dict]: # Get a security token if we do not have one AND this request # is not to get a security token. if self._security_token is None and not login_request: await self._get_security_token() if self._security_token is None: return None url = '{0}/{1}'.format(API_BASE, endpoint) if not headers: headers = {} if self._security_token: headers['SecurityToken'] = self._security_token headers.update({ 'MyQApplicationId': BRAND_MAPPINGS[self._brand]['app_id'], 'User-Agent': DEFAULT_USER_AGENT, }) # Create the web session if none exist. if self._websession is None: self._create_websession() start_request_time = datetime.time(datetime.now()) _LOGGER.debug('%s Initiating request to %s', start_request_time, url) timeout = DEFAULT_TIMEOUT # Repeat twice amount of max requests retries for timeout errors. 
for attempt in range(0, (DEFAULT_REQUEST_RETRIES * 2) - 1): try: async with self._websession.request( method, url, headers=headers, params=params, data=data, json=json, timeout=timeout, **kwargs) as resp: resp.raise_for_status() return await resp.json(content_type=None) except asyncio.TimeoutError: # Start increasing timeout if already tried twice.. if attempt > 1: timeout = timeout * 2 _LOGGER.debug('%s Timeout requesting from %s', start_request_time, endpoint) except ClientError as err: if attempt == DEFAULT_REQUEST_RETRIES - 1: raise RequestError('{} Client Error while requesting ' 'data from {}: {}'.format( start_request_time, endpoint, err)) _LOGGER.warning('%s Error requesting from %s; retrying: ' '%s', start_request_time, endpoint, err) await asyncio.sleep(5) raise RequestError('{} Constant timeouts while requesting data ' 'from {}'.format(start_request_time, endpoint)) async def _update_device_state(self) -> None: async with self._update_lock: if datetime.utcnow() - self._last_update >\ MIN_TIME_BETWEEN_UPDATES: self.online = await self._get_device_states() async def _get_device_states(self) -> bool: _LOGGER.debug('Retrieving new device states') try: devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT) except RequestError as err: _LOGGER.error('Getting device states failed: %s', err) return False if devices_resp is None: return False return_code = int(devices_resp.get('ReturnCode', 1)) if return_code != 0: if return_code == -3333: # Login error, need to retrieve a new token next time. 
self._security_token = None _LOGGER.debug('Security token expired') else: _LOGGER.error( 'Error %s while retrieving states: %s', devices_resp.get('ReturnCode'), devices_resp.get('ErrorMessage', 'Unknown Error')) return False self._store_device_states(devices_resp.get('Devices', [])) _LOGGER.debug('New device states retrieved') return True def _store_device_states(self, devices: dict) -> None: for device in self._devices: myq_device = next( (element for element in devices if element.get('MyQDeviceId') == device['device_id']), None) if myq_device is not None: device['device_info'] = myq_device continue self._last_update = datetime.utcnow() async def authenticate(self, username: str, password: str) -> None: """Authenticate against the API.""" self._credentials = { 'username': username, 'password': password, } await self._get_security_token() async def _get_security_token(self) -> None: """Request a security token.""" _LOGGER.debug('Requesting security token.') if self._credentials is None: return # Make sure only 1 request can be sent at a time. async with self._security_token_lock: # Confirm there is still no security token. if self._security_token is None: login_resp = await self._request( 'post', LOGIN_ENDPOINT, json=self._credentials, login_request=True, ) return_code = int(login_resp.get('ReturnCode', 1)) if return_code != 0: if return_code == 203: # Invalid username or password. 
_LOGGER.debug('Invalid username or password') self._credentials = None raise MyQError(login_resp['ErrorMessage']) self._security_token = login_resp['SecurityToken'] async def get_devices(self, covers_only: bool = True) -> list: """Get a list of all devices associated with the account.""" from .device import MyQDevice _LOGGER.debug('Retrieving list of devices') devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT) # print(json.dumps(devices_resp, indent=4)) device_list = [] if devices_resp is None: return device_list for device in devices_resp['Devices']: if not covers_only or \ device['MyQDeviceTypeName'] in SUPPORTED_DEVICE_TYPE_NAMES: self._devices.append({ 'device_id': device['MyQDeviceId'], 'device_info': device }) myq_device = MyQDevice( self._devices[-1], self._brand, self) device_list.append(myq_device) # Store current device states. self._store_device_states(devices_resp.get('Devices', [])) _LOGGER.debug('List of devices retrieved') return device_list
arraylabs/pymyq
pymyq/api.py
API.authenticate
python
async def authenticate(self, username: str, password: str) -> None: self._credentials = { 'username': username, 'password': password, } await self._get_security_token()
Authenticate against the API.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L219-L226
[ "async def _get_security_token(self) -> None:\n \"\"\"Request a security token.\"\"\"\n _LOGGER.debug('Requesting security token.')\n if self._credentials is None:\n return\n\n # Make sure only 1 request can be sent at a time.\n async with self._security_token_lock:\n # Confirm there is still no security token.\n if self._security_token is None:\n login_resp = await self._request(\n 'post',\n LOGIN_ENDPOINT,\n json=self._credentials,\n login_request=True,\n )\n\n return_code = int(login_resp.get('ReturnCode', 1))\n if return_code != 0:\n if return_code == 203:\n # Invalid username or password.\n _LOGGER.debug('Invalid username or password')\n self._credentials = None\n raise MyQError(login_resp['ErrorMessage'])\n\n self._security_token = login_resp['SecurityToken']\n" ]
class API: """Define a class for interacting with the MyQ iOS App API.""" def __init__(self, brand: str, websession: ClientSession = None) -> None: """Initialize the API object.""" if brand not in BRAND_MAPPINGS: raise UnsupportedBrandError('Unknown brand: {0}'.format(brand)) self._brand = brand self._websession = websession self._supplied_websession = True self._credentials = None self._security_token = None self._devices = [] self._last_update = None self.online = False self._update_lock = asyncio.Lock() self._security_token_lock = asyncio.Lock() def _create_websession(self): """Create a web session.""" from socket import AF_INET from aiohttp import ClientTimeout, TCPConnector _LOGGER.debug('Creating web session') conn = TCPConnector( family=AF_INET, limit_per_host=5, enable_cleanup_closed=True, ) # Create session object. session_timeout = ClientTimeout(connect=10) self._websession = ClientSession(connector=conn, timeout=session_timeout) self._supplied_websession = False async def close_websession(self): """Close web session if not already closed and created by us.""" # We do not close the web session if it was provided. if self._supplied_websession or self._websession is None: return _LOGGER.debug('Closing connections') # Need to set _websession to none first to prevent any other task # from closing it as well. temp_websession = self._websession self._websession = None await temp_websession.close() await asyncio.sleep(0) _LOGGER.debug('Connections closed') async def _request( self, method: str, endpoint: str, *, headers: dict = None, params: dict = None, data: dict = None, json: dict = None, login_request: bool = False, **kwargs) -> Optional[dict]: # Get a security token if we do not have one AND this request # is not to get a security token. 
if self._security_token is None and not login_request: await self._get_security_token() if self._security_token is None: return None url = '{0}/{1}'.format(API_BASE, endpoint) if not headers: headers = {} if self._security_token: headers['SecurityToken'] = self._security_token headers.update({ 'MyQApplicationId': BRAND_MAPPINGS[self._brand]['app_id'], 'User-Agent': DEFAULT_USER_AGENT, }) # Create the web session if none exist. if self._websession is None: self._create_websession() start_request_time = datetime.time(datetime.now()) _LOGGER.debug('%s Initiating request to %s', start_request_time, url) timeout = DEFAULT_TIMEOUT # Repeat twice amount of max requests retries for timeout errors. for attempt in range(0, (DEFAULT_REQUEST_RETRIES * 2) - 1): try: async with self._websession.request( method, url, headers=headers, params=params, data=data, json=json, timeout=timeout, **kwargs) as resp: resp.raise_for_status() return await resp.json(content_type=None) except asyncio.TimeoutError: # Start increasing timeout if already tried twice.. 
if attempt > 1: timeout = timeout * 2 _LOGGER.debug('%s Timeout requesting from %s', start_request_time, endpoint) except ClientError as err: if attempt == DEFAULT_REQUEST_RETRIES - 1: raise RequestError('{} Client Error while requesting ' 'data from {}: {}'.format( start_request_time, endpoint, err)) _LOGGER.warning('%s Error requesting from %s; retrying: ' '%s', start_request_time, endpoint, err) await asyncio.sleep(5) raise RequestError('{} Constant timeouts while requesting data ' 'from {}'.format(start_request_time, endpoint)) async def _update_device_state(self) -> None: async with self._update_lock: if datetime.utcnow() - self._last_update >\ MIN_TIME_BETWEEN_UPDATES: self.online = await self._get_device_states() async def _get_device_states(self) -> bool: _LOGGER.debug('Retrieving new device states') try: devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT) except RequestError as err: _LOGGER.error('Getting device states failed: %s', err) return False if devices_resp is None: return False return_code = int(devices_resp.get('ReturnCode', 1)) if return_code != 0: if return_code == -3333: # Login error, need to retrieve a new token next time. 
self._security_token = None _LOGGER.debug('Security token expired') else: _LOGGER.error( 'Error %s while retrieving states: %s', devices_resp.get('ReturnCode'), devices_resp.get('ErrorMessage', 'Unknown Error')) return False self._store_device_states(devices_resp.get('Devices', [])) _LOGGER.debug('New device states retrieved') return True def _store_device_states(self, devices: dict) -> None: for device in self._devices: myq_device = next( (element for element in devices if element.get('MyQDeviceId') == device['device_id']), None) if myq_device is not None: device['device_info'] = myq_device continue self._last_update = datetime.utcnow() async def _get_security_token(self) -> None: """Request a security token.""" _LOGGER.debug('Requesting security token.') if self._credentials is None: return # Make sure only 1 request can be sent at a time. async with self._security_token_lock: # Confirm there is still no security token. if self._security_token is None: login_resp = await self._request( 'post', LOGIN_ENDPOINT, json=self._credentials, login_request=True, ) return_code = int(login_resp.get('ReturnCode', 1)) if return_code != 0: if return_code == 203: # Invalid username or password. 
_LOGGER.debug('Invalid username or password') self._credentials = None raise MyQError(login_resp['ErrorMessage']) self._security_token = login_resp['SecurityToken'] async def get_devices(self, covers_only: bool = True) -> list: """Get a list of all devices associated with the account.""" from .device import MyQDevice _LOGGER.debug('Retrieving list of devices') devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT) # print(json.dumps(devices_resp, indent=4)) device_list = [] if devices_resp is None: return device_list for device in devices_resp['Devices']: if not covers_only or \ device['MyQDeviceTypeName'] in SUPPORTED_DEVICE_TYPE_NAMES: self._devices.append({ 'device_id': device['MyQDeviceId'], 'device_info': device }) myq_device = MyQDevice( self._devices[-1], self._brand, self) device_list.append(myq_device) # Store current device states. self._store_device_states(devices_resp.get('Devices', [])) _LOGGER.debug('List of devices retrieved') return device_list
arraylabs/pymyq
pymyq/api.py
API._get_security_token
python
async def _get_security_token(self) -> None: _LOGGER.debug('Requesting security token.') if self._credentials is None: return # Make sure only 1 request can be sent at a time. async with self._security_token_lock: # Confirm there is still no security token. if self._security_token is None: login_resp = await self._request( 'post', LOGIN_ENDPOINT, json=self._credentials, login_request=True, ) return_code = int(login_resp.get('ReturnCode', 1)) if return_code != 0: if return_code == 203: # Invalid username or password. _LOGGER.debug('Invalid username or password') self._credentials = None raise MyQError(login_resp['ErrorMessage']) self._security_token = login_resp['SecurityToken']
Request a security token.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L228-L253
[ "async def _request(\n self,\n method: str,\n endpoint: str,\n *,\n headers: dict = None,\n params: dict = None,\n data: dict = None,\n json: dict = None,\n login_request: bool = False,\n **kwargs) -> Optional[dict]:\n\n # Get a security token if we do not have one AND this request\n # is not to get a security token.\n if self._security_token is None and not login_request:\n await self._get_security_token()\n if self._security_token is None:\n return None\n\n url = '{0}/{1}'.format(API_BASE, endpoint)\n\n if not headers:\n headers = {}\n if self._security_token:\n headers['SecurityToken'] = self._security_token\n headers.update({\n 'MyQApplicationId': BRAND_MAPPINGS[self._brand]['app_id'],\n 'User-Agent': DEFAULT_USER_AGENT,\n })\n\n # Create the web session if none exist.\n if self._websession is None:\n self._create_websession()\n\n start_request_time = datetime.time(datetime.now())\n _LOGGER.debug('%s Initiating request to %s', start_request_time, url)\n timeout = DEFAULT_TIMEOUT\n # Repeat twice amount of max requests retries for timeout errors.\n for attempt in range(0, (DEFAULT_REQUEST_RETRIES * 2) - 1):\n try:\n async with self._websession.request(\n method, url, headers=headers, params=params,\n data=data, json=json, timeout=timeout,\n **kwargs) as resp:\n resp.raise_for_status()\n return await resp.json(content_type=None)\n except asyncio.TimeoutError:\n # Start increasing timeout if already tried twice..\n if attempt > 1:\n timeout = timeout * 2\n _LOGGER.debug('%s Timeout requesting from %s',\n start_request_time, endpoint)\n except ClientError as err:\n if attempt == DEFAULT_REQUEST_RETRIES - 1:\n raise RequestError('{} Client Error while requesting '\n 'data from {}: {}'.format(\n start_request_time, endpoint,\n err))\n\n _LOGGER.warning('%s Error requesting from %s; retrying: '\n '%s', start_request_time, endpoint, err)\n await asyncio.sleep(5)\n\n raise RequestError('{} Constant timeouts while requesting data '\n 'from 
{}'.format(start_request_time, endpoint))\n" ]
class API: """Define a class for interacting with the MyQ iOS App API.""" def __init__(self, brand: str, websession: ClientSession = None) -> None: """Initialize the API object.""" if brand not in BRAND_MAPPINGS: raise UnsupportedBrandError('Unknown brand: {0}'.format(brand)) self._brand = brand self._websession = websession self._supplied_websession = True self._credentials = None self._security_token = None self._devices = [] self._last_update = None self.online = False self._update_lock = asyncio.Lock() self._security_token_lock = asyncio.Lock() def _create_websession(self): """Create a web session.""" from socket import AF_INET from aiohttp import ClientTimeout, TCPConnector _LOGGER.debug('Creating web session') conn = TCPConnector( family=AF_INET, limit_per_host=5, enable_cleanup_closed=True, ) # Create session object. session_timeout = ClientTimeout(connect=10) self._websession = ClientSession(connector=conn, timeout=session_timeout) self._supplied_websession = False async def close_websession(self): """Close web session if not already closed and created by us.""" # We do not close the web session if it was provided. if self._supplied_websession or self._websession is None: return _LOGGER.debug('Closing connections') # Need to set _websession to none first to prevent any other task # from closing it as well. temp_websession = self._websession self._websession = None await temp_websession.close() await asyncio.sleep(0) _LOGGER.debug('Connections closed') async def _request( self, method: str, endpoint: str, *, headers: dict = None, params: dict = None, data: dict = None, json: dict = None, login_request: bool = False, **kwargs) -> Optional[dict]: # Get a security token if we do not have one AND this request # is not to get a security token. 
if self._security_token is None and not login_request: await self._get_security_token() if self._security_token is None: return None url = '{0}/{1}'.format(API_BASE, endpoint) if not headers: headers = {} if self._security_token: headers['SecurityToken'] = self._security_token headers.update({ 'MyQApplicationId': BRAND_MAPPINGS[self._brand]['app_id'], 'User-Agent': DEFAULT_USER_AGENT, }) # Create the web session if none exist. if self._websession is None: self._create_websession() start_request_time = datetime.time(datetime.now()) _LOGGER.debug('%s Initiating request to %s', start_request_time, url) timeout = DEFAULT_TIMEOUT # Repeat twice amount of max requests retries for timeout errors. for attempt in range(0, (DEFAULT_REQUEST_RETRIES * 2) - 1): try: async with self._websession.request( method, url, headers=headers, params=params, data=data, json=json, timeout=timeout, **kwargs) as resp: resp.raise_for_status() return await resp.json(content_type=None) except asyncio.TimeoutError: # Start increasing timeout if already tried twice.. 
if attempt > 1: timeout = timeout * 2 _LOGGER.debug('%s Timeout requesting from %s', start_request_time, endpoint) except ClientError as err: if attempt == DEFAULT_REQUEST_RETRIES - 1: raise RequestError('{} Client Error while requesting ' 'data from {}: {}'.format( start_request_time, endpoint, err)) _LOGGER.warning('%s Error requesting from %s; retrying: ' '%s', start_request_time, endpoint, err) await asyncio.sleep(5) raise RequestError('{} Constant timeouts while requesting data ' 'from {}'.format(start_request_time, endpoint)) async def _update_device_state(self) -> None: async with self._update_lock: if datetime.utcnow() - self._last_update >\ MIN_TIME_BETWEEN_UPDATES: self.online = await self._get_device_states() async def _get_device_states(self) -> bool: _LOGGER.debug('Retrieving new device states') try: devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT) except RequestError as err: _LOGGER.error('Getting device states failed: %s', err) return False if devices_resp is None: return False return_code = int(devices_resp.get('ReturnCode', 1)) if return_code != 0: if return_code == -3333: # Login error, need to retrieve a new token next time. 
self._security_token = None _LOGGER.debug('Security token expired') else: _LOGGER.error( 'Error %s while retrieving states: %s', devices_resp.get('ReturnCode'), devices_resp.get('ErrorMessage', 'Unknown Error')) return False self._store_device_states(devices_resp.get('Devices', [])) _LOGGER.debug('New device states retrieved') return True def _store_device_states(self, devices: dict) -> None: for device in self._devices: myq_device = next( (element for element in devices if element.get('MyQDeviceId') == device['device_id']), None) if myq_device is not None: device['device_info'] = myq_device continue self._last_update = datetime.utcnow() async def authenticate(self, username: str, password: str) -> None: """Authenticate against the API.""" self._credentials = { 'username': username, 'password': password, } await self._get_security_token() async def get_devices(self, covers_only: bool = True) -> list: """Get a list of all devices associated with the account.""" from .device import MyQDevice _LOGGER.debug('Retrieving list of devices') devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT) # print(json.dumps(devices_resp, indent=4)) device_list = [] if devices_resp is None: return device_list for device in devices_resp['Devices']: if not covers_only or \ device['MyQDeviceTypeName'] in SUPPORTED_DEVICE_TYPE_NAMES: self._devices.append({ 'device_id': device['MyQDeviceId'], 'device_info': device }) myq_device = MyQDevice( self._devices[-1], self._brand, self) device_list.append(myq_device) # Store current device states. self._store_device_states(devices_resp.get('Devices', [])) _LOGGER.debug('List of devices retrieved') return device_list
arraylabs/pymyq
pymyq/api.py
API.get_devices
python
async def get_devices(self, covers_only: bool = True) -> list: from .device import MyQDevice _LOGGER.debug('Retrieving list of devices') devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT) # print(json.dumps(devices_resp, indent=4)) device_list = [] if devices_resp is None: return device_list for device in devices_resp['Devices']: if not covers_only or \ device['MyQDeviceTypeName'] in SUPPORTED_DEVICE_TYPE_NAMES: self._devices.append({ 'device_id': device['MyQDeviceId'], 'device_info': device }) myq_device = MyQDevice( self._devices[-1], self._brand, self) device_list.append(myq_device) # Store current device states. self._store_device_states(devices_resp.get('Devices', [])) _LOGGER.debug('List of devices retrieved') return device_list
Get a list of all devices associated with the account.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L255-L283
[ "async def _request(\n self,\n method: str,\n endpoint: str,\n *,\n headers: dict = None,\n params: dict = None,\n data: dict = None,\n json: dict = None,\n login_request: bool = False,\n **kwargs) -> Optional[dict]:\n\n # Get a security token if we do not have one AND this request\n # is not to get a security token.\n if self._security_token is None and not login_request:\n await self._get_security_token()\n if self._security_token is None:\n return None\n\n url = '{0}/{1}'.format(API_BASE, endpoint)\n\n if not headers:\n headers = {}\n if self._security_token:\n headers['SecurityToken'] = self._security_token\n headers.update({\n 'MyQApplicationId': BRAND_MAPPINGS[self._brand]['app_id'],\n 'User-Agent': DEFAULT_USER_AGENT,\n })\n\n # Create the web session if none exist.\n if self._websession is None:\n self._create_websession()\n\n start_request_time = datetime.time(datetime.now())\n _LOGGER.debug('%s Initiating request to %s', start_request_time, url)\n timeout = DEFAULT_TIMEOUT\n # Repeat twice amount of max requests retries for timeout errors.\n for attempt in range(0, (DEFAULT_REQUEST_RETRIES * 2) - 1):\n try:\n async with self._websession.request(\n method, url, headers=headers, params=params,\n data=data, json=json, timeout=timeout,\n **kwargs) as resp:\n resp.raise_for_status()\n return await resp.json(content_type=None)\n except asyncio.TimeoutError:\n # Start increasing timeout if already tried twice..\n if attempt > 1:\n timeout = timeout * 2\n _LOGGER.debug('%s Timeout requesting from %s',\n start_request_time, endpoint)\n except ClientError as err:\n if attempt == DEFAULT_REQUEST_RETRIES - 1:\n raise RequestError('{} Client Error while requesting '\n 'data from {}: {}'.format(\n start_request_time, endpoint,\n err))\n\n _LOGGER.warning('%s Error requesting from %s; retrying: '\n '%s', start_request_time, endpoint, err)\n await asyncio.sleep(5)\n\n raise RequestError('{} Constant timeouts while requesting data '\n 'from 
{}'.format(start_request_time, endpoint))\n" ]
class API: """Define a class for interacting with the MyQ iOS App API.""" def __init__(self, brand: str, websession: ClientSession = None) -> None: """Initialize the API object.""" if brand not in BRAND_MAPPINGS: raise UnsupportedBrandError('Unknown brand: {0}'.format(brand)) self._brand = brand self._websession = websession self._supplied_websession = True self._credentials = None self._security_token = None self._devices = [] self._last_update = None self.online = False self._update_lock = asyncio.Lock() self._security_token_lock = asyncio.Lock() def _create_websession(self): """Create a web session.""" from socket import AF_INET from aiohttp import ClientTimeout, TCPConnector _LOGGER.debug('Creating web session') conn = TCPConnector( family=AF_INET, limit_per_host=5, enable_cleanup_closed=True, ) # Create session object. session_timeout = ClientTimeout(connect=10) self._websession = ClientSession(connector=conn, timeout=session_timeout) self._supplied_websession = False async def close_websession(self): """Close web session if not already closed and created by us.""" # We do not close the web session if it was provided. if self._supplied_websession or self._websession is None: return _LOGGER.debug('Closing connections') # Need to set _websession to none first to prevent any other task # from closing it as well. temp_websession = self._websession self._websession = None await temp_websession.close() await asyncio.sleep(0) _LOGGER.debug('Connections closed') async def _request( self, method: str, endpoint: str, *, headers: dict = None, params: dict = None, data: dict = None, json: dict = None, login_request: bool = False, **kwargs) -> Optional[dict]: # Get a security token if we do not have one AND this request # is not to get a security token. 
if self._security_token is None and not login_request: await self._get_security_token() if self._security_token is None: return None url = '{0}/{1}'.format(API_BASE, endpoint) if not headers: headers = {} if self._security_token: headers['SecurityToken'] = self._security_token headers.update({ 'MyQApplicationId': BRAND_MAPPINGS[self._brand]['app_id'], 'User-Agent': DEFAULT_USER_AGENT, }) # Create the web session if none exist. if self._websession is None: self._create_websession() start_request_time = datetime.time(datetime.now()) _LOGGER.debug('%s Initiating request to %s', start_request_time, url) timeout = DEFAULT_TIMEOUT # Repeat twice amount of max requests retries for timeout errors. for attempt in range(0, (DEFAULT_REQUEST_RETRIES * 2) - 1): try: async with self._websession.request( method, url, headers=headers, params=params, data=data, json=json, timeout=timeout, **kwargs) as resp: resp.raise_for_status() return await resp.json(content_type=None) except asyncio.TimeoutError: # Start increasing timeout if already tried twice.. 
if attempt > 1: timeout = timeout * 2 _LOGGER.debug('%s Timeout requesting from %s', start_request_time, endpoint) except ClientError as err: if attempt == DEFAULT_REQUEST_RETRIES - 1: raise RequestError('{} Client Error while requesting ' 'data from {}: {}'.format( start_request_time, endpoint, err)) _LOGGER.warning('%s Error requesting from %s; retrying: ' '%s', start_request_time, endpoint, err) await asyncio.sleep(5) raise RequestError('{} Constant timeouts while requesting data ' 'from {}'.format(start_request_time, endpoint)) async def _update_device_state(self) -> None: async with self._update_lock: if datetime.utcnow() - self._last_update >\ MIN_TIME_BETWEEN_UPDATES: self.online = await self._get_device_states() async def _get_device_states(self) -> bool: _LOGGER.debug('Retrieving new device states') try: devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT) except RequestError as err: _LOGGER.error('Getting device states failed: %s', err) return False if devices_resp is None: return False return_code = int(devices_resp.get('ReturnCode', 1)) if return_code != 0: if return_code == -3333: # Login error, need to retrieve a new token next time. 
self._security_token = None _LOGGER.debug('Security token expired') else: _LOGGER.error( 'Error %s while retrieving states: %s', devices_resp.get('ReturnCode'), devices_resp.get('ErrorMessage', 'Unknown Error')) return False self._store_device_states(devices_resp.get('Devices', [])) _LOGGER.debug('New device states retrieved') return True def _store_device_states(self, devices: dict) -> None: for device in self._devices: myq_device = next( (element for element in devices if element.get('MyQDeviceId') == device['device_id']), None) if myq_device is not None: device['device_info'] = myq_device continue self._last_update = datetime.utcnow() async def authenticate(self, username: str, password: str) -> None: """Authenticate against the API.""" self._credentials = { 'username': username, 'password': password, } await self._get_security_token() async def _get_security_token(self) -> None: """Request a security token.""" _LOGGER.debug('Requesting security token.') if self._credentials is None: return # Make sure only 1 request can be sent at a time. async with self._security_token_lock: # Confirm there is still no security token. if self._security_token is None: login_resp = await self._request( 'post', LOGIN_ENDPOINT, json=self._credentials, login_request=True, ) return_code = int(login_resp.get('ReturnCode', 1)) if return_code != 0: if return_code == 203: # Invalid username or password. _LOGGER.debug('Invalid username or password') self._credentials = None raise MyQError(login_resp['ErrorMessage']) self._security_token = login_resp['SecurityToken']
arraylabs/pymyq
example.py
main
python
async def main() -> None: loglevels = dict((logging.getLevelName(level), level) for level in [10, 20, 30, 40, 50]) logging.basicConfig( level=loglevels[LOGLEVEL], format='%(asctime)s:%(levelname)s:\t%(name)s\t%(message)s') async with ClientSession() as websession: try: myq = await pymyq.login( MYQ_ACCOUNT_EMAIL, MYQ_ACCOUNT_PASSWORD, MYQ_BRAND, websession) devices = await myq.get_devices() for idx, device in enumerate(devices): print('Device #{0}: {1}'.format(idx + 1, device.name)) print('--------') print('Brand: {0}'.format(device.brand)) print('Type: {0}'.format(device.type)) print('Serial: {0}'.format(device.serial)) print('Device ID: {0}'.format(device.device_id)) print('Parent ID: {0}'.format(device.parent_id)) print('Online: {0}'.format(device.available)) print('Unattended Open: {0}'.format(device.open_allowed)) print('Unattended Close: {0}'.format(device.close_allowed)) print() print('Current State: {0}'.format(device.state)) if JSON_DUMP: print(json.dumps(device._device, indent=4)) else: if device.state != STATE_OPEN: print('Opening the device...') await device.open() print(' 0 Current State: {0}'.format(device.state)) for waited in range(1, 30): if device.state == STATE_OPEN: break await asyncio.sleep(1) await device.update() print(' {} Current State: {}'.format( waited, device.state)) await asyncio.sleep(10) await device.update() print() print('Current State: {0}'.format(device.state)) if device.state != STATE_CLOSED: print('Closing the device...') await device.close() print(' 0 Current State: {0}'.format(device.state)) for waited in range(1, 30): if device.state == STATE_CLOSED: break await asyncio.sleep(1) await device.update() print(' {} Current State: {}'.format( waited, device.state)) await asyncio.sleep(10) await device.update() print() print('Current State: {0}'.format(device.state)) except MyQError as err: print(err)
Create the aiohttp session and run the example.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/example.py#L31-L97
[ "async def login(\n username: str, password: str, brand: str,\n websession: ClientSession = None) -> API:\n \"\"\"Log in to the API.\"\"\"\n api = API(brand, websession)\n await api.authenticate(username, password)\n return api\n" ]
"""Run an example script to quickly test any MyQ account.""" import asyncio import logging import json from aiohttp import ClientSession import pymyq from pymyq.device import STATE_CLOSED, STATE_OPEN from pymyq.errors import MyQError # Provide your email and password account details for MyQ. MYQ_ACCOUNT_EMAIL = '<EMAIL>' MYQ_ACCOUNT_PASSWORD = '<PASSWORD>' # BRAND can be one of the following: # liftmaster # chamberlain # craftsmaster # merlin MYQ_BRAND = '<BRAND>' LOGLEVEL = 'ERROR' # Set JSON_DUMP to True to dump all the device information retrieved, # this can be helpful to determine what else is available. # Set JSON_DUMP to False to open/close the doors instead. i.e.: # JSON_DUMP = False JSON_DUMP = True asyncio.get_event_loop().run_until_complete(main())
arraylabs/pymyq
pymyq/device.py
MyQDevice.name
python
def name(self) -> str: return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'desc')
Return the device name.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L64-L68
null
class MyQDevice: """Define a generic MyQ device.""" def __init__(self, device: dict, brand: str, api: API) -> None: """Initialize.""" self._brand = brand self._device = device self._device_json = device['device_info'] self._device_id = self._device_json['MyQDeviceId'] self.api = api self.next_allowed_update = None @property def brand(self) -> str: """Return the brand of this device.""" return self._brand @property def device_id(self) -> int: """Return the device ID.""" return self._device_id @property def parent_id(self) -> Union[None, int]: """Return the ID of the parent device (if it exists).""" return self._device_json.get('ParentMyQDeviceId') @property @property def available(self) -> bool: """Return if device is online or not.""" # Both ability to retrieve state from MyQ cloud AND device itself has # to be online. is_available = self.api.online and \ next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'online') == "True" return is_available @property def serial(self) -> str: """Return the device serial number.""" return self._device_json.get('SerialNumber') @property def open_allowed(self) -> bool: """Door can be opened unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedopenallowed')\ == "1" @property def close_allowed(self) -> bool: """Door can be closed unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedcloseallowed')\ == "1" @property def state(self) -> str: """Return the current state of the device (if it exists).""" return self._coerce_state_from_string( next( attr['Value'] for attr in self._device_json.get( 'Attributes', []) if attr.get('AttributeDisplayName') == 'doorstate')) def _update_state(self, value: str) -> None: """Update state temporary during open or close.""" attribute = next(attr for attr in 
self._device['device_info'].get( 'Attributes', []) if attr.get( 'AttributeDisplayName') == 'doorstate') if attribute is not None: attribute['Value'] = value @property def type(self) -> str: """Return the device type.""" return self._device_json.get('MyQDeviceTypeName') @staticmethod def _coerce_state_from_string(value: Union[int, str]) -> str: """Return a proper state from a string input.""" try: return STATE_MAP[int(value)] except KeyError: _LOGGER.error('Unknown state: %s', value) return STATE_UNKNOWN # pylint: disable=protected-access async def _set_state(self, state: int) -> bool: """Set the state of the device.""" try: set_state_resp = await self.api._request( 'put', DEVICE_SET_ENDPOINT, json={ 'attributeName': 'desireddoorstate', 'myQDeviceId': self.device_id, 'AttributeValue': state, }) except RequestError as err: _LOGGER.error('%s: Setting state failed (and halting): %s', self.name, err) return False if set_state_resp is None: return False if int(set_state_resp.get('ReturnCode', 1)) != 0: _LOGGER.error( '%s: Error setting the device state: %s', self.name, set_state_resp.get('ErrorMessage', 'Unknown Error')) return False return True async def close(self) -> bool: """Close the device.""" _LOGGER.debug('%s: Sending close command', self.name) if not await self._set_state(0): return False # Do not allow update of this device's state for 10 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=10) # Ensure state is closed or closing. if self.state not in (STATE_CLOSED, STATE_CLOSING): # Set state to closing. self._update_state('5') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Close command send', self.name) return True async def open(self) -> bool: """Open the device.""" _LOGGER.debug('%s: Sending open command', self.name) if not await self._set_state(1): return False # Do not allow update of this device's state for 5 seconds. 
self.next_allowed_update = datetime.utcnow() + timedelta(seconds=5) # Ensure state is open or opening if self.state not in (STATE_OPEN, STATE_OPENING): # Set state to opening self._update_state('4') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Open command send', self.name) return True # pylint: disable=protected-access async def update(self) -> None: """Retrieve updated device state.""" if self.next_allowed_update is not None and \ datetime.utcnow() < self.next_allowed_update: return self.next_allowed_update = None await self.api._update_device_state() self._device_json = self._device['device_info'] async def close_connection(self): """Close the web session connection with MyQ""" await self.api.close_websession()
arraylabs/pymyq
pymyq/device.py
MyQDevice.available
python
def available(self) -> bool: # Both ability to retrieve state from MyQ cloud AND device itself has # to be online. is_available = self.api.online and \ next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'online') == "True" return is_available
Return if device is online or not.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L71-L81
null
class MyQDevice: """Define a generic MyQ device.""" def __init__(self, device: dict, brand: str, api: API) -> None: """Initialize.""" self._brand = brand self._device = device self._device_json = device['device_info'] self._device_id = self._device_json['MyQDeviceId'] self.api = api self.next_allowed_update = None @property def brand(self) -> str: """Return the brand of this device.""" return self._brand @property def device_id(self) -> int: """Return the device ID.""" return self._device_id @property def parent_id(self) -> Union[None, int]: """Return the ID of the parent device (if it exists).""" return self._device_json.get('ParentMyQDeviceId') @property def name(self) -> str: """Return the device name.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'desc') @property @property def serial(self) -> str: """Return the device serial number.""" return self._device_json.get('SerialNumber') @property def open_allowed(self) -> bool: """Door can be opened unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedopenallowed')\ == "1" @property def close_allowed(self) -> bool: """Door can be closed unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedcloseallowed')\ == "1" @property def state(self) -> str: """Return the current state of the device (if it exists).""" return self._coerce_state_from_string( next( attr['Value'] for attr in self._device_json.get( 'Attributes', []) if attr.get('AttributeDisplayName') == 'doorstate')) def _update_state(self, value: str) -> None: """Update state temporary during open or close.""" attribute = next(attr for attr in self._device['device_info'].get( 'Attributes', []) if attr.get( 'AttributeDisplayName') == 'doorstate') if attribute is not None: attribute['Value'] = value @property def 
type(self) -> str: """Return the device type.""" return self._device_json.get('MyQDeviceTypeName') @staticmethod def _coerce_state_from_string(value: Union[int, str]) -> str: """Return a proper state from a string input.""" try: return STATE_MAP[int(value)] except KeyError: _LOGGER.error('Unknown state: %s', value) return STATE_UNKNOWN # pylint: disable=protected-access async def _set_state(self, state: int) -> bool: """Set the state of the device.""" try: set_state_resp = await self.api._request( 'put', DEVICE_SET_ENDPOINT, json={ 'attributeName': 'desireddoorstate', 'myQDeviceId': self.device_id, 'AttributeValue': state, }) except RequestError as err: _LOGGER.error('%s: Setting state failed (and halting): %s', self.name, err) return False if set_state_resp is None: return False if int(set_state_resp.get('ReturnCode', 1)) != 0: _LOGGER.error( '%s: Error setting the device state: %s', self.name, set_state_resp.get('ErrorMessage', 'Unknown Error')) return False return True async def close(self) -> bool: """Close the device.""" _LOGGER.debug('%s: Sending close command', self.name) if not await self._set_state(0): return False # Do not allow update of this device's state for 10 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=10) # Ensure state is closed or closing. if self.state not in (STATE_CLOSED, STATE_CLOSING): # Set state to closing. self._update_state('5') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Close command send', self.name) return True async def open(self) -> bool: """Open the device.""" _LOGGER.debug('%s: Sending open command', self.name) if not await self._set_state(1): return False # Do not allow update of this device's state for 5 seconds. 
self.next_allowed_update = datetime.utcnow() + timedelta(seconds=5) # Ensure state is open or opening if self.state not in (STATE_OPEN, STATE_OPENING): # Set state to opening self._update_state('4') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Open command send', self.name) return True # pylint: disable=protected-access async def update(self) -> None: """Retrieve updated device state.""" if self.next_allowed_update is not None and \ datetime.utcnow() < self.next_allowed_update: return self.next_allowed_update = None await self.api._update_device_state() self._device_json = self._device['device_info'] async def close_connection(self): """Close the web session connection with MyQ""" await self.api.close_websession()
arraylabs/pymyq
pymyq/device.py
MyQDevice.open_allowed
python
def open_allowed(self) -> bool: return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedopenallowed')\ == "1"
Door can be opened unattended.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L89-L94
null
class MyQDevice: """Define a generic MyQ device.""" def __init__(self, device: dict, brand: str, api: API) -> None: """Initialize.""" self._brand = brand self._device = device self._device_json = device['device_info'] self._device_id = self._device_json['MyQDeviceId'] self.api = api self.next_allowed_update = None @property def brand(self) -> str: """Return the brand of this device.""" return self._brand @property def device_id(self) -> int: """Return the device ID.""" return self._device_id @property def parent_id(self) -> Union[None, int]: """Return the ID of the parent device (if it exists).""" return self._device_json.get('ParentMyQDeviceId') @property def name(self) -> str: """Return the device name.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'desc') @property def available(self) -> bool: """Return if device is online or not.""" # Both ability to retrieve state from MyQ cloud AND device itself has # to be online. 
is_available = self.api.online and \ next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'online') == "True" return is_available @property def serial(self) -> str: """Return the device serial number.""" return self._device_json.get('SerialNumber') @property @property def close_allowed(self) -> bool: """Door can be closed unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedcloseallowed')\ == "1" @property def state(self) -> str: """Return the current state of the device (if it exists).""" return self._coerce_state_from_string( next( attr['Value'] for attr in self._device_json.get( 'Attributes', []) if attr.get('AttributeDisplayName') == 'doorstate')) def _update_state(self, value: str) -> None: """Update state temporary during open or close.""" attribute = next(attr for attr in self._device['device_info'].get( 'Attributes', []) if attr.get( 'AttributeDisplayName') == 'doorstate') if attribute is not None: attribute['Value'] = value @property def type(self) -> str: """Return the device type.""" return self._device_json.get('MyQDeviceTypeName') @staticmethod def _coerce_state_from_string(value: Union[int, str]) -> str: """Return a proper state from a string input.""" try: return STATE_MAP[int(value)] except KeyError: _LOGGER.error('Unknown state: %s', value) return STATE_UNKNOWN # pylint: disable=protected-access async def _set_state(self, state: int) -> bool: """Set the state of the device.""" try: set_state_resp = await self.api._request( 'put', DEVICE_SET_ENDPOINT, json={ 'attributeName': 'desireddoorstate', 'myQDeviceId': self.device_id, 'AttributeValue': state, }) except RequestError as err: _LOGGER.error('%s: Setting state failed (and halting): %s', self.name, err) return False if set_state_resp is None: return False if int(set_state_resp.get('ReturnCode', 1)) != 0: _LOGGER.error( '%s: Error setting the device 
state: %s', self.name, set_state_resp.get('ErrorMessage', 'Unknown Error')) return False return True async def close(self) -> bool: """Close the device.""" _LOGGER.debug('%s: Sending close command', self.name) if not await self._set_state(0): return False # Do not allow update of this device's state for 10 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=10) # Ensure state is closed or closing. if self.state not in (STATE_CLOSED, STATE_CLOSING): # Set state to closing. self._update_state('5') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Close command send', self.name) return True async def open(self) -> bool: """Open the device.""" _LOGGER.debug('%s: Sending open command', self.name) if not await self._set_state(1): return False # Do not allow update of this device's state for 5 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=5) # Ensure state is open or opening if self.state not in (STATE_OPEN, STATE_OPENING): # Set state to opening self._update_state('4') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Open command send', self.name) return True # pylint: disable=protected-access async def update(self) -> None: """Retrieve updated device state.""" if self.next_allowed_update is not None and \ datetime.utcnow() < self.next_allowed_update: return self.next_allowed_update = None await self.api._update_device_state() self._device_json = self._device['device_info'] async def close_connection(self): """Close the web session connection with MyQ""" await self.api.close_websession()
arraylabs/pymyq
pymyq/device.py
MyQDevice.close_allowed
python
def close_allowed(self) -> bool: return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedcloseallowed')\ == "1"
Door can be closed unattended.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L97-L102
null
class MyQDevice: """Define a generic MyQ device.""" def __init__(self, device: dict, brand: str, api: API) -> None: """Initialize.""" self._brand = brand self._device = device self._device_json = device['device_info'] self._device_id = self._device_json['MyQDeviceId'] self.api = api self.next_allowed_update = None @property def brand(self) -> str: """Return the brand of this device.""" return self._brand @property def device_id(self) -> int: """Return the device ID.""" return self._device_id @property def parent_id(self) -> Union[None, int]: """Return the ID of the parent device (if it exists).""" return self._device_json.get('ParentMyQDeviceId') @property def name(self) -> str: """Return the device name.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'desc') @property def available(self) -> bool: """Return if device is online or not.""" # Both ability to retrieve state from MyQ cloud AND device itself has # to be online. 
is_available = self.api.online and \ next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'online') == "True" return is_available @property def serial(self) -> str: """Return the device serial number.""" return self._device_json.get('SerialNumber') @property def open_allowed(self) -> bool: """Door can be opened unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedopenallowed')\ == "1" @property @property def state(self) -> str: """Return the current state of the device (if it exists).""" return self._coerce_state_from_string( next( attr['Value'] for attr in self._device_json.get( 'Attributes', []) if attr.get('AttributeDisplayName') == 'doorstate')) def _update_state(self, value: str) -> None: """Update state temporary during open or close.""" attribute = next(attr for attr in self._device['device_info'].get( 'Attributes', []) if attr.get( 'AttributeDisplayName') == 'doorstate') if attribute is not None: attribute['Value'] = value @property def type(self) -> str: """Return the device type.""" return self._device_json.get('MyQDeviceTypeName') @staticmethod def _coerce_state_from_string(value: Union[int, str]) -> str: """Return a proper state from a string input.""" try: return STATE_MAP[int(value)] except KeyError: _LOGGER.error('Unknown state: %s', value) return STATE_UNKNOWN # pylint: disable=protected-access async def _set_state(self, state: int) -> bool: """Set the state of the device.""" try: set_state_resp = await self.api._request( 'put', DEVICE_SET_ENDPOINT, json={ 'attributeName': 'desireddoorstate', 'myQDeviceId': self.device_id, 'AttributeValue': state, }) except RequestError as err: _LOGGER.error('%s: Setting state failed (and halting): %s', self.name, err) return False if set_state_resp is None: return False if int(set_state_resp.get('ReturnCode', 1)) != 0: _LOGGER.error( '%s: Error setting the device 
state: %s', self.name, set_state_resp.get('ErrorMessage', 'Unknown Error')) return False return True async def close(self) -> bool: """Close the device.""" _LOGGER.debug('%s: Sending close command', self.name) if not await self._set_state(0): return False # Do not allow update of this device's state for 10 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=10) # Ensure state is closed or closing. if self.state not in (STATE_CLOSED, STATE_CLOSING): # Set state to closing. self._update_state('5') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Close command send', self.name) return True async def open(self) -> bool: """Open the device.""" _LOGGER.debug('%s: Sending open command', self.name) if not await self._set_state(1): return False # Do not allow update of this device's state for 5 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=5) # Ensure state is open or opening if self.state not in (STATE_OPEN, STATE_OPENING): # Set state to opening self._update_state('4') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Open command send', self.name) return True # pylint: disable=protected-access async def update(self) -> None: """Retrieve updated device state.""" if self.next_allowed_update is not None and \ datetime.utcnow() < self.next_allowed_update: return self.next_allowed_update = None await self.api._update_device_state() self._device_json = self._device['device_info'] async def close_connection(self): """Close the web session connection with MyQ""" await self.api.close_websession()
arraylabs/pymyq
pymyq/device.py
MyQDevice.state
python
def state(self) -> str: return self._coerce_state_from_string( next( attr['Value'] for attr in self._device_json.get( 'Attributes', []) if attr.get('AttributeDisplayName') == 'doorstate'))
Return the current state of the device (if it exists).
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L105-L111
[ "def _coerce_state_from_string(value: Union[int, str]) -> str:\n \"\"\"Return a proper state from a string input.\"\"\"\n try:\n return STATE_MAP[int(value)]\n except KeyError:\n _LOGGER.error('Unknown state: %s', value)\n return STATE_UNKNOWN\n" ]
class MyQDevice: """Define a generic MyQ device.""" def __init__(self, device: dict, brand: str, api: API) -> None: """Initialize.""" self._brand = brand self._device = device self._device_json = device['device_info'] self._device_id = self._device_json['MyQDeviceId'] self.api = api self.next_allowed_update = None @property def brand(self) -> str: """Return the brand of this device.""" return self._brand @property def device_id(self) -> int: """Return the device ID.""" return self._device_id @property def parent_id(self) -> Union[None, int]: """Return the ID of the parent device (if it exists).""" return self._device_json.get('ParentMyQDeviceId') @property def name(self) -> str: """Return the device name.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'desc') @property def available(self) -> bool: """Return if device is online or not.""" # Both ability to retrieve state from MyQ cloud AND device itself has # to be online. 
is_available = self.api.online and \ next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'online') == "True" return is_available @property def serial(self) -> str: """Return the device serial number.""" return self._device_json.get('SerialNumber') @property def open_allowed(self) -> bool: """Door can be opened unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedopenallowed')\ == "1" @property def close_allowed(self) -> bool: """Door can be closed unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedcloseallowed')\ == "1" @property def _update_state(self, value: str) -> None: """Update state temporary during open or close.""" attribute = next(attr for attr in self._device['device_info'].get( 'Attributes', []) if attr.get( 'AttributeDisplayName') == 'doorstate') if attribute is not None: attribute['Value'] = value @property def type(self) -> str: """Return the device type.""" return self._device_json.get('MyQDeviceTypeName') @staticmethod def _coerce_state_from_string(value: Union[int, str]) -> str: """Return a proper state from a string input.""" try: return STATE_MAP[int(value)] except KeyError: _LOGGER.error('Unknown state: %s', value) return STATE_UNKNOWN # pylint: disable=protected-access async def _set_state(self, state: int) -> bool: """Set the state of the device.""" try: set_state_resp = await self.api._request( 'put', DEVICE_SET_ENDPOINT, json={ 'attributeName': 'desireddoorstate', 'myQDeviceId': self.device_id, 'AttributeValue': state, }) except RequestError as err: _LOGGER.error('%s: Setting state failed (and halting): %s', self.name, err) return False if set_state_resp is None: return False if int(set_state_resp.get('ReturnCode', 1)) != 0: _LOGGER.error( '%s: Error setting the device state: %s', self.name, 
set_state_resp.get('ErrorMessage', 'Unknown Error')) return False return True async def close(self) -> bool: """Close the device.""" _LOGGER.debug('%s: Sending close command', self.name) if not await self._set_state(0): return False # Do not allow update of this device's state for 10 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=10) # Ensure state is closed or closing. if self.state not in (STATE_CLOSED, STATE_CLOSING): # Set state to closing. self._update_state('5') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Close command send', self.name) return True async def open(self) -> bool: """Open the device.""" _LOGGER.debug('%s: Sending open command', self.name) if not await self._set_state(1): return False # Do not allow update of this device's state for 5 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=5) # Ensure state is open or opening if self.state not in (STATE_OPEN, STATE_OPENING): # Set state to opening self._update_state('4') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Open command send', self.name) return True # pylint: disable=protected-access async def update(self) -> None: """Retrieve updated device state.""" if self.next_allowed_update is not None and \ datetime.utcnow() < self.next_allowed_update: return self.next_allowed_update = None await self.api._update_device_state() self._device_json = self._device['device_info'] async def close_connection(self): """Close the web session connection with MyQ""" await self.api.close_websession()
arraylabs/pymyq
pymyq/device.py
MyQDevice._update_state
python
def _update_state(self, value: str) -> None: attribute = next(attr for attr in self._device['device_info'].get( 'Attributes', []) if attr.get( 'AttributeDisplayName') == 'doorstate') if attribute is not None: attribute['Value'] = value
Update state temporary during open or close.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L113-L119
null
class MyQDevice: """Define a generic MyQ device.""" def __init__(self, device: dict, brand: str, api: API) -> None: """Initialize.""" self._brand = brand self._device = device self._device_json = device['device_info'] self._device_id = self._device_json['MyQDeviceId'] self.api = api self.next_allowed_update = None @property def brand(self) -> str: """Return the brand of this device.""" return self._brand @property def device_id(self) -> int: """Return the device ID.""" return self._device_id @property def parent_id(self) -> Union[None, int]: """Return the ID of the parent device (if it exists).""" return self._device_json.get('ParentMyQDeviceId') @property def name(self) -> str: """Return the device name.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'desc') @property def available(self) -> bool: """Return if device is online or not.""" # Both ability to retrieve state from MyQ cloud AND device itself has # to be online. 
is_available = self.api.online and \ next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'online') == "True" return is_available @property def serial(self) -> str: """Return the device serial number.""" return self._device_json.get('SerialNumber') @property def open_allowed(self) -> bool: """Door can be opened unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedopenallowed')\ == "1" @property def close_allowed(self) -> bool: """Door can be closed unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedcloseallowed')\ == "1" @property def state(self) -> str: """Return the current state of the device (if it exists).""" return self._coerce_state_from_string( next( attr['Value'] for attr in self._device_json.get( 'Attributes', []) if attr.get('AttributeDisplayName') == 'doorstate')) @property def type(self) -> str: """Return the device type.""" return self._device_json.get('MyQDeviceTypeName') @staticmethod def _coerce_state_from_string(value: Union[int, str]) -> str: """Return a proper state from a string input.""" try: return STATE_MAP[int(value)] except KeyError: _LOGGER.error('Unknown state: %s', value) return STATE_UNKNOWN # pylint: disable=protected-access async def _set_state(self, state: int) -> bool: """Set the state of the device.""" try: set_state_resp = await self.api._request( 'put', DEVICE_SET_ENDPOINT, json={ 'attributeName': 'desireddoorstate', 'myQDeviceId': self.device_id, 'AttributeValue': state, }) except RequestError as err: _LOGGER.error('%s: Setting state failed (and halting): %s', self.name, err) return False if set_state_resp is None: return False if int(set_state_resp.get('ReturnCode', 1)) != 0: _LOGGER.error( '%s: Error setting the device state: %s', self.name, set_state_resp.get('ErrorMessage', 'Unknown 
Error')) return False return True async def close(self) -> bool: """Close the device.""" _LOGGER.debug('%s: Sending close command', self.name) if not await self._set_state(0): return False # Do not allow update of this device's state for 10 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=10) # Ensure state is closed or closing. if self.state not in (STATE_CLOSED, STATE_CLOSING): # Set state to closing. self._update_state('5') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Close command send', self.name) return True async def open(self) -> bool: """Open the device.""" _LOGGER.debug('%s: Sending open command', self.name) if not await self._set_state(1): return False # Do not allow update of this device's state for 5 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=5) # Ensure state is open or opening if self.state not in (STATE_OPEN, STATE_OPENING): # Set state to opening self._update_state('4') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Open command send', self.name) return True # pylint: disable=protected-access async def update(self) -> None: """Retrieve updated device state.""" if self.next_allowed_update is not None and \ datetime.utcnow() < self.next_allowed_update: return self.next_allowed_update = None await self.api._update_device_state() self._device_json = self._device['device_info'] async def close_connection(self): """Close the web session connection with MyQ""" await self.api.close_websession()
arraylabs/pymyq
pymyq/device.py
MyQDevice._coerce_state_from_string
python
def _coerce_state_from_string(value: Union[int, str]) -> str: try: return STATE_MAP[int(value)] except KeyError: _LOGGER.error('Unknown state: %s', value) return STATE_UNKNOWN
Return a proper state from a string input.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L127-L133
null
class MyQDevice: """Define a generic MyQ device.""" def __init__(self, device: dict, brand: str, api: API) -> None: """Initialize.""" self._brand = brand self._device = device self._device_json = device['device_info'] self._device_id = self._device_json['MyQDeviceId'] self.api = api self.next_allowed_update = None @property def brand(self) -> str: """Return the brand of this device.""" return self._brand @property def device_id(self) -> int: """Return the device ID.""" return self._device_id @property def parent_id(self) -> Union[None, int]: """Return the ID of the parent device (if it exists).""" return self._device_json.get('ParentMyQDeviceId') @property def name(self) -> str: """Return the device name.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'desc') @property def available(self) -> bool: """Return if device is online or not.""" # Both ability to retrieve state from MyQ cloud AND device itself has # to be online. 
is_available = self.api.online and \ next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'online') == "True" return is_available @property def serial(self) -> str: """Return the device serial number.""" return self._device_json.get('SerialNumber') @property def open_allowed(self) -> bool: """Door can be opened unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedopenallowed')\ == "1" @property def close_allowed(self) -> bool: """Door can be closed unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedcloseallowed')\ == "1" @property def state(self) -> str: """Return the current state of the device (if it exists).""" return self._coerce_state_from_string( next( attr['Value'] for attr in self._device_json.get( 'Attributes', []) if attr.get('AttributeDisplayName') == 'doorstate')) def _update_state(self, value: str) -> None: """Update state temporary during open or close.""" attribute = next(attr for attr in self._device['device_info'].get( 'Attributes', []) if attr.get( 'AttributeDisplayName') == 'doorstate') if attribute is not None: attribute['Value'] = value @property def type(self) -> str: """Return the device type.""" return self._device_json.get('MyQDeviceTypeName') @staticmethod # pylint: disable=protected-access async def _set_state(self, state: int) -> bool: """Set the state of the device.""" try: set_state_resp = await self.api._request( 'put', DEVICE_SET_ENDPOINT, json={ 'attributeName': 'desireddoorstate', 'myQDeviceId': self.device_id, 'AttributeValue': state, }) except RequestError as err: _LOGGER.error('%s: Setting state failed (and halting): %s', self.name, err) return False if set_state_resp is None: return False if int(set_state_resp.get('ReturnCode', 1)) != 0: _LOGGER.error( '%s: Error setting the device 
state: %s', self.name, set_state_resp.get('ErrorMessage', 'Unknown Error')) return False return True async def close(self) -> bool: """Close the device.""" _LOGGER.debug('%s: Sending close command', self.name) if not await self._set_state(0): return False # Do not allow update of this device's state for 10 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=10) # Ensure state is closed or closing. if self.state not in (STATE_CLOSED, STATE_CLOSING): # Set state to closing. self._update_state('5') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Close command send', self.name) return True async def open(self) -> bool: """Open the device.""" _LOGGER.debug('%s: Sending open command', self.name) if not await self._set_state(1): return False # Do not allow update of this device's state for 5 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=5) # Ensure state is open or opening if self.state not in (STATE_OPEN, STATE_OPENING): # Set state to opening self._update_state('4') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Open command send', self.name) return True # pylint: disable=protected-access async def update(self) -> None: """Retrieve updated device state.""" if self.next_allowed_update is not None and \ datetime.utcnow() < self.next_allowed_update: return self.next_allowed_update = None await self.api._update_device_state() self._device_json = self._device['device_info'] async def close_connection(self): """Close the web session connection with MyQ""" await self.api.close_websession()
arraylabs/pymyq
pymyq/device.py
MyQDevice._set_state
python
async def _set_state(self, state: int) -> bool: try: set_state_resp = await self.api._request( 'put', DEVICE_SET_ENDPOINT, json={ 'attributeName': 'desireddoorstate', 'myQDeviceId': self.device_id, 'AttributeValue': state, }) except RequestError as err: _LOGGER.error('%s: Setting state failed (and halting): %s', self.name, err) return False if set_state_resp is None: return False if int(set_state_resp.get('ReturnCode', 1)) != 0: _LOGGER.error( '%s: Error setting the device state: %s', self.name, set_state_resp.get('ErrorMessage', 'Unknown Error')) return False return True
Set the state of the device.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L136-L161
null
class MyQDevice: """Define a generic MyQ device.""" def __init__(self, device: dict, brand: str, api: API) -> None: """Initialize.""" self._brand = brand self._device = device self._device_json = device['device_info'] self._device_id = self._device_json['MyQDeviceId'] self.api = api self.next_allowed_update = None @property def brand(self) -> str: """Return the brand of this device.""" return self._brand @property def device_id(self) -> int: """Return the device ID.""" return self._device_id @property def parent_id(self) -> Union[None, int]: """Return the ID of the parent device (if it exists).""" return self._device_json.get('ParentMyQDeviceId') @property def name(self) -> str: """Return the device name.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'desc') @property def available(self) -> bool: """Return if device is online or not.""" # Both ability to retrieve state from MyQ cloud AND device itself has # to be online. 
is_available = self.api.online and \ next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'online') == "True" return is_available @property def serial(self) -> str: """Return the device serial number.""" return self._device_json.get('SerialNumber') @property def open_allowed(self) -> bool: """Door can be opened unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedopenallowed')\ == "1" @property def close_allowed(self) -> bool: """Door can be closed unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedcloseallowed')\ == "1" @property def state(self) -> str: """Return the current state of the device (if it exists).""" return self._coerce_state_from_string( next( attr['Value'] for attr in self._device_json.get( 'Attributes', []) if attr.get('AttributeDisplayName') == 'doorstate')) def _update_state(self, value: str) -> None: """Update state temporary during open or close.""" attribute = next(attr for attr in self._device['device_info'].get( 'Attributes', []) if attr.get( 'AttributeDisplayName') == 'doorstate') if attribute is not None: attribute['Value'] = value @property def type(self) -> str: """Return the device type.""" return self._device_json.get('MyQDeviceTypeName') @staticmethod def _coerce_state_from_string(value: Union[int, str]) -> str: """Return a proper state from a string input.""" try: return STATE_MAP[int(value)] except KeyError: _LOGGER.error('Unknown state: %s', value) return STATE_UNKNOWN # pylint: disable=protected-access async def close(self) -> bool: """Close the device.""" _LOGGER.debug('%s: Sending close command', self.name) if not await self._set_state(0): return False # Do not allow update of this device's state for 10 seconds. 
self.next_allowed_update = datetime.utcnow() + timedelta(seconds=10) # Ensure state is closed or closing. if self.state not in (STATE_CLOSED, STATE_CLOSING): # Set state to closing. self._update_state('5') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Close command send', self.name) return True async def open(self) -> bool: """Open the device.""" _LOGGER.debug('%s: Sending open command', self.name) if not await self._set_state(1): return False # Do not allow update of this device's state for 5 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=5) # Ensure state is open or opening if self.state not in (STATE_OPEN, STATE_OPENING): # Set state to opening self._update_state('4') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Open command send', self.name) return True # pylint: disable=protected-access async def update(self) -> None: """Retrieve updated device state.""" if self.next_allowed_update is not None and \ datetime.utcnow() < self.next_allowed_update: return self.next_allowed_update = None await self.api._update_device_state() self._device_json = self._device['device_info'] async def close_connection(self): """Close the web session connection with MyQ""" await self.api.close_websession()
arraylabs/pymyq
pymyq/device.py
MyQDevice.close
python
async def close(self) -> bool: _LOGGER.debug('%s: Sending close command', self.name) if not await self._set_state(0): return False # Do not allow update of this device's state for 10 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=10) # Ensure state is closed or closing. if self.state not in (STATE_CLOSED, STATE_CLOSING): # Set state to closing. self._update_state('5') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Close command send', self.name) return True
Close the device.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L163-L179
[ "async def _set_state(self, state: int) -> bool:\n \"\"\"Set the state of the device.\"\"\"\n try:\n set_state_resp = await self.api._request(\n 'put',\n DEVICE_SET_ENDPOINT,\n json={\n 'attributeName': 'desireddoorstate',\n 'myQDeviceId': self.device_id,\n 'AttributeValue': state,\n })\n except RequestError as err:\n _LOGGER.error('%s: Setting state failed (and halting): %s',\n self.name, err)\n return False\n\n if set_state_resp is None:\n return False\n\n if int(set_state_resp.get('ReturnCode', 1)) != 0:\n _LOGGER.error(\n '%s: Error setting the device state: %s', self.name,\n set_state_resp.get('ErrorMessage', 'Unknown Error'))\n return False\n\n return True\n" ]
class MyQDevice: """Define a generic MyQ device.""" def __init__(self, device: dict, brand: str, api: API) -> None: """Initialize.""" self._brand = brand self._device = device self._device_json = device['device_info'] self._device_id = self._device_json['MyQDeviceId'] self.api = api self.next_allowed_update = None @property def brand(self) -> str: """Return the brand of this device.""" return self._brand @property def device_id(self) -> int: """Return the device ID.""" return self._device_id @property def parent_id(self) -> Union[None, int]: """Return the ID of the parent device (if it exists).""" return self._device_json.get('ParentMyQDeviceId') @property def name(self) -> str: """Return the device name.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'desc') @property def available(self) -> bool: """Return if device is online or not.""" # Both ability to retrieve state from MyQ cloud AND device itself has # to be online. 
is_available = self.api.online and \ next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'online') == "True" return is_available @property def serial(self) -> str: """Return the device serial number.""" return self._device_json.get('SerialNumber') @property def open_allowed(self) -> bool: """Door can be opened unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedopenallowed')\ == "1" @property def close_allowed(self) -> bool: """Door can be closed unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedcloseallowed')\ == "1" @property def state(self) -> str: """Return the current state of the device (if it exists).""" return self._coerce_state_from_string( next( attr['Value'] for attr in self._device_json.get( 'Attributes', []) if attr.get('AttributeDisplayName') == 'doorstate')) def _update_state(self, value: str) -> None: """Update state temporary during open or close.""" attribute = next(attr for attr in self._device['device_info'].get( 'Attributes', []) if attr.get( 'AttributeDisplayName') == 'doorstate') if attribute is not None: attribute['Value'] = value @property def type(self) -> str: """Return the device type.""" return self._device_json.get('MyQDeviceTypeName') @staticmethod def _coerce_state_from_string(value: Union[int, str]) -> str: """Return a proper state from a string input.""" try: return STATE_MAP[int(value)] except KeyError: _LOGGER.error('Unknown state: %s', value) return STATE_UNKNOWN # pylint: disable=protected-access async def _set_state(self, state: int) -> bool: """Set the state of the device.""" try: set_state_resp = await self.api._request( 'put', DEVICE_SET_ENDPOINT, json={ 'attributeName': 'desireddoorstate', 'myQDeviceId': self.device_id, 'AttributeValue': state, }) except RequestError as err: 
_LOGGER.error('%s: Setting state failed (and halting): %s', self.name, err) return False if set_state_resp is None: return False if int(set_state_resp.get('ReturnCode', 1)) != 0: _LOGGER.error( '%s: Error setting the device state: %s', self.name, set_state_resp.get('ErrorMessage', 'Unknown Error')) return False return True async def open(self) -> bool: """Open the device.""" _LOGGER.debug('%s: Sending open command', self.name) if not await self._set_state(1): return False # Do not allow update of this device's state for 5 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=5) # Ensure state is open or opening if self.state not in (STATE_OPEN, STATE_OPENING): # Set state to opening self._update_state('4') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Open command send', self.name) return True # pylint: disable=protected-access async def update(self) -> None: """Retrieve updated device state.""" if self.next_allowed_update is not None and \ datetime.utcnow() < self.next_allowed_update: return self.next_allowed_update = None await self.api._update_device_state() self._device_json = self._device['device_info'] async def close_connection(self): """Close the web session connection with MyQ""" await self.api.close_websession()
arraylabs/pymyq
pymyq/device.py
MyQDevice.open
python
async def open(self) -> bool: _LOGGER.debug('%s: Sending open command', self.name) if not await self._set_state(1): return False # Do not allow update of this device's state for 5 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=5) # Ensure state is open or opening if self.state not in (STATE_OPEN, STATE_OPENING): # Set state to opening self._update_state('4') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Open command send', self.name) return True
Open the device.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L181-L197
[ "async def _set_state(self, state: int) -> bool:\n \"\"\"Set the state of the device.\"\"\"\n try:\n set_state_resp = await self.api._request(\n 'put',\n DEVICE_SET_ENDPOINT,\n json={\n 'attributeName': 'desireddoorstate',\n 'myQDeviceId': self.device_id,\n 'AttributeValue': state,\n })\n except RequestError as err:\n _LOGGER.error('%s: Setting state failed (and halting): %s',\n self.name, err)\n return False\n\n if set_state_resp is None:\n return False\n\n if int(set_state_resp.get('ReturnCode', 1)) != 0:\n _LOGGER.error(\n '%s: Error setting the device state: %s', self.name,\n set_state_resp.get('ErrorMessage', 'Unknown Error'))\n return False\n\n return True\n" ]
class MyQDevice: """Define a generic MyQ device.""" def __init__(self, device: dict, brand: str, api: API) -> None: """Initialize.""" self._brand = brand self._device = device self._device_json = device['device_info'] self._device_id = self._device_json['MyQDeviceId'] self.api = api self.next_allowed_update = None @property def brand(self) -> str: """Return the brand of this device.""" return self._brand @property def device_id(self) -> int: """Return the device ID.""" return self._device_id @property def parent_id(self) -> Union[None, int]: """Return the ID of the parent device (if it exists).""" return self._device_json.get('ParentMyQDeviceId') @property def name(self) -> str: """Return the device name.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'desc') @property def available(self) -> bool: """Return if device is online or not.""" # Both ability to retrieve state from MyQ cloud AND device itself has # to be online. 
is_available = self.api.online and \ next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'online') == "True" return is_available @property def serial(self) -> str: """Return the device serial number.""" return self._device_json.get('SerialNumber') @property def open_allowed(self) -> bool: """Door can be opened unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedopenallowed')\ == "1" @property def close_allowed(self) -> bool: """Door can be closed unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedcloseallowed')\ == "1" @property def state(self) -> str: """Return the current state of the device (if it exists).""" return self._coerce_state_from_string( next( attr['Value'] for attr in self._device_json.get( 'Attributes', []) if attr.get('AttributeDisplayName') == 'doorstate')) def _update_state(self, value: str) -> None: """Update state temporary during open or close.""" attribute = next(attr for attr in self._device['device_info'].get( 'Attributes', []) if attr.get( 'AttributeDisplayName') == 'doorstate') if attribute is not None: attribute['Value'] = value @property def type(self) -> str: """Return the device type.""" return self._device_json.get('MyQDeviceTypeName') @staticmethod def _coerce_state_from_string(value: Union[int, str]) -> str: """Return a proper state from a string input.""" try: return STATE_MAP[int(value)] except KeyError: _LOGGER.error('Unknown state: %s', value) return STATE_UNKNOWN # pylint: disable=protected-access async def _set_state(self, state: int) -> bool: """Set the state of the device.""" try: set_state_resp = await self.api._request( 'put', DEVICE_SET_ENDPOINT, json={ 'attributeName': 'desireddoorstate', 'myQDeviceId': self.device_id, 'AttributeValue': state, }) except RequestError as err: 
_LOGGER.error('%s: Setting state failed (and halting): %s', self.name, err) return False if set_state_resp is None: return False if int(set_state_resp.get('ReturnCode', 1)) != 0: _LOGGER.error( '%s: Error setting the device state: %s', self.name, set_state_resp.get('ErrorMessage', 'Unknown Error')) return False return True async def close(self) -> bool: """Close the device.""" _LOGGER.debug('%s: Sending close command', self.name) if not await self._set_state(0): return False # Do not allow update of this device's state for 10 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=10) # Ensure state is closed or closing. if self.state not in (STATE_CLOSED, STATE_CLOSING): # Set state to closing. self._update_state('5') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Close command send', self.name) return True # pylint: disable=protected-access async def update(self) -> None: """Retrieve updated device state.""" if self.next_allowed_update is not None and \ datetime.utcnow() < self.next_allowed_update: return self.next_allowed_update = None await self.api._update_device_state() self._device_json = self._device['device_info'] async def close_connection(self): """Close the web session connection with MyQ""" await self.api.close_websession()
arraylabs/pymyq
pymyq/device.py
MyQDevice.update
python
async def update(self) -> None: if self.next_allowed_update is not None and \ datetime.utcnow() < self.next_allowed_update: return self.next_allowed_update = None await self.api._update_device_state() self._device_json = self._device['device_info']
Retrieve updated device state.
train
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L200-L208
null
class MyQDevice: """Define a generic MyQ device.""" def __init__(self, device: dict, brand: str, api: API) -> None: """Initialize.""" self._brand = brand self._device = device self._device_json = device['device_info'] self._device_id = self._device_json['MyQDeviceId'] self.api = api self.next_allowed_update = None @property def brand(self) -> str: """Return the brand of this device.""" return self._brand @property def device_id(self) -> int: """Return the device ID.""" return self._device_id @property def parent_id(self) -> Union[None, int]: """Return the ID of the parent device (if it exists).""" return self._device_json.get('ParentMyQDeviceId') @property def name(self) -> str: """Return the device name.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'desc') @property def available(self) -> bool: """Return if device is online or not.""" # Both ability to retrieve state from MyQ cloud AND device itself has # to be online. 
is_available = self.api.online and \ next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'online') == "True" return is_available @property def serial(self) -> str: """Return the device serial number.""" return self._device_json.get('SerialNumber') @property def open_allowed(self) -> bool: """Door can be opened unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedopenallowed')\ == "1" @property def close_allowed(self) -> bool: """Door can be closed unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedcloseallowed')\ == "1" @property def state(self) -> str: """Return the current state of the device (if it exists).""" return self._coerce_state_from_string( next( attr['Value'] for attr in self._device_json.get( 'Attributes', []) if attr.get('AttributeDisplayName') == 'doorstate')) def _update_state(self, value: str) -> None: """Update state temporary during open or close.""" attribute = next(attr for attr in self._device['device_info'].get( 'Attributes', []) if attr.get( 'AttributeDisplayName') == 'doorstate') if attribute is not None: attribute['Value'] = value @property def type(self) -> str: """Return the device type.""" return self._device_json.get('MyQDeviceTypeName') @staticmethod def _coerce_state_from_string(value: Union[int, str]) -> str: """Return a proper state from a string input.""" try: return STATE_MAP[int(value)] except KeyError: _LOGGER.error('Unknown state: %s', value) return STATE_UNKNOWN # pylint: disable=protected-access async def _set_state(self, state: int) -> bool: """Set the state of the device.""" try: set_state_resp = await self.api._request( 'put', DEVICE_SET_ENDPOINT, json={ 'attributeName': 'desireddoorstate', 'myQDeviceId': self.device_id, 'AttributeValue': state, }) except RequestError as err: 
_LOGGER.error('%s: Setting state failed (and halting): %s', self.name, err) return False if set_state_resp is None: return False if int(set_state_resp.get('ReturnCode', 1)) != 0: _LOGGER.error( '%s: Error setting the device state: %s', self.name, set_state_resp.get('ErrorMessage', 'Unknown Error')) return False return True async def close(self) -> bool: """Close the device.""" _LOGGER.debug('%s: Sending close command', self.name) if not await self._set_state(0): return False # Do not allow update of this device's state for 10 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=10) # Ensure state is closed or closing. if self.state not in (STATE_CLOSED, STATE_CLOSING): # Set state to closing. self._update_state('5') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Close command send', self.name) return True async def open(self) -> bool: """Open the device.""" _LOGGER.debug('%s: Sending open command', self.name) if not await self._set_state(1): return False # Do not allow update of this device's state for 5 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=5) # Ensure state is open or opening if self.state not in (STATE_OPEN, STATE_OPENING): # Set state to opening self._update_state('4') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Open command send', self.name) return True # pylint: disable=protected-access async def close_connection(self): """Close the web session connection with MyQ""" await self.api.close_websession()
xmunoz/sodapy
sodapy/__init__.py
_raise_for_status
python
def _raise_for_status(response): ''' Custom raise_for_status with more appropriate error message. ''' http_error_msg = "" if 400 <= response.status_code < 500: http_error_msg = "{0} Client Error: {1}".format(response.status_code, response.reason) elif 500 <= response.status_code < 600: http_error_msg = "{0} Server Error: {1}".format(response.status_code, response.reason) if http_error_msg: try: more_info = response.json().get("message") except ValueError: more_info = None if more_info and more_info.lower() != response.reason.lower(): http_error_msg += ".\n\t{0}".format(more_info) raise requests.exceptions.HTTPError(http_error_msg, response=response)
Custom raise_for_status with more appropriate error message.
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L510-L531
null
from __future__ import absolute_import from future import standard_library standard_library.install_aliases() from builtins import object from io import StringIO, IOBase import requests import csv import json import logging import re import os from .constants import DEFAULT_API_PATH, OLD_API_PATH, DATASETS_PATH class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. 
''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. 
''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. 
Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. 
''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. ''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. 
client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close() # helper methods def _clear_empty_values(args): ''' Scrap junk data from a dict. 
''' result = {} for param in args: if args[param] is not None: result[param] = args[param] return result def _format_old_api_request(dataid=None, content_type=None): if dataid is not None: if content_type is not None: return "{0}/{1}.{2}".format(OLD_API_PATH, dataid, content_type) else: return "{0}/{1}".format(OLD_API_PATH, dataid) else: if content_type is not None: return "{0}.{1}".format(OLD_API_PATH, content_type) else: raise Exception("This method requires at least a dataset_id or content_type.") def _format_new_api_request(dataid=None, row_id=None, content_type=None): if dataid is not None: if content_type is not None: if row_id is not None: return "{0}{1}/{2}.{3}".format(DEFAULT_API_PATH, dataid, row_id, content_type) else: return "{0}{1}.{2}".format(DEFAULT_API_PATH, dataid, content_type) raise Exception("This method requires at least a dataset_id or content_type.") def authentication_validation(username, password, access_token): ''' Only accept one form of authentication. ''' if bool(username) is not bool(password): raise Exception("Basic authentication requires a username AND" " password.") if (username and access_token) or (password and access_token): raise Exception("Cannot use both Basic Authentication and" " OAuth2.0. Please use only one authentication" " method.") def _download_file(url, local_filename): ''' Utility function that downloads a chunked response from the specified url to a local path. This method is suitable for larger downloads. ''' response = requests.get(url, stream=True) with open(local_filename, 'wb') as outfile: for chunk in response.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks outfile.write(chunk)
xmunoz/sodapy
sodapy/__init__.py
_clear_empty_values
python
def _clear_empty_values(args): ''' Scrap junk data from a dict. ''' result = {} for param in args: if args[param] is not None: result[param] = args[param] return result
Scrap junk data from a dict.
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L534-L542
null
from __future__ import absolute_import from future import standard_library standard_library.install_aliases() from builtins import object from io import StringIO, IOBase import requests import csv import json import logging import re import os from .constants import DEFAULT_API_PATH, OLD_API_PATH, DATASETS_PATH class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. 
''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. 
''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. 
Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. 
''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. ''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. 
client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close() # helper methods def _raise_for_status(response): ''' Custom raise_for_status with more appropriate error message. 
''' http_error_msg = "" if 400 <= response.status_code < 500: http_error_msg = "{0} Client Error: {1}".format(response.status_code, response.reason) elif 500 <= response.status_code < 600: http_error_msg = "{0} Server Error: {1}".format(response.status_code, response.reason) if http_error_msg: try: more_info = response.json().get("message") except ValueError: more_info = None if more_info and more_info.lower() != response.reason.lower(): http_error_msg += ".\n\t{0}".format(more_info) raise requests.exceptions.HTTPError(http_error_msg, response=response) def _format_old_api_request(dataid=None, content_type=None): if dataid is not None: if content_type is not None: return "{0}/{1}.{2}".format(OLD_API_PATH, dataid, content_type) else: return "{0}/{1}".format(OLD_API_PATH, dataid) else: if content_type is not None: return "{0}.{1}".format(OLD_API_PATH, content_type) else: raise Exception("This method requires at least a dataset_id or content_type.") def _format_new_api_request(dataid=None, row_id=None, content_type=None): if dataid is not None: if content_type is not None: if row_id is not None: return "{0}{1}/{2}.{3}".format(DEFAULT_API_PATH, dataid, row_id, content_type) else: return "{0}{1}.{2}".format(DEFAULT_API_PATH, dataid, content_type) raise Exception("This method requires at least a dataset_id or content_type.") def authentication_validation(username, password, access_token): ''' Only accept one form of authentication. ''' if bool(username) is not bool(password): raise Exception("Basic authentication requires a username AND" " password.") if (username and access_token) or (password and access_token): raise Exception("Cannot use both Basic Authentication and" " OAuth2.0. Please use only one authentication" " method.") def _download_file(url, local_filename): ''' Utility function that downloads a chunked response from the specified url to a local path. This method is suitable for larger downloads. 
''' response = requests.get(url, stream=True) with open(local_filename, 'wb') as outfile: for chunk in response.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks outfile.write(chunk)
xmunoz/sodapy
sodapy/__init__.py
authentication_validation
python
def authentication_validation(username, password, access_token): ''' Only accept one form of authentication. ''' if bool(username) is not bool(password): raise Exception("Basic authentication requires a username AND" " password.") if (username and access_token) or (password and access_token): raise Exception("Cannot use both Basic Authentication and" " OAuth2.0. Please use only one authentication" " method.")
Only accept one form of authentication.
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L570-L580
null
from __future__ import absolute_import from future import standard_library standard_library.install_aliases() from builtins import object from io import StringIO, IOBase import requests import csv import json import logging import re import os from .constants import DEFAULT_API_PATH, OLD_API_PATH, DATASETS_PATH class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. 
''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. 
''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. 
Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. 
''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. ''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. 
client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close() # helper methods def _raise_for_status(response): ''' Custom raise_for_status with more appropriate error message. 
''' http_error_msg = "" if 400 <= response.status_code < 500: http_error_msg = "{0} Client Error: {1}".format(response.status_code, response.reason) elif 500 <= response.status_code < 600: http_error_msg = "{0} Server Error: {1}".format(response.status_code, response.reason) if http_error_msg: try: more_info = response.json().get("message") except ValueError: more_info = None if more_info and more_info.lower() != response.reason.lower(): http_error_msg += ".\n\t{0}".format(more_info) raise requests.exceptions.HTTPError(http_error_msg, response=response) def _clear_empty_values(args): ''' Scrap junk data from a dict. ''' result = {} for param in args: if args[param] is not None: result[param] = args[param] return result def _format_old_api_request(dataid=None, content_type=None): if dataid is not None: if content_type is not None: return "{0}/{1}.{2}".format(OLD_API_PATH, dataid, content_type) else: return "{0}/{1}".format(OLD_API_PATH, dataid) else: if content_type is not None: return "{0}.{1}".format(OLD_API_PATH, content_type) else: raise Exception("This method requires at least a dataset_id or content_type.") def _format_new_api_request(dataid=None, row_id=None, content_type=None): if dataid is not None: if content_type is not None: if row_id is not None: return "{0}{1}/{2}.{3}".format(DEFAULT_API_PATH, dataid, row_id, content_type) else: return "{0}{1}.{2}".format(DEFAULT_API_PATH, dataid, content_type) raise Exception("This method requires at least a dataset_id or content_type.") def _download_file(url, local_filename): ''' Utility function that downloads a chunked response from the specified url to a local path. This method is suitable for larger downloads. ''' response = requests.get(url, stream=True) with open(local_filename, 'wb') as outfile: for chunk in response.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks outfile.write(chunk)
xmunoz/sodapy
sodapy/__init__.py
_download_file
python
def _download_file(url, local_filename): ''' Utility function that downloads a chunked response from the specified url to a local path. This method is suitable for larger downloads. ''' response = requests.get(url, stream=True) with open(local_filename, 'wb') as outfile: for chunk in response.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks outfile.write(chunk)
Utility function that downloads a chunked response from the specified url to a local path. This method is suitable for larger downloads.
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L583-L592
null
from __future__ import absolute_import from future import standard_library standard_library.install_aliases() from builtins import object from io import StringIO, IOBase import requests import csv import json import logging import re import os from .constants import DEFAULT_API_PATH, OLD_API_PATH, DATASETS_PATH class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. 
''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. 
''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. 
Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. 
''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. ''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. 
client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close() # helper methods def _raise_for_status(response): ''' Custom raise_for_status with more appropriate error message. 
''' http_error_msg = "" if 400 <= response.status_code < 500: http_error_msg = "{0} Client Error: {1}".format(response.status_code, response.reason) elif 500 <= response.status_code < 600: http_error_msg = "{0} Server Error: {1}".format(response.status_code, response.reason) if http_error_msg: try: more_info = response.json().get("message") except ValueError: more_info = None if more_info and more_info.lower() != response.reason.lower(): http_error_msg += ".\n\t{0}".format(more_info) raise requests.exceptions.HTTPError(http_error_msg, response=response) def _clear_empty_values(args): ''' Scrap junk data from a dict. ''' result = {} for param in args: if args[param] is not None: result[param] = args[param] return result def _format_old_api_request(dataid=None, content_type=None): if dataid is not None: if content_type is not None: return "{0}/{1}.{2}".format(OLD_API_PATH, dataid, content_type) else: return "{0}/{1}".format(OLD_API_PATH, dataid) else: if content_type is not None: return "{0}.{1}".format(OLD_API_PATH, content_type) else: raise Exception("This method requires at least a dataset_id or content_type.") def _format_new_api_request(dataid=None, row_id=None, content_type=None): if dataid is not None: if content_type is not None: if row_id is not None: return "{0}{1}/{2}.{3}".format(DEFAULT_API_PATH, dataid, row_id, content_type) else: return "{0}{1}.{2}".format(DEFAULT_API_PATH, dataid, content_type) raise Exception("This method requires at least a dataset_id or content_type.") def authentication_validation(username, password, access_token): ''' Only accept one form of authentication. ''' if bool(username) is not bool(password): raise Exception("Basic authentication requires a username AND" " password.") if (username and access_token) or (password and access_token): raise Exception("Cannot use both Basic Authentication and" " OAuth2.0. Please use only one authentication" " method.")
xmunoz/sodapy
sodapy/__init__.py
Socrata.datasets
python
def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results
Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False)
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L97-L205
[ "def _perform_request(self, request_type, resource, **kwargs):\n '''\n Utility method that performs all requests.\n '''\n request_type_methods = set([\"get\", \"post\", \"put\", \"delete\"])\n if request_type not in request_type_methods:\n raise Exception(\"Unknown request type. Supported request types are\"\n \": {0}\".format(\", \".join(request_type_methods)))\n\n uri = \"{0}{1}{2}\".format(self.uri_prefix, self.domain, resource)\n\n # set a timeout, just to be safe\n kwargs[\"timeout\"] = self.timeout\n\n response = getattr(self.session, request_type)(uri, **kwargs)\n\n # handle errors\n if response.status_code not in (200, 202):\n _raise_for_status(response)\n\n # when responses have no content body (ie. delete, set_permission),\n # simply return the whole response\n if not response.text:\n return response\n\n # for other request types, return most useful data\n content_type = response.headers.get('content-type').strip().lower()\n if re.match(r'application\\/json', content_type):\n return response.json()\n elif re.match(r'text\\/csv', content_type):\n csv_stream = StringIO(response.text)\n return [line for line in csv.reader(csv_stream)]\n elif re.match(r'application\\/rdf\\+xml', content_type):\n return response.content\n elif re.match(r'text\\/plain', content_type):\n try:\n return json.loads(response.text)\n except ValueError:\n return response.text\n else:\n raise Exception(\"Unknown response format: {0}\"\n .format(content_type))\n" ]
class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. 
''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. 
''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. 
Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. 
''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. ''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. 
client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close()
xmunoz/sodapy
sodapy/__init__.py
Socrata.create
python
def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. ''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload)
Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated.
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L207-L234
[ "def _format_old_api_request(dataid=None, content_type=None):\n\n if dataid is not None:\n if content_type is not None:\n return \"{0}/{1}.{2}\".format(OLD_API_PATH, dataid, content_type)\n else:\n return \"{0}/{1}\".format(OLD_API_PATH, dataid)\n else:\n if content_type is not None:\n return \"{0}.{1}\".format(OLD_API_PATH, content_type)\n else:\n raise Exception(\"This method requires at least a dataset_id or content_type.\")\n", "def _clear_empty_values(args):\n '''\n Scrap junk data from a dict.\n '''\n result = {}\n for param in args:\n if args[param] is not None:\n result[param] = args[param]\n return result\n", "def _perform_update(self, method, resource, payload):\n '''\n Execute the update task.\n '''\n\n # python2/3 compatibility wizardry\n try:\n file_type = file\n except NameError:\n file_type = IOBase\n\n if isinstance(payload, (dict, list)):\n response = self._perform_request(method, resource,\n data=json.dumps(payload))\n elif isinstance(payload, file_type):\n headers = {\n \"content-type\": \"text/csv\",\n }\n response = self._perform_request(method, resource, data=payload,\n headers=headers)\n else:\n raise Exception(\"Unrecognized payload {0}. Currently only list-, dictionary-,\"\n \" and file-types are supported.\".format(type(payload)))\n\n return response\n" ]
class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. 
''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. ''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. 
Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. 
See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. 
''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. 
delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close()
xmunoz/sodapy
sodapy/__init__.py
Socrata.set_permission
python
def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params)
Set a dataset's permissions to private or public Options are private, public
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L236-L249
[ "def _format_old_api_request(dataid=None, content_type=None):\n\n if dataid is not None:\n if content_type is not None:\n return \"{0}/{1}.{2}\".format(OLD_API_PATH, dataid, content_type)\n else:\n return \"{0}/{1}\".format(OLD_API_PATH, dataid)\n else:\n if content_type is not None:\n return \"{0}.{1}\".format(OLD_API_PATH, content_type)\n else:\n raise Exception(\"This method requires at least a dataset_id or content_type.\")\n", "def _perform_request(self, request_type, resource, **kwargs):\n '''\n Utility method that performs all requests.\n '''\n request_type_methods = set([\"get\", \"post\", \"put\", \"delete\"])\n if request_type not in request_type_methods:\n raise Exception(\"Unknown request type. Supported request types are\"\n \": {0}\".format(\", \".join(request_type_methods)))\n\n uri = \"{0}{1}{2}\".format(self.uri_prefix, self.domain, resource)\n\n # set a timeout, just to be safe\n kwargs[\"timeout\"] = self.timeout\n\n response = getattr(self.session, request_type)(uri, **kwargs)\n\n # handle errors\n if response.status_code not in (200, 202):\n _raise_for_status(response)\n\n # when responses have no content body (ie. delete, set_permission),\n # simply return the whole response\n if not response.text:\n return response\n\n # for other request types, return most useful data\n content_type = response.headers.get('content-type').strip().lower()\n if re.match(r'application\\/json', content_type):\n return response.json()\n elif re.match(r'text\\/csv', content_type):\n csv_stream = StringIO(response.text)\n return [line for line in csv.reader(csv_stream)]\n elif re.match(r'application\\/rdf\\+xml', content_type):\n return response.content\n elif re.match(r'text\\/plain', content_type):\n try:\n return json.loads(response.text)\n except ValueError:\n return response.text\n else:\n raise Exception(\"Unknown response format: {0}\"\n .format(content_type))\n" ]
class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. ''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. 
''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. ''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def publish(self, dataset_identifier, content_type="json"): ''' The create() method 
creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, dataset_identifier, 
payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. 
''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. 
delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close()
xmunoz/sodapy
sodapy/__init__.py
Socrata.get_metadata
python
def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource)
Retrieve the metadata for a particular dataset.
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L251-L256
[ "def _format_old_api_request(dataid=None, content_type=None):\n\n if dataid is not None:\n if content_type is not None:\n return \"{0}/{1}.{2}\".format(OLD_API_PATH, dataid, content_type)\n else:\n return \"{0}/{1}\".format(OLD_API_PATH, dataid)\n else:\n if content_type is not None:\n return \"{0}.{1}\".format(OLD_API_PATH, content_type)\n else:\n raise Exception(\"This method requires at least a dataset_id or content_type.\")\n", "def _perform_request(self, request_type, resource, **kwargs):\n '''\n Utility method that performs all requests.\n '''\n request_type_methods = set([\"get\", \"post\", \"put\", \"delete\"])\n if request_type not in request_type_methods:\n raise Exception(\"Unknown request type. Supported request types are\"\n \": {0}\".format(\", \".join(request_type_methods)))\n\n uri = \"{0}{1}{2}\".format(self.uri_prefix, self.domain, resource)\n\n # set a timeout, just to be safe\n kwargs[\"timeout\"] = self.timeout\n\n response = getattr(self.session, request_type)(uri, **kwargs)\n\n # handle errors\n if response.status_code not in (200, 202):\n _raise_for_status(response)\n\n # when responses have no content body (ie. delete, set_permission),\n # simply return the whole response\n if not response.text:\n return response\n\n # for other request types, return most useful data\n content_type = response.headers.get('content-type').strip().lower()\n if re.match(r'application\\/json', content_type):\n return response.json()\n elif re.match(r'text\\/csv', content_type):\n csv_stream = StringIO(response.text)\n return [line for line in csv.reader(csv_stream)]\n elif re.match(r'application\\/rdf\\+xml', content_type):\n return response.content\n elif re.match(r'text\\/plain', content_type):\n try:\n return json.loads(response.text)\n except ValueError:\n return response.text\n else:\n raise Exception(\"Unknown response format: {0}\"\n .format(content_type))\n" ]
class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. 
''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. 
''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. 
Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. 
''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. ''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. 
client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close()
xmunoz/sodapy
sodapy/__init__.py
Socrata.update_metadata
python
def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields)
Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied.
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L258-L267
[ "def _format_old_api_request(dataid=None, content_type=None):\n\n if dataid is not None:\n if content_type is not None:\n return \"{0}/{1}.{2}\".format(OLD_API_PATH, dataid, content_type)\n else:\n return \"{0}/{1}\".format(OLD_API_PATH, dataid)\n else:\n if content_type is not None:\n return \"{0}.{1}\".format(OLD_API_PATH, content_type)\n else:\n raise Exception(\"This method requires at least a dataset_id or content_type.\")\n", "def _perform_update(self, method, resource, payload):\n '''\n Execute the update task.\n '''\n\n # python2/3 compatibility wizardry\n try:\n file_type = file\n except NameError:\n file_type = IOBase\n\n if isinstance(payload, (dict, list)):\n response = self._perform_request(method, resource,\n data=json.dumps(payload))\n elif isinstance(payload, file_type):\n headers = {\n \"content-type\": \"text/csv\",\n }\n response = self._perform_request(method, resource, data=payload,\n headers=headers)\n else:\n raise Exception(\"Unrecognized payload {0}. Currently only list-, dictionary-,\"\n \" and file-types are supported.\".format(type(payload)))\n\n return response\n" ]
class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. 
''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. 
''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. 
Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. 
''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. ''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. 
client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close()
xmunoz/sodapy
sodapy/__init__.py
Socrata.download_attachments
python
def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. ''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files
Download all of the attachments associated with a dataset. Return the paths of downloaded files.
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L269-L304
[ "def _format_old_api_request(dataid=None, content_type=None):\n\n if dataid is not None:\n if content_type is not None:\n return \"{0}/{1}.{2}\".format(OLD_API_PATH, dataid, content_type)\n else:\n return \"{0}/{1}\".format(OLD_API_PATH, dataid)\n else:\n if content_type is not None:\n return \"{0}.{1}\".format(OLD_API_PATH, content_type)\n else:\n raise Exception(\"This method requires at least a dataset_id or content_type.\")\n", "def _download_file(url, local_filename):\n '''\n Utility function that downloads a chunked response from the specified url to a local path.\n This method is suitable for larger downloads.\n '''\n response = requests.get(url, stream=True)\n with open(local_filename, 'wb') as outfile:\n for chunk in response.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n outfile.write(chunk)\n", "def get_metadata(self, dataset_identifier, content_type=\"json\"):\n '''\n Retrieve the metadata for a particular dataset.\n '''\n resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type)\n return self._perform_request(\"get\", resource)\n" ]
class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. 
''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. 
''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from 
an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. 
''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. 
delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close()
xmunoz/sodapy
sodapy/__init__.py
Socrata.publish
python
def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource)
The create() method creates a dataset in a "working copy" state. This method publishes it.
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L306-L314
[ "def _format_old_api_request(dataid=None, content_type=None):\n\n if dataid is not None:\n if content_type is not None:\n return \"{0}/{1}.{2}\".format(OLD_API_PATH, dataid, content_type)\n else:\n return \"{0}/{1}\".format(OLD_API_PATH, dataid)\n else:\n if content_type is not None:\n return \"{0}.{1}\".format(OLD_API_PATH, content_type)\n else:\n raise Exception(\"This method requires at least a dataset_id or content_type.\")\n", "def _perform_request(self, request_type, resource, **kwargs):\n '''\n Utility method that performs all requests.\n '''\n request_type_methods = set([\"get\", \"post\", \"put\", \"delete\"])\n if request_type not in request_type_methods:\n raise Exception(\"Unknown request type. Supported request types are\"\n \": {0}\".format(\", \".join(request_type_methods)))\n\n uri = \"{0}{1}{2}\".format(self.uri_prefix, self.domain, resource)\n\n # set a timeout, just to be safe\n kwargs[\"timeout\"] = self.timeout\n\n response = getattr(self.session, request_type)(uri, **kwargs)\n\n # handle errors\n if response.status_code not in (200, 202):\n _raise_for_status(response)\n\n # when responses have no content body (ie. delete, set_permission),\n # simply return the whole response\n if not response.text:\n return response\n\n # for other request types, return most useful data\n content_type = response.headers.get('content-type').strip().lower()\n if re.match(r'application\\/json', content_type):\n return response.json()\n elif re.match(r'text\\/csv', content_type):\n csv_stream = StringIO(response.text)\n return [line for line in csv.reader(csv_stream)]\n elif re.match(r'application\\/rdf\\+xml', content_type):\n return response.content\n elif re.match(r'text\\/plain', content_type):\n try:\n return json.loads(response.text)\n except ValueError:\n return response.text\n else:\n raise Exception(\"Unknown response format: {0}\"\n .format(content_type))\n" ]
class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. 
''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. 
''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. 
If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. 
A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. ''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. 
client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close()
xmunoz/sodapy
sodapy/__init__.py
Socrata.get
python
def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response
Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L316-L363
[ "def _clear_empty_values(args):\n '''\n Scrap junk data from a dict.\n '''\n result = {}\n for param in args:\n if args[param] is not None:\n result[param] = args[param]\n return result\n", "def _format_new_api_request(dataid=None, row_id=None, content_type=None):\n if dataid is not None:\n if content_type is not None:\n if row_id is not None:\n return \"{0}{1}/{2}.{3}\".format(DEFAULT_API_PATH, dataid, row_id, content_type)\n else:\n return \"{0}{1}.{2}\".format(DEFAULT_API_PATH, dataid, content_type)\n\n raise Exception(\"This method requires at least a dataset_id or content_type.\")\n", "def _perform_request(self, request_type, resource, **kwargs):\n '''\n Utility method that performs all requests.\n '''\n request_type_methods = set([\"get\", \"post\", \"put\", \"delete\"])\n if request_type not in request_type_methods:\n raise Exception(\"Unknown request type. Supported request types are\"\n \": {0}\".format(\", \".join(request_type_methods)))\n\n uri = \"{0}{1}{2}\".format(self.uri_prefix, self.domain, resource)\n\n # set a timeout, just to be safe\n kwargs[\"timeout\"] = self.timeout\n\n response = getattr(self.session, request_type)(uri, **kwargs)\n\n # handle errors\n if response.status_code not in (200, 202):\n _raise_for_status(response)\n\n # when responses have no content body (ie. 
delete, set_permission),\n # simply return the whole response\n if not response.text:\n return response\n\n # for other request types, return most useful data\n content_type = response.headers.get('content-type').strip().lower()\n if re.match(r'application\\/json', content_type):\n return response.json()\n elif re.match(r'text\\/csv', content_type):\n csv_stream = StringIO(response.text)\n return [line for line in csv.reader(csv_stream)]\n elif re.match(r'application\\/rdf\\+xml', content_type):\n return response.content\n elif re.match(r'text\\/plain', content_type):\n try:\n return json.loads(response.text)\n except ValueError:\n return response.text\n else:\n raise Exception(\"Unknown response format: {0}\"\n .format(content_type))\n" ]
class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. 
''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. 
''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. 
''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. ''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. 
client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close()
xmunoz/sodapy
sodapy/__init__.py
Socrata.upsert
python
def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload)
Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L365-L374
[ "def _format_new_api_request(dataid=None, row_id=None, content_type=None):\n if dataid is not None:\n if content_type is not None:\n if row_id is not None:\n return \"{0}{1}/{2}.{3}\".format(DEFAULT_API_PATH, dataid, row_id, content_type)\n else:\n return \"{0}{1}.{2}\".format(DEFAULT_API_PATH, dataid, content_type)\n\n raise Exception(\"This method requires at least a dataset_id or content_type.\")\n", "def _perform_update(self, method, resource, payload):\n '''\n Execute the update task.\n '''\n\n # python2/3 compatibility wizardry\n try:\n file_type = file\n except NameError:\n file_type = IOBase\n\n if isinstance(payload, (dict, list)):\n response = self._perform_request(method, resource,\n data=json.dumps(payload))\n elif isinstance(payload, file_type):\n headers = {\n \"content-type\": \"text/csv\",\n }\n response = self._perform_request(method, resource, data=payload,\n headers=headers)\n else:\n raise Exception(\"Unrecognized payload {0}. Currently only list-, dictionary-,\"\n \" and file-types are supported.\".format(type(payload)))\n\n return response\n" ]
class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. 
''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. 
''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. 
Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. 
A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. ''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. 
client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close()
xmunoz/sodapy
sodapy/__init__.py
Socrata.create_non_data_file
python
def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data)
Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} )
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L385-L398
[ "def _perform_request(self, request_type, resource, **kwargs):\n '''\n Utility method that performs all requests.\n '''\n request_type_methods = set([\"get\", \"post\", \"put\", \"delete\"])\n if request_type not in request_type_methods:\n raise Exception(\"Unknown request type. Supported request types are\"\n \": {0}\".format(\", \".join(request_type_methods)))\n\n uri = \"{0}{1}{2}\".format(self.uri_prefix, self.domain, resource)\n\n # set a timeout, just to be safe\n kwargs[\"timeout\"] = self.timeout\n\n response = getattr(self.session, request_type)(uri, **kwargs)\n\n # handle errors\n if response.status_code not in (200, 202):\n _raise_for_status(response)\n\n # when responses have no content body (ie. delete, set_permission),\n # simply return the whole response\n if not response.text:\n return response\n\n # for other request types, return most useful data\n content_type = response.headers.get('content-type').strip().lower()\n if re.match(r'application\\/json', content_type):\n return response.json()\n elif re.match(r'text\\/csv', content_type):\n csv_stream = StringIO(response.text)\n return [line for line in csv.reader(csv_stream)]\n elif re.match(r'application\\/rdf\\+xml', content_type):\n return response.content\n elif re.match(r'text\\/plain', content_type):\n try:\n return json.loads(response.text)\n except ValueError:\n return response.text\n else:\n raise Exception(\"Unknown response format: {0}\"\n .format(content_type))\n" ]
class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. 
''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. 
''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. 
Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. 
''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. ''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. 
''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close()
xmunoz/sodapy
sodapy/__init__.py
Socrata.replace_non_data_file
python
def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data)
Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace.
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L400-L415
[ "def _format_old_api_request(dataid=None, content_type=None):\n\n if dataid is not None:\n if content_type is not None:\n return \"{0}/{1}.{2}\".format(OLD_API_PATH, dataid, content_type)\n else:\n return \"{0}/{1}\".format(OLD_API_PATH, dataid)\n else:\n if content_type is not None:\n return \"{0}.{1}\".format(OLD_API_PATH, content_type)\n else:\n raise Exception(\"This method requires at least a dataset_id or content_type.\")\n", "def _perform_request(self, request_type, resource, **kwargs):\n '''\n Utility method that performs all requests.\n '''\n request_type_methods = set([\"get\", \"post\", \"put\", \"delete\"])\n if request_type not in request_type_methods:\n raise Exception(\"Unknown request type. Supported request types are\"\n \": {0}\".format(\", \".join(request_type_methods)))\n\n uri = \"{0}{1}{2}\".format(self.uri_prefix, self.domain, resource)\n\n # set a timeout, just to be safe\n kwargs[\"timeout\"] = self.timeout\n\n response = getattr(self.session, request_type)(uri, **kwargs)\n\n # handle errors\n if response.status_code not in (200, 202):\n _raise_for_status(response)\n\n # when responses have no content body (ie. delete, set_permission),\n # simply return the whole response\n if not response.text:\n return response\n\n # for other request types, return most useful data\n content_type = response.headers.get('content-type').strip().lower()\n if re.match(r'application\\/json', content_type):\n return response.json()\n elif re.match(r'text\\/csv', content_type):\n csv_stream = StringIO(response.text)\n return [line for line in csv.reader(csv_stream)]\n elif re.match(r'application\\/rdf\\+xml', content_type):\n return response.content\n elif re.match(r'text\\/plain', content_type):\n try:\n return json.loads(response.text)\n except ValueError:\n return response.text\n else:\n raise Exception(\"Unknown response format: {0}\"\n .format(content_type))\n" ]
class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. 
''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. 
''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. 
Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. 
''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. ''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. 
Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close()
xmunoz/sodapy
sodapy/__init__.py
Socrata._perform_update
python
def _perform_update(self, method, resource, payload): ''' Execute the update task. ''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response
Execute the update task.
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L417-L441
[ "def _perform_request(self, request_type, resource, **kwargs):\n '''\n Utility method that performs all requests.\n '''\n request_type_methods = set([\"get\", \"post\", \"put\", \"delete\"])\n if request_type not in request_type_methods:\n raise Exception(\"Unknown request type. Supported request types are\"\n \": {0}\".format(\", \".join(request_type_methods)))\n\n uri = \"{0}{1}{2}\".format(self.uri_prefix, self.domain, resource)\n\n # set a timeout, just to be safe\n kwargs[\"timeout\"] = self.timeout\n\n response = getattr(self.session, request_type)(uri, **kwargs)\n\n # handle errors\n if response.status_code not in (200, 202):\n _raise_for_status(response)\n\n # when responses have no content body (ie. delete, set_permission),\n # simply return the whole response\n if not response.text:\n return response\n\n # for other request types, return most useful data\n content_type = response.headers.get('content-type').strip().lower()\n if re.match(r'application\\/json', content_type):\n return response.json()\n elif re.match(r'text\\/csv', content_type):\n csv_stream = StringIO(response.text)\n return [line for line in csv.reader(csv_stream)]\n elif re.match(r'application\\/rdf\\+xml', content_type):\n return response.content\n elif re.match(r'text\\/plain', content_type):\n try:\n return json.loads(response.text)\n except ValueError:\n return response.text\n else:\n raise Exception(\"Unknown response format: {0}\"\n .format(content_type))\n" ]
class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. 
''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. 
''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. 
Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. 
''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. 
Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close()
xmunoz/sodapy
sodapy/__init__.py
Socrata.delete
python
def delete(self, dataset_identifier, row_id=None, content_type="json"):
    '''
    Delete the entire dataset, e.g.
        client.delete("nimj-3ivp")
    or a single row, e.g.
        client.delete("nimj-3ivp", row_id=4)
    '''
    # A truthy row_id targets a single row through the newer SODA
    # resource path; otherwise the whole dataset is removed via the
    # legacy views endpoint.
    target = (_format_new_api_request(dataid=dataset_identifier,
                                      row_id=row_id,
                                      content_type=content_type)
              if row_id
              else _format_old_api_request(dataid=dataset_identifier,
                                           content_type=content_type))
    return self._perform_request("delete", target)
Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. client.delete("nimj-3ivp", row_id=4)
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L443-L457
[ "def _format_old_api_request(dataid=None, content_type=None):\n\n if dataid is not None:\n if content_type is not None:\n return \"{0}/{1}.{2}\".format(OLD_API_PATH, dataid, content_type)\n else:\n return \"{0}/{1}\".format(OLD_API_PATH, dataid)\n else:\n if content_type is not None:\n return \"{0}.{1}\".format(OLD_API_PATH, content_type)\n else:\n raise Exception(\"This method requires at least a dataset_id or content_type.\")\n", "def _format_new_api_request(dataid=None, row_id=None, content_type=None):\n if dataid is not None:\n if content_type is not None:\n if row_id is not None:\n return \"{0}{1}/{2}.{3}\".format(DEFAULT_API_PATH, dataid, row_id, content_type)\n else:\n return \"{0}{1}.{2}\".format(DEFAULT_API_PATH, dataid, content_type)\n\n raise Exception(\"This method requires at least a dataset_id or content_type.\")\n", "def _perform_request(self, request_type, resource, **kwargs):\n '''\n Utility method that performs all requests.\n '''\n request_type_methods = set([\"get\", \"post\", \"put\", \"delete\"])\n if request_type not in request_type_methods:\n raise Exception(\"Unknown request type. Supported request types are\"\n \": {0}\".format(\", \".join(request_type_methods)))\n\n uri = \"{0}{1}{2}\".format(self.uri_prefix, self.domain, resource)\n\n # set a timeout, just to be safe\n kwargs[\"timeout\"] = self.timeout\n\n response = getattr(self.session, request_type)(uri, **kwargs)\n\n # handle errors\n if response.status_code not in (200, 202):\n _raise_for_status(response)\n\n # when responses have no content body (ie. 
delete, set_permission),\n # simply return the whole response\n if not response.text:\n return response\n\n # for other request types, return most useful data\n content_type = response.headers.get('content-type').strip().lower()\n if re.match(r'application\\/json', content_type):\n return response.json()\n elif re.match(r'text\\/csv', content_type):\n csv_stream = StringIO(response.text)\n return [line for line in csv.reader(csv_stream)]\n elif re.match(r'application\\/rdf\\+xml', content_type):\n return response.content\n elif re.match(r'text\\/plain', content_type):\n try:\n return json.loads(response.text)\n except ValueError:\n return response.text\n else:\n raise Exception(\"Unknown response format: {0}\"\n .format(content_type))\n" ]
class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. 
''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. 
''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. 
Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. 
''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. ''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. 
Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type)) def close(self): ''' Close the session. ''' self.session.close()
xmunoz/sodapy
sodapy/__init__.py
Socrata._perform_request
python
def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. Supported request types are" ": {0}".format(", ".join(request_type_methods))) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) # set a timeout, just to be safe kwargs["timeout"] = self.timeout response = getattr(self.session, request_type)(uri, **kwargs) # handle errors if response.status_code not in (200, 202): _raise_for_status(response) # when responses have no content body (ie. delete, set_permission), # simply return the whole response if not response.text: return response # for other request types, return most useful data content_type = response.headers.get('content-type').strip().lower() if re.match(r'application\/json', content_type): return response.json() elif re.match(r'text\/csv', content_type): csv_stream = StringIO(response.text) return [line for line in csv.reader(csv_stream)] elif re.match(r'application\/rdf\+xml', content_type): return response.content elif re.match(r'text\/plain', content_type): try: return json.loads(response.text) except ValueError: return response.text else: raise Exception("Unknown response format: {0}" .format(content_type))
Utility method that performs all requests.
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L459-L500
[ "def _raise_for_status(response):\n '''\n Custom raise_for_status with more appropriate error message.\n '''\n http_error_msg = \"\"\n\n if 400 <= response.status_code < 500:\n http_error_msg = \"{0} Client Error: {1}\".format(response.status_code,\n response.reason)\n\n elif 500 <= response.status_code < 600:\n http_error_msg = \"{0} Server Error: {1}\".format(response.status_code,\n response.reason)\n\n if http_error_msg:\n try:\n more_info = response.json().get(\"message\")\n except ValueError:\n more_info = None\n if more_info and more_info.lower() != response.reason.lower():\n http_error_msg += \".\\n\\t{0}\".format(more_info)\n raise requests.exceptions.HTTPError(http_error_msg, response=response)\n" ]
class Socrata(object): ''' The main class that interacts with the SODA API. Sample usage: from sodapy import Socrata client = Socrata("opendata.socrata.com", None) ''' def __init__(self, domain, app_token, username=None, password=None, access_token=None, session_adapter=None, timeout=10): ''' The required arguments are: domain: the domain you wish you to access app_token: your Socrata application token Simple requests are possible without an app_token, though these requests will be rate-limited. For write/update/delete operations or private datasets, the Socrata API currently supports basic HTTP authentication, which requires these additional parameters. username: your Socrata username password: your Socrata password The basic HTTP authentication comes with a deprecation warning, and the current recommended authentication method is OAuth 2.0. To make requests on behalf of the user using OAuth 2.0 authentication, follow the recommended procedure and provide the final access_token to the client. 
More information about authentication can be found in the official docs: http://dev.socrata.com/docs/authentication.html ''' if not domain: raise Exception("A domain is required.") self.domain = domain # set up the session with proper authentication crendentials self.session = requests.Session() if not app_token: logging.warning("Requests made without an app_token will be" " subject to strict throttling limits.") else: self.session.headers.update({"X-App-token": app_token}) authentication_validation(username, password, access_token) # use either basic HTTP auth or OAuth2.0 if username and password: self.session.auth = (username, password) elif access_token: self.session.headers.update( {"Authorization": "OAuth {0}".format(access_token)} ) if session_adapter: self.session.mount(session_adapter["prefix"], session_adapter["adapter"]) self.uri_prefix = session_adapter["prefix"] else: self.uri_prefix = "https://" if not isinstance(timeout, (int, float)): raise TypeError("Timeout must be numeric.") self.timeout = timeout def __enter__(self): ''' This runs as the with block is set up. ''' return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): ''' This runs at the end of a with block. It simply closes the client. Exceptions are propagated forward in the program as usual, and are not handled here. ''' self.close() def datasets(self, limit=0, offset=0, order=None, **kwargs): ''' Returns the list of datasets associated with a particular domain. WARNING: Large limits (>1000) will return megabytes of data, which can be slow on low-bandwidth networks, and is also a lot of data to hold in memory. 
This method performs a get request on these type of URLs: https://data.edmonton.ca/api/catalog/v1 limit: max number of results to return, default is all (0) offset: the offset of result set order: field to sort on, optionally with ' ASC' or ' DESC' suffix ids: list of dataset IDs to consider domains: list of domains to search categories: list of categories tags: list of tags only: list of logical types to return, among `api`, `calendar`, `chart`, `datalens`, `dataset`, `federated_href`, `file`, `filter`, `form`, `href`, `link`, `map`, `measure`, `story`, `visualization` shared_to: list of users IDs or team IDs that datasets have to be shared with, or the string `site` meaning anyone on the domain. Note that you may only specify yourself or a team that you are on. Also note that if you search for assets shared to you, assets owned by you might be not be returned. column_names: list of column names that must be present in the tabular datasets q: text query that will be used by Elasticsearch to match results min_should_match: string specifying the number of words from `q` that should match. Refer to Elasticsearch docs for the format, the default is '3<60%', meaning that 60% of the terms must match, or all of them if there are 3 or fewer. 
attribution: string specifying the organization datasets must come from license: string used to filter on results having a specific license derived_from: string containing the ID of a dataset that must be a parent of the result datasets (for example, charts are derived from a parent dataset) provenance: string 'official' or 'community' for_user: string containing a user ID that must own the returned datasets visibility: string 'open' or 'internal' public: boolean indicating that all returned datasets should be public (True) or private (False) published: boolean indicating that returned datasets should have been published (True) or not yet published (False) approval_status: string 'pending', 'rejected', 'approved', 'not_ready' filtering results by their current status in the approval pipeline explicitly_hidden: boolean filtering out datasets that have been explicitly hidden on a domain (False) or returning only those (True) derived: boolean allowing to search only for derived datasets (True) or only those from which other datasets were derived (False) ''' # Those filters can be passed multiple times; this function expects # an iterable for them filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only', 'shared_to', 'column_names']) # Those filters only get a single value filter_single = set([ 'q', 'min_should_match', 'attribution', 'license', 'derived_from', 'provenance', 'for_user', 'visibility', 'public', 'published', 'approval_status', 'explicitly_hidden', 'derived' ]) all_filters = filter_multiple.union(filter_single) for key in kwargs: if key not in all_filters: raise TypeError("Unexpected keyword argument %s" % key) params = [] if limit: params.append(('limit', limit)) for key, value in kwargs.items(): if key in filter_multiple: for item in value: params.append((key, item)) elif key in filter_single: params.append((key, value)) # TODO: custom domain-specific metadata # https://socratadiscovery.docs.apiary.io/ # 
#reference/0/find-by-domain-specific-metadata if order: params.append(('order', order)) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) numResults = results['resultSetSize'] # no more results to fetch, or limit reached if (limit >= numResults or limit == len(results['results']) or numResults == len(results['results'])): return results['results'] if limit != 0: raise Exception("Unexpected number of results returned from endpoint.\ Expected {0}, got {1}.".format(limit, len(results['results']))) # get all remaining results all_results = results['results'] while len(all_results) != numResults: offset += len(results["results"]) results = self._perform_request("get", DATASETS_PATH, params=params + [('offset', offset)]) all_results.extend(results['results']) return all_results def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. 
''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload) def set_permission(self, dataset_identifier, permission="private", content_type="json"): ''' Set a dataset's permissions to private or public Options are private, public ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) params = { "method": "setPermission", "value": "public.read" if permission == "public" else permission } return self._perform_request("put", resource, params=params) def get_metadata(self, dataset_identifier, content_type="json"): ''' Retrieve the metadata for a particular dataset. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("get", resource) def update_metadata(self, dataset_identifier, update_fields, content_type="json"): ''' Update the metadata for a particular dataset. update_fields is a dictionary containing [metadata key:new value] pairs. This method performs a full replace for the key:value pairs listed in `update_fields`, and returns all of the metadata with the updates applied. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, update_fields) def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"): ''' Download all of the attachments associated with a dataset. Return the paths of downloaded files. 
''' metadata = self.get_metadata(dataset_identifier, content_type=content_type) files = [] attachments = metadata['metadata'].get("attachments") if not attachments: logging.info("No attachments were found or downloaded.") return files download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier) if not os.path.exists(download_dir): os.makedirs(download_dir) for attachment in attachments: file_path = os.path.join(download_dir, attachment["filename"]) has_assetid = attachment.get("assetId", False) if has_assetid: base = _format_old_api_request(dataid=dataset_identifier) assetid = attachment["assetId"] resource = "{0}/files/{1}?download=true&filename={2}"\ .format(base, assetid, attachment["filename"]) else: base = "/api/assets" assetid = attachment["blobId"] resource = "{0}/{1}?download=true".format(base, assetid) uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource) _download_file(uri, file_path) files.append(file_path) logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files))) return files def publish(self, dataset_identifier, content_type="json"): ''' The create() method creates a dataset in a "working copy" state. This method publishes it. ''' base = _format_old_api_request(dataid=dataset_identifier) resource = "{0}/publication.{1}".format(base, content_type) return self._perform_request("post", resource) def get(self, dataset_identifier, content_type="json", **kwargs): ''' Read data from the requested resource. Options for content_type are json, csv, and xml. Optionally, specify a keyword arg to filter results: select : the set of columns to be returned, defaults to * where : filters the rows to be returned, defaults to limit order : specifies the order of results group : column to group results on limit : max number of results to return, defaults to 1000 offset : offset, used for paging. 
Defaults to 0 q : performs a full text search for a value query : full SoQL query string, all as one parameter exclude_system_fields : defaults to true. If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) # SoQL parameters params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$query": kwargs.pop("query", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } # Additional parameters, such as field names params.update(kwargs) params = _clear_empty_values(params) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload) def replace(self, dataset_identifier, payload, content_type="json"): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. 
''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("put", resource, payload) def create_non_data_file(self, params, file_data): ''' Creates a new file-based dataset with the name provided in the files tuple. A valid file input would be: files = ( {'file': ("gtfs2", open('myfile.zip', 'rb'))} ) ''' api_prefix = '/api/imports2/' if not params.get('method', None): params['method'] = 'blob' return self._perform_request("post", api_prefix, params=params, files=file_data) def replace_non_data_file(self, dataset_identifier, params, file_data): ''' Same as create_non_data_file, but replaces a file that already exists in a file-based dataset. WARNING: a table-based dataset cannot be replaced by a file-based dataset. Use create_non_data_file in order to replace. ''' resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt") if not params.get('method', None): params['method'] = 'replaceBlob' params['id'] = dataset_identifier return self._perform_request("post", resource, params=params, files=file_data) def _perform_update(self, method, resource, payload): ''' Execute the update task. ''' # python2/3 compatibility wizardry try: file_type = file except NameError: file_type = IOBase if isinstance(payload, (dict, list)): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file_type): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported.".format(type(payload))) return response def delete(self, dataset_identifier, row_id=None, content_type="json"): ''' Delete the entire dataset, e.g. client.delete("nimj-3ivp") or a single row, e.g. 
client.delete("nimj-3ivp", row_id=4) ''' if row_id: resource = _format_new_api_request(dataid=dataset_identifier, row_id=row_id, content_type=content_type) else: resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_request("delete", resource) def close(self): ''' Close the session. ''' self.session.close()
tableau/document-api-python
tableaudocumentapi/xfile.py
xml_open
python
def xml_open(filename, expected_root=None): # Is the file a zip (.twbx or .tdsx) if zipfile.is_zipfile(filename): tree = get_xml_from_archive(filename) else: tree = ET.parse(filename) # Is the file a supported version tree_root = tree.getroot() file_version = Version(tree_root.attrib.get('version', '0.0')) if file_version < MIN_SUPPORTED_VERSION: raise TableauVersionNotSupportedException(file_version) # Does the root tag match the object type (workbook or data source) if expected_root and (expected_root != tree_root.tag): raise TableauInvalidFileException( "'{}'' is not a valid '{}' file".format(filename, expected_root)) return tree
Opens the provided 'filename'. Handles detecting if the file is an archive, detecting the document version, and validating the root tag.
train
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/xfile.py#L24-L46
[ "def get_xml_from_archive(filename):\n with zipfile.ZipFile(filename) as zf:\n with zf.open(find_file_in_zip(zf)) as xml_file:\n xml_tree = ET.parse(xml_file)\n\n return xml_tree\n" ]
import contextlib import os import shutil import tempfile import zipfile import xml.etree.ElementTree as ET try: from distutils2.version import NormalizedVersion as Version except ImportError: from distutils.version import LooseVersion as Version MIN_SUPPORTED_VERSION = Version("9.0") class TableauVersionNotSupportedException(Exception): pass class TableauInvalidFileException(Exception): pass @contextlib.contextmanager def temporary_directory(*args, **kwargs): d = tempfile.mkdtemp(*args, **kwargs) try: yield d finally: shutil.rmtree(d) def find_file_in_zip(zip_file): '''Returns the twb/tds file from a Tableau packaged file format. Packaged files can contain cache entries which are also valid XML, so only look for files with a .tds or .twb extension. ''' candidate_files = filter(lambda x: x.split('.')[-1] in ('twb', 'tds'), zip_file.namelist()) for filename in candidate_files: with zip_file.open(filename) as xml_candidate: try: ET.parse(xml_candidate) return filename except ET.ParseError: # That's not an XML file by gosh pass def get_xml_from_archive(filename): with zipfile.ZipFile(filename) as zf: with zf.open(find_file_in_zip(zf)) as xml_file: xml_tree = ET.parse(xml_file) return xml_tree def build_archive_file(archive_contents, zip_file): """Build a Tableau-compatible archive file.""" # This is tested against Desktop and Server, and reverse engineered by lots # of trial and error. Do not change this logic. for root_dir, _, files in os.walk(archive_contents): relative_dir = os.path.relpath(root_dir, archive_contents) for f in files: temp_file_full_path = os.path.join( archive_contents, relative_dir, f) zipname = os.path.join(relative_dir, f) zip_file.write(temp_file_full_path, arcname=zipname) def save_into_archive(xml_tree, filename, new_filename=None): # Saving an archive means extracting the contents into a temp folder, # saving the changes over the twb/tds in that folder, and then # packaging it back up into a zip with a very specific format # e.g. 
no empty files for directories, which Windows and Mac do by default if new_filename is None: new_filename = filename # Extract to temp directory with temporary_directory() as temp_path: with zipfile.ZipFile(filename) as zf: xml_file = find_file_in_zip(zf) zf.extractall(temp_path) # Write the new version of the file to the temp directory xml_tree.write(os.path.join( temp_path, xml_file), encoding="utf-8", xml_declaration=True) # Write the new archive with the contents of the temp folder with zipfile.ZipFile(new_filename, "w", compression=zipfile.ZIP_DEFLATED) as new_archive: build_archive_file(temp_path, new_archive) def _save_file(container_file, xml_tree, new_filename=None): if new_filename is None: new_filename = container_file if zipfile.is_zipfile(container_file): save_into_archive(xml_tree, container_file, new_filename) else: xml_tree.write(new_filename, encoding="utf-8", xml_declaration=True)
tableau/document-api-python
tableaudocumentapi/xfile.py
find_file_in_zip
python
def find_file_in_zip(zip_file): '''Returns the twb/tds file from a Tableau packaged file format. Packaged files can contain cache entries which are also valid XML, so only look for files with a .tds or .twb extension. ''' candidate_files = filter(lambda x: x.split('.')[-1] in ('twb', 'tds'), zip_file.namelist()) for filename in candidate_files: with zip_file.open(filename) as xml_candidate: try: ET.parse(xml_candidate) return filename except ET.ParseError: # That's not an XML file by gosh pass
Returns the twb/tds file from a Tableau packaged file format. Packaged files can contain cache entries which are also valid XML, so only look for files with a .tds or .twb extension.
train
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/xfile.py#L58-L74
null
import contextlib import os import shutil import tempfile import zipfile import xml.etree.ElementTree as ET try: from distutils2.version import NormalizedVersion as Version except ImportError: from distutils.version import LooseVersion as Version MIN_SUPPORTED_VERSION = Version("9.0") class TableauVersionNotSupportedException(Exception): pass class TableauInvalidFileException(Exception): pass def xml_open(filename, expected_root=None): """Opens the provided 'filename'. Handles detecting if the file is an archive, detecting the document version, and validating the root tag.""" # Is the file a zip (.twbx or .tdsx) if zipfile.is_zipfile(filename): tree = get_xml_from_archive(filename) else: tree = ET.parse(filename) # Is the file a supported version tree_root = tree.getroot() file_version = Version(tree_root.attrib.get('version', '0.0')) if file_version < MIN_SUPPORTED_VERSION: raise TableauVersionNotSupportedException(file_version) # Does the root tag match the object type (workbook or data source) if expected_root and (expected_root != tree_root.tag): raise TableauInvalidFileException( "'{}'' is not a valid '{}' file".format(filename, expected_root)) return tree @contextlib.contextmanager def temporary_directory(*args, **kwargs): d = tempfile.mkdtemp(*args, **kwargs) try: yield d finally: shutil.rmtree(d) def get_xml_from_archive(filename): with zipfile.ZipFile(filename) as zf: with zf.open(find_file_in_zip(zf)) as xml_file: xml_tree = ET.parse(xml_file) return xml_tree def build_archive_file(archive_contents, zip_file): """Build a Tableau-compatible archive file.""" # This is tested against Desktop and Server, and reverse engineered by lots # of trial and error. Do not change this logic. 
for root_dir, _, files in os.walk(archive_contents): relative_dir = os.path.relpath(root_dir, archive_contents) for f in files: temp_file_full_path = os.path.join( archive_contents, relative_dir, f) zipname = os.path.join(relative_dir, f) zip_file.write(temp_file_full_path, arcname=zipname) def save_into_archive(xml_tree, filename, new_filename=None): # Saving an archive means extracting the contents into a temp folder, # saving the changes over the twb/tds in that folder, and then # packaging it back up into a zip with a very specific format # e.g. no empty files for directories, which Windows and Mac do by default if new_filename is None: new_filename = filename # Extract to temp directory with temporary_directory() as temp_path: with zipfile.ZipFile(filename) as zf: xml_file = find_file_in_zip(zf) zf.extractall(temp_path) # Write the new version of the file to the temp directory xml_tree.write(os.path.join( temp_path, xml_file), encoding="utf-8", xml_declaration=True) # Write the new archive with the contents of the temp folder with zipfile.ZipFile(new_filename, "w", compression=zipfile.ZIP_DEFLATED) as new_archive: build_archive_file(temp_path, new_archive) def _save_file(container_file, xml_tree, new_filename=None): if new_filename is None: new_filename = container_file if zipfile.is_zipfile(container_file): save_into_archive(xml_tree, container_file, new_filename) else: xml_tree.write(new_filename, encoding="utf-8", xml_declaration=True)
tableau/document-api-python
tableaudocumentapi/xfile.py
build_archive_file
python
def build_archive_file(archive_contents, zip_file): # This is tested against Desktop and Server, and reverse engineered by lots # of trial and error. Do not change this logic. for root_dir, _, files in os.walk(archive_contents): relative_dir = os.path.relpath(root_dir, archive_contents) for f in files: temp_file_full_path = os.path.join( archive_contents, relative_dir, f) zipname = os.path.join(relative_dir, f) zip_file.write(temp_file_full_path, arcname=zipname)
Build a Tableau-compatible archive file.
train
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/xfile.py#L85-L96
null
import contextlib import os import shutil import tempfile import zipfile import xml.etree.ElementTree as ET try: from distutils2.version import NormalizedVersion as Version except ImportError: from distutils.version import LooseVersion as Version MIN_SUPPORTED_VERSION = Version("9.0") class TableauVersionNotSupportedException(Exception): pass class TableauInvalidFileException(Exception): pass def xml_open(filename, expected_root=None): """Opens the provided 'filename'. Handles detecting if the file is an archive, detecting the document version, and validating the root tag.""" # Is the file a zip (.twbx or .tdsx) if zipfile.is_zipfile(filename): tree = get_xml_from_archive(filename) else: tree = ET.parse(filename) # Is the file a supported version tree_root = tree.getroot() file_version = Version(tree_root.attrib.get('version', '0.0')) if file_version < MIN_SUPPORTED_VERSION: raise TableauVersionNotSupportedException(file_version) # Does the root tag match the object type (workbook or data source) if expected_root and (expected_root != tree_root.tag): raise TableauInvalidFileException( "'{}'' is not a valid '{}' file".format(filename, expected_root)) return tree @contextlib.contextmanager def temporary_directory(*args, **kwargs): d = tempfile.mkdtemp(*args, **kwargs) try: yield d finally: shutil.rmtree(d) def find_file_in_zip(zip_file): '''Returns the twb/tds file from a Tableau packaged file format. Packaged files can contain cache entries which are also valid XML, so only look for files with a .tds or .twb extension. 
''' candidate_files = filter(lambda x: x.split('.')[-1] in ('twb', 'tds'), zip_file.namelist()) for filename in candidate_files: with zip_file.open(filename) as xml_candidate: try: ET.parse(xml_candidate) return filename except ET.ParseError: # That's not an XML file by gosh pass def get_xml_from_archive(filename): with zipfile.ZipFile(filename) as zf: with zf.open(find_file_in_zip(zf)) as xml_file: xml_tree = ET.parse(xml_file) return xml_tree def save_into_archive(xml_tree, filename, new_filename=None): # Saving an archive means extracting the contents into a temp folder, # saving the changes over the twb/tds in that folder, and then # packaging it back up into a zip with a very specific format # e.g. no empty files for directories, which Windows and Mac do by default if new_filename is None: new_filename = filename # Extract to temp directory with temporary_directory() as temp_path: with zipfile.ZipFile(filename) as zf: xml_file = find_file_in_zip(zf) zf.extractall(temp_path) # Write the new version of the file to the temp directory xml_tree.write(os.path.join( temp_path, xml_file), encoding="utf-8", xml_declaration=True) # Write the new archive with the contents of the temp folder with zipfile.ZipFile(new_filename, "w", compression=zipfile.ZIP_DEFLATED) as new_archive: build_archive_file(temp_path, new_archive) def _save_file(container_file, xml_tree, new_filename=None): if new_filename is None: new_filename = container_file if zipfile.is_zipfile(container_file): save_into_archive(xml_tree, container_file, new_filename) else: xml_tree.write(new_filename, encoding="utf-8", xml_declaration=True)
tableau/document-api-python
tableaudocumentapi/connection.py
Connection.from_attributes
python
def from_attributes(cls, server, dbname, username, dbclass, port=None, query_band=None, initial_sql=None, authentication=''): root = ET.Element('connection', authentication=authentication) xml = cls(root) xml.server = server xml.dbname = dbname xml.username = username xml.dbclass = dbclass xml.port = port xml.query_band = query_band xml.initial_sql = initial_sql return xml
Creates a new connection that can be added into a Data Source. defaults to `''` which will be treated as 'prompt' by Tableau.
train
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L28-L43
null
class Connection(object): """A class representing connections inside Data Sources.""" def __init__(self, connxml): """Connection is usually instantiated by passing in connection elements in a Data Source. If creating a connection from scratch you can call `from_attributes` passing in the connection attributes. """ self._connectionXML = connxml self._dbname = connxml.get('dbname') self._server = connxml.get('server') self._username = connxml.get('username') self._authentication = connxml.get('authentication') self._class = connxml.get('class') self._port = connxml.get('port', None) self._query_band = connxml.get('query-band-spec', None) self._initial_sql = connxml.get('one-time-sql', None) def __repr__(self): return "'<Connection server='{}' dbname='{}' @ {}>'".format(self._server, self._dbname, hex(id(self))) @classmethod @property def dbname(self): """Database name for the connection. Not the table name.""" return self._dbname @dbname.setter def dbname(self, value): """ Set the connection's database name property. Args: value: New name of the database. String. Returns: Nothing. """ self._dbname = value self._connectionXML.set('dbname', value) @property def server(self): """Hostname or IP address of the database server. May also be a URL in some connection types.""" return self._server @server.setter def server(self, value): """ Set the connection's server property. Args: value: New server. String. Returns: Nothing. """ self._server = value self._connectionXML.set('server', value) @property def username(self): """Username used to authenticate to the database.""" return self._username @username.setter def username(self, value): """ Set the connection's username property. Args: value: New username value. String. Returns: Nothing. """ self._username = value self._connectionXML.set('username', value) @property def authentication(self): return self._authentication @property def dbclass(self): """The type of connection (e.g. 'MySQL', 'Postgresql'). 
A complete list can be found in dbclass.py""" return self._class @dbclass.setter def dbclass(self, value): """Set the connection's dbclass property. Args: value: New dbclass value. String. Returns: Nothing. """ if not is_valid_dbclass(value): raise AttributeError("'{}' is not a valid database type".format(value)) self._class = value self._connectionXML.set('class', value) @property def port(self): """Port used to connect to the database.""" return self._port @port.setter def port(self, value): """Set the connection's port property. Args: value: New port value. String. Returns: Nothing. """ self._port = value # If port is None we remove the element and don't write it to XML if value is None: try: del self._connectionXML.attrib['port'] except KeyError: pass else: self._connectionXML.set('port', value) @property def query_band(self): """Query band passed on connection to database.""" return self._query_band @query_band.setter def query_band(self, value): """Set the connection's query_band property. Args: value: New query_band value. String. Returns: Nothing. """ self._query_band = value # If query band is None we remove the element and don't write it to XML if value is None: try: del self._connectionXML.attrib['query-band-spec'] except KeyError: pass else: self._connectionXML.set('query-band-spec', value) @property def initial_sql(self): """Initial SQL to be run.""" return self._initial_sql @initial_sql.setter def initial_sql(self, value): """Set the connection's initial_sql property. Args: value: New initial_sql value. String. Returns: Nothing. """ self._initial_sql = value # If initial_sql is None we remove the element and don't write it to XML if value is None: try: del self._connectionXML.attrib['one-time-sql'] except KeyError: pass else: self._connectionXML.set('one-time-sql', value)
tableau/document-api-python
tableaudocumentapi/connection.py
Connection.dbname
python
def dbname(self, value): self._dbname = value self._connectionXML.set('dbname', value)
Set the connection's database name property. Args: value: New name of the database. String. Returns: Nothing.
train
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L51-L63
null
class Connection(object): """A class representing connections inside Data Sources.""" def __init__(self, connxml): """Connection is usually instantiated by passing in connection elements in a Data Source. If creating a connection from scratch you can call `from_attributes` passing in the connection attributes. """ self._connectionXML = connxml self._dbname = connxml.get('dbname') self._server = connxml.get('server') self._username = connxml.get('username') self._authentication = connxml.get('authentication') self._class = connxml.get('class') self._port = connxml.get('port', None) self._query_band = connxml.get('query-band-spec', None) self._initial_sql = connxml.get('one-time-sql', None) def __repr__(self): return "'<Connection server='{}' dbname='{}' @ {}>'".format(self._server, self._dbname, hex(id(self))) @classmethod def from_attributes(cls, server, dbname, username, dbclass, port=None, query_band=None, initial_sql=None, authentication=''): """Creates a new connection that can be added into a Data Source. defaults to `''` which will be treated as 'prompt' by Tableau.""" root = ET.Element('connection', authentication=authentication) xml = cls(root) xml.server = server xml.dbname = dbname xml.username = username xml.dbclass = dbclass xml.port = port xml.query_band = query_band xml.initial_sql = initial_sql return xml @property def dbname(self): """Database name for the connection. Not the table name.""" return self._dbname @dbname.setter @property def server(self): """Hostname or IP address of the database server. May also be a URL in some connection types.""" return self._server @server.setter def server(self, value): """ Set the connection's server property. Args: value: New server. String. Returns: Nothing. 
""" self._server = value self._connectionXML.set('server', value) @property def username(self): """Username used to authenticate to the database.""" return self._username @username.setter def username(self, value): """ Set the connection's username property. Args: value: New username value. String. Returns: Nothing. """ self._username = value self._connectionXML.set('username', value) @property def authentication(self): return self._authentication @property def dbclass(self): """The type of connection (e.g. 'MySQL', 'Postgresql'). A complete list can be found in dbclass.py""" return self._class @dbclass.setter def dbclass(self, value): """Set the connection's dbclass property. Args: value: New dbclass value. String. Returns: Nothing. """ if not is_valid_dbclass(value): raise AttributeError("'{}' is not a valid database type".format(value)) self._class = value self._connectionXML.set('class', value) @property def port(self): """Port used to connect to the database.""" return self._port @port.setter def port(self, value): """Set the connection's port property. Args: value: New port value. String. Returns: Nothing. """ self._port = value # If port is None we remove the element and don't write it to XML if value is None: try: del self._connectionXML.attrib['port'] except KeyError: pass else: self._connectionXML.set('port', value) @property def query_band(self): """Query band passed on connection to database.""" return self._query_band @query_band.setter def query_band(self, value): """Set the connection's query_band property. Args: value: New query_band value. String. Returns: Nothing. 
""" self._query_band = value # If query band is None we remove the element and don't write it to XML if value is None: try: del self._connectionXML.attrib['query-band-spec'] except KeyError: pass else: self._connectionXML.set('query-band-spec', value) @property def initial_sql(self): """Initial SQL to be run.""" return self._initial_sql @initial_sql.setter def initial_sql(self, value): """Set the connection's initial_sql property. Args: value: New initial_sql value. String. Returns: Nothing. """ self._initial_sql = value # If initial_sql is None we remove the element and don't write it to XML if value is None: try: del self._connectionXML.attrib['one-time-sql'] except KeyError: pass else: self._connectionXML.set('one-time-sql', value)
tableau/document-api-python
tableaudocumentapi/connection.py
Connection.server
python
def server(self, value): self._server = value self._connectionXML.set('server', value)
Set the connection's server property. Args: value: New server. String. Returns: Nothing.
train
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L71-L83
null
class Connection(object): """A class representing connections inside Data Sources.""" def __init__(self, connxml): """Connection is usually instantiated by passing in connection elements in a Data Source. If creating a connection from scratch you can call `from_attributes` passing in the connection attributes. """ self._connectionXML = connxml self._dbname = connxml.get('dbname') self._server = connxml.get('server') self._username = connxml.get('username') self._authentication = connxml.get('authentication') self._class = connxml.get('class') self._port = connxml.get('port', None) self._query_band = connxml.get('query-band-spec', None) self._initial_sql = connxml.get('one-time-sql', None) def __repr__(self): return "'<Connection server='{}' dbname='{}' @ {}>'".format(self._server, self._dbname, hex(id(self))) @classmethod def from_attributes(cls, server, dbname, username, dbclass, port=None, query_band=None, initial_sql=None, authentication=''): """Creates a new connection that can be added into a Data Source. defaults to `''` which will be treated as 'prompt' by Tableau.""" root = ET.Element('connection', authentication=authentication) xml = cls(root) xml.server = server xml.dbname = dbname xml.username = username xml.dbclass = dbclass xml.port = port xml.query_band = query_band xml.initial_sql = initial_sql return xml @property def dbname(self): """Database name for the connection. Not the table name.""" return self._dbname @dbname.setter def dbname(self, value): """ Set the connection's database name property. Args: value: New name of the database. String. Returns: Nothing. """ self._dbname = value self._connectionXML.set('dbname', value) @property def server(self): """Hostname or IP address of the database server. 
May also be a URL in some connection types.""" return self._server @server.setter @property def username(self): """Username used to authenticate to the database.""" return self._username @username.setter def username(self, value): """ Set the connection's username property. Args: value: New username value. String. Returns: Nothing. """ self._username = value self._connectionXML.set('username', value) @property def authentication(self): return self._authentication @property def dbclass(self): """The type of connection (e.g. 'MySQL', 'Postgresql'). A complete list can be found in dbclass.py""" return self._class @dbclass.setter def dbclass(self, value): """Set the connection's dbclass property. Args: value: New dbclass value. String. Returns: Nothing. """ if not is_valid_dbclass(value): raise AttributeError("'{}' is not a valid database type".format(value)) self._class = value self._connectionXML.set('class', value) @property def port(self): """Port used to connect to the database.""" return self._port @port.setter def port(self, value): """Set the connection's port property. Args: value: New port value. String. Returns: Nothing. """ self._port = value # If port is None we remove the element and don't write it to XML if value is None: try: del self._connectionXML.attrib['port'] except KeyError: pass else: self._connectionXML.set('port', value) @property def query_band(self): """Query band passed on connection to database.""" return self._query_band @query_band.setter def query_band(self, value): """Set the connection's query_band property. Args: value: New query_band value. String. Returns: Nothing. 
""" self._query_band = value # If query band is None we remove the element and don't write it to XML if value is None: try: del self._connectionXML.attrib['query-band-spec'] except KeyError: pass else: self._connectionXML.set('query-band-spec', value) @property def initial_sql(self): """Initial SQL to be run.""" return self._initial_sql @initial_sql.setter def initial_sql(self, value): """Set the connection's initial_sql property. Args: value: New initial_sql value. String. Returns: Nothing. """ self._initial_sql = value # If initial_sql is None we remove the element and don't write it to XML if value is None: try: del self._connectionXML.attrib['one-time-sql'] except KeyError: pass else: self._connectionXML.set('one-time-sql', value)