repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
xtream1101/web-wrapper | web_wrapper/selenium_utils.py | SeleniumUtils._get_site | python | def _get_site(self, url, headers, cookies, timeout, driver_args, driver_kwargs):
try:
# **TODO**: Find what exception this will throw and catch it and call
# self.driver.execute_script("window.stop()")
# Then still try and get the source from the page
self.driver.set_page_load_timeout(timeout)
self.driver.get(url)
header_data = self.get_selenium_header()
status_code = header_data['status-code']
# Set data to access from script
self.status_code = status_code
self.url = self.driver.current_url
except TimeoutException:
logger.warning("Page timeout: {}".format(url))
try:
scraper_monitor.failed_url(url, 'Timeout')
except (NameError, AttributeError):
# Happens when scraper_monitor is not being used/setup
pass
except Exception:
logger.exception("Unknown problem with scraper_monitor sending a failed url")
except Exception as e:
raise e.with_traceback(sys.exc_info()[2])
else:
# If an exception was not thrown then check the http status code
if status_code < 400:
# If the http status code is not an error
return self.driver.page_source
else:
# If http status code is 400 or greater
raise SeleniumHTTPError("Status code >= 400", status_code=status_code) | Try and return page content in the requested format using selenium | train | https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/selenium_utils.py#L63-L101 | null | class SeleniumUtils:
def get_selenium_header(self):
"""
Return server response headers from selenium request
Also includes the keys `status-code` and `status-text`
"""
javascript = """
function parseResponseHeaders( headerStr ){
var headers = {};
if( !headerStr ){
return headers;
}
var headerPairs = headerStr.split('\\u000d\\u000a');
for( var i = 0; i < headerPairs.length; i++ ){
var headerPair = headerPairs[i];
var index = headerPair.indexOf('\\u003a\\u0020');
if( index > 0 ){
var key = headerPair.substring(0, index);
var val = headerPair.substring(index + 2);
headers[key] = val;
}
}
return headers;
}
var req = new XMLHttpRequest();
req.open('GET', document.location, false);
req.send(null);
var header = parseResponseHeaders(req.getAllResponseHeaders().toLowerCase());
header['status-code'] = req.status;
header['status-text'] = req.statusText;
return header;
"""
return self.driver.execute_script(javascript)
def hover(self, element):
"""
In selenium, move cursor over an element
:element: Object found using driver.find_...("element_class/id/etc")
"""
javascript = """var evObj = document.createEvent('MouseEvents');
evObj.initMouseEvent(\"mouseover\", true, false, window, 0, 0, 0, 0, 0, \
false, false, false, false, 0, null);
arguments[0].dispatchEvent(evObj);"""
if self.driver.selenium is not None:
self.driver.selenium.execute_script(javascript, element)
def reload_page(self):
logger.info("Refreshing page...")
if self.driver.selenium is not None:
try:
# Stop the current loading action before refreshing
self.driver.selenium.send_keys(webdriver.common.keys.Keys.ESCAPE)
self.driver.selenium.refresh()
except Exception:
logger.exception("Exception when reloading the page")
def scroll_to_bottom(self):
"""
Scoll to the very bottom of the page
TODO: add increment & delay options to scoll slowly down the whole page to let each section load in
"""
if self.driver.selenium is not None:
try:
self.driver.selenium.execute_script("window.scrollTo(0, document.body.scrollHeight);")
except WebDriverException:
self.driver.selenium.execute_script("window.scrollTo(0, 50000);")
except Exception:
logger.exception("Unknown error scrolling page")
def chrome_fullpage_screenshot(self, file, delay=0):
"""
Fullscreen workaround for chrome
Source: http://seleniumpythonqa.blogspot.com/2015/08/generate-full-page-screenshot-in-chrome.html
"""
total_width = self.driver.execute_script("return document.body.offsetWidth")
total_height = self.driver.execute_script("return document.body.parentNode.scrollHeight")
viewport_width = self.driver.execute_script("return document.body.clientWidth")
viewport_height = self.driver.execute_script("return window.innerHeight")
logger.info("Starting chrome full page screenshot workaround. Total: ({0}, {1}), Viewport: ({2},{3})"
.format(total_width, total_height, viewport_width, viewport_height))
rectangles = []
i = 0
while i < total_height:
ii = 0
top_height = i + viewport_height
if top_height > total_height:
top_height = total_height
while ii < total_width:
top_width = ii + viewport_width
if top_width > total_width:
top_width = total_width
logger.debug("Appending rectangle ({0},{1},{2},{3})".format(ii, i, top_width, top_height))
rectangles.append((ii, i, top_width, top_height))
ii = ii + viewport_width
i = i + viewport_height
stitched_image = Image.new('RGB', (total_width, total_height))
previous = None
part = 0
for rectangle in rectangles:
if previous is not None:
self.driver.execute_script("window.scrollTo({0}, {1})".format(rectangle[0], rectangle[1]))
logger.debug("Scrolled To ({0},{1})".format(rectangle[0], rectangle[1]))
time.sleep(delay)
file_name = "part_{0}.png".format(part)
logger.debug("Capturing {0} ...".format(file_name))
self.driver.get_screenshot_as_file(file_name)
screenshot = Image.open(file_name)
if rectangle[1] + viewport_height > total_height:
offset = (rectangle[0], total_height - viewport_height)
else:
offset = (rectangle[0], rectangle[1])
logger.debug("Adding to stitched image with offset ({0}, {1})".format(offset[0], offset[1]))
stitched_image.paste(screenshot, offset)
del screenshot
os.remove(file_name)
part = part + 1
previous = rectangle
stitched_image.save(file)
logger.info("Finishing chrome full page screenshot workaround...")
return True
|
xtream1101/web-wrapper | web_wrapper/selenium_utils.py | SeleniumUtils.hover | python | def hover(self, element):
javascript = """var evObj = document.createEvent('MouseEvents');
evObj.initMouseEvent(\"mouseover\", true, false, window, 0, 0, 0, 0, 0, \
false, false, false, false, 0, null);
arguments[0].dispatchEvent(evObj);"""
if self.driver.selenium is not None:
self.driver.selenium.execute_script(javascript, element) | In selenium, move cursor over an element
:element: Object found using driver.find_...("element_class/id/etc") | train | https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/selenium_utils.py#L103-L114 | null | class SeleniumUtils:
def get_selenium_header(self):
"""
Return server response headers from selenium request
Also includes the keys `status-code` and `status-text`
"""
javascript = """
function parseResponseHeaders( headerStr ){
var headers = {};
if( !headerStr ){
return headers;
}
var headerPairs = headerStr.split('\\u000d\\u000a');
for( var i = 0; i < headerPairs.length; i++ ){
var headerPair = headerPairs[i];
var index = headerPair.indexOf('\\u003a\\u0020');
if( index > 0 ){
var key = headerPair.substring(0, index);
var val = headerPair.substring(index + 2);
headers[key] = val;
}
}
return headers;
}
var req = new XMLHttpRequest();
req.open('GET', document.location, false);
req.send(null);
var header = parseResponseHeaders(req.getAllResponseHeaders().toLowerCase());
header['status-code'] = req.status;
header['status-text'] = req.statusText;
return header;
"""
return self.driver.execute_script(javascript)
def _get_site(self, url, headers, cookies, timeout, driver_args, driver_kwargs):
"""
Try and return page content in the requested format using selenium
"""
try:
# **TODO**: Find what exception this will throw and catch it and call
# self.driver.execute_script("window.stop()")
# Then still try and get the source from the page
self.driver.set_page_load_timeout(timeout)
self.driver.get(url)
header_data = self.get_selenium_header()
status_code = header_data['status-code']
# Set data to access from script
self.status_code = status_code
self.url = self.driver.current_url
except TimeoutException:
logger.warning("Page timeout: {}".format(url))
try:
scraper_monitor.failed_url(url, 'Timeout')
except (NameError, AttributeError):
# Happens when scraper_monitor is not being used/setup
pass
except Exception:
logger.exception("Unknown problem with scraper_monitor sending a failed url")
except Exception as e:
raise e.with_traceback(sys.exc_info()[2])
else:
# If an exception was not thrown then check the http status code
if status_code < 400:
# If the http status code is not an error
return self.driver.page_source
else:
# If http status code is 400 or greater
raise SeleniumHTTPError("Status code >= 400", status_code=status_code)
def reload_page(self):
logger.info("Refreshing page...")
if self.driver.selenium is not None:
try:
# Stop the current loading action before refreshing
self.driver.selenium.send_keys(webdriver.common.keys.Keys.ESCAPE)
self.driver.selenium.refresh()
except Exception:
logger.exception("Exception when reloading the page")
def scroll_to_bottom(self):
"""
Scoll to the very bottom of the page
TODO: add increment & delay options to scoll slowly down the whole page to let each section load in
"""
if self.driver.selenium is not None:
try:
self.driver.selenium.execute_script("window.scrollTo(0, document.body.scrollHeight);")
except WebDriverException:
self.driver.selenium.execute_script("window.scrollTo(0, 50000);")
except Exception:
logger.exception("Unknown error scrolling page")
def chrome_fullpage_screenshot(self, file, delay=0):
"""
Fullscreen workaround for chrome
Source: http://seleniumpythonqa.blogspot.com/2015/08/generate-full-page-screenshot-in-chrome.html
"""
total_width = self.driver.execute_script("return document.body.offsetWidth")
total_height = self.driver.execute_script("return document.body.parentNode.scrollHeight")
viewport_width = self.driver.execute_script("return document.body.clientWidth")
viewport_height = self.driver.execute_script("return window.innerHeight")
logger.info("Starting chrome full page screenshot workaround. Total: ({0}, {1}), Viewport: ({2},{3})"
.format(total_width, total_height, viewport_width, viewport_height))
rectangles = []
i = 0
while i < total_height:
ii = 0
top_height = i + viewport_height
if top_height > total_height:
top_height = total_height
while ii < total_width:
top_width = ii + viewport_width
if top_width > total_width:
top_width = total_width
logger.debug("Appending rectangle ({0},{1},{2},{3})".format(ii, i, top_width, top_height))
rectangles.append((ii, i, top_width, top_height))
ii = ii + viewport_width
i = i + viewport_height
stitched_image = Image.new('RGB', (total_width, total_height))
previous = None
part = 0
for rectangle in rectangles:
if previous is not None:
self.driver.execute_script("window.scrollTo({0}, {1})".format(rectangle[0], rectangle[1]))
logger.debug("Scrolled To ({0},{1})".format(rectangle[0], rectangle[1]))
time.sleep(delay)
file_name = "part_{0}.png".format(part)
logger.debug("Capturing {0} ...".format(file_name))
self.driver.get_screenshot_as_file(file_name)
screenshot = Image.open(file_name)
if rectangle[1] + viewport_height > total_height:
offset = (rectangle[0], total_height - viewport_height)
else:
offset = (rectangle[0], rectangle[1])
logger.debug("Adding to stitched image with offset ({0}, {1})".format(offset[0], offset[1]))
stitched_image.paste(screenshot, offset)
del screenshot
os.remove(file_name)
part = part + 1
previous = rectangle
stitched_image.save(file)
logger.info("Finishing chrome full page screenshot workaround...")
return True
|
xtream1101/web-wrapper | web_wrapper/selenium_utils.py | SeleniumUtils.scroll_to_bottom | python | def scroll_to_bottom(self):
if self.driver.selenium is not None:
try:
self.driver.selenium.execute_script("window.scrollTo(0, document.body.scrollHeight);")
except WebDriverException:
self.driver.selenium.execute_script("window.scrollTo(0, 50000);")
except Exception:
logger.exception("Unknown error scrolling page") | Scoll to the very bottom of the page
TODO: add increment & delay options to scoll slowly down the whole page to let each section load in | train | https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/selenium_utils.py#L126-L137 | null | class SeleniumUtils:
def get_selenium_header(self):
"""
Return server response headers from selenium request
Also includes the keys `status-code` and `status-text`
"""
javascript = """
function parseResponseHeaders( headerStr ){
var headers = {};
if( !headerStr ){
return headers;
}
var headerPairs = headerStr.split('\\u000d\\u000a');
for( var i = 0; i < headerPairs.length; i++ ){
var headerPair = headerPairs[i];
var index = headerPair.indexOf('\\u003a\\u0020');
if( index > 0 ){
var key = headerPair.substring(0, index);
var val = headerPair.substring(index + 2);
headers[key] = val;
}
}
return headers;
}
var req = new XMLHttpRequest();
req.open('GET', document.location, false);
req.send(null);
var header = parseResponseHeaders(req.getAllResponseHeaders().toLowerCase());
header['status-code'] = req.status;
header['status-text'] = req.statusText;
return header;
"""
return self.driver.execute_script(javascript)
def _get_site(self, url, headers, cookies, timeout, driver_args, driver_kwargs):
"""
Try and return page content in the requested format using selenium
"""
try:
# **TODO**: Find what exception this will throw and catch it and call
# self.driver.execute_script("window.stop()")
# Then still try and get the source from the page
self.driver.set_page_load_timeout(timeout)
self.driver.get(url)
header_data = self.get_selenium_header()
status_code = header_data['status-code']
# Set data to access from script
self.status_code = status_code
self.url = self.driver.current_url
except TimeoutException:
logger.warning("Page timeout: {}".format(url))
try:
scraper_monitor.failed_url(url, 'Timeout')
except (NameError, AttributeError):
# Happens when scraper_monitor is not being used/setup
pass
except Exception:
logger.exception("Unknown problem with scraper_monitor sending a failed url")
except Exception as e:
raise e.with_traceback(sys.exc_info()[2])
else:
# If an exception was not thrown then check the http status code
if status_code < 400:
# If the http status code is not an error
return self.driver.page_source
else:
# If http status code is 400 or greater
raise SeleniumHTTPError("Status code >= 400", status_code=status_code)
def hover(self, element):
"""
In selenium, move cursor over an element
:element: Object found using driver.find_...("element_class/id/etc")
"""
javascript = """var evObj = document.createEvent('MouseEvents');
evObj.initMouseEvent(\"mouseover\", true, false, window, 0, 0, 0, 0, 0, \
false, false, false, false, 0, null);
arguments[0].dispatchEvent(evObj);"""
if self.driver.selenium is not None:
self.driver.selenium.execute_script(javascript, element)
def reload_page(self):
logger.info("Refreshing page...")
if self.driver.selenium is not None:
try:
# Stop the current loading action before refreshing
self.driver.selenium.send_keys(webdriver.common.keys.Keys.ESCAPE)
self.driver.selenium.refresh()
except Exception:
logger.exception("Exception when reloading the page")
def chrome_fullpage_screenshot(self, file, delay=0):
"""
Fullscreen workaround for chrome
Source: http://seleniumpythonqa.blogspot.com/2015/08/generate-full-page-screenshot-in-chrome.html
"""
total_width = self.driver.execute_script("return document.body.offsetWidth")
total_height = self.driver.execute_script("return document.body.parentNode.scrollHeight")
viewport_width = self.driver.execute_script("return document.body.clientWidth")
viewport_height = self.driver.execute_script("return window.innerHeight")
logger.info("Starting chrome full page screenshot workaround. Total: ({0}, {1}), Viewport: ({2},{3})"
.format(total_width, total_height, viewport_width, viewport_height))
rectangles = []
i = 0
while i < total_height:
ii = 0
top_height = i + viewport_height
if top_height > total_height:
top_height = total_height
while ii < total_width:
top_width = ii + viewport_width
if top_width > total_width:
top_width = total_width
logger.debug("Appending rectangle ({0},{1},{2},{3})".format(ii, i, top_width, top_height))
rectangles.append((ii, i, top_width, top_height))
ii = ii + viewport_width
i = i + viewport_height
stitched_image = Image.new('RGB', (total_width, total_height))
previous = None
part = 0
for rectangle in rectangles:
if previous is not None:
self.driver.execute_script("window.scrollTo({0}, {1})".format(rectangle[0], rectangle[1]))
logger.debug("Scrolled To ({0},{1})".format(rectangle[0], rectangle[1]))
time.sleep(delay)
file_name = "part_{0}.png".format(part)
logger.debug("Capturing {0} ...".format(file_name))
self.driver.get_screenshot_as_file(file_name)
screenshot = Image.open(file_name)
if rectangle[1] + viewport_height > total_height:
offset = (rectangle[0], total_height - viewport_height)
else:
offset = (rectangle[0], rectangle[1])
logger.debug("Adding to stitched image with offset ({0}, {1})".format(offset[0], offset[1]))
stitched_image.paste(screenshot, offset)
del screenshot
os.remove(file_name)
part = part + 1
previous = rectangle
stitched_image.save(file)
logger.info("Finishing chrome full page screenshot workaround...")
return True
|
xtream1101/web-wrapper | web_wrapper/selenium_utils.py | SeleniumUtils.chrome_fullpage_screenshot | python | def chrome_fullpage_screenshot(self, file, delay=0):
total_width = self.driver.execute_script("return document.body.offsetWidth")
total_height = self.driver.execute_script("return document.body.parentNode.scrollHeight")
viewport_width = self.driver.execute_script("return document.body.clientWidth")
viewport_height = self.driver.execute_script("return window.innerHeight")
logger.info("Starting chrome full page screenshot workaround. Total: ({0}, {1}), Viewport: ({2},{3})"
.format(total_width, total_height, viewport_width, viewport_height))
rectangles = []
i = 0
while i < total_height:
ii = 0
top_height = i + viewport_height
if top_height > total_height:
top_height = total_height
while ii < total_width:
top_width = ii + viewport_width
if top_width > total_width:
top_width = total_width
logger.debug("Appending rectangle ({0},{1},{2},{3})".format(ii, i, top_width, top_height))
rectangles.append((ii, i, top_width, top_height))
ii = ii + viewport_width
i = i + viewport_height
stitched_image = Image.new('RGB', (total_width, total_height))
previous = None
part = 0
for rectangle in rectangles:
if previous is not None:
self.driver.execute_script("window.scrollTo({0}, {1})".format(rectangle[0], rectangle[1]))
logger.debug("Scrolled To ({0},{1})".format(rectangle[0], rectangle[1]))
time.sleep(delay)
file_name = "part_{0}.png".format(part)
logger.debug("Capturing {0} ...".format(file_name))
self.driver.get_screenshot_as_file(file_name)
screenshot = Image.open(file_name)
if rectangle[1] + viewport_height > total_height:
offset = (rectangle[0], total_height - viewport_height)
else:
offset = (rectangle[0], rectangle[1])
logger.debug("Adding to stitched image with offset ({0}, {1})".format(offset[0], offset[1]))
stitched_image.paste(screenshot, offset)
del screenshot
os.remove(file_name)
part = part + 1
previous = rectangle
stitched_image.save(file)
logger.info("Finishing chrome full page screenshot workaround...")
return True | Fullscreen workaround for chrome
Source: http://seleniumpythonqa.blogspot.com/2015/08/generate-full-page-screenshot-in-chrome.html | train | https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/selenium_utils.py#L139-L204 | null | class SeleniumUtils:
def get_selenium_header(self):
"""
Return server response headers from selenium request
Also includes the keys `status-code` and `status-text`
"""
javascript = """
function parseResponseHeaders( headerStr ){
var headers = {};
if( !headerStr ){
return headers;
}
var headerPairs = headerStr.split('\\u000d\\u000a');
for( var i = 0; i < headerPairs.length; i++ ){
var headerPair = headerPairs[i];
var index = headerPair.indexOf('\\u003a\\u0020');
if( index > 0 ){
var key = headerPair.substring(0, index);
var val = headerPair.substring(index + 2);
headers[key] = val;
}
}
return headers;
}
var req = new XMLHttpRequest();
req.open('GET', document.location, false);
req.send(null);
var header = parseResponseHeaders(req.getAllResponseHeaders().toLowerCase());
header['status-code'] = req.status;
header['status-text'] = req.statusText;
return header;
"""
return self.driver.execute_script(javascript)
def _get_site(self, url, headers, cookies, timeout, driver_args, driver_kwargs):
"""
Try and return page content in the requested format using selenium
"""
try:
# **TODO**: Find what exception this will throw and catch it and call
# self.driver.execute_script("window.stop()")
# Then still try and get the source from the page
self.driver.set_page_load_timeout(timeout)
self.driver.get(url)
header_data = self.get_selenium_header()
status_code = header_data['status-code']
# Set data to access from script
self.status_code = status_code
self.url = self.driver.current_url
except TimeoutException:
logger.warning("Page timeout: {}".format(url))
try:
scraper_monitor.failed_url(url, 'Timeout')
except (NameError, AttributeError):
# Happens when scraper_monitor is not being used/setup
pass
except Exception:
logger.exception("Unknown problem with scraper_monitor sending a failed url")
except Exception as e:
raise e.with_traceback(sys.exc_info()[2])
else:
# If an exception was not thrown then check the http status code
if status_code < 400:
# If the http status code is not an error
return self.driver.page_source
else:
# If http status code is 400 or greater
raise SeleniumHTTPError("Status code >= 400", status_code=status_code)
def hover(self, element):
"""
In selenium, move cursor over an element
:element: Object found using driver.find_...("element_class/id/etc")
"""
javascript = """var evObj = document.createEvent('MouseEvents');
evObj.initMouseEvent(\"mouseover\", true, false, window, 0, 0, 0, 0, 0, \
false, false, false, false, 0, null);
arguments[0].dispatchEvent(evObj);"""
if self.driver.selenium is not None:
self.driver.selenium.execute_script(javascript, element)
def reload_page(self):
logger.info("Refreshing page...")
if self.driver.selenium is not None:
try:
# Stop the current loading action before refreshing
self.driver.selenium.send_keys(webdriver.common.keys.Keys.ESCAPE)
self.driver.selenium.refresh()
except Exception:
logger.exception("Exception when reloading the page")
def scroll_to_bottom(self):
"""
Scoll to the very bottom of the page
TODO: add increment & delay options to scoll slowly down the whole page to let each section load in
"""
if self.driver.selenium is not None:
try:
self.driver.selenium.execute_script("window.scrollTo(0, document.body.scrollHeight);")
except WebDriverException:
self.driver.selenium.execute_script("window.scrollTo(0, 50000);")
except Exception:
logger.exception("Unknown error scrolling page")
|
xtream1101/web-wrapper | web_wrapper/driver_selenium_phantomjs.py | DriverSeleniumPhantomJS.set_proxy | python | def set_proxy(self, proxy, update=True):
update_web_driver = False
if self.current_proxy != proxy:
# Did we change proxies?
update_web_driver = True
self.current_proxy = proxy
if proxy is None:
self.driver_args['service_args'] = self.default_service_args
else:
proxy_parts = cutil.get_proxy_parts(proxy)
self.driver_args['service_args'].extend(['--proxy={host}:{port}'.format(**proxy_parts),
'--proxy-type={schema}'.format(**proxy_parts),
])
if proxy_parts.get('user') is not None:
self.driver_args['service_args'].append('--proxy-auth={user}:{password}'.format(**proxy_parts))
# Recreate webdriver with new proxy settings
if update is True and update_web_driver is True:
self._update() | Set proxy for requests session | train | https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/driver_selenium_phantomjs.py#L63-L86 | [
"def _update(self):\n \"\"\"\n Re create the web driver with the new proxy or header settings\n \"\"\"\n logger.debug(\"Update phantomjs web driver\")\n self.quit()\n self._create_session()\n"
] | class DriverSeleniumPhantomJS(Web, SeleniumUtils):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.driver_type = 'selenium_phantomjs'
self.default_service_args = self.driver_args.get('service_args', [])
self.driver_args['service_args'] = self.default_service_args
self.dcap = dict(webdriver.DesiredCapabilities.PHANTOMJS)
self.set_headers(self.current_headers, update=False)
self.set_proxy(self.current_proxy, update=False)
self._create_session()
# Headers Set/Get
def set_headers(self, headers, update=True):
logger.debug("Set phantomjs headers")
self.current_headers = headers
# Clear headers
self.dcap = dict(webdriver.DesiredCapabilities.PHANTOMJS)
for key, value in headers.items():
self.dcap['phantomjs.page.customHeaders.{}'.format(key)] = value
if update is True:
# Recreate webdriver with new header
self._update()
def get_headers(self):
# TODO: Try and get from phantom directly to be accurate
return self.current_headers
def update_headers(self, headers, update=True):
self.current_headers.update(headers)
self.set_headers(self.current_headers, update=update)
# Cookies Set/Get
def get_cookies(self):
return self.driver.get_cookies()
def set_cookies(self, cookies):
# TODO: Does not seem to actually set them correctly
self.driver.delete_all_cookies()
for cookie in cookies:
print(cookie)
self.driver.add_cookie({k: cookie[k] for k in ('name', 'value', 'path', 'expirationDate', 'expiry', 'domain') if k in cookie})
def update_cookies(self, cookies):
self.current_cookies.expand(cookies)
self.set_cookies(self.current_cookies)
# Proxy Set/Get
def get_proxy(self):
return self.current_proxy
# Session
def _create_session(self):
"""
Creates a fresh session with no/default headers and proxies
"""
logger.debug("Create new phantomjs web driver")
self.driver = webdriver.PhantomJS(desired_capabilities=self.dcap,
**self.driver_args)
self.set_cookies(self.current_cookies)
self.driver.set_window_size(1920, 1080)
def _update(self):
"""
Re create the web driver with the new proxy or header settings
"""
logger.debug("Update phantomjs web driver")
self.quit()
self._create_session()
def reset(self):
"""
Kills old session and creates a new one with no proxies or headers
"""
# Kill old connection
self.quit()
# Clear proxy data
self.driver_args['service_args'] = self.default_service_args
# Clear headers
self.dcap = dict(webdriver.DesiredCapabilities.PHANTOMJS)
# Create new web driver
self._create_session()
def quit(self):
"""
Generic function to close distroy and session data
"""
if self.driver is not None:
self.driver.quit()
self.driver = None
|
xtream1101/web-wrapper | web_wrapper/driver_selenium_phantomjs.py | DriverSeleniumPhantomJS._create_session | python | def _create_session(self):
logger.debug("Create new phantomjs web driver")
self.driver = webdriver.PhantomJS(desired_capabilities=self.dcap,
**self.driver_args)
self.set_cookies(self.current_cookies)
self.driver.set_window_size(1920, 1080) | Creates a fresh session with no/default headers and proxies | train | https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/driver_selenium_phantomjs.py#L92-L100 | [
"def set_cookies(self, cookies):\n # TODO: Does not seem to actually set them correctly\n self.driver.delete_all_cookies()\n for cookie in cookies:\n print(cookie)\n self.driver.add_cookie({k: cookie[k] for k in ('name', 'value', 'path', 'expirationDate', 'expiry', 'domain') if k in cookie})\n"
] | class DriverSeleniumPhantomJS(Web, SeleniumUtils):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.driver_type = 'selenium_phantomjs'
self.default_service_args = self.driver_args.get('service_args', [])
self.driver_args['service_args'] = self.default_service_args
self.dcap = dict(webdriver.DesiredCapabilities.PHANTOMJS)
self.set_headers(self.current_headers, update=False)
self.set_proxy(self.current_proxy, update=False)
self._create_session()
# Headers Set/Get
def set_headers(self, headers, update=True):
logger.debug("Set phantomjs headers")
self.current_headers = headers
# Clear headers
self.dcap = dict(webdriver.DesiredCapabilities.PHANTOMJS)
for key, value in headers.items():
self.dcap['phantomjs.page.customHeaders.{}'.format(key)] = value
if update is True:
# Recreate webdriver with new header
self._update()
def get_headers(self):
# TODO: Try and get from phantom directly to be accurate
return self.current_headers
def update_headers(self, headers, update=True):
self.current_headers.update(headers)
self.set_headers(self.current_headers, update=update)
# Cookies Set/Get
def get_cookies(self):
return self.driver.get_cookies()
def set_cookies(self, cookies):
# TODO: Does not seem to actually set them correctly
self.driver.delete_all_cookies()
for cookie in cookies:
print(cookie)
self.driver.add_cookie({k: cookie[k] for k in ('name', 'value', 'path', 'expirationDate', 'expiry', 'domain') if k in cookie})
def update_cookies(self, cookies):
self.current_cookies.expand(cookies)
self.set_cookies(self.current_cookies)
# Proxy Set/Get
def set_proxy(self, proxy, update=True):
"""
Set proxy for requests session
"""
update_web_driver = False
if self.current_proxy != proxy:
# Did we change proxies?
update_web_driver = True
self.current_proxy = proxy
if proxy is None:
self.driver_args['service_args'] = self.default_service_args
else:
proxy_parts = cutil.get_proxy_parts(proxy)
self.driver_args['service_args'].extend(['--proxy={host}:{port}'.format(**proxy_parts),
'--proxy-type={schema}'.format(**proxy_parts),
])
if proxy_parts.get('user') is not None:
self.driver_args['service_args'].append('--proxy-auth={user}:{password}'.format(**proxy_parts))
# Recreate webdriver with new proxy settings
if update is True and update_web_driver is True:
self._update()
def get_proxy(self):
return self.current_proxy
# Session
def _update(self):
"""
Re create the web driver with the new proxy or header settings
"""
logger.debug("Update phantomjs web driver")
self.quit()
self._create_session()
def reset(self):
"""
Kills old session and creates a new one with no proxies or headers
"""
# Kill old connection
self.quit()
# Clear proxy data
self.driver_args['service_args'] = self.default_service_args
# Clear headers
self.dcap = dict(webdriver.DesiredCapabilities.PHANTOMJS)
# Create new web driver
self._create_session()
def quit(self):
"""
Generic function to close distroy and session data
"""
if self.driver is not None:
self.driver.quit()
self.driver = None
|
xtream1101/web-wrapper | web_wrapper/driver_selenium_phantomjs.py | DriverSeleniumPhantomJS.reset | python | def reset(self):
# Kill old connection
self.quit()
# Clear proxy data
self.driver_args['service_args'] = self.default_service_args
# Clear headers
self.dcap = dict(webdriver.DesiredCapabilities.PHANTOMJS)
# Create new web driver
self._create_session() | Kills old session and creates a new one with no proxies or headers | train | https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/driver_selenium_phantomjs.py#L110-L121 | [
"def _create_session(self):\n \"\"\"\n Creates a fresh session with no/default headers and proxies\n \"\"\"\n logger.debug(\"Create new phantomjs web driver\")\n self.driver = webdriver.PhantomJS(desired_capabilities=self.dcap,\n **self.driver_args)\n self.set_cookies(self.current_cookies)\n self.driver.set_window_size(1920, 1080)\n",
"def quit(self):\n \"\"\"\n Generic function to close distroy and session data\n \"\"\"\n if self.driver is not None:\n self.driver.quit()\n self.driver = None\n"
] | class DriverSeleniumPhantomJS(Web, SeleniumUtils):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.driver_type = 'selenium_phantomjs'
self.default_service_args = self.driver_args.get('service_args', [])
self.driver_args['service_args'] = self.default_service_args
self.dcap = dict(webdriver.DesiredCapabilities.PHANTOMJS)
self.set_headers(self.current_headers, update=False)
self.set_proxy(self.current_proxy, update=False)
self._create_session()
# Headers Set/Get
def set_headers(self, headers, update=True):
logger.debug("Set phantomjs headers")
self.current_headers = headers
# Clear headers
self.dcap = dict(webdriver.DesiredCapabilities.PHANTOMJS)
for key, value in headers.items():
self.dcap['phantomjs.page.customHeaders.{}'.format(key)] = value
if update is True:
# Recreate webdriver with new header
self._update()
def get_headers(self):
# TODO: Try and get from phantom directly to be accurate
return self.current_headers
def update_headers(self, headers, update=True):
self.current_headers.update(headers)
self.set_headers(self.current_headers, update=update)
# Cookies Set/Get
def get_cookies(self):
return self.driver.get_cookies()
def set_cookies(self, cookies):
# TODO: Does not seem to actually set them correctly
self.driver.delete_all_cookies()
for cookie in cookies:
print(cookie)
self.driver.add_cookie({k: cookie[k] for k in ('name', 'value', 'path', 'expirationDate', 'expiry', 'domain') if k in cookie})
def update_cookies(self, cookies):
self.current_cookies.expand(cookies)
self.set_cookies(self.current_cookies)
# Proxy Set/Get
def set_proxy(self, proxy, update=True):
"""
Set proxy for requests session
"""
update_web_driver = False
if self.current_proxy != proxy:
# Did we change proxies?
update_web_driver = True
self.current_proxy = proxy
if proxy is None:
self.driver_args['service_args'] = self.default_service_args
else:
proxy_parts = cutil.get_proxy_parts(proxy)
self.driver_args['service_args'].extend(['--proxy={host}:{port}'.format(**proxy_parts),
'--proxy-type={schema}'.format(**proxy_parts),
])
if proxy_parts.get('user') is not None:
self.driver_args['service_args'].append('--proxy-auth={user}:{password}'.format(**proxy_parts))
# Recreate webdriver with new proxy settings
if update is True and update_web_driver is True:
self._update()
def get_proxy(self):
return self.current_proxy
# Session
def _create_session(self):
"""
Creates a fresh session with no/default headers and proxies
"""
logger.debug("Create new phantomjs web driver")
self.driver = webdriver.PhantomJS(desired_capabilities=self.dcap,
**self.driver_args)
self.set_cookies(self.current_cookies)
self.driver.set_window_size(1920, 1080)
def _update(self):
"""
Re create the web driver with the new proxy or header settings
"""
logger.debug("Update phantomjs web driver")
self.quit()
self._create_session()
def quit(self):
"""
Generic function to close distroy and session data
"""
if self.driver is not None:
self.driver.quit()
self.driver = None
|
xtream1101/web-wrapper | web_wrapper/driver_selenium_chrome.py | DriverSeleniumChrome.set_proxy | python | def set_proxy(self, proxy, update=True):
update_web_driver = False
if self.current_proxy != proxy:
# Did we change proxies?
update_web_driver = True
self.current_proxy = proxy
if proxy is None:
# TODO: Need to be able to remove a proxy if one is set
pass
else:
proxy_parts = cutil.get_proxy_parts(proxy)
if proxy_parts.get('user') is not None:
# Proxy has auth, create extension to add to driver
self.opts.add_extension(self._proxy_extension(proxy_parts))
else:
# Use the full proxy address passed in
self.opts.add_argument('--proxy-server={}'.format(proxy))
# Recreate webdriver with new proxy settings
if update_web_driver is True:
self._update() | Set proxy for chrome session | train | https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/driver_selenium_chrome.py#L46-L71 | [
"def _update(self):\n \"\"\"\n Re create the web driver with the new proxy or header settings\n \"\"\"\n logger.debug(\"Update chrome web driver\")\n self.quit()\n self._create_session()\n",
"def _proxy_extension(self, proxy_parts):\n \"\"\"\n Creates a chrome extension for the proxy\n Only need to be done this way when using a proxy with auth\n \"\"\"\n import zipfile\n manifest_json = \"\"\"\n {\n \"version\": \"1.0.0\",\n \"manifest_version\": 2,\n \"name\": \"Chrome Proxy\",\n \"permissions\": [\n \"proxy\",\n \"tabs\",\n \"unlimitedStorage\",\n \"storage\",\n \"<all_urls>\",\n \"webRequest\",\n \"webRequestBlocking\"\n ],\n \"background\": {\n \"scripts\": [\"background.js\"]\n },\n \"minimum_chrome_version\":\"22.0.0\"\n }\n \"\"\"\n\n background_js = \"\"\"\n var config = {{\n mode: \"fixed_servers\",\n rules: {{\n singleProxy: {{\n scheme: \"{schema}\",\n host: \"{host}\",\n port: parseInt({port})\n }},\n bypassList: []\n }}\n }};\n\n chrome.proxy.settings.set({{value: config, scope: \"regular\"}}, function() {{}});\n\n function callbackFn(details) {{\n return {{\n authCredentials: {{\n username: \"{user}\",\n password: \"{password}\"\n }}\n }};\n }}\n\n chrome.webRequest.onAuthRequired.addListener(\n callbackFn,\n {{urls: [\"<all_urls>\"]}},\n ['blocking']\n );\n \"\"\".format(**proxy_parts)\n\n plugin_file = 'proxy_auth_plugin.zip'\n with zipfile.ZipFile(plugin_file, 'w') as zp:\n zp.writestr(\"manifest.json\", manifest_json)\n zp.writestr(\"background.js\", background_js)\n\n return plugin_file\n"
] | class DriverSeleniumChrome(Web, SeleniumUtils):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.driver_type = 'selenium_chrome'
self.opts = webdriver.ChromeOptions()
self.update_headers(self.current_headers, update=False)
self.set_proxy(self.current_proxy, update=False)
self._create_session()
# Headers Set/Get
def set_headers(self, headers, update=True):
logger.debug("Set chrome headers")
self.current_headers = headers
# TODO: Remove any headrs that are no longer in the dict
# Clear headers?
# TODO
plugin_path = self._header_extension(add_or_modify_headers=self.current_headers)
self.opts.add_extension(plugin_path)
if update is True:
# Recreate webdriver with new header
self._update()
def get_headers(self):
# TODO: Try and get from chrome directly to be accurate
return self.current_headers
def update_headers(self, headers, update=True):
self.current_headers.update(headers)
self.set_headers(self.current_headers, update=True)
def _create_session(self):
"""
Creates a fresh session with no/default headers and proxies
"""
self.driver = webdriver.Chrome(chrome_options=self.opts, **self.driver_args)
self.driver.set_window_size(1920, 1080)
def _update(self):
"""
Re create the web driver with the new proxy or header settings
"""
logger.debug("Update chrome web driver")
self.quit()
self._create_session()
def reset(self):
"""
Kills old session and creates a new one with no proxies or headers
"""
# Kill old connection
self.quit()
# Clear chrome configs
self.opts = webdriver.ChromeOptions()
# Create new web driver
self._create_session()
def quit(self):
"""
Generic function to close distroy and session data
"""
if self.driver is not None:
self.driver.quit()
self.driver = None
##
# Chrome Utils
##
def _proxy_extension(self, proxy_parts):
"""
Creates a chrome extension for the proxy
Only need to be done this way when using a proxy with auth
"""
import zipfile
manifest_json = """
{
"version": "1.0.0",
"manifest_version": 2,
"name": "Chrome Proxy",
"permissions": [
"proxy",
"tabs",
"unlimitedStorage",
"storage",
"<all_urls>",
"webRequest",
"webRequestBlocking"
],
"background": {
"scripts": ["background.js"]
},
"minimum_chrome_version":"22.0.0"
}
"""
background_js = """
var config = {{
mode: "fixed_servers",
rules: {{
singleProxy: {{
scheme: "{schema}",
host: "{host}",
port: parseInt({port})
}},
bypassList: []
}}
}};
chrome.proxy.settings.set({{value: config, scope: "regular"}}, function() {{}});
function callbackFn(details) {{
return {{
authCredentials: {{
username: "{user}",
password: "{password}"
}}
}};
}}
chrome.webRequest.onAuthRequired.addListener(
callbackFn,
{{urls: ["<all_urls>"]}},
['blocking']
);
""".format(**proxy_parts)
plugin_file = 'proxy_auth_plugin.zip'
with zipfile.ZipFile(plugin_file, 'w') as zp:
zp.writestr("manifest.json", manifest_json)
zp.writestr("background.js", background_js)
return plugin_file
def _header_extension(self, remove_headers=[], add_or_modify_headers={}):
"""Create modheaders extension
Source: https://vimmaniac.com/blog/bangal/modify-and-add-custom-headers-in-selenium-chrome-driver/
kwargs:
remove_headers (list): headers name to remove
add_or_modify_headers (dict): ie. {"Header-Name": "Header Value"}
return str -> plugin path
"""
import string
import zipfile
plugin_file = 'custom_headers_plugin.zip'
if remove_headers is None:
remove_headers = []
if add_or_modify_headers is None:
add_or_modify_headers = {}
if isinstance(remove_headers, list) is False:
logger.error("remove_headers must be a list")
return None
if isinstance(add_or_modify_headers, dict) is False:
logger.error("add_or_modify_headers must be dict")
return None
# only keeping the unique headers key in remove_headers list
remove_headers = list(set(remove_headers))
manifest_json = """
{
"version": "1.0.0",
"manifest_version": 2,
"name": "Chrome HeaderModV",
"permissions": [
"webRequest",
"tabs",
"unlimitedStorage",
"storage",
"<all_urls>",
"webRequestBlocking"
],
"background": {
"scripts": ["background.js"]
},
"minimum_chrome_version":"22.0.0"
}
"""
background_js = string.Template("""
function callbackFn(details) {
var remove_headers = ${remove_headers};
var add_or_modify_headers = ${add_or_modify_headers};
function inarray(arr, obj) {
return (arr.indexOf(obj) != -1);
}
// remove headers
for (var i = 0; i < details.requestHeaders.length; ++i) {
if (inarray(remove_headers, details.requestHeaders[i].name)) {
details.requestHeaders.splice(i, 1);
var index = remove_headers.indexOf(5);
remove_headers.splice(index, 1);
}
if (!remove_headers.length) break;
}
// modify headers
for (var i = 0; i < details.requestHeaders.length; ++i) {
if (add_or_modify_headers.hasOwnProperty(details.requestHeaders[i].name)) {
details.requestHeaders[i].value = add_or_modify_headers[details.requestHeaders[i].name];
delete add_or_modify_headers[details.requestHeaders[i].name];
}
}
// add modify
for (var prop in add_or_modify_headers) {
details.requestHeaders.push(
{name: prop, value: add_or_modify_headers[prop]}
);
}
return {requestHeaders: details.requestHeaders};
}
chrome.webRequest.onBeforeSendHeaders.addListener(
callbackFn,
{urls: ["<all_urls>"]},
['blocking', 'requestHeaders']
);
"""
).substitute(remove_headers=remove_headers,
add_or_modify_headers=add_or_modify_headers,
)
with zipfile.ZipFile(plugin_file, 'w') as zp:
zp.writestr("manifest.json", manifest_json)
zp.writestr("background.js", background_js)
return plugin_file
|
xtream1101/web-wrapper | web_wrapper/driver_selenium_chrome.py | DriverSeleniumChrome._create_session | python | def _create_session(self):
self.driver = webdriver.Chrome(chrome_options=self.opts, **self.driver_args)
self.driver.set_window_size(1920, 1080) | Creates a fresh session with no/default headers and proxies | train | https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/driver_selenium_chrome.py#L73-L78 | null | class DriverSeleniumChrome(Web, SeleniumUtils):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.driver_type = 'selenium_chrome'
self.opts = webdriver.ChromeOptions()
self.update_headers(self.current_headers, update=False)
self.set_proxy(self.current_proxy, update=False)
self._create_session()
# Headers Set/Get
def set_headers(self, headers, update=True):
logger.debug("Set chrome headers")
self.current_headers = headers
# TODO: Remove any headrs that are no longer in the dict
# Clear headers?
# TODO
plugin_path = self._header_extension(add_or_modify_headers=self.current_headers)
self.opts.add_extension(plugin_path)
if update is True:
# Recreate webdriver with new header
self._update()
def get_headers(self):
# TODO: Try and get from chrome directly to be accurate
return self.current_headers
def update_headers(self, headers, update=True):
self.current_headers.update(headers)
self.set_headers(self.current_headers, update=True)
def set_proxy(self, proxy, update=True):
"""
Set proxy for chrome session
"""
update_web_driver = False
if self.current_proxy != proxy:
# Did we change proxies?
update_web_driver = True
self.current_proxy = proxy
if proxy is None:
# TODO: Need to be able to remove a proxy if one is set
pass
else:
proxy_parts = cutil.get_proxy_parts(proxy)
if proxy_parts.get('user') is not None:
# Proxy has auth, create extension to add to driver
self.opts.add_extension(self._proxy_extension(proxy_parts))
else:
# Use the full proxy address passed in
self.opts.add_argument('--proxy-server={}'.format(proxy))
# Recreate webdriver with new proxy settings
if update_web_driver is True:
self._update()
def _update(self):
"""
Re create the web driver with the new proxy or header settings
"""
logger.debug("Update chrome web driver")
self.quit()
self._create_session()
def reset(self):
"""
Kills old session and creates a new one with no proxies or headers
"""
# Kill old connection
self.quit()
# Clear chrome configs
self.opts = webdriver.ChromeOptions()
# Create new web driver
self._create_session()
def quit(self):
"""
Generic function to close distroy and session data
"""
if self.driver is not None:
self.driver.quit()
self.driver = None
##
# Chrome Utils
##
def _proxy_extension(self, proxy_parts):
"""
Creates a chrome extension for the proxy
Only need to be done this way when using a proxy with auth
"""
import zipfile
manifest_json = """
{
"version": "1.0.0",
"manifest_version": 2,
"name": "Chrome Proxy",
"permissions": [
"proxy",
"tabs",
"unlimitedStorage",
"storage",
"<all_urls>",
"webRequest",
"webRequestBlocking"
],
"background": {
"scripts": ["background.js"]
},
"minimum_chrome_version":"22.0.0"
}
"""
background_js = """
var config = {{
mode: "fixed_servers",
rules: {{
singleProxy: {{
scheme: "{schema}",
host: "{host}",
port: parseInt({port})
}},
bypassList: []
}}
}};
chrome.proxy.settings.set({{value: config, scope: "regular"}}, function() {{}});
function callbackFn(details) {{
return {{
authCredentials: {{
username: "{user}",
password: "{password}"
}}
}};
}}
chrome.webRequest.onAuthRequired.addListener(
callbackFn,
{{urls: ["<all_urls>"]}},
['blocking']
);
""".format(**proxy_parts)
plugin_file = 'proxy_auth_plugin.zip'
with zipfile.ZipFile(plugin_file, 'w') as zp:
zp.writestr("manifest.json", manifest_json)
zp.writestr("background.js", background_js)
return plugin_file
def _header_extension(self, remove_headers=[], add_or_modify_headers={}):
"""Create modheaders extension
Source: https://vimmaniac.com/blog/bangal/modify-and-add-custom-headers-in-selenium-chrome-driver/
kwargs:
remove_headers (list): headers name to remove
add_or_modify_headers (dict): ie. {"Header-Name": "Header Value"}
return str -> plugin path
"""
import string
import zipfile
plugin_file = 'custom_headers_plugin.zip'
if remove_headers is None:
remove_headers = []
if add_or_modify_headers is None:
add_or_modify_headers = {}
if isinstance(remove_headers, list) is False:
logger.error("remove_headers must be a list")
return None
if isinstance(add_or_modify_headers, dict) is False:
logger.error("add_or_modify_headers must be dict")
return None
# only keeping the unique headers key in remove_headers list
remove_headers = list(set(remove_headers))
manifest_json = """
{
"version": "1.0.0",
"manifest_version": 2,
"name": "Chrome HeaderModV",
"permissions": [
"webRequest",
"tabs",
"unlimitedStorage",
"storage",
"<all_urls>",
"webRequestBlocking"
],
"background": {
"scripts": ["background.js"]
},
"minimum_chrome_version":"22.0.0"
}
"""
background_js = string.Template("""
function callbackFn(details) {
var remove_headers = ${remove_headers};
var add_or_modify_headers = ${add_or_modify_headers};
function inarray(arr, obj) {
return (arr.indexOf(obj) != -1);
}
// remove headers
for (var i = 0; i < details.requestHeaders.length; ++i) {
if (inarray(remove_headers, details.requestHeaders[i].name)) {
details.requestHeaders.splice(i, 1);
var index = remove_headers.indexOf(5);
remove_headers.splice(index, 1);
}
if (!remove_headers.length) break;
}
// modify headers
for (var i = 0; i < details.requestHeaders.length; ++i) {
if (add_or_modify_headers.hasOwnProperty(details.requestHeaders[i].name)) {
details.requestHeaders[i].value = add_or_modify_headers[details.requestHeaders[i].name];
delete add_or_modify_headers[details.requestHeaders[i].name];
}
}
// add modify
for (var prop in add_or_modify_headers) {
details.requestHeaders.push(
{name: prop, value: add_or_modify_headers[prop]}
);
}
return {requestHeaders: details.requestHeaders};
}
chrome.webRequest.onBeforeSendHeaders.addListener(
callbackFn,
{urls: ["<all_urls>"]},
['blocking', 'requestHeaders']
);
"""
).substitute(remove_headers=remove_headers,
add_or_modify_headers=add_or_modify_headers,
)
with zipfile.ZipFile(plugin_file, 'w') as zp:
zp.writestr("manifest.json", manifest_json)
zp.writestr("background.js", background_js)
return plugin_file
|
xtream1101/web-wrapper | web_wrapper/driver_selenium_chrome.py | DriverSeleniumChrome.reset | python | def reset(self):
# Kill old connection
self.quit()
# Clear chrome configs
self.opts = webdriver.ChromeOptions()
# Create new web driver
self._create_session() | Kills old session and creates a new one with no proxies or headers | train | https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/driver_selenium_chrome.py#L88-L97 | [
"def _create_session(self):\n \"\"\"\n Creates a fresh session with no/default headers and proxies\n \"\"\"\n self.driver = webdriver.Chrome(chrome_options=self.opts, **self.driver_args)\n self.driver.set_window_size(1920, 1080)\n",
"def quit(self):\n \"\"\"\n Generic function to close distroy and session data\n \"\"\"\n if self.driver is not None:\n self.driver.quit()\n self.driver = None\n"
] | class DriverSeleniumChrome(Web, SeleniumUtils):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.driver_type = 'selenium_chrome'
self.opts = webdriver.ChromeOptions()
self.update_headers(self.current_headers, update=False)
self.set_proxy(self.current_proxy, update=False)
self._create_session()
# Headers Set/Get
def set_headers(self, headers, update=True):
logger.debug("Set chrome headers")
self.current_headers = headers
# TODO: Remove any headrs that are no longer in the dict
# Clear headers?
# TODO
plugin_path = self._header_extension(add_or_modify_headers=self.current_headers)
self.opts.add_extension(plugin_path)
if update is True:
# Recreate webdriver with new header
self._update()
def get_headers(self):
# TODO: Try and get from chrome directly to be accurate
return self.current_headers
def update_headers(self, headers, update=True):
self.current_headers.update(headers)
self.set_headers(self.current_headers, update=True)
def set_proxy(self, proxy, update=True):
"""
Set proxy for chrome session
"""
update_web_driver = False
if self.current_proxy != proxy:
# Did we change proxies?
update_web_driver = True
self.current_proxy = proxy
if proxy is None:
# TODO: Need to be able to remove a proxy if one is set
pass
else:
proxy_parts = cutil.get_proxy_parts(proxy)
if proxy_parts.get('user') is not None:
# Proxy has auth, create extension to add to driver
self.opts.add_extension(self._proxy_extension(proxy_parts))
else:
# Use the full proxy address passed in
self.opts.add_argument('--proxy-server={}'.format(proxy))
# Recreate webdriver with new proxy settings
if update_web_driver is True:
self._update()
def _create_session(self):
"""
Creates a fresh session with no/default headers and proxies
"""
self.driver = webdriver.Chrome(chrome_options=self.opts, **self.driver_args)
self.driver.set_window_size(1920, 1080)
def _update(self):
"""
Re create the web driver with the new proxy or header settings
"""
logger.debug("Update chrome web driver")
self.quit()
self._create_session()
def quit(self):
"""
Generic function to close distroy and session data
"""
if self.driver is not None:
self.driver.quit()
self.driver = None
##
# Chrome Utils
##
def _proxy_extension(self, proxy_parts):
"""
Creates a chrome extension for the proxy
Only need to be done this way when using a proxy with auth
"""
import zipfile
manifest_json = """
{
"version": "1.0.0",
"manifest_version": 2,
"name": "Chrome Proxy",
"permissions": [
"proxy",
"tabs",
"unlimitedStorage",
"storage",
"<all_urls>",
"webRequest",
"webRequestBlocking"
],
"background": {
"scripts": ["background.js"]
},
"minimum_chrome_version":"22.0.0"
}
"""
background_js = """
var config = {{
mode: "fixed_servers",
rules: {{
singleProxy: {{
scheme: "{schema}",
host: "{host}",
port: parseInt({port})
}},
bypassList: []
}}
}};
chrome.proxy.settings.set({{value: config, scope: "regular"}}, function() {{}});
function callbackFn(details) {{
return {{
authCredentials: {{
username: "{user}",
password: "{password}"
}}
}};
}}
chrome.webRequest.onAuthRequired.addListener(
callbackFn,
{{urls: ["<all_urls>"]}},
['blocking']
);
""".format(**proxy_parts)
plugin_file = 'proxy_auth_plugin.zip'
with zipfile.ZipFile(plugin_file, 'w') as zp:
zp.writestr("manifest.json", manifest_json)
zp.writestr("background.js", background_js)
return plugin_file
def _header_extension(self, remove_headers=[], add_or_modify_headers={}):
"""Create modheaders extension
Source: https://vimmaniac.com/blog/bangal/modify-and-add-custom-headers-in-selenium-chrome-driver/
kwargs:
remove_headers (list): headers name to remove
add_or_modify_headers (dict): ie. {"Header-Name": "Header Value"}
return str -> plugin path
"""
import string
import zipfile
plugin_file = 'custom_headers_plugin.zip'
if remove_headers is None:
remove_headers = []
if add_or_modify_headers is None:
add_or_modify_headers = {}
if isinstance(remove_headers, list) is False:
logger.error("remove_headers must be a list")
return None
if isinstance(add_or_modify_headers, dict) is False:
logger.error("add_or_modify_headers must be dict")
return None
# only keeping the unique headers key in remove_headers list
remove_headers = list(set(remove_headers))
manifest_json = """
{
"version": "1.0.0",
"manifest_version": 2,
"name": "Chrome HeaderModV",
"permissions": [
"webRequest",
"tabs",
"unlimitedStorage",
"storage",
"<all_urls>",
"webRequestBlocking"
],
"background": {
"scripts": ["background.js"]
},
"minimum_chrome_version":"22.0.0"
}
"""
background_js = string.Template("""
function callbackFn(details) {
var remove_headers = ${remove_headers};
var add_or_modify_headers = ${add_or_modify_headers};
function inarray(arr, obj) {
return (arr.indexOf(obj) != -1);
}
// remove headers
for (var i = 0; i < details.requestHeaders.length; ++i) {
if (inarray(remove_headers, details.requestHeaders[i].name)) {
details.requestHeaders.splice(i, 1);
var index = remove_headers.indexOf(5);
remove_headers.splice(index, 1);
}
if (!remove_headers.length) break;
}
// modify headers
for (var i = 0; i < details.requestHeaders.length; ++i) {
if (add_or_modify_headers.hasOwnProperty(details.requestHeaders[i].name)) {
details.requestHeaders[i].value = add_or_modify_headers[details.requestHeaders[i].name];
delete add_or_modify_headers[details.requestHeaders[i].name];
}
}
// add modify
for (var prop in add_or_modify_headers) {
details.requestHeaders.push(
{name: prop, value: add_or_modify_headers[prop]}
);
}
return {requestHeaders: details.requestHeaders};
}
chrome.webRequest.onBeforeSendHeaders.addListener(
callbackFn,
{urls: ["<all_urls>"]},
['blocking', 'requestHeaders']
);
"""
).substitute(remove_headers=remove_headers,
add_or_modify_headers=add_or_modify_headers,
)
with zipfile.ZipFile(plugin_file, 'w') as zp:
zp.writestr("manifest.json", manifest_json)
zp.writestr("background.js", background_js)
return plugin_file
|
xtream1101/web-wrapper | web_wrapper/driver_selenium_chrome.py | DriverSeleniumChrome._proxy_extension | python | def _proxy_extension(self, proxy_parts):
import zipfile
manifest_json = """
{
"version": "1.0.0",
"manifest_version": 2,
"name": "Chrome Proxy",
"permissions": [
"proxy",
"tabs",
"unlimitedStorage",
"storage",
"<all_urls>",
"webRequest",
"webRequestBlocking"
],
"background": {
"scripts": ["background.js"]
},
"minimum_chrome_version":"22.0.0"
}
"""
background_js = """
var config = {{
mode: "fixed_servers",
rules: {{
singleProxy: {{
scheme: "{schema}",
host: "{host}",
port: parseInt({port})
}},
bypassList: []
}}
}};
chrome.proxy.settings.set({{value: config, scope: "regular"}}, function() {{}});
function callbackFn(details) {{
return {{
authCredentials: {{
username: "{user}",
password: "{password}"
}}
}};
}}
chrome.webRequest.onAuthRequired.addListener(
callbackFn,
{{urls: ["<all_urls>"]}},
['blocking']
);
""".format(**proxy_parts)
plugin_file = 'proxy_auth_plugin.zip'
with zipfile.ZipFile(plugin_file, 'w') as zp:
zp.writestr("manifest.json", manifest_json)
zp.writestr("background.js", background_js)
return plugin_file | Creates a chrome extension for the proxy
Only need to be done this way when using a proxy with auth | train | https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/driver_selenium_chrome.py#L110-L173 | null | class DriverSeleniumChrome(Web, SeleniumUtils):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.driver_type = 'selenium_chrome'
self.opts = webdriver.ChromeOptions()
self.update_headers(self.current_headers, update=False)
self.set_proxy(self.current_proxy, update=False)
self._create_session()
# Headers Set/Get
def set_headers(self, headers, update=True):
logger.debug("Set chrome headers")
self.current_headers = headers
# TODO: Remove any headrs that are no longer in the dict
# Clear headers?
# TODO
plugin_path = self._header_extension(add_or_modify_headers=self.current_headers)
self.opts.add_extension(plugin_path)
if update is True:
# Recreate webdriver with new header
self._update()
def get_headers(self):
# TODO: Try and get from chrome directly to be accurate
return self.current_headers
def update_headers(self, headers, update=True):
self.current_headers.update(headers)
self.set_headers(self.current_headers, update=True)
def set_proxy(self, proxy, update=True):
"""
Set proxy for chrome session
"""
update_web_driver = False
if self.current_proxy != proxy:
# Did we change proxies?
update_web_driver = True
self.current_proxy = proxy
if proxy is None:
# TODO: Need to be able to remove a proxy if one is set
pass
else:
proxy_parts = cutil.get_proxy_parts(proxy)
if proxy_parts.get('user') is not None:
# Proxy has auth, create extension to add to driver
self.opts.add_extension(self._proxy_extension(proxy_parts))
else:
# Use the full proxy address passed in
self.opts.add_argument('--proxy-server={}'.format(proxy))
# Recreate webdriver with new proxy settings
if update_web_driver is True:
self._update()
def _create_session(self):
"""
Creates a fresh session with no/default headers and proxies
"""
self.driver = webdriver.Chrome(chrome_options=self.opts, **self.driver_args)
self.driver.set_window_size(1920, 1080)
def _update(self):
"""
Re create the web driver with the new proxy or header settings
"""
logger.debug("Update chrome web driver")
self.quit()
self._create_session()
def reset(self):
"""
Kills old session and creates a new one with no proxies or headers
"""
# Kill old connection
self.quit()
# Clear chrome configs
self.opts = webdriver.ChromeOptions()
# Create new web driver
self._create_session()
def quit(self):
"""
Generic function to close distroy and session data
"""
if self.driver is not None:
self.driver.quit()
self.driver = None
##
# Chrome Utils
##
def _header_extension(self, remove_headers=[], add_or_modify_headers={}):
"""Create modheaders extension
Source: https://vimmaniac.com/blog/bangal/modify-and-add-custom-headers-in-selenium-chrome-driver/
kwargs:
remove_headers (list): headers name to remove
add_or_modify_headers (dict): ie. {"Header-Name": "Header Value"}
return str -> plugin path
"""
import string
import zipfile
plugin_file = 'custom_headers_plugin.zip'
if remove_headers is None:
remove_headers = []
if add_or_modify_headers is None:
add_or_modify_headers = {}
if isinstance(remove_headers, list) is False:
logger.error("remove_headers must be a list")
return None
if isinstance(add_or_modify_headers, dict) is False:
logger.error("add_or_modify_headers must be dict")
return None
# only keeping the unique headers key in remove_headers list
remove_headers = list(set(remove_headers))
manifest_json = """
{
"version": "1.0.0",
"manifest_version": 2,
"name": "Chrome HeaderModV",
"permissions": [
"webRequest",
"tabs",
"unlimitedStorage",
"storage",
"<all_urls>",
"webRequestBlocking"
],
"background": {
"scripts": ["background.js"]
},
"minimum_chrome_version":"22.0.0"
}
"""
background_js = string.Template("""
function callbackFn(details) {
var remove_headers = ${remove_headers};
var add_or_modify_headers = ${add_or_modify_headers};
function inarray(arr, obj) {
return (arr.indexOf(obj) != -1);
}
// remove headers
for (var i = 0; i < details.requestHeaders.length; ++i) {
if (inarray(remove_headers, details.requestHeaders[i].name)) {
details.requestHeaders.splice(i, 1);
var index = remove_headers.indexOf(5);
remove_headers.splice(index, 1);
}
if (!remove_headers.length) break;
}
// modify headers
for (var i = 0; i < details.requestHeaders.length; ++i) {
if (add_or_modify_headers.hasOwnProperty(details.requestHeaders[i].name)) {
details.requestHeaders[i].value = add_or_modify_headers[details.requestHeaders[i].name];
delete add_or_modify_headers[details.requestHeaders[i].name];
}
}
// add modify
for (var prop in add_or_modify_headers) {
details.requestHeaders.push(
{name: prop, value: add_or_modify_headers[prop]}
);
}
return {requestHeaders: details.requestHeaders};
}
chrome.webRequest.onBeforeSendHeaders.addListener(
callbackFn,
{urls: ["<all_urls>"]},
['blocking', 'requestHeaders']
);
"""
).substitute(remove_headers=remove_headers,
add_or_modify_headers=add_or_modify_headers,
)
with zipfile.ZipFile(plugin_file, 'w') as zp:
zp.writestr("manifest.json", manifest_json)
zp.writestr("background.js", background_js)
return plugin_file
|
xtream1101/web-wrapper | web_wrapper/driver_selenium_chrome.py | DriverSeleniumChrome._header_extension | python | def _header_extension(self, remove_headers=[], add_or_modify_headers={}):
import string
import zipfile
plugin_file = 'custom_headers_plugin.zip'
if remove_headers is None:
remove_headers = []
if add_or_modify_headers is None:
add_or_modify_headers = {}
if isinstance(remove_headers, list) is False:
logger.error("remove_headers must be a list")
return None
if isinstance(add_or_modify_headers, dict) is False:
logger.error("add_or_modify_headers must be dict")
return None
# only keeping the unique headers key in remove_headers list
remove_headers = list(set(remove_headers))
manifest_json = """
{
"version": "1.0.0",
"manifest_version": 2,
"name": "Chrome HeaderModV",
"permissions": [
"webRequest",
"tabs",
"unlimitedStorage",
"storage",
"<all_urls>",
"webRequestBlocking"
],
"background": {
"scripts": ["background.js"]
},
"minimum_chrome_version":"22.0.0"
}
"""
background_js = string.Template("""
function callbackFn(details) {
var remove_headers = ${remove_headers};
var add_or_modify_headers = ${add_or_modify_headers};
function inarray(arr, obj) {
return (arr.indexOf(obj) != -1);
}
// remove headers
for (var i = 0; i < details.requestHeaders.length; ++i) {
if (inarray(remove_headers, details.requestHeaders[i].name)) {
details.requestHeaders.splice(i, 1);
var index = remove_headers.indexOf(5);
remove_headers.splice(index, 1);
}
if (!remove_headers.length) break;
}
// modify headers
for (var i = 0; i < details.requestHeaders.length; ++i) {
if (add_or_modify_headers.hasOwnProperty(details.requestHeaders[i].name)) {
details.requestHeaders[i].value = add_or_modify_headers[details.requestHeaders[i].name];
delete add_or_modify_headers[details.requestHeaders[i].name];
}
}
// add modify
for (var prop in add_or_modify_headers) {
details.requestHeaders.push(
{name: prop, value: add_or_modify_headers[prop]}
);
}
return {requestHeaders: details.requestHeaders};
}
chrome.webRequest.onBeforeSendHeaders.addListener(
callbackFn,
{urls: ["<all_urls>"]},
['blocking', 'requestHeaders']
);
"""
).substitute(remove_headers=remove_headers,
add_or_modify_headers=add_or_modify_headers,
)
with zipfile.ZipFile(plugin_file, 'w') as zp:
zp.writestr("manifest.json", manifest_json)
zp.writestr("background.js", background_js)
return plugin_file | Create modheaders extension
Source: https://vimmaniac.com/blog/bangal/modify-and-add-custom-headers-in-selenium-chrome-driver/
kwargs:
remove_headers (list): headers name to remove
add_or_modify_headers (dict): ie. {"Header-Name": "Header Value"}
return str -> plugin path | train | https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/driver_selenium_chrome.py#L175-L279 | null | class DriverSeleniumChrome(Web, SeleniumUtils):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.driver_type = 'selenium_chrome'
self.opts = webdriver.ChromeOptions()
self.update_headers(self.current_headers, update=False)
self.set_proxy(self.current_proxy, update=False)
self._create_session()
# Headers Set/Get
def set_headers(self, headers, update=True):
logger.debug("Set chrome headers")
self.current_headers = headers
# TODO: Remove any headrs that are no longer in the dict
# Clear headers?
# TODO
plugin_path = self._header_extension(add_or_modify_headers=self.current_headers)
self.opts.add_extension(plugin_path)
if update is True:
# Recreate webdriver with new header
self._update()
def get_headers(self):
# TODO: Try and get from chrome directly to be accurate
return self.current_headers
def update_headers(self, headers, update=True):
self.current_headers.update(headers)
self.set_headers(self.current_headers, update=True)
def set_proxy(self, proxy, update=True):
"""
Set proxy for chrome session
"""
update_web_driver = False
if self.current_proxy != proxy:
# Did we change proxies?
update_web_driver = True
self.current_proxy = proxy
if proxy is None:
# TODO: Need to be able to remove a proxy if one is set
pass
else:
proxy_parts = cutil.get_proxy_parts(proxy)
if proxy_parts.get('user') is not None:
# Proxy has auth, create extension to add to driver
self.opts.add_extension(self._proxy_extension(proxy_parts))
else:
# Use the full proxy address passed in
self.opts.add_argument('--proxy-server={}'.format(proxy))
# Recreate webdriver with new proxy settings
if update_web_driver is True:
self._update()
def _create_session(self):
"""
Creates a fresh session with no/default headers and proxies
"""
self.driver = webdriver.Chrome(chrome_options=self.opts, **self.driver_args)
self.driver.set_window_size(1920, 1080)
def _update(self):
"""
Re create the web driver with the new proxy or header settings
"""
logger.debug("Update chrome web driver")
self.quit()
self._create_session()
def reset(self):
"""
Kills old session and creates a new one with no proxies or headers
"""
# Kill old connection
self.quit()
# Clear chrome configs
self.opts = webdriver.ChromeOptions()
# Create new web driver
self._create_session()
def quit(self):
"""
Generic function to close distroy and session data
"""
if self.driver is not None:
self.driver.quit()
self.driver = None
##
# Chrome Utils
##
def _proxy_extension(self, proxy_parts):
"""
Creates a chrome extension for the proxy
Only need to be done this way when using a proxy with auth
"""
import zipfile
manifest_json = """
{
"version": "1.0.0",
"manifest_version": 2,
"name": "Chrome Proxy",
"permissions": [
"proxy",
"tabs",
"unlimitedStorage",
"storage",
"<all_urls>",
"webRequest",
"webRequestBlocking"
],
"background": {
"scripts": ["background.js"]
},
"minimum_chrome_version":"22.0.0"
}
"""
background_js = """
var config = {{
mode: "fixed_servers",
rules: {{
singleProxy: {{
scheme: "{schema}",
host: "{host}",
port: parseInt({port})
}},
bypassList: []
}}
}};
chrome.proxy.settings.set({{value: config, scope: "regular"}}, function() {{}});
function callbackFn(details) {{
return {{
authCredentials: {{
username: "{user}",
password: "{password}"
}}
}};
}}
chrome.webRequest.onAuthRequired.addListener(
callbackFn,
{{urls: ["<all_urls>"]}},
['blocking']
);
""".format(**proxy_parts)
plugin_file = 'proxy_auth_plugin.zip'
with zipfile.ZipFile(plugin_file, 'w') as zp:
zp.writestr("manifest.json", manifest_json)
zp.writestr("background.js", background_js)
return plugin_file
|
xtream1101/web-wrapper | web_wrapper/driver_requests.py | DriverRequests.set_proxy | python | def set_proxy(self, proxy):
# TODO: Validate proxy url format
if proxy is None:
self.driver.proxies = {'http': None,
'https': None
}
else:
self.driver.proxies = {'http': proxy,
'https': proxy
}
self.current_proxy = proxy | Set proxy for requests session | train | https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/driver_requests.py#L54-L69 | null | class DriverRequests(Web):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.driver_type = 'requests'
self._create_session()
# Headers Set/Get
def get_headers(self):
return self.driver.headers
def set_headers(self, headers):
self.driver.headers = headers
def update_headers(self, headers):
self.driver.headers.update(headers)
# Cookies Set/Get
def get_cookies(self):
return self.driver.cookies.get_dict()
def _clean_cookies(self, cookies):
clean_cookies = []
if isinstance(cookies, dict) is True:
cookies = [cookies]
for cookie in cookies:
if 'name' in cookie and 'value' in cookie:
clean_cookies.append({cookie['name']: cookie['value']})
else:
name = list(cookie.keys())[0]
clean_cookies.append({name: cookie[name]})
return clean_cookies
def set_cookies(self, cookies):
self.driver.cookies = self._clean_cookies(cookies)
def update_cookies(self, cookies):
for cookie in self._clean_cookies(cookies):
self.driver.cookies.update(cookie)
# Proxy Set/Get
def get_proxy(self):
return self.current_proxy
# Session
def _create_session(self):
"""
Creates a fresh session with the default header (random UA)
"""
self.driver = requests.Session(**self.driver_args)
# Set default headers
self.update_headers(self.current_headers)
self.update_cookies(self.current_cookies)
self.set_proxy(self.current_proxy)
def reset(self):
"""
Kills old session and creates a new one with the default headers
"""
self.driver = None
self._create_session()
def quit(self):
"""
Generic function to close distroy and session data
"""
self.driver = None
# Actions
def _get_site(self, url, headers, cookies, timeout, driver_args, driver_kwargs):
"""
Try and return page content in the requested format using requests
"""
try:
# Headers and cookies are combined to the ones stored in the requests session
# Ones passed in here will override the ones in the session if they are the same key
response = self.driver.get(url,
*driver_args,
headers=headers,
cookies=cookies,
timeout=timeout,
**driver_kwargs)
# Set data to access from script
self.status_code = response.status_code
self.url = response.url
self.response = response
if response.status_code == requests.codes.ok:
# Return the correct format
return response.text
response.raise_for_status()
except Exception as e:
raise e.with_traceback(sys.exc_info()[2])
|
xtream1101/web-wrapper | web_wrapper/driver_requests.py | DriverRequests._create_session | python | def _create_session(self):
self.driver = requests.Session(**self.driver_args)
# Set default headers
self.update_headers(self.current_headers)
self.update_cookies(self.current_cookies)
self.set_proxy(self.current_proxy) | Creates a fresh session with the default header (random UA) | train | https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/driver_requests.py#L75-L83 | [
"def update_headers(self, headers):\n self.driver.headers.update(headers)\n",
"def update_cookies(self, cookies):\n for cookie in self._clean_cookies(cookies):\n self.driver.cookies.update(cookie)\n",
"def set_proxy(self, proxy):\n \"\"\"\n Set proxy for requests session\n \"\"\"\n # TODO: Validate proxy url format\n\n if proxy is None:\n self.driver.proxies = {'http': None,\n 'https': None\n }\n else:\n self.driver.proxies = {'http': proxy,\n 'https': proxy\n }\n\n self.current_proxy = proxy\n"
] | class DriverRequests(Web):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.driver_type = 'requests'
self._create_session()
# Headers Set/Get
def get_headers(self):
return self.driver.headers
def set_headers(self, headers):
self.driver.headers = headers
def update_headers(self, headers):
self.driver.headers.update(headers)
# Cookies Set/Get
def get_cookies(self):
return self.driver.cookies.get_dict()
def _clean_cookies(self, cookies):
clean_cookies = []
if isinstance(cookies, dict) is True:
cookies = [cookies]
for cookie in cookies:
if 'name' in cookie and 'value' in cookie:
clean_cookies.append({cookie['name']: cookie['value']})
else:
name = list(cookie.keys())[0]
clean_cookies.append({name: cookie[name]})
return clean_cookies
def set_cookies(self, cookies):
self.driver.cookies = self._clean_cookies(cookies)
def update_cookies(self, cookies):
for cookie in self._clean_cookies(cookies):
self.driver.cookies.update(cookie)
# Proxy Set/Get
def set_proxy(self, proxy):
"""
Set proxy for requests session
"""
# TODO: Validate proxy url format
if proxy is None:
self.driver.proxies = {'http': None,
'https': None
}
else:
self.driver.proxies = {'http': proxy,
'https': proxy
}
self.current_proxy = proxy
def get_proxy(self):
return self.current_proxy
# Session
def reset(self):
"""
Kills old session and creates a new one with the default headers
"""
self.driver = None
self._create_session()
def quit(self):
"""
Generic function to close distroy and session data
"""
self.driver = None
# Actions
def _get_site(self, url, headers, cookies, timeout, driver_args, driver_kwargs):
"""
Try and return page content in the requested format using requests
"""
try:
# Headers and cookies are combined to the ones stored in the requests session
# Ones passed in here will override the ones in the session if they are the same key
response = self.driver.get(url,
*driver_args,
headers=headers,
cookies=cookies,
timeout=timeout,
**driver_kwargs)
# Set data to access from script
self.status_code = response.status_code
self.url = response.url
self.response = response
if response.status_code == requests.codes.ok:
# Return the correct format
return response.text
response.raise_for_status()
except Exception as e:
raise e.with_traceback(sys.exc_info()[2])
|
xtream1101/web-wrapper | web_wrapper/driver_requests.py | DriverRequests._get_site | python | def _get_site(self, url, headers, cookies, timeout, driver_args, driver_kwargs):
try:
# Headers and cookies are combined to the ones stored in the requests session
# Ones passed in here will override the ones in the session if they are the same key
response = self.driver.get(url,
*driver_args,
headers=headers,
cookies=cookies,
timeout=timeout,
**driver_kwargs)
# Set data to access from script
self.status_code = response.status_code
self.url = response.url
self.response = response
if response.status_code == requests.codes.ok:
# Return the correct format
return response.text
response.raise_for_status()
except Exception as e:
raise e.with_traceback(sys.exc_info()[2]) | Try and return page content in the requested format using requests | train | https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/driver_requests.py#L99-L125 | null | class DriverRequests(Web):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.driver_type = 'requests'
self._create_session()
# Headers Set/Get
def get_headers(self):
return self.driver.headers
def set_headers(self, headers):
self.driver.headers = headers
def update_headers(self, headers):
self.driver.headers.update(headers)
# Cookies Set/Get
def get_cookies(self):
return self.driver.cookies.get_dict()
def _clean_cookies(self, cookies):
clean_cookies = []
if isinstance(cookies, dict) is True:
cookies = [cookies]
for cookie in cookies:
if 'name' in cookie and 'value' in cookie:
clean_cookies.append({cookie['name']: cookie['value']})
else:
name = list(cookie.keys())[0]
clean_cookies.append({name: cookie[name]})
return clean_cookies
def set_cookies(self, cookies):
self.driver.cookies = self._clean_cookies(cookies)
def update_cookies(self, cookies):
for cookie in self._clean_cookies(cookies):
self.driver.cookies.update(cookie)
# Proxy Set/Get
def set_proxy(self, proxy):
"""
Set proxy for requests session
"""
# TODO: Validate proxy url format
if proxy is None:
self.driver.proxies = {'http': None,
'https': None
}
else:
self.driver.proxies = {'http': proxy,
'https': proxy
}
self.current_proxy = proxy
def get_proxy(self):
return self.current_proxy
# Session
def _create_session(self):
"""
Creates a fresh session with the default header (random UA)
"""
self.driver = requests.Session(**self.driver_args)
# Set default headers
self.update_headers(self.current_headers)
self.update_cookies(self.current_cookies)
self.set_proxy(self.current_proxy)
def reset(self):
"""
Kills old session and creates a new one with the default headers
"""
self.driver = None
self._create_session()
def quit(self):
"""
Generic function to close distroy and session data
"""
self.driver = None
# Actions
|
MacHu-GWU/windtalker-project | windtalker/asymmetric.py | AsymmetricCipher.newkeys | python | def newkeys(nbits=1024):
pubkey, privkey = rsa.newkeys(nbits, poolsize=1)
return pubkey, privkey | Create a new pair of public and private key pair to use. | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/asymmetric.py#L48-L53 | null | class AsymmetricCipher(BaseCipher):
"""
A asymmetric encryption algorithm utility class helps you easily
encrypt/decrypt text and files.
:param my_pubkey: your public key
:param my_privkey: your private key
:param his_pubkey: other's public key you use to encrypt message
**中文文档**
非对称加密器。
"""
# key length/max length msg, 512/53, 1024/117, 2045/245
_encrypt_chunk_size = 53
_decrypt_chunk_size = 53
def __init__(self, my_pubkey, my_privkey, his_pubkey):
self.my_pubkey = my_pubkey
self.my_privkey = my_privkey
self.his_pubkey = his_pubkey
@staticmethod
def encrypt(self, binary, use_sign=True):
"""
Encrypt binary data.
**中文文档**
- 发送消息时只需要对方的pubkey
- 如需使用签名, 则双方都需要持有对方的pubkey
"""
token = rsa.encrypt(binary, self.his_pubkey) # encrypt it
if use_sign:
self.sign = rsa.sign(binary, self.my_privkey, "SHA-1") # sign it
return token
def decrypt(self, token, signature=None):
"""
Decrypt binary data.
**中文文档**
- 接收消息时只需要自己的privkey
- 如需使用签名, 则双方都需要持有对方的pubkey
"""
binary = rsa.decrypt(token, self.my_privkey)
if signature:
rsa.verify(binary, signature, self.his_pubkey)
return binary
def encrypt_file(self,
path,
output_path=None,
overwrite=False,
enable_verbose=True):
"""
Encrypt a file using rsa.
RSA for big file encryption is very slow. For big file, I recommend
to use symmetric encryption and use RSA to encrypt the password.
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
with open(path, "rb") as infile, open(output_path, "wb") as outfile:
encrypt_bigfile(infile, outfile, self.his_pubkey)
def decrypt_file(self,
path,
output_path=None,
overwrite=False,
enable_verbose=True):
"""
Decrypt a file using rsa.
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
with open(path, "rb") as infile, open(output_path, "wb") as outfile:
decrypt_bigfile(infile, outfile, self.my_privkey)
|
MacHu-GWU/windtalker-project | windtalker/asymmetric.py | AsymmetricCipher.encrypt | python | def encrypt(self, binary, use_sign=True):
token = rsa.encrypt(binary, self.his_pubkey) # encrypt it
if use_sign:
self.sign = rsa.sign(binary, self.my_privkey, "SHA-1") # sign it
return token | Encrypt binary data.
**中文文档**
- 发送消息时只需要对方的pubkey
- 如需使用签名, 则双方都需要持有对方的pubkey | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/asymmetric.py#L55-L67 | null | class AsymmetricCipher(BaseCipher):
"""
A asymmetric encryption algorithm utility class helps you easily
encrypt/decrypt text and files.
:param my_pubkey: your public key
:param my_privkey: your private key
:param his_pubkey: other's public key you use to encrypt message
**中文文档**
非对称加密器。
"""
# key length/max length msg, 512/53, 1024/117, 2045/245
_encrypt_chunk_size = 53
_decrypt_chunk_size = 53
def __init__(self, my_pubkey, my_privkey, his_pubkey):
self.my_pubkey = my_pubkey
self.my_privkey = my_privkey
self.his_pubkey = his_pubkey
@staticmethod
def newkeys(nbits=1024):
"""
Create a new pair of public and private key pair to use.
"""
pubkey, privkey = rsa.newkeys(nbits, poolsize=1)
return pubkey, privkey
def decrypt(self, token, signature=None):
"""
Decrypt binary data.
**中文文档**
- 接收消息时只需要自己的privkey
- 如需使用签名, 则双方都需要持有对方的pubkey
"""
binary = rsa.decrypt(token, self.my_privkey)
if signature:
rsa.verify(binary, signature, self.his_pubkey)
return binary
def encrypt_file(self,
path,
output_path=None,
overwrite=False,
enable_verbose=True):
"""
Encrypt a file using rsa.
RSA for big file encryption is very slow. For big file, I recommend
to use symmetric encryption and use RSA to encrypt the password.
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
with open(path, "rb") as infile, open(output_path, "wb") as outfile:
encrypt_bigfile(infile, outfile, self.his_pubkey)
def decrypt_file(self,
path,
output_path=None,
overwrite=False,
enable_verbose=True):
"""
Decrypt a file using rsa.
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
with open(path, "rb") as infile, open(output_path, "wb") as outfile:
decrypt_bigfile(infile, outfile, self.my_privkey)
|
MacHu-GWU/windtalker-project | windtalker/asymmetric.py | AsymmetricCipher.decrypt | python | def decrypt(self, token, signature=None):
binary = rsa.decrypt(token, self.my_privkey)
if signature:
rsa.verify(binary, signature, self.his_pubkey)
return binary | Decrypt binary data.
**中文文档**
- 接收消息时只需要自己的privkey
- 如需使用签名, 则双方都需要持有对方的pubkey | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/asymmetric.py#L69-L81 | null | class AsymmetricCipher(BaseCipher):
"""
A asymmetric encryption algorithm utility class helps you easily
encrypt/decrypt text and files.
:param my_pubkey: your public key
:param my_privkey: your private key
:param his_pubkey: other's public key you use to encrypt message
**中文文档**
非对称加密器。
"""
# key length/max length msg, 512/53, 1024/117, 2045/245
_encrypt_chunk_size = 53
_decrypt_chunk_size = 53
def __init__(self, my_pubkey, my_privkey, his_pubkey):
self.my_pubkey = my_pubkey
self.my_privkey = my_privkey
self.his_pubkey = his_pubkey
@staticmethod
def newkeys(nbits=1024):
"""
Create a new pair of public and private key pair to use.
"""
pubkey, privkey = rsa.newkeys(nbits, poolsize=1)
return pubkey, privkey
def encrypt(self, binary, use_sign=True):
"""
Encrypt binary data.
**中文文档**
- 发送消息时只需要对方的pubkey
- 如需使用签名, 则双方都需要持有对方的pubkey
"""
token = rsa.encrypt(binary, self.his_pubkey) # encrypt it
if use_sign:
self.sign = rsa.sign(binary, self.my_privkey, "SHA-1") # sign it
return token
def encrypt_file(self,
path,
output_path=None,
overwrite=False,
enable_verbose=True):
"""
Encrypt a file using rsa.
RSA for big file encryption is very slow. For big file, I recommend
to use symmetric encryption and use RSA to encrypt the password.
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
with open(path, "rb") as infile, open(output_path, "wb") as outfile:
encrypt_bigfile(infile, outfile, self.his_pubkey)
def decrypt_file(self,
path,
output_path=None,
overwrite=False,
enable_verbose=True):
"""
Decrypt a file using rsa.
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
with open(path, "rb") as infile, open(output_path, "wb") as outfile:
decrypt_bigfile(infile, outfile, self.my_privkey)
|
MacHu-GWU/windtalker-project | windtalker/asymmetric.py | AsymmetricCipher.encrypt_file | python | def encrypt_file(self,
path,
output_path=None,
overwrite=False,
enable_verbose=True):
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
with open(path, "rb") as infile, open(output_path, "wb") as outfile:
encrypt_bigfile(infile, outfile, self.his_pubkey) | Encrypt a file using rsa.
RSA for big file encryption is very slow. For big file, I recommend
to use symmetric encryption and use RSA to encrypt the password. | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/asymmetric.py#L83-L100 | [
"def process_dst_overwrite_args(src,\n dst=None,\n overwrite=True,\n src_to_dst_func=None):\n src = os.path.abspath(src)\n\n if dst is None:\n dst = src_to_dst_func(src)\n\n if not overwrite:\n if os.path.exists(dst):\n raise EnvironmentError(\n \"output path '%s' already exists..\" % dst)\n\n return src, dst\n"
] | class AsymmetricCipher(BaseCipher):
"""
A asymmetric encryption algorithm utility class helps you easily
encrypt/decrypt text and files.
:param my_pubkey: your public key
:param my_privkey: your private key
:param his_pubkey: other's public key you use to encrypt message
**中文文档**
非对称加密器。
"""
# key length/max length msg, 512/53, 1024/117, 2045/245
_encrypt_chunk_size = 53
_decrypt_chunk_size = 53
def __init__(self, my_pubkey, my_privkey, his_pubkey):
self.my_pubkey = my_pubkey
self.my_privkey = my_privkey
self.his_pubkey = his_pubkey
@staticmethod
def newkeys(nbits=1024):
"""
Create a new pair of public and private key pair to use.
"""
pubkey, privkey = rsa.newkeys(nbits, poolsize=1)
return pubkey, privkey
def encrypt(self, binary, use_sign=True):
"""
Encrypt binary data.
**中文文档**
- 发送消息时只需要对方的pubkey
- 如需使用签名, 则双方都需要持有对方的pubkey
"""
token = rsa.encrypt(binary, self.his_pubkey) # encrypt it
if use_sign:
self.sign = rsa.sign(binary, self.my_privkey, "SHA-1") # sign it
return token
def decrypt(self, token, signature=None):
"""
Decrypt binary data.
**中文文档**
- 接收消息时只需要自己的privkey
- 如需使用签名, 则双方都需要持有对方的pubkey
"""
binary = rsa.decrypt(token, self.my_privkey)
if signature:
rsa.verify(binary, signature, self.his_pubkey)
return binary
def decrypt_file(self,
path,
output_path=None,
overwrite=False,
enable_verbose=True):
"""
Decrypt a file using rsa.
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
with open(path, "rb") as infile, open(output_path, "wb") as outfile:
decrypt_bigfile(infile, outfile, self.my_privkey)
|
MacHu-GWU/windtalker-project | windtalker/asymmetric.py | AsymmetricCipher.decrypt_file | python | def decrypt_file(self,
path,
output_path=None,
overwrite=False,
enable_verbose=True):
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
with open(path, "rb") as infile, open(output_path, "wb") as outfile:
decrypt_bigfile(infile, outfile, self.my_privkey) | Decrypt a file using rsa. | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/asymmetric.py#L102-L116 | [
"def process_dst_overwrite_args(src,\n dst=None,\n overwrite=True,\n src_to_dst_func=None):\n src = os.path.abspath(src)\n\n if dst is None:\n dst = src_to_dst_func(src)\n\n if not overwrite:\n if os.path.exists(dst):\n raise EnvironmentError(\n \"output path '%s' already exists..\" % dst)\n\n return src, dst\n"
] | class AsymmetricCipher(BaseCipher):
"""
A asymmetric encryption algorithm utility class helps you easily
encrypt/decrypt text and files.
:param my_pubkey: your public key
:param my_privkey: your private key
:param his_pubkey: other's public key you use to encrypt message
**中文文档**
非对称加密器。
"""
# key length/max length msg, 512/53, 1024/117, 2045/245
_encrypt_chunk_size = 53
_decrypt_chunk_size = 53
def __init__(self, my_pubkey, my_privkey, his_pubkey):
self.my_pubkey = my_pubkey
self.my_privkey = my_privkey
self.his_pubkey = his_pubkey
@staticmethod
def newkeys(nbits=1024):
"""
Create a new pair of public and private key pair to use.
"""
pubkey, privkey = rsa.newkeys(nbits, poolsize=1)
return pubkey, privkey
def encrypt(self, binary, use_sign=True):
"""
Encrypt binary data.
**中文文档**
- 发送消息时只需要对方的pubkey
- 如需使用签名, 则双方都需要持有对方的pubkey
"""
token = rsa.encrypt(binary, self.his_pubkey) # encrypt it
if use_sign:
self.sign = rsa.sign(binary, self.my_privkey, "SHA-1") # sign it
return token
def decrypt(self, token, signature=None):
"""
Decrypt binary data.
**中文文档**
- 接收消息时只需要自己的privkey
- 如需使用签名, 则双方都需要持有对方的pubkey
"""
binary = rsa.decrypt(token, self.my_privkey)
if signature:
rsa.verify(binary, signature, self.his_pubkey)
return binary
def encrypt_file(self,
path,
output_path=None,
overwrite=False,
enable_verbose=True):
"""
Encrypt a file using rsa.
RSA for big file encryption is very slow. For big file, I recommend
to use symmetric encryption and use RSA to encrypt the password.
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
with open(path, "rb") as infile, open(output_path, "wb") as outfile:
encrypt_bigfile(infile, outfile, self.his_pubkey)
|
MacHu-GWU/windtalker-project | windtalker/cipher.py | BaseCipher.encrypt_binary | python | def encrypt_binary(self, binary, *args, **kwargs):
return self.encrypt(binary, *args, **kwargs) | input: bytes, output: bytes | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/cipher.py#L55-L59 | [
"def encrypt(self, binary, *args, **kwargs):\n \"\"\"\n Overwrite this method using your encrypt algorithm.\n\n :param binary: binary data you need to encrypt\n :return: encrypted_binary, encrypted binary data\n \"\"\"\n raise NotImplementedError\n"
] | class BaseCipher(object):
"""
Base cipher class.
Any cipher utility class that using any encryption algorithm can be
inherited from this base class. The only method you need to implement is
:meth:`BaseCipher.encrypt` and :meth:`BaseCipher.decrypt`. Because once you
can encrypt binary data, then you can encrypt text, file, and directory.
"""
_encrypt_chunk_size = 1024
_decrypt_chunk_size = 1024
def b64encode_str(self, text):
"""
base64 encode a text, return string also.
"""
return base64.b64encode(text.encode("utf-8")).decode("utf-8")
def b64decode_str(self, text):
"""
base64 decode a text, return string also.
"""
return base64.b64decode(text.encode("utf-8")).decode("utf-8")
def encrypt(self, binary, *args, **kwargs):
"""
Overwrite this method using your encrypt algorithm.
:param binary: binary data you need to encrypt
:return: encrypted_binary, encrypted binary data
"""
raise NotImplementedError
def decrypt(self, binary, *args, **kwargs):
"""
Overwrite this method using your decrypt algorithm.
:param binary: binary data you need to decrypt
:return: decrypted_binary, decrypted binary data
"""
raise NotImplementedError
def decrypt_binary(self, binary, *args, **kwargs):
"""
input: bytes, output: bytes
"""
return self.decrypt(binary, *args, **kwargs)
def encrypt_text(self, text, *args, **kwargs):
"""
Encrypt a string.
input: unicode str, output: unicode str
"""
b = text.encode("utf-8")
token = self.encrypt(b, *args, **kwargs)
return base64.b64encode(token).decode("utf-8")
def decrypt_text(self, text, *args, **kwargs):
"""
Decrypt a string.
input: unicode str, output: unicode str
"""
b = text.encode("utf-8")
token = base64.b64decode(b)
return self.decrypt(token, *args, **kwargs).decode("utf-8")
def _show(self, message, indent=0, enable_verbose=True): # pragma: no cover
"""Message printer.
"""
if enable_verbose:
print(" " * indent + message)
def encrypt_file(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True,
**kwargs):
"""
Encrypt a file. If output_path are not given, then try to use the
path with a surfix appended. The default automatical file path handling
is defined here :meth:`windtalker.files.get_encrypted_file_path`
:param path: path of the file you need to encrypt
:param output_path: encrypted file output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("Encrypt '%s' ..." % path, enable_verbose=enable_verbose)
st = time.clock()
files.transform(path, output_path, converter=self.encrypt,
overwrite=overwrite, stream=stream,
chunksize=self._encrypt_chunk_size)
self._show(" Finished! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def decrypt_file(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True,
**kwargs):
"""
Decrypt a file. If output_path are not given, then try to use the
path with a surfix appended. The default automatical file path handling
is defined here :meth:`windtalker.files.recover_path`
:param path: path of the file you need to decrypt
:param output_path: decrypted file output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
self._show("Decrypt '%s' ..." % path, enable_verbose=enable_verbose)
st = time.clock()
files.transform(path, output_path, converter=self.decrypt,
overwrite=overwrite, stream=stream,
chunksize=self._decrypt_chunk_size)
self._show(" Finished! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def encrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
"""
Encrypt everything in a directory.
:param path: path of the dir you need to encrypt
:param output_path: encrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("--- Encrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.encrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def decrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
"""
Decrypt everything in a directory.
:param path: path of the dir you need to decrypt
:param output_path: decrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
self._show("--- Decrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.decrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
|
MacHu-GWU/windtalker-project | windtalker/cipher.py | BaseCipher.decrypt_binary | python | def decrypt_binary(self, binary, *args, **kwargs):
return self.decrypt(binary, *args, **kwargs) | input: bytes, output: bytes | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/cipher.py#L61-L65 | [
"def decrypt(self, binary, *args, **kwargs):\n \"\"\"\n Overwrite this method using your decrypt algorithm.\n\n :param binary: binary data you need to decrypt\n :return: decrypted_binary, decrypted binary data\n \"\"\"\n raise NotImplementedError\n"
] | class BaseCipher(object):
"""
Base cipher class.
Any cipher utility class that using any encryption algorithm can be
inherited from this base class. The only method you need to implement is
:meth:`BaseCipher.encrypt` and :meth:`BaseCipher.decrypt`. Because once you
can encrypt binary data, then you can encrypt text, file, and directory.
"""
_encrypt_chunk_size = 1024
_decrypt_chunk_size = 1024
def b64encode_str(self, text):
"""
base64 encode a text, return string also.
"""
return base64.b64encode(text.encode("utf-8")).decode("utf-8")
def b64decode_str(self, text):
"""
base64 decode a text, return string also.
"""
return base64.b64decode(text.encode("utf-8")).decode("utf-8")
def encrypt(self, binary, *args, **kwargs):
"""
Overwrite this method using your encrypt algorithm.
:param binary: binary data you need to encrypt
:return: encrypted_binary, encrypted binary data
"""
raise NotImplementedError
def decrypt(self, binary, *args, **kwargs):
"""
Overwrite this method using your decrypt algorithm.
:param binary: binary data you need to decrypt
:return: decrypted_binary, decrypted binary data
"""
raise NotImplementedError
def encrypt_binary(self, binary, *args, **kwargs):
"""
input: bytes, output: bytes
"""
return self.encrypt(binary, *args, **kwargs)
def encrypt_text(self, text, *args, **kwargs):
"""
Encrypt a string.
input: unicode str, output: unicode str
"""
b = text.encode("utf-8")
token = self.encrypt(b, *args, **kwargs)
return base64.b64encode(token).decode("utf-8")
def decrypt_text(self, text, *args, **kwargs):
"""
Decrypt a string.
input: unicode str, output: unicode str
"""
b = text.encode("utf-8")
token = base64.b64decode(b)
return self.decrypt(token, *args, **kwargs).decode("utf-8")
def _show(self, message, indent=0, enable_verbose=True): # pragma: no cover
"""Message printer.
"""
if enable_verbose:
print(" " * indent + message)
def encrypt_file(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True,
**kwargs):
"""
Encrypt a file. If output_path are not given, then try to use the
path with a surfix appended. The default automatical file path handling
is defined here :meth:`windtalker.files.get_encrypted_file_path`
:param path: path of the file you need to encrypt
:param output_path: encrypted file output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("Encrypt '%s' ..." % path, enable_verbose=enable_verbose)
st = time.clock()
files.transform(path, output_path, converter=self.encrypt,
overwrite=overwrite, stream=stream,
chunksize=self._encrypt_chunk_size)
self._show(" Finished! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def decrypt_file(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True,
**kwargs):
"""
Decrypt a file. If output_path are not given, then try to use the
path with a surfix appended. The default automatical file path handling
is defined here :meth:`windtalker.files.recover_path`
:param path: path of the file you need to decrypt
:param output_path: decrypted file output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
self._show("Decrypt '%s' ..." % path, enable_verbose=enable_verbose)
st = time.clock()
files.transform(path, output_path, converter=self.decrypt,
overwrite=overwrite, stream=stream,
chunksize=self._decrypt_chunk_size)
self._show(" Finished! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def encrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
"""
Encrypt everything in a directory.
:param path: path of the dir you need to encrypt
:param output_path: encrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("--- Encrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.encrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def decrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
"""
Decrypt everything in a directory.
:param path: path of the dir you need to decrypt
:param output_path: decrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
self._show("--- Decrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.decrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
|
MacHu-GWU/windtalker-project | windtalker/cipher.py | BaseCipher.encrypt_text | python | def encrypt_text(self, text, *args, **kwargs):
b = text.encode("utf-8")
token = self.encrypt(b, *args, **kwargs)
return base64.b64encode(token).decode("utf-8") | Encrypt a string.
input: unicode str, output: unicode str | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/cipher.py#L67-L75 | [
"def encrypt(self, binary, *args, **kwargs):\n \"\"\"\n Overwrite this method using your encrypt algorithm.\n\n :param binary: binary data you need to encrypt\n :return: encrypted_binary, encrypted binary data\n \"\"\"\n raise NotImplementedError\n"
] | class BaseCipher(object):
"""
Base cipher class.
Any cipher utility class that using any encryption algorithm can be
inherited from this base class. The only method you need to implement is
:meth:`BaseCipher.encrypt` and :meth:`BaseCipher.decrypt`. Because once you
can encrypt binary data, then you can encrypt text, file, and directory.
"""
_encrypt_chunk_size = 1024
_decrypt_chunk_size = 1024
def b64encode_str(self, text):
"""
base64 encode a text, return string also.
"""
return base64.b64encode(text.encode("utf-8")).decode("utf-8")
def b64decode_str(self, text):
"""
base64 decode a text, return string also.
"""
return base64.b64decode(text.encode("utf-8")).decode("utf-8")
def encrypt(self, binary, *args, **kwargs):
"""
Overwrite this method using your encrypt algorithm.
:param binary: binary data you need to encrypt
:return: encrypted_binary, encrypted binary data
"""
raise NotImplementedError
def decrypt(self, binary, *args, **kwargs):
"""
Overwrite this method using your decrypt algorithm.
:param binary: binary data you need to decrypt
:return: decrypted_binary, decrypted binary data
"""
raise NotImplementedError
def encrypt_binary(self, binary, *args, **kwargs):
"""
input: bytes, output: bytes
"""
return self.encrypt(binary, *args, **kwargs)
def decrypt_binary(self, binary, *args, **kwargs):
"""
input: bytes, output: bytes
"""
return self.decrypt(binary, *args, **kwargs)
def decrypt_text(self, text, *args, **kwargs):
"""
Decrypt a string.
input: unicode str, output: unicode str
"""
b = text.encode("utf-8")
token = base64.b64decode(b)
return self.decrypt(token, *args, **kwargs).decode("utf-8")
def _show(self, message, indent=0, enable_verbose=True): # pragma: no cover
"""Message printer.
"""
if enable_verbose:
print(" " * indent + message)
def encrypt_file(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True,
**kwargs):
"""
Encrypt a file. If output_path are not given, then try to use the
path with a surfix appended. The default automatical file path handling
is defined here :meth:`windtalker.files.get_encrypted_file_path`
:param path: path of the file you need to encrypt
:param output_path: encrypted file output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("Encrypt '%s' ..." % path, enable_verbose=enable_verbose)
st = time.clock()
files.transform(path, output_path, converter=self.encrypt,
overwrite=overwrite, stream=stream,
chunksize=self._encrypt_chunk_size)
self._show(" Finished! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def decrypt_file(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True,
**kwargs):
"""
Decrypt a file. If output_path are not given, then try to use the
path with a surfix appended. The default automatical file path handling
is defined here :meth:`windtalker.files.recover_path`
:param path: path of the file you need to decrypt
:param output_path: decrypted file output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
self._show("Decrypt '%s' ..." % path, enable_verbose=enable_verbose)
st = time.clock()
files.transform(path, output_path, converter=self.decrypt,
overwrite=overwrite, stream=stream,
chunksize=self._decrypt_chunk_size)
self._show(" Finished! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def encrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
"""
Encrypt everything in a directory.
:param path: path of the dir you need to encrypt
:param output_path: encrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("--- Encrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.encrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def decrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
"""
Decrypt everything in a directory.
:param path: path of the dir you need to decrypt
:param output_path: decrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
self._show("--- Decrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.decrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
|
MacHu-GWU/windtalker-project | windtalker/cipher.py | BaseCipher.decrypt_text | python | def decrypt_text(self, text, *args, **kwargs):
b = text.encode("utf-8")
token = base64.b64decode(b)
return self.decrypt(token, *args, **kwargs).decode("utf-8") | Decrypt a string.
input: unicode str, output: unicode str | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/cipher.py#L77-L85 | [
"def decrypt(self, binary, *args, **kwargs):\n \"\"\"\n Overwrite this method using your decrypt algorithm.\n\n :param binary: binary data you need to decrypt\n :return: decrypted_binary, decrypted binary data\n \"\"\"\n raise NotImplementedError\n"
] | class BaseCipher(object):
"""
Base cipher class.
Any cipher utility class that using any encryption algorithm can be
inherited from this base class. The only method you need to implement is
:meth:`BaseCipher.encrypt` and :meth:`BaseCipher.decrypt`. Because once you
can encrypt binary data, then you can encrypt text, file, and directory.
"""
_encrypt_chunk_size = 1024
_decrypt_chunk_size = 1024
def b64encode_str(self, text):
"""
base64 encode a text, return string also.
"""
return base64.b64encode(text.encode("utf-8")).decode("utf-8")
def b64decode_str(self, text):
"""
base64 decode a text, return string also.
"""
return base64.b64decode(text.encode("utf-8")).decode("utf-8")
def encrypt(self, binary, *args, **kwargs):
"""
Overwrite this method using your encrypt algorithm.
:param binary: binary data you need to encrypt
:return: encrypted_binary, encrypted binary data
"""
raise NotImplementedError
def decrypt(self, binary, *args, **kwargs):
"""
Overwrite this method using your decrypt algorithm.
:param binary: binary data you need to decrypt
:return: decrypted_binary, decrypted binary data
"""
raise NotImplementedError
def encrypt_binary(self, binary, *args, **kwargs):
"""
input: bytes, output: bytes
"""
return self.encrypt(binary, *args, **kwargs)
def decrypt_binary(self, binary, *args, **kwargs):
"""
input: bytes, output: bytes
"""
return self.decrypt(binary, *args, **kwargs)
def encrypt_text(self, text, *args, **kwargs):
"""
Encrypt a string.
input: unicode str, output: unicode str
"""
b = text.encode("utf-8")
token = self.encrypt(b, *args, **kwargs)
return base64.b64encode(token).decode("utf-8")
def _show(self, message, indent=0, enable_verbose=True): # pragma: no cover
"""Message printer.
"""
if enable_verbose:
print(" " * indent + message)
def encrypt_file(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True,
**kwargs):
"""
Encrypt a file. If output_path are not given, then try to use the
path with a surfix appended. The default automatical file path handling
is defined here :meth:`windtalker.files.get_encrypted_file_path`
:param path: path of the file you need to encrypt
:param output_path: encrypted file output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("Encrypt '%s' ..." % path, enable_verbose=enable_verbose)
st = time.clock()
files.transform(path, output_path, converter=self.encrypt,
overwrite=overwrite, stream=stream,
chunksize=self._encrypt_chunk_size)
self._show(" Finished! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def decrypt_file(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True,
**kwargs):
"""
Decrypt a file. If output_path are not given, then try to use the
path with a surfix appended. The default automatical file path handling
is defined here :meth:`windtalker.files.recover_path`
:param path: path of the file you need to decrypt
:param output_path: decrypted file output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
self._show("Decrypt '%s' ..." % path, enable_verbose=enable_verbose)
st = time.clock()
files.transform(path, output_path, converter=self.decrypt,
overwrite=overwrite, stream=stream,
chunksize=self._decrypt_chunk_size)
self._show(" Finished! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def encrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
"""
Encrypt everything in a directory.
:param path: path of the dir you need to encrypt
:param output_path: encrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("--- Encrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.encrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def decrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
"""
Decrypt everything in a directory.
:param path: path of the dir you need to decrypt
:param output_path: decrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
self._show("--- Decrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.decrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
|
MacHu-GWU/windtalker-project | windtalker/cipher.py | BaseCipher._show | python | def _show(self, message, indent=0, enable_verbose=True): # pragma: no cover
if enable_verbose:
print(" " * indent + message) | Message printer. | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/cipher.py#L87-L91 | null | class BaseCipher(object):
"""
Base cipher class.
Any cipher utility class that using any encryption algorithm can be
inherited from this base class. The only method you need to implement is
:meth:`BaseCipher.encrypt` and :meth:`BaseCipher.decrypt`. Because once you
can encrypt binary data, then you can encrypt text, file, and directory.
"""
_encrypt_chunk_size = 1024
_decrypt_chunk_size = 1024
def b64encode_str(self, text):
"""
base64 encode a text, return string also.
"""
return base64.b64encode(text.encode("utf-8")).decode("utf-8")
def b64decode_str(self, text):
"""
base64 decode a text, return string also.
"""
return base64.b64decode(text.encode("utf-8")).decode("utf-8")
def encrypt(self, binary, *args, **kwargs):
"""
Overwrite this method using your encrypt algorithm.
:param binary: binary data you need to encrypt
:return: encrypted_binary, encrypted binary data
"""
raise NotImplementedError
def decrypt(self, binary, *args, **kwargs):
"""
Overwrite this method using your decrypt algorithm.
:param binary: binary data you need to decrypt
:return: decrypted_binary, decrypted binary data
"""
raise NotImplementedError
def encrypt_binary(self, binary, *args, **kwargs):
"""
input: bytes, output: bytes
"""
return self.encrypt(binary, *args, **kwargs)
def decrypt_binary(self, binary, *args, **kwargs):
"""
input: bytes, output: bytes
"""
return self.decrypt(binary, *args, **kwargs)
def encrypt_text(self, text, *args, **kwargs):
"""
Encrypt a string.
input: unicode str, output: unicode str
"""
b = text.encode("utf-8")
token = self.encrypt(b, *args, **kwargs)
return base64.b64encode(token).decode("utf-8")
def decrypt_text(self, text, *args, **kwargs):
"""
Decrypt a string.
input: unicode str, output: unicode str
"""
b = text.encode("utf-8")
token = base64.b64decode(b)
return self.decrypt(token, *args, **kwargs).decode("utf-8")
def encrypt_file(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True,
**kwargs):
"""
Encrypt a file. If output_path are not given, then try to use the
path with a surfix appended. The default automatical file path handling
is defined here :meth:`windtalker.files.get_encrypted_file_path`
:param path: path of the file you need to encrypt
:param output_path: encrypted file output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("Encrypt '%s' ..." % path, enable_verbose=enable_verbose)
st = time.clock()
files.transform(path, output_path, converter=self.encrypt,
overwrite=overwrite, stream=stream,
chunksize=self._encrypt_chunk_size)
self._show(" Finished! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def decrypt_file(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True,
**kwargs):
"""
Decrypt a file. If output_path are not given, then try to use the
path with a surfix appended. The default automatical file path handling
is defined here :meth:`windtalker.files.recover_path`
:param path: path of the file you need to decrypt
:param output_path: decrypted file output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
self._show("Decrypt '%s' ..." % path, enable_verbose=enable_verbose)
st = time.clock()
files.transform(path, output_path, converter=self.decrypt,
overwrite=overwrite, stream=stream,
chunksize=self._decrypt_chunk_size)
self._show(" Finished! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def encrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
"""
Encrypt everything in a directory.
:param path: path of the dir you need to encrypt
:param output_path: encrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("--- Encrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.encrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def decrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
"""
Decrypt everything in a directory.
:param path: path of the dir you need to decrypt
:param output_path: decrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
self._show("--- Decrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.decrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
|
MacHu-GWU/windtalker-project | windtalker/cipher.py | BaseCipher.encrypt_file | python | def encrypt_file(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True,
**kwargs):
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("Encrypt '%s' ..." % path, enable_verbose=enable_verbose)
st = time.clock()
files.transform(path, output_path, converter=self.encrypt,
overwrite=overwrite, stream=stream,
chunksize=self._encrypt_chunk_size)
self._show(" Finished! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path | Encrypt a file. If output_path are not given, then try to use the
path with a surfix appended. The default automatical file path handling
is defined here :meth:`windtalker.files.get_encrypted_file_path`
:param path: path of the file you need to encrypt
:param output_path: encrypted file output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/cipher.py#L93-L125 | [
"def process_dst_overwrite_args(src,\n dst=None,\n overwrite=True,\n src_to_dst_func=None):\n src = os.path.abspath(src)\n\n if dst is None:\n dst = src_to_dst_func(src)\n\n if not overwrite:\n if os.path.exists(dst):\n raise EnvironmentError(\n \"output path '%s' already exists..\" % dst)\n\n return src, dst\n",
"def transform(src, dst, converter,\n overwrite=False, stream=True, chunksize=1024**2, **kwargs):\n \"\"\"\n A file stream transform IO utility function.\n\n :param src: original file path\n :param dst: destination file path\n :param converter: binary content converter function\n :param overwrite: default False,\n :param stream: default True, if True, use stream IO mode, chunksize has to\n be specified.\n :param chunksize: default 1MB\n \"\"\"\n if not overwrite: # pragma: no cover\n if Path(dst).exists():\n raise EnvironmentError(\"'%s' already exists!\" % dst)\n\n with open(src, \"rb\") as f_input:\n with open(dst, \"wb\") as f_output:\n if stream:\n # fix chunksize to a reasonable range\n if chunksize > 1024 ** 2 * 10:\n chunksize = 1024 ** 2 * 10\n elif chunksize < 1024 ** 2:\n chunksize = 1024 ** 2\n\n # write file\n while 1:\n content = f_input.read(chunksize)\n if content:\n f_output.write(converter(content, **kwargs))\n else:\n break\n else: # pragma: no cover\n f_output.write(converter(f_input.read(), **kwargs))\n",
"def _show(self, message, indent=0, enable_verbose=True): # pragma: no cover\n \"\"\"Message printer.\n \"\"\"\n if enable_verbose:\n print(\" \" * indent + message)\n"
] | class BaseCipher(object):
"""
Base cipher class.
Any cipher utility class that using any encryption algorithm can be
inherited from this base class. The only method you need to implement is
:meth:`BaseCipher.encrypt` and :meth:`BaseCipher.decrypt`. Because once you
can encrypt binary data, then you can encrypt text, file, and directory.
"""
_encrypt_chunk_size = 1024
_decrypt_chunk_size = 1024
def b64encode_str(self, text):
"""
base64 encode a text, return string also.
"""
return base64.b64encode(text.encode("utf-8")).decode("utf-8")
def b64decode_str(self, text):
"""
base64 decode a text, return string also.
"""
return base64.b64decode(text.encode("utf-8")).decode("utf-8")
def encrypt(self, binary, *args, **kwargs):
"""
Overwrite this method using your encrypt algorithm.
:param binary: binary data you need to encrypt
:return: encrypted_binary, encrypted binary data
"""
raise NotImplementedError
def decrypt(self, binary, *args, **kwargs):
"""
Overwrite this method using your decrypt algorithm.
:param binary: binary data you need to decrypt
:return: decrypted_binary, decrypted binary data
"""
raise NotImplementedError
def encrypt_binary(self, binary, *args, **kwargs):
"""
input: bytes, output: bytes
"""
return self.encrypt(binary, *args, **kwargs)
def decrypt_binary(self, binary, *args, **kwargs):
"""
input: bytes, output: bytes
"""
return self.decrypt(binary, *args, **kwargs)
def encrypt_text(self, text, *args, **kwargs):
"""
Encrypt a string.
input: unicode str, output: unicode str
"""
b = text.encode("utf-8")
token = self.encrypt(b, *args, **kwargs)
return base64.b64encode(token).decode("utf-8")
def decrypt_text(self, text, *args, **kwargs):
"""
Decrypt a string.
input: unicode str, output: unicode str
"""
b = text.encode("utf-8")
token = base64.b64decode(b)
return self.decrypt(token, *args, **kwargs).decode("utf-8")
def _show(self, message, indent=0, enable_verbose=True): # pragma: no cover
"""Message printer.
"""
if enable_verbose:
print(" " * indent + message)
def decrypt_file(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True,
**kwargs):
"""
Decrypt a file. If output_path are not given, then try to use the
path with a surfix appended. The default automatical file path handling
is defined here :meth:`windtalker.files.recover_path`
:param path: path of the file you need to decrypt
:param output_path: decrypted file output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
self._show("Decrypt '%s' ..." % path, enable_verbose=enable_verbose)
st = time.clock()
files.transform(path, output_path, converter=self.decrypt,
overwrite=overwrite, stream=stream,
chunksize=self._decrypt_chunk_size)
self._show(" Finished! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def encrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
"""
Encrypt everything in a directory.
:param path: path of the dir you need to encrypt
:param output_path: encrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("--- Encrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.encrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def decrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
"""
Decrypt everything in a directory.
:param path: path of the dir you need to decrypt
:param output_path: decrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
self._show("--- Decrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.decrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
|
MacHu-GWU/windtalker-project | windtalker/cipher.py | BaseCipher.decrypt_file | python | def decrypt_file(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True,
**kwargs):
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
self._show("Decrypt '%s' ..." % path, enable_verbose=enable_verbose)
st = time.clock()
files.transform(path, output_path, converter=self.decrypt,
overwrite=overwrite, stream=stream,
chunksize=self._decrypt_chunk_size)
self._show(" Finished! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path | Decrypt a file. If output_path are not given, then try to use the
path with a surfix appended. The default automatical file path handling
is defined here :meth:`windtalker.files.recover_path`
:param path: path of the file you need to decrypt
:param output_path: decrypted file output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/cipher.py#L127-L159 | [
"def process_dst_overwrite_args(src,\n dst=None,\n overwrite=True,\n src_to_dst_func=None):\n src = os.path.abspath(src)\n\n if dst is None:\n dst = src_to_dst_func(src)\n\n if not overwrite:\n if os.path.exists(dst):\n raise EnvironmentError(\n \"output path '%s' already exists..\" % dst)\n\n return src, dst\n",
"def transform(src, dst, converter,\n overwrite=False, stream=True, chunksize=1024**2, **kwargs):\n \"\"\"\n A file stream transform IO utility function.\n\n :param src: original file path\n :param dst: destination file path\n :param converter: binary content converter function\n :param overwrite: default False,\n :param stream: default True, if True, use stream IO mode, chunksize has to\n be specified.\n :param chunksize: default 1MB\n \"\"\"\n if not overwrite: # pragma: no cover\n if Path(dst).exists():\n raise EnvironmentError(\"'%s' already exists!\" % dst)\n\n with open(src, \"rb\") as f_input:\n with open(dst, \"wb\") as f_output:\n if stream:\n # fix chunksize to a reasonable range\n if chunksize > 1024 ** 2 * 10:\n chunksize = 1024 ** 2 * 10\n elif chunksize < 1024 ** 2:\n chunksize = 1024 ** 2\n\n # write file\n while 1:\n content = f_input.read(chunksize)\n if content:\n f_output.write(converter(content, **kwargs))\n else:\n break\n else: # pragma: no cover\n f_output.write(converter(f_input.read(), **kwargs))\n",
"def _show(self, message, indent=0, enable_verbose=True): # pragma: no cover\n \"\"\"Message printer.\n \"\"\"\n if enable_verbose:\n print(\" \" * indent + message)\n"
] | class BaseCipher(object):
"""
Base cipher class.
Any cipher utility class that using any encryption algorithm can be
inherited from this base class. The only method you need to implement is
:meth:`BaseCipher.encrypt` and :meth:`BaseCipher.decrypt`. Because once you
can encrypt binary data, then you can encrypt text, file, and directory.
"""
_encrypt_chunk_size = 1024
_decrypt_chunk_size = 1024
def b64encode_str(self, text):
"""
base64 encode a text, return string also.
"""
return base64.b64encode(text.encode("utf-8")).decode("utf-8")
def b64decode_str(self, text):
"""
base64 decode a text, return string also.
"""
return base64.b64decode(text.encode("utf-8")).decode("utf-8")
def encrypt(self, binary, *args, **kwargs):
"""
Overwrite this method using your encrypt algorithm.
:param binary: binary data you need to encrypt
:return: encrypted_binary, encrypted binary data
"""
raise NotImplementedError
def decrypt(self, binary, *args, **kwargs):
"""
Overwrite this method using your decrypt algorithm.
:param binary: binary data you need to decrypt
:return: decrypted_binary, decrypted binary data
"""
raise NotImplementedError
def encrypt_binary(self, binary, *args, **kwargs):
"""
input: bytes, output: bytes
"""
return self.encrypt(binary, *args, **kwargs)
def decrypt_binary(self, binary, *args, **kwargs):
"""
input: bytes, output: bytes
"""
return self.decrypt(binary, *args, **kwargs)
def encrypt_text(self, text, *args, **kwargs):
"""
Encrypt a string.
input: unicode str, output: unicode str
"""
b = text.encode("utf-8")
token = self.encrypt(b, *args, **kwargs)
return base64.b64encode(token).decode("utf-8")
def decrypt_text(self, text, *args, **kwargs):
"""
Decrypt a string.
input: unicode str, output: unicode str
"""
b = text.encode("utf-8")
token = base64.b64decode(b)
return self.decrypt(token, *args, **kwargs).decode("utf-8")
def _show(self, message, indent=0, enable_verbose=True): # pragma: no cover
"""Message printer.
"""
if enable_verbose:
print(" " * indent + message)
def encrypt_file(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True,
**kwargs):
"""
Encrypt a file. If output_path are not given, then try to use the
path with a surfix appended. The default automatical file path handling
is defined here :meth:`windtalker.files.get_encrypted_file_path`
:param path: path of the file you need to encrypt
:param output_path: encrypted file output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("Encrypt '%s' ..." % path, enable_verbose=enable_verbose)
st = time.clock()
files.transform(path, output_path, converter=self.encrypt,
overwrite=overwrite, stream=stream,
chunksize=self._encrypt_chunk_size)
self._show(" Finished! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def encrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
"""
Encrypt everything in a directory.
:param path: path of the dir you need to encrypt
:param output_path: encrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("--- Encrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.encrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def decrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
"""
Decrypt everything in a directory.
:param path: path of the dir you need to decrypt
:param output_path: decrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
self._show("--- Decrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.decrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
|
MacHu-GWU/windtalker-project | windtalker/cipher.py | BaseCipher.encrypt_dir | python | def encrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("--- Encrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.encrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path | Encrypt everything in a directory.
:param path: path of the dir you need to encrypt
:param output_path: encrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/cipher.py#L161-L198 | [
"def process_dst_overwrite_args(src,\n dst=None,\n overwrite=True,\n src_to_dst_func=None):\n src = os.path.abspath(src)\n\n if dst is None:\n dst = src_to_dst_func(src)\n\n if not overwrite:\n if os.path.exists(dst):\n raise EnvironmentError(\n \"output path '%s' already exists..\" % dst)\n\n return src, dst\n",
"def _show(self, message, indent=0, enable_verbose=True): # pragma: no cover\n \"\"\"Message printer.\n \"\"\"\n if enable_verbose:\n print(\" \" * indent + message)\n",
"def encrypt_file(self,\n path,\n output_path=None,\n overwrite=False,\n stream=True,\n enable_verbose=True,\n **kwargs):\n \"\"\"\n Encrypt a file. If output_path are not given, then try to use the\n path with a surfix appended. The default automatical file path handling\n is defined here :meth:`windtalker.files.get_encrypted_file_path`\n\n :param path: path of the file you need to encrypt\n :param output_path: encrypted file output path\n :param overwrite: if True, then silently overwrite output file if exists\n :param stream: if it is a very big file, stream mode can avoid using\n too much memory\n :param enable_verbose: boolean, trigger on/off the help information\n \"\"\"\n path, output_path = files.process_dst_overwrite_args(\n src=path, dst=output_path, overwrite=overwrite,\n src_to_dst_func=files.get_encrpyted_path,\n )\n\n self._show(\"Encrypt '%s' ...\" % path, enable_verbose=enable_verbose)\n st = time.clock()\n files.transform(path, output_path, converter=self.encrypt,\n overwrite=overwrite, stream=stream,\n chunksize=self._encrypt_chunk_size)\n self._show(\" Finished! Elapse %.6f seconds\" % (time.clock() - st,),\n enable_verbose=enable_verbose)\n\n return output_path\n"
] | class BaseCipher(object):
"""
Base cipher class.
Any cipher utility class that using any encryption algorithm can be
inherited from this base class. The only method you need to implement is
:meth:`BaseCipher.encrypt` and :meth:`BaseCipher.decrypt`. Because once you
can encrypt binary data, then you can encrypt text, file, and directory.
"""
_encrypt_chunk_size = 1024
_decrypt_chunk_size = 1024
def b64encode_str(self, text):
"""
base64 encode a text, return string also.
"""
return base64.b64encode(text.encode("utf-8")).decode("utf-8")
def b64decode_str(self, text):
"""
base64 decode a text, return string also.
"""
return base64.b64decode(text.encode("utf-8")).decode("utf-8")
def encrypt(self, binary, *args, **kwargs):
"""
Overwrite this method using your encrypt algorithm.
:param binary: binary data you need to encrypt
:return: encrypted_binary, encrypted binary data
"""
raise NotImplementedError
def decrypt(self, binary, *args, **kwargs):
"""
Overwrite this method using your decrypt algorithm.
:param binary: binary data you need to decrypt
:return: decrypted_binary, decrypted binary data
"""
raise NotImplementedError
def encrypt_binary(self, binary, *args, **kwargs):
"""
input: bytes, output: bytes
"""
return self.encrypt(binary, *args, **kwargs)
def decrypt_binary(self, binary, *args, **kwargs):
"""
input: bytes, output: bytes
"""
return self.decrypt(binary, *args, **kwargs)
def encrypt_text(self, text, *args, **kwargs):
"""
Encrypt a string.
input: unicode str, output: unicode str
"""
b = text.encode("utf-8")
token = self.encrypt(b, *args, **kwargs)
return base64.b64encode(token).decode("utf-8")
def decrypt_text(self, text, *args, **kwargs):
"""
Decrypt a string.
input: unicode str, output: unicode str
"""
b = text.encode("utf-8")
token = base64.b64decode(b)
return self.decrypt(token, *args, **kwargs).decode("utf-8")
def _show(self, message, indent=0, enable_verbose=True): # pragma: no cover
"""Message printer.
"""
if enable_verbose:
print(" " * indent + message)
def encrypt_file(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True,
**kwargs):
"""
Encrypt a file. If output_path are not given, then try to use the
path with a surfix appended. The default automatical file path handling
is defined here :meth:`windtalker.files.get_encrypted_file_path`
:param path: path of the file you need to encrypt
:param output_path: encrypted file output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_encrpyted_path,
)
self._show("Encrypt '%s' ..." % path, enable_verbose=enable_verbose)
st = time.clock()
files.transform(path, output_path, converter=self.encrypt,
overwrite=overwrite, stream=stream,
chunksize=self._encrypt_chunk_size)
self._show(" Finished! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def decrypt_file(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True,
**kwargs):
"""
Decrypt a file. If output_path are not given, then try to use the
path with a surfix appended. The default automatical file path handling
is defined here :meth:`windtalker.files.recover_path`
:param path: path of the file you need to decrypt
:param output_path: decrypted file output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
self._show("Decrypt '%s' ..." % path, enable_verbose=enable_verbose)
st = time.clock()
files.transform(path, output_path, converter=self.decrypt,
overwrite=overwrite, stream=stream,
chunksize=self._decrypt_chunk_size)
self._show(" Finished! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
def decrypt_dir(self,
path,
output_path=None,
overwrite=False,
stream=True,
enable_verbose=True):
"""
Decrypt everything in a directory.
:param path: path of the dir you need to decrypt
:param output_path: decrypted dir output path
:param overwrite: if True, then silently overwrite output file if exists
:param stream: if it is a very big file, stream mode can avoid using
too much memory
:param enable_verbose: boolean, trigger on/off the help information
"""
path, output_path = files.process_dst_overwrite_args(
src=path, dst=output_path, overwrite=overwrite,
src_to_dst_func=files.get_decrpyted_path,
)
self._show("--- Decrypt directory '%s' ---" % path,
enable_verbose=enable_verbose)
st = time.clock()
for current_dir, _, file_list in os.walk(path):
new_dir = current_dir.replace(path, output_path)
if not os.path.exists(new_dir): # pragma: no cover
os.mkdir(new_dir)
for basename in file_list:
old_path = os.path.join(current_dir, basename)
new_path = os.path.join(new_dir, basename)
self.decrypt_file(old_path, new_path,
overwrite=overwrite,
stream=stream,
enable_verbose=enable_verbose)
self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,),
enable_verbose=enable_verbose)
return output_path
|
MacHu-GWU/windtalker-project | windtalker/symmetric.py | SymmetricCipher.any_text_to_fernet_key | python | def any_text_to_fernet_key(self, text):
md5 = fingerprint.fingerprint.of_text(text)
fernet_key = base64.b64encode(md5.encode("utf-8"))
return fernet_key | Convert any text to a fernet key for encryption. | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/symmetric.py#L56-L62 | [
"def of_text(self, text, encoding=\"utf-8\"):\n \"\"\"Use default hash method to return hash value of a piece of string\n default setting use 'utf-8' encoding.\n \"\"\"\n m = self.hash_algo()\n m.update(text.encode(encoding))\n if self.return_int:\n return int(m.hexdigest(), 16)\n else:\n return m.hexdigest()\n"
] | class SymmetricCipher(Fernet, BaseCipher):
"""
A symmetric encryption algorithm utility class helps you easily
encrypt/decrypt text, files and even a directory.
:param password: The secret password you use to encrypt all your message.
If you feel uncomfortable to put that in your code, you can leave it
empty. The system will ask you manually enter that later.
**中文文档**
对称加密器。
"""
_encrypt_chunk_size = 1024 * 1024 # 1 MB
_decrypt_chunk_size = 1398200 # 1.398 MB
"""Symmtric algorithm needs to break big files in small chunk, and encrypt
them one by one, and concatenate them at the end. Each chunk has a fixed
size. That's what these two attributes for.
"""
def __init__(self, password=None):
if password:
fernet_key = self.any_text_to_fernet_key(password)
super(SymmetricCipher, self).__init__(fernet_key)
else: # pragma: no cover
if WINDTALKER_CONFIG_FILE.exists():
self.set_password(read_windtalker_password())
else:
self.input_password()
def input_password(self): # pragma: no cover
"""
Manually enter a password for encryption on keyboard.
"""
password = input("Please enter your secret key (case sensitive): ")
self.set_password(password)
def set_password(self, password):
"""
Set a new password for encryption.
"""
self.__init__(password)
def set_encrypt_chunk_size(self, size):
if 1024 * 1024 < size < 100 * 1024 * 1024:
self._encrypt_chunk_size = size
self._decrypt_chunk_size = len(self.encrypt(b"x" * size))
else:
print("encrypt chunk size has to be between 1MB and 100MB")
@property
def metadata(self):
return {
"_encrypt_chunk_size": self._encrypt_chunk_size,
"_decrypt_chunk_size": self._decrypt_chunk_size,
}
def encrypt(self, binary):
"""
Encrypt binary data.
"""
return super(SymmetricCipher, self).encrypt(binary)
def decrypt(self, binary):
"""
Decrypt binary data.
"""
try:
return super(SymmetricCipher, self).decrypt(binary)
except:
raise PasswordError("Opps, wrong magic word!")
|
MacHu-GWU/windtalker-project | windtalker/files.py | get_encrpyted_path | python | def get_encrpyted_path(original_path, surfix=default_surfix):
p = Path(original_path).absolute()
encrypted_p = p.change(new_fname=p.fname + surfix)
return encrypted_p.abspath | Find the output encrypted file /dir path (by adding a surfix).
Example:
- file: ``${home}/test.txt`` -> ``${home}/test-encrypted.txt``
- dir: ``${home}/Documents`` -> ``${home}/Documents-encrypted`` | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/files.py#L12-L23 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from pathlib_mate import Path
default_surfix = "-encrypted" # windtalker secret
def get_decrpyted_path(encrypted_path, surfix=default_surfix):
"""
Find the original path of encrypted file or dir.
Example:
- file: ``${home}/test-encrypted.txt`` -> ``${home}/test.txt``
- dir: ``${home}/Documents-encrypted`` -> ``${home}/Documents``
"""
surfix_reversed = surfix[::-1]
p = Path(encrypted_path).absolute()
fname = p.fname
fname_reversed = fname[::-1]
new_fname = fname_reversed.replace(surfix_reversed, "", 1)[::-1]
decrypted_p = p.change(new_fname=new_fname)
return decrypted_p.abspath
def transform(src, dst, converter,
overwrite=False, stream=True, chunksize=1024**2, **kwargs):
"""
A file stream transform IO utility function.
:param src: original file path
:param dst: destination file path
:param converter: binary content converter function
:param overwrite: default False,
:param stream: default True, if True, use stream IO mode, chunksize has to
be specified.
:param chunksize: default 1MB
"""
if not overwrite: # pragma: no cover
if Path(dst).exists():
raise EnvironmentError("'%s' already exists!" % dst)
with open(src, "rb") as f_input:
with open(dst, "wb") as f_output:
if stream:
# fix chunksize to a reasonable range
if chunksize > 1024 ** 2 * 10:
chunksize = 1024 ** 2 * 10
elif chunksize < 1024 ** 2:
chunksize = 1024 ** 2
# write file
while 1:
content = f_input.read(chunksize)
if content:
f_output.write(converter(content, **kwargs))
else:
break
else: # pragma: no cover
f_output.write(converter(f_input.read(), **kwargs))
def process_dst_overwrite_args(src,
dst=None,
overwrite=True,
src_to_dst_func=None):
src = os.path.abspath(src)
if dst is None:
dst = src_to_dst_func(src)
if not overwrite:
if os.path.exists(dst):
raise EnvironmentError(
"output path '%s' already exists.." % dst)
return src, dst
|
MacHu-GWU/windtalker-project | windtalker/files.py | get_decrpyted_path | python | def get_decrpyted_path(encrypted_path, surfix=default_surfix):
surfix_reversed = surfix[::-1]
p = Path(encrypted_path).absolute()
fname = p.fname
fname_reversed = fname[::-1]
new_fname = fname_reversed.replace(surfix_reversed, "", 1)[::-1]
decrypted_p = p.change(new_fname=new_fname)
return decrypted_p.abspath | Find the original path of encrypted file or dir.
Example:
- file: ``${home}/test-encrypted.txt`` -> ``${home}/test.txt``
- dir: ``${home}/Documents-encrypted`` -> ``${home}/Documents`` | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/files.py#L26-L42 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from pathlib_mate import Path
default_surfix = "-encrypted" # windtalker secret
def get_encrpyted_path(original_path, surfix=default_surfix):
"""
Find the output encrypted file /dir path (by adding a surfix).
Example:
- file: ``${home}/test.txt`` -> ``${home}/test-encrypted.txt``
- dir: ``${home}/Documents`` -> ``${home}/Documents-encrypted``
"""
p = Path(original_path).absolute()
encrypted_p = p.change(new_fname=p.fname + surfix)
return encrypted_p.abspath
def transform(src, dst, converter,
overwrite=False, stream=True, chunksize=1024**2, **kwargs):
"""
A file stream transform IO utility function.
:param src: original file path
:param dst: destination file path
:param converter: binary content converter function
:param overwrite: default False,
:param stream: default True, if True, use stream IO mode, chunksize has to
be specified.
:param chunksize: default 1MB
"""
if not overwrite: # pragma: no cover
if Path(dst).exists():
raise EnvironmentError("'%s' already exists!" % dst)
with open(src, "rb") as f_input:
with open(dst, "wb") as f_output:
if stream:
# fix chunksize to a reasonable range
if chunksize > 1024 ** 2 * 10:
chunksize = 1024 ** 2 * 10
elif chunksize < 1024 ** 2:
chunksize = 1024 ** 2
# write file
while 1:
content = f_input.read(chunksize)
if content:
f_output.write(converter(content, **kwargs))
else:
break
else: # pragma: no cover
f_output.write(converter(f_input.read(), **kwargs))
def process_dst_overwrite_args(src,
dst=None,
overwrite=True,
src_to_dst_func=None):
src = os.path.abspath(src)
if dst is None:
dst = src_to_dst_func(src)
if not overwrite:
if os.path.exists(dst):
raise EnvironmentError(
"output path '%s' already exists.." % dst)
return src, dst
|
MacHu-GWU/windtalker-project | windtalker/files.py | transform | python | def transform(src, dst, converter,
overwrite=False, stream=True, chunksize=1024**2, **kwargs):
if not overwrite: # pragma: no cover
if Path(dst).exists():
raise EnvironmentError("'%s' already exists!" % dst)
with open(src, "rb") as f_input:
with open(dst, "wb") as f_output:
if stream:
# fix chunksize to a reasonable range
if chunksize > 1024 ** 2 * 10:
chunksize = 1024 ** 2 * 10
elif chunksize < 1024 ** 2:
chunksize = 1024 ** 2
# write file
while 1:
content = f_input.read(chunksize)
if content:
f_output.write(converter(content, **kwargs))
else:
break
else: # pragma: no cover
f_output.write(converter(f_input.read(), **kwargs)) | A file stream transform IO utility function.
:param src: original file path
:param dst: destination file path
:param converter: binary content converter function
:param overwrite: default False,
:param stream: default True, if True, use stream IO mode, chunksize has to
be specified.
:param chunksize: default 1MB | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/files.py#L45-L79 | [
"def encrypt(self, binary, *args, **kwargs):\n \"\"\"\n Overwrite this method using your encrypt algorithm.\n\n :param binary: binary data you need to encrypt\n :return: encrypted_binary, encrypted binary data\n \"\"\"\n raise NotImplementedError\n",
"def decrypt(self, binary, *args, **kwargs):\n \"\"\"\n Overwrite this method using your decrypt algorithm.\n\n :param binary: binary data you need to decrypt\n :return: decrypted_binary, decrypted binary data\n \"\"\"\n raise NotImplementedError\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from pathlib_mate import Path
default_surfix = "-encrypted" # windtalker secret
def get_encrpyted_path(original_path, surfix=default_surfix):
"""
Find the output encrypted file /dir path (by adding a surfix).
Example:
- file: ``${home}/test.txt`` -> ``${home}/test-encrypted.txt``
- dir: ``${home}/Documents`` -> ``${home}/Documents-encrypted``
"""
p = Path(original_path).absolute()
encrypted_p = p.change(new_fname=p.fname + surfix)
return encrypted_p.abspath
def get_decrpyted_path(encrypted_path, surfix=default_surfix):
"""
Find the original path of encrypted file or dir.
Example:
- file: ``${home}/test-encrypted.txt`` -> ``${home}/test.txt``
- dir: ``${home}/Documents-encrypted`` -> ``${home}/Documents``
"""
surfix_reversed = surfix[::-1]
p = Path(encrypted_path).absolute()
fname = p.fname
fname_reversed = fname[::-1]
new_fname = fname_reversed.replace(surfix_reversed, "", 1)[::-1]
decrypted_p = p.change(new_fname=new_fname)
return decrypted_p.abspath
def process_dst_overwrite_args(src,
dst=None,
overwrite=True,
src_to_dst_func=None):
src = os.path.abspath(src)
if dst is None:
dst = src_to_dst_func(src)
if not overwrite:
if os.path.exists(dst):
raise EnvironmentError(
"output path '%s' already exists.." % dst)
return src, dst
|
MacHu-GWU/windtalker-project | windtalker/fingerprint.py | FingerPrint.of_bytes | python | def of_bytes(self, py_bytes):
m = self.hash_algo()
m.update(py_bytes)
if self.return_int:
return int(m.hexdigest(), 16)
else:
return m.hexdigest() | Use default hash method to return hash value of bytes. | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/fingerprint.py#L103-L111 | null | class FingerPrint(object):
"""A hashlib wrapper class allow you to use one line to do hash as you wish.
Usage::
>>> from fingerprint import fingerprint
>>> print(fingerprint.of_bytes(bytes(123)))
b1fec41621e338896e2d26f232a6b006
>>> print(fingerprint.of_text("message"))
78e731027d8fd50ed642340b7c9a63b3
>>> print(fingerprint.of_pyobj({"key": "value"}))
4c502ab399c89c8758a2d8c37be98f69
>>> print(fingerprint.of_file("fingerprint.py"))
4cddcb5562cbff652b0e4c8a0300337a
"""
_mapper = {
"md5": hashlib.md5,
"sha1": hashlib.sha1,
"sha256": hashlib.sha256,
"sha512": hashlib.sha512,
}
def __init__(self, algorithm="md5", pk_protocol=pk_protocol):
self.use(algorithm)
self.set_return_str()
self.set_pickle_protocol(pk_protocol)
def digest_to_int(self, digest):
"""Convert hexdigest str to int.
"""
return int(digest, 16)
def use(self, algorithm):
"""Change the hash algorithm you gonna use.
"""
try:
self.hash_algo = self._mapper[algorithm.strip().lower()]
except IndexError:
template = "'%s' is not supported, try one of %s."
raise ValueError(template % (algorithm, list(self._mapper)))
def set_return_int(self):
"""Set to return hex integer.
"""
self.return_int = True
def set_return_str(self):
"""Set to return hex string.
"""
self.return_int = False
def set_pickle_protocol(self, pk_protocol):
"""Set pickle protocol.
"""
if pk_protocol not in [2, 3]:
raise ValueError("pickle protocol has to be 2 or 3!")
self.pk_protocol = pk_protocol
def of_text(self, text, encoding="utf-8"):
"""Use default hash method to return hash value of a piece of string
default setting use 'utf-8' encoding.
"""
m = self.hash_algo()
m.update(text.encode(encoding))
if self.return_int:
return int(m.hexdigest(), 16)
else:
return m.hexdigest()
def of_pyobj(self, pyobj):
"""Use default hash method to return hash value of a piece of Python
picklable object.
"""
m = self.hash_algo()
m.update(pickle.dumps(pyobj, protocol=self.pk_protocol))
if self.return_int:
return int(m.hexdigest(), 16)
else:
return m.hexdigest()
def of_file(self, abspath, nbytes=0, chunk_size=1024):
"""Use default hash method to return hash value of a piece of a file
Estimate processing time on:
:param abspath: the absolute path to the file.
:param nbytes: only has first N bytes of the file. if 0, hash all file.
:param chunk_size: The max memory we use at one time.
CPU = i7-4600U 2.10GHz - 2.70GHz, RAM = 8.00 GB
1 second can process 0.25GB data
- 0.59G - 2.43 sec
- 1.3G - 5.68 sec
- 1.9G - 7.72 sec
- 2.5G - 10.32 sec
- 3.9G - 16.0 sec
ATTENTION:
if you change the meta data (for example, the title, years
information in audio, video) of a multi-media file, then the hash
value gonna also change.
"""
if nbytes < 0:
raise ValueError("chunk_size cannot smaller than 0")
if chunk_size < 1:
raise ValueError("chunk_size cannot smaller than 1")
if (nbytes > 0) and (nbytes < chunk_size):
chunk_size = nbytes
m = self.hash_algo()
with open(abspath, "rb") as f:
if nbytes: # use first n bytes
have_reads = 0
while True:
have_reads += chunk_size
if have_reads > nbytes:
n = nbytes - (have_reads - chunk_size)
if n:
data = f.read(n)
m.update(data)
break
else:
data = f.read(chunk_size)
m.update(data)
else: # use entire content
while True:
data = f.read(chunk_size)
if not data:
break
m.update(data)
return m.hexdigest()
|
MacHu-GWU/windtalker-project | windtalker/fingerprint.py | FingerPrint.of_text | python | def of_text(self, text, encoding="utf-8"):
m = self.hash_algo()
m.update(text.encode(encoding))
if self.return_int:
return int(m.hexdigest(), 16)
else:
return m.hexdigest() | Use default hash method to return hash value of a piece of string
default setting use 'utf-8' encoding. | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/fingerprint.py#L113-L122 | null | class FingerPrint(object):
"""A hashlib wrapper class allow you to use one line to do hash as you wish.
Usage::
>>> from fingerprint import fingerprint
>>> print(fingerprint.of_bytes(bytes(123)))
b1fec41621e338896e2d26f232a6b006
>>> print(fingerprint.of_text("message"))
78e731027d8fd50ed642340b7c9a63b3
>>> print(fingerprint.of_pyobj({"key": "value"}))
4c502ab399c89c8758a2d8c37be98f69
>>> print(fingerprint.of_file("fingerprint.py"))
4cddcb5562cbff652b0e4c8a0300337a
"""
_mapper = {
"md5": hashlib.md5,
"sha1": hashlib.sha1,
"sha256": hashlib.sha256,
"sha512": hashlib.sha512,
}
def __init__(self, algorithm="md5", pk_protocol=pk_protocol):
self.use(algorithm)
self.set_return_str()
self.set_pickle_protocol(pk_protocol)
def digest_to_int(self, digest):
"""Convert hexdigest str to int.
"""
return int(digest, 16)
def use(self, algorithm):
"""Change the hash algorithm you gonna use.
"""
try:
self.hash_algo = self._mapper[algorithm.strip().lower()]
except IndexError:
template = "'%s' is not supported, try one of %s."
raise ValueError(template % (algorithm, list(self._mapper)))
def set_return_int(self):
"""Set to return hex integer.
"""
self.return_int = True
def set_return_str(self):
"""Set to return hex string.
"""
self.return_int = False
def set_pickle_protocol(self, pk_protocol):
"""Set pickle protocol.
"""
if pk_protocol not in [2, 3]:
raise ValueError("pickle protocol has to be 2 or 3!")
self.pk_protocol = pk_protocol
def of_bytes(self, py_bytes):
"""Use default hash method to return hash value of bytes.
"""
m = self.hash_algo()
m.update(py_bytes)
if self.return_int:
return int(m.hexdigest(), 16)
else:
return m.hexdigest()
def of_pyobj(self, pyobj):
"""Use default hash method to return hash value of a piece of Python
picklable object.
"""
m = self.hash_algo()
m.update(pickle.dumps(pyobj, protocol=self.pk_protocol))
if self.return_int:
return int(m.hexdigest(), 16)
else:
return m.hexdigest()
def of_file(self, abspath, nbytes=0, chunk_size=1024):
"""Use default hash method to return hash value of a piece of a file
Estimate processing time on:
:param abspath: the absolute path to the file.
:param nbytes: only has first N bytes of the file. if 0, hash all file.
:param chunk_size: The max memory we use at one time.
CPU = i7-4600U 2.10GHz - 2.70GHz, RAM = 8.00 GB
1 second can process 0.25GB data
- 0.59G - 2.43 sec
- 1.3G - 5.68 sec
- 1.9G - 7.72 sec
- 2.5G - 10.32 sec
- 3.9G - 16.0 sec
ATTENTION:
if you change the meta data (for example, the title, years
information in audio, video) of a multi-media file, then the hash
value gonna also change.
"""
if nbytes < 0:
raise ValueError("chunk_size cannot smaller than 0")
if chunk_size < 1:
raise ValueError("chunk_size cannot smaller than 1")
if (nbytes > 0) and (nbytes < chunk_size):
chunk_size = nbytes
m = self.hash_algo()
with open(abspath, "rb") as f:
if nbytes: # use first n bytes
have_reads = 0
while True:
have_reads += chunk_size
if have_reads > nbytes:
n = nbytes - (have_reads - chunk_size)
if n:
data = f.read(n)
m.update(data)
break
else:
data = f.read(chunk_size)
m.update(data)
else: # use entire content
while True:
data = f.read(chunk_size)
if not data:
break
m.update(data)
return m.hexdigest()
|
MacHu-GWU/windtalker-project | windtalker/fingerprint.py | FingerPrint.of_pyobj | python | def of_pyobj(self, pyobj):
m = self.hash_algo()
m.update(pickle.dumps(pyobj, protocol=self.pk_protocol))
if self.return_int:
return int(m.hexdigest(), 16)
else:
return m.hexdigest() | Use default hash method to return hash value of a piece of Python
picklable object. | train | https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/fingerprint.py#L124-L133 | null | class FingerPrint(object):
"""A hashlib wrapper class allow you to use one line to do hash as you wish.
Usage::
>>> from fingerprint import fingerprint
>>> print(fingerprint.of_bytes(bytes(123)))
b1fec41621e338896e2d26f232a6b006
>>> print(fingerprint.of_text("message"))
78e731027d8fd50ed642340b7c9a63b3
>>> print(fingerprint.of_pyobj({"key": "value"}))
4c502ab399c89c8758a2d8c37be98f69
>>> print(fingerprint.of_file("fingerprint.py"))
4cddcb5562cbff652b0e4c8a0300337a
"""
_mapper = {
"md5": hashlib.md5,
"sha1": hashlib.sha1,
"sha256": hashlib.sha256,
"sha512": hashlib.sha512,
}
def __init__(self, algorithm="md5", pk_protocol=pk_protocol):
self.use(algorithm)
self.set_return_str()
self.set_pickle_protocol(pk_protocol)
def digest_to_int(self, digest):
"""Convert hexdigest str to int.
"""
return int(digest, 16)
def use(self, algorithm):
"""Change the hash algorithm you gonna use.
"""
try:
self.hash_algo = self._mapper[algorithm.strip().lower()]
except IndexError:
template = "'%s' is not supported, try one of %s."
raise ValueError(template % (algorithm, list(self._mapper)))
def set_return_int(self):
"""Set to return hex integer.
"""
self.return_int = True
def set_return_str(self):
"""Set to return hex string.
"""
self.return_int = False
def set_pickle_protocol(self, pk_protocol):
"""Set pickle protocol.
"""
if pk_protocol not in [2, 3]:
raise ValueError("pickle protocol has to be 2 or 3!")
self.pk_protocol = pk_protocol
def of_bytes(self, py_bytes):
"""Use default hash method to return hash value of bytes.
"""
m = self.hash_algo()
m.update(py_bytes)
if self.return_int:
return int(m.hexdigest(), 16)
else:
return m.hexdigest()
def of_text(self, text, encoding="utf-8"):
"""Use default hash method to return hash value of a piece of string
default setting use 'utf-8' encoding.
"""
m = self.hash_algo()
m.update(text.encode(encoding))
if self.return_int:
return int(m.hexdigest(), 16)
else:
return m.hexdigest()
def of_file(self, abspath, nbytes=0, chunk_size=1024):
"""Use default hash method to return hash value of a piece of a file
Estimate processing time on:
:param abspath: the absolute path to the file.
:param nbytes: only has first N bytes of the file. if 0, hash all file.
:param chunk_size: The max memory we use at one time.
CPU = i7-4600U 2.10GHz - 2.70GHz, RAM = 8.00 GB
1 second can process 0.25GB data
- 0.59G - 2.43 sec
- 1.3G - 5.68 sec
- 1.9G - 7.72 sec
- 2.5G - 10.32 sec
- 3.9G - 16.0 sec
ATTENTION:
if you change the meta data (for example, the title, years
information in audio, video) of a multi-media file, then the hash
value gonna also change.
"""
if nbytes < 0:
raise ValueError("chunk_size cannot smaller than 0")
if chunk_size < 1:
raise ValueError("chunk_size cannot smaller than 1")
if (nbytes > 0) and (nbytes < chunk_size):
chunk_size = nbytes
m = self.hash_algo()
with open(abspath, "rb") as f:
if nbytes: # use first n bytes
have_reads = 0
while True:
have_reads += chunk_size
if have_reads > nbytes:
n = nbytes - (have_reads - chunk_size)
if n:
data = f.read(n)
m.update(data)
break
else:
data = f.read(chunk_size)
m.update(data)
else: # use entire content
while True:
data = f.read(chunk_size)
if not data:
break
m.update(data)
return m.hexdigest()
|
MaT1g3R/option | option/option_.py | Option.maybe | python | def maybe(cls, val: Optional[T]) -> 'Option[T]':
return cast('Option[T]', NONE) if val is None else cls.Some(val) | Shortcut method to return ``Some`` or :py:data:`NONE` based on ``val``.
Args:
val: Some value.
Returns:
``Some(val)`` if the ``val`` is not None, otherwise :py:data:`NONE`.
Examples:
>>> Option.maybe(0)
Some(0)
>>> Option.maybe(None)
NONE | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/option_.py#L85-L101 | null | class Option(Generic[T]):
"""
:py:class:`Option` represents an optional value. Every :py:class:`Option`
is either ``Some`` and contains a value, or :py:data:`NONE` and
does not.
To create a ``Some`` value, please use :py:meth:`Option.Some` or :py:func:`Some`.
To create a :py:data:`NONE` value, please use :py:meth:`Option.NONE` or import the
constant :py:data:`NONE` directly.
To let :py:class:`Option` guess the type of :py:class:`Option` to create,
please use :py:meth:`Option.maybe` or :py:func:`maybe`.
Calling the ``__init__`` method directly will raise a ``TypeError``.
Examples:
>>> Option.Some(1)
Some(1)
>>> Option.NONE()
NONE
>>> Option.maybe(1)
Some(1)
>>> Option.maybe(None)
NONE
"""
__slots__ = ('_val', '_is_some', '_type')
def __init__(self, value: T, is_some: bool, *, _force: bool = False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = value
self._is_some = is_some
self._type = type(self)
@classmethod
def Some(cls, val: T) -> 'Option[T]':
"""Some value ``val``."""
return cls(val, True, _force=True)
@classmethod
def NONE(cls) -> 'Option[T]':
"""No Value."""
return cast('Option[T]', NONE)
@classmethod
def __bool__(self):
"""
Returns the truth value of the :py:class:`Option` based on its value.
Returns:
True if the :py:class:`Option` is ``Some`` value, otherwise False.
Examples:
>>> bool(Some(False))
True
>>> bool(NONE)
False
"""
return self._is_some
@property
def is_some(self) -> bool:
"""
Returns ``True`` if the option is a ``Some`` value.
Examples:
>>> Some(0).is_some
True
>>> NONE.is_some
False
"""
return self._is_some
@property
def is_none(self) -> bool:
"""
Returns ``True`` if the option is a :py:data:`NONE` value.
Examples:
>>> Some(0).is_none
False
>>> NONE.is_none
True
"""
return not self._is_some
def expect(self, msg) -> T:
"""
Unwraps the option. Raises an exception if the value is :py:data:`NONE`.
Args:
msg: The exception message.
Returns:
The wrapped value.
Raises:
``ValueError`` with message provided by ``msg`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).expect('sd')
0
>>> try:
... NONE.expect('Oh No!')
... except ValueError as e:
... print(e)
Oh No!
"""
if self._is_some:
return self._val
raise ValueError(msg)
def unwrap(self) -> T:
"""
Returns the value in the :py:class:`Option` if it is ``Some``.
Returns:
The ```Some`` value of the :py:class:`Option`.
Raises:
``ValueError`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).unwrap()
0
>>> try:
... NONE.unwrap()
... except ValueError as e:
... print(e)
Value is NONE.
"""
return self.value
@property
def value(self) -> T:
"""Property version of :py:meth:`unwrap`."""
if self._is_some:
return self._val
raise ValueError('Value is NONE.')
def unwrap_or(self, default: U) -> Union[T, U]:
"""
Returns the contained value or ``default``.
Args:
default: The default value.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``default``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :py:meth:`unwrap_or_else` instead.
Examples:
>>> Some(0).unwrap_or(3)
0
>>> NONE.unwrap_or(0)
0
"""
return self.unwrap_or_else(lambda: default)
def unwrap_or_else(self, callback: Callable[[], U]) -> Union[T, U]:
"""
Returns the contained value or computes it from ``callback``.
Args:
callback: The the default callback.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``callback()``.
Examples:
>>> Some(0).unwrap_or_else(lambda: 111)
0
>>> NONE.unwrap_or_else(lambda: 'ha')
'ha'
"""
return self._val if self._is_some else callback()
def map(self, callback: Callable[[T], U]) -> 'Option[U]':
"""
Applies the ``callback`` with the contained value as its argument or
returns :py:data:`NONE`.
Args:
callback: The callback to apply to the contained value.
Returns:
The ``callback`` result wrapped in an :class:`Option` if the
contained value is ``Some``, otherwise :py:data:`NONE`
Examples:
>>> Some(10).map(lambda x: x * x)
Some(100)
>>> NONE.map(lambda x: x * x)
NONE
"""
return self._type.Some(callback(self._val)) if self._is_some else cast('Option[U]', NONE)
def map_or(self, callback: Callable[[T], U], default: A) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or returns ``default``.
Args:
callback: The callback to apply to the contained value.
default: The default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise ``default``.
Notes:
If you wish to use the result of a function call as ``default``,
it is recommended to use :py:meth:`map_or_else` instead.
Examples:
>>> Some(0).map_or(lambda x: x + 1, 1000)
1
>>> NONE.map_or(lambda x: x * x, 1)
1
"""
return callback(self._val) if self._is_some else default
def map_or_else(self, callback: Callable[[T], U], default: Callable[[], A]) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or computes a default
with ``default``.
Args:
callback: The callback to apply to the contained value.
default: The callback fot the default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise the result of ``default``.
Examples:
>>> Some(0).map_or_else(lambda x: x * x, lambda: 1)
0
>>> NONE.map_or_else(lambda x: x * x, lambda: 1)
1
"""
return callback(self._val) if self._is_some else default()
def flatmap(self, callback: 'Callable[[T], Option[U]]') -> 'Option[U]':
"""
Applies the callback to the contained value if the option
is not :py:data:`NONE`.
This is different than :py:meth:`Option.map` because the result
of the callback isn't wrapped in a new :py:class:`Option`
Args:
callback: The callback to apply to the contained value.
Returns:
:py:data:`NONE` if the option is :py:data:`NONE`.
otherwise calls `callback` with the contained value and
returns the result.
Examples:
>>> def square(x): return Some(x * x)
>>> def nope(x): return NONE
>>> Some(2).flatmap(square).flatmap(square)
Some(16)
>>> Some(2).flatmap(square).flatmap(nope)
NONE
>>> Some(2).flatmap(nope).flatmap(square)
NONE
>>> NONE.flatmap(square).flatmap(square)
NONE
"""
return callback(self._val) if self._is_some else cast('Option[U]', NONE)
def filter(self, predicate: Callable[[T], bool]) -> 'Option[T]':
"""
Returns :py:data:`NONE` if the :py:class:`Option` is :py:data:`NONE`,
otherwise filter the contained value by ``predicate``.
Args:
predicate: The fitler function.
Returns:
:py:data:`NONE` if the contained value is :py:data:`NONE`, otherwise:
* The option itself if the predicate returns True
* :py:data:`NONE` if the predicate returns False
Examples:
>>> Some(0).filter(lambda x: x % 2 == 1)
NONE
>>> Some(1).filter(lambda x: x % 2 == 1)
Some(1)
>>> NONE.filter(lambda x: True)
NONE
"""
if self._is_some and predicate(self._val):
return self
return cast('Option[T]', NONE)
def get(
self: 'Option[Mapping[K,V]]',
key: K,
default=None
) -> 'Option[V]':
"""
Gets a mapping value by key in the contained value or returns
``default`` if the key doesn't exist.
Args:
key: The mapping key.
default: The defauilt value.
Returns:
* ``Some`` variant of the mapping value if the key exists
and the value is not None.
* ``Some(default)`` if ``default`` is not None.
* :py:data:`NONE` if ``default`` is None.
Examples:
>>> Some({'hi': 1}).get('hi')
Some(1)
>>> Some({}).get('hi', 12)
Some(12)
>>> NONE.get('hi', 12)
Some(12)
>>> NONE.get('hi')
NONE
"""
if self._is_some:
return self._type.maybe(self._val.get(key, default))
return self._type.maybe(default)
def __hash__(self):
return hash((self.__class__, self._is_some, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_some == other._is_some
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_some != other._is_some
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val < other._val if self._is_some else False
else:
return other._is_some
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val <= other._val if self._is_some else True
return other._is_some
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val > other._val if self._is_some else False
else:
return self._is_some
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val >= other._val if self._is_some else True
return self._is_some
return NotImplemented
def __repr__(self):
return 'NONE' if self.is_none else f'Some({self._val!r})'
|
MaT1g3R/option | option/option_.py | Option.expect | python | def expect(self, msg) -> T:
if self._is_some:
return self._val
raise ValueError(msg) | Unwraps the option. Raises an exception if the value is :py:data:`NONE`.
Args:
msg: The exception message.
Returns:
The wrapped value.
Raises:
``ValueError`` with message provided by ``msg`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).expect('sd')
0
>>> try:
... NONE.expect('Oh No!')
... except ValueError as e:
... print(e)
Oh No! | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/option_.py#L144-L168 | null | class Option(Generic[T]):
"""
:py:class:`Option` represents an optional value. Every :py:class:`Option`
is either ``Some`` and contains a value, or :py:data:`NONE` and
does not.
To create a ``Some`` value, please use :py:meth:`Option.Some` or :py:func:`Some`.
To create a :py:data:`NONE` value, please use :py:meth:`Option.NONE` or import the
constant :py:data:`NONE` directly.
To let :py:class:`Option` guess the type of :py:class:`Option` to create,
please use :py:meth:`Option.maybe` or :py:func:`maybe`.
Calling the ``__init__`` method directly will raise a ``TypeError``.
Examples:
>>> Option.Some(1)
Some(1)
>>> Option.NONE()
NONE
>>> Option.maybe(1)
Some(1)
>>> Option.maybe(None)
NONE
"""
__slots__ = ('_val', '_is_some', '_type')
def __init__(self, value: T, is_some: bool, *, _force: bool = False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = value
self._is_some = is_some
self._type = type(self)
@classmethod
def Some(cls, val: T) -> 'Option[T]':
"""Some value ``val``."""
return cls(val, True, _force=True)
@classmethod
def NONE(cls) -> 'Option[T]':
"""No Value."""
return cast('Option[T]', NONE)
@classmethod
def maybe(cls, val: Optional[T]) -> 'Option[T]':
"""
Shortcut method to return ``Some`` or :py:data:`NONE` based on ``val``.
Args:
val: Some value.
Returns:
``Some(val)`` if the ``val`` is not None, otherwise :py:data:`NONE`.
Examples:
>>> Option.maybe(0)
Some(0)
>>> Option.maybe(None)
NONE
"""
return cast('Option[T]', NONE) if val is None else cls.Some(val)
def __bool__(self):
"""
Returns the truth value of the :py:class:`Option` based on its value.
Returns:
True if the :py:class:`Option` is ``Some`` value, otherwise False.
Examples:
>>> bool(Some(False))
True
>>> bool(NONE)
False
"""
return self._is_some
@property
def is_some(self) -> bool:
"""
Returns ``True`` if the option is a ``Some`` value.
Examples:
>>> Some(0).is_some
True
>>> NONE.is_some
False
"""
return self._is_some
@property
def is_none(self) -> bool:
"""
Returns ``True`` if the option is a :py:data:`NONE` value.
Examples:
>>> Some(0).is_none
False
>>> NONE.is_none
True
"""
return not self._is_some
def unwrap(self) -> T:
"""
Returns the value in the :py:class:`Option` if it is ``Some``.
Returns:
The ```Some`` value of the :py:class:`Option`.
Raises:
``ValueError`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).unwrap()
0
>>> try:
... NONE.unwrap()
... except ValueError as e:
... print(e)
Value is NONE.
"""
return self.value
@property
def value(self) -> T:
"""Property version of :py:meth:`unwrap`."""
if self._is_some:
return self._val
raise ValueError('Value is NONE.')
def unwrap_or(self, default: U) -> Union[T, U]:
"""
Returns the contained value or ``default``.
Args:
default: The default value.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``default``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :py:meth:`unwrap_or_else` instead.
Examples:
>>> Some(0).unwrap_or(3)
0
>>> NONE.unwrap_or(0)
0
"""
return self.unwrap_or_else(lambda: default)
def unwrap_or_else(self, callback: Callable[[], U]) -> Union[T, U]:
"""
Returns the contained value or computes it from ``callback``.
Args:
callback: The the default callback.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``callback()``.
Examples:
>>> Some(0).unwrap_or_else(lambda: 111)
0
>>> NONE.unwrap_or_else(lambda: 'ha')
'ha'
"""
return self._val if self._is_some else callback()
def map(self, callback: Callable[[T], U]) -> 'Option[U]':
"""
Applies the ``callback`` with the contained value as its argument or
returns :py:data:`NONE`.
Args:
callback: The callback to apply to the contained value.
Returns:
The ``callback`` result wrapped in an :class:`Option` if the
contained value is ``Some``, otherwise :py:data:`NONE`
Examples:
>>> Some(10).map(lambda x: x * x)
Some(100)
>>> NONE.map(lambda x: x * x)
NONE
"""
return self._type.Some(callback(self._val)) if self._is_some else cast('Option[U]', NONE)
def map_or(self, callback: Callable[[T], U], default: A) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or returns ``default``.
Args:
callback: The callback to apply to the contained value.
default: The default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise ``default``.
Notes:
If you wish to use the result of a function call as ``default``,
it is recommended to use :py:meth:`map_or_else` instead.
Examples:
>>> Some(0).map_or(lambda x: x + 1, 1000)
1
>>> NONE.map_or(lambda x: x * x, 1)
1
"""
return callback(self._val) if self._is_some else default
def map_or_else(self, callback: Callable[[T], U], default: Callable[[], A]) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or computes a default
with ``default``.
Args:
callback: The callback to apply to the contained value.
default: The callback fot the default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise the result of ``default``.
Examples:
>>> Some(0).map_or_else(lambda x: x * x, lambda: 1)
0
>>> NONE.map_or_else(lambda x: x * x, lambda: 1)
1
"""
return callback(self._val) if self._is_some else default()
def flatmap(self, callback: 'Callable[[T], Option[U]]') -> 'Option[U]':
"""
Applies the callback to the contained value if the option
is not :py:data:`NONE`.
This is different than :py:meth:`Option.map` because the result
of the callback isn't wrapped in a new :py:class:`Option`
Args:
callback: The callback to apply to the contained value.
Returns:
:py:data:`NONE` if the option is :py:data:`NONE`.
otherwise calls `callback` with the contained value and
returns the result.
Examples:
>>> def square(x): return Some(x * x)
>>> def nope(x): return NONE
>>> Some(2).flatmap(square).flatmap(square)
Some(16)
>>> Some(2).flatmap(square).flatmap(nope)
NONE
>>> Some(2).flatmap(nope).flatmap(square)
NONE
>>> NONE.flatmap(square).flatmap(square)
NONE
"""
return callback(self._val) if self._is_some else cast('Option[U]', NONE)
def filter(self, predicate: Callable[[T], bool]) -> 'Option[T]':
"""
Returns :py:data:`NONE` if the :py:class:`Option` is :py:data:`NONE`,
otherwise filter the contained value by ``predicate``.
Args:
predicate: The fitler function.
Returns:
:py:data:`NONE` if the contained value is :py:data:`NONE`, otherwise:
* The option itself if the predicate returns True
* :py:data:`NONE` if the predicate returns False
Examples:
>>> Some(0).filter(lambda x: x % 2 == 1)
NONE
>>> Some(1).filter(lambda x: x % 2 == 1)
Some(1)
>>> NONE.filter(lambda x: True)
NONE
"""
if self._is_some and predicate(self._val):
return self
return cast('Option[T]', NONE)
def get(
self: 'Option[Mapping[K,V]]',
key: K,
default=None
) -> 'Option[V]':
"""
Gets a mapping value by key in the contained value or returns
``default`` if the key doesn't exist.
Args:
key: The mapping key.
default: The defauilt value.
Returns:
* ``Some`` variant of the mapping value if the key exists
and the value is not None.
* ``Some(default)`` if ``default`` is not None.
* :py:data:`NONE` if ``default`` is None.
Examples:
>>> Some({'hi': 1}).get('hi')
Some(1)
>>> Some({}).get('hi', 12)
Some(12)
>>> NONE.get('hi', 12)
Some(12)
>>> NONE.get('hi')
NONE
"""
if self._is_some:
return self._type.maybe(self._val.get(key, default))
return self._type.maybe(default)
def __hash__(self):
return hash((self.__class__, self._is_some, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_some == other._is_some
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_some != other._is_some
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val < other._val if self._is_some else False
else:
return other._is_some
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val <= other._val if self._is_some else True
return other._is_some
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val > other._val if self._is_some else False
else:
return self._is_some
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val >= other._val if self._is_some else True
return self._is_some
return NotImplemented
def __repr__(self):
return 'NONE' if self.is_none else f'Some({self._val!r})'
|
MaT1g3R/option | option/option_.py | Option.unwrap_or | python | def unwrap_or(self, default: U) -> Union[T, U]:
return self.unwrap_or_else(lambda: default) | Returns the contained value or ``default``.
Args:
default: The default value.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``default``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :py:meth:`unwrap_or_else` instead.
Examples:
>>> Some(0).unwrap_or(3)
0
>>> NONE.unwrap_or(0)
0 | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/option_.py#L198-L219 | null | class Option(Generic[T]):
"""
:py:class:`Option` represents an optional value. Every :py:class:`Option`
is either ``Some`` and contains a value, or :py:data:`NONE` and
does not.
To create a ``Some`` value, please use :py:meth:`Option.Some` or :py:func:`Some`.
To create a :py:data:`NONE` value, please use :py:meth:`Option.NONE` or import the
constant :py:data:`NONE` directly.
To let :py:class:`Option` guess the type of :py:class:`Option` to create,
please use :py:meth:`Option.maybe` or :py:func:`maybe`.
Calling the ``__init__`` method directly will raise a ``TypeError``.
Examples:
>>> Option.Some(1)
Some(1)
>>> Option.NONE()
NONE
>>> Option.maybe(1)
Some(1)
>>> Option.maybe(None)
NONE
"""
__slots__ = ('_val', '_is_some', '_type')
def __init__(self, value: T, is_some: bool, *, _force: bool = False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = value
self._is_some = is_some
self._type = type(self)
@classmethod
def Some(cls, val: T) -> 'Option[T]':
"""Some value ``val``."""
return cls(val, True, _force=True)
@classmethod
def NONE(cls) -> 'Option[T]':
"""No Value."""
return cast('Option[T]', NONE)
@classmethod
def maybe(cls, val: Optional[T]) -> 'Option[T]':
"""
Shortcut method to return ``Some`` or :py:data:`NONE` based on ``val``.
Args:
val: Some value.
Returns:
``Some(val)`` if the ``val`` is not None, otherwise :py:data:`NONE`.
Examples:
>>> Option.maybe(0)
Some(0)
>>> Option.maybe(None)
NONE
"""
return cast('Option[T]', NONE) if val is None else cls.Some(val)
def __bool__(self):
"""
Returns the truth value of the :py:class:`Option` based on its value.
Returns:
True if the :py:class:`Option` is ``Some`` value, otherwise False.
Examples:
>>> bool(Some(False))
True
>>> bool(NONE)
False
"""
return self._is_some
@property
def is_some(self) -> bool:
"""
Returns ``True`` if the option is a ``Some`` value.
Examples:
>>> Some(0).is_some
True
>>> NONE.is_some
False
"""
return self._is_some
@property
def is_none(self) -> bool:
"""
Returns ``True`` if the option is a :py:data:`NONE` value.
Examples:
>>> Some(0).is_none
False
>>> NONE.is_none
True
"""
return not self._is_some
def expect(self, msg) -> T:
"""
Unwraps the option. Raises an exception if the value is :py:data:`NONE`.
Args:
msg: The exception message.
Returns:
The wrapped value.
Raises:
``ValueError`` with message provided by ``msg`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).expect('sd')
0
>>> try:
... NONE.expect('Oh No!')
... except ValueError as e:
... print(e)
Oh No!
"""
if self._is_some:
return self._val
raise ValueError(msg)
def unwrap(self) -> T:
"""
Returns the value in the :py:class:`Option` if it is ``Some``.
Returns:
The ``Some`` value of the :py:class:`Option`.
Raises:
``ValueError`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).unwrap()
0
>>> try:
... NONE.unwrap()
... except ValueError as e:
... print(e)
Value is NONE.
"""
return self.value
@property
def value(self) -> T:
"""Property version of :py:meth:`unwrap`."""
if self._is_some:
return self._val
raise ValueError('Value is NONE.')
def unwrap_or_else(self, callback: Callable[[], U]) -> Union[T, U]:
"""
Returns the contained value or computes it from ``callback``.
Args:
callback: The default callback.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``callback()``.
Examples:
>>> Some(0).unwrap_or_else(lambda: 111)
0
>>> NONE.unwrap_or_else(lambda: 'ha')
'ha'
"""
return self._val if self._is_some else callback()
def map(self, callback: Callable[[T], U]) -> 'Option[U]':
"""
Applies the ``callback`` with the contained value as its argument or
returns :py:data:`NONE`.
Args:
callback: The callback to apply to the contained value.
Returns:
The ``callback`` result wrapped in an :class:`Option` if the
contained value is ``Some``, otherwise :py:data:`NONE`
Examples:
>>> Some(10).map(lambda x: x * x)
Some(100)
>>> NONE.map(lambda x: x * x)
NONE
"""
return self._type.Some(callback(self._val)) if self._is_some else cast('Option[U]', NONE)
def map_or(self, callback: Callable[[T], U], default: A) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or returns ``default``.
Args:
callback: The callback to apply to the contained value.
default: The default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise ``default``.
Notes:
If you wish to use the result of a function call as ``default``,
it is recommended to use :py:meth:`map_or_else` instead.
Examples:
>>> Some(0).map_or(lambda x: x + 1, 1000)
1
>>> NONE.map_or(lambda x: x * x, 1)
1
"""
return callback(self._val) if self._is_some else default
def map_or_else(self, callback: Callable[[T], U], default: Callable[[], A]) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or computes a default
with ``default``.
Args:
callback: The callback to apply to the contained value.
default: The callback for the default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise the result of ``default``.
Examples:
>>> Some(0).map_or_else(lambda x: x * x, lambda: 1)
0
>>> NONE.map_or_else(lambda x: x * x, lambda: 1)
1
"""
return callback(self._val) if self._is_some else default()
def flatmap(self, callback: 'Callable[[T], Option[U]]') -> 'Option[U]':
"""
Applies the callback to the contained value if the option
is not :py:data:`NONE`.
This is different than :py:meth:`Option.map` because the result
of the callback isn't wrapped in a new :py:class:`Option`
Args:
callback: The callback to apply to the contained value.
Returns:
:py:data:`NONE` if the option is :py:data:`NONE`.
otherwise calls `callback` with the contained value and
returns the result.
Examples:
>>> def square(x): return Some(x * x)
>>> def nope(x): return NONE
>>> Some(2).flatmap(square).flatmap(square)
Some(16)
>>> Some(2).flatmap(square).flatmap(nope)
NONE
>>> Some(2).flatmap(nope).flatmap(square)
NONE
>>> NONE.flatmap(square).flatmap(square)
NONE
"""
return callback(self._val) if self._is_some else cast('Option[U]', NONE)
def filter(self, predicate: Callable[[T], bool]) -> 'Option[T]':
"""
Returns :py:data:`NONE` if the :py:class:`Option` is :py:data:`NONE`,
otherwise filter the contained value by ``predicate``.
Args:
predicate: The filter function.
Returns:
:py:data:`NONE` if the contained value is :py:data:`NONE`, otherwise:
* The option itself if the predicate returns True
* :py:data:`NONE` if the predicate returns False
Examples:
>>> Some(0).filter(lambda x: x % 2 == 1)
NONE
>>> Some(1).filter(lambda x: x % 2 == 1)
Some(1)
>>> NONE.filter(lambda x: True)
NONE
"""
if self._is_some and predicate(self._val):
return self
return cast('Option[T]', NONE)
def get(
self: 'Option[Mapping[K,V]]',
key: K,
default=None
) -> 'Option[V]':
"""
Gets a mapping value by key in the contained value or returns
``default`` if the key doesn't exist.
Args:
key: The mapping key.
default: The default value.
Returns:
* ``Some`` variant of the mapping value if the key exists
and the value is not None.
* ``Some(default)`` if ``default`` is not None.
* :py:data:`NONE` if ``default`` is None.
Examples:
>>> Some({'hi': 1}).get('hi')
Some(1)
>>> Some({}).get('hi', 12)
Some(12)
>>> NONE.get('hi', 12)
Some(12)
>>> NONE.get('hi')
NONE
"""
if self._is_some:
return self._type.maybe(self._val.get(key, default))
return self._type.maybe(default)
def __hash__(self):
return hash((self.__class__, self._is_some, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_some == other._is_some
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_some != other._is_some
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val < other._val if self._is_some else False
else:
return other._is_some
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val <= other._val if self._is_some else True
return other._is_some
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val > other._val if self._is_some else False
else:
return self._is_some
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val >= other._val if self._is_some else True
return self._is_some
return NotImplemented
def __repr__(self):
return 'NONE' if self.is_none else f'Some({self._val!r})'
|
MaT1g3R/option | option/option_.py | Option.unwrap_or_else | python | def unwrap_or_else(self, callback: Callable[[], U]) -> Union[T, U]:
return self._val if self._is_some else callback() | Returns the contained value or computes it from ``callback``.
Args:
callback: The default callback.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``callback()``.
Examples:
>>> Some(0).unwrap_or_else(lambda: 111)
0
>>> NONE.unwrap_or_else(lambda: 'ha')
'ha' | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/option_.py#L221-L238 | null | class Option(Generic[T]):
"""
:py:class:`Option` represents an optional value. Every :py:class:`Option`
is either ``Some`` and contains a value, or :py:data:`NONE` and
does not.
To create a ``Some`` value, please use :py:meth:`Option.Some` or :py:func:`Some`.
To create a :py:data:`NONE` value, please use :py:meth:`Option.NONE` or import the
constant :py:data:`NONE` directly.
To let :py:class:`Option` guess the type of :py:class:`Option` to create,
please use :py:meth:`Option.maybe` or :py:func:`maybe`.
Calling the ``__init__`` method directly will raise a ``TypeError``.
Examples:
>>> Option.Some(1)
Some(1)
>>> Option.NONE()
NONE
>>> Option.maybe(1)
Some(1)
>>> Option.maybe(None)
NONE
"""
__slots__ = ('_val', '_is_some', '_type')
def __init__(self, value: T, is_some: bool, *, _force: bool = False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = value
self._is_some = is_some
self._type = type(self)
@classmethod
def Some(cls, val: T) -> 'Option[T]':
"""Some value ``val``."""
return cls(val, True, _force=True)
@classmethod
def NONE(cls) -> 'Option[T]':
"""No Value."""
return cast('Option[T]', NONE)
@classmethod
def maybe(cls, val: Optional[T]) -> 'Option[T]':
"""
Shortcut method to return ``Some`` or :py:data:`NONE` based on ``val``.
Args:
val: Some value.
Returns:
``Some(val)`` if the ``val`` is not None, otherwise :py:data:`NONE`.
Examples:
>>> Option.maybe(0)
Some(0)
>>> Option.maybe(None)
NONE
"""
return cast('Option[T]', NONE) if val is None else cls.Some(val)
def __bool__(self):
"""
Returns the truth value of the :py:class:`Option` based on its value.
Returns:
True if the :py:class:`Option` is ``Some`` value, otherwise False.
Examples:
>>> bool(Some(False))
True
>>> bool(NONE)
False
"""
return self._is_some
@property
def is_some(self) -> bool:
"""
Returns ``True`` if the option is a ``Some`` value.
Examples:
>>> Some(0).is_some
True
>>> NONE.is_some
False
"""
return self._is_some
@property
def is_none(self) -> bool:
"""
Returns ``True`` if the option is a :py:data:`NONE` value.
Examples:
>>> Some(0).is_none
False
>>> NONE.is_none
True
"""
return not self._is_some
def expect(self, msg) -> T:
"""
Unwraps the option. Raises an exception if the value is :py:data:`NONE`.
Args:
msg: The exception message.
Returns:
The wrapped value.
Raises:
``ValueError`` with message provided by ``msg`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).expect('sd')
0
>>> try:
... NONE.expect('Oh No!')
... except ValueError as e:
... print(e)
Oh No!
"""
if self._is_some:
return self._val
raise ValueError(msg)
def unwrap(self) -> T:
"""
Returns the value in the :py:class:`Option` if it is ``Some``.
Returns:
The ``Some`` value of the :py:class:`Option`.
Raises:
``ValueError`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).unwrap()
0
>>> try:
... NONE.unwrap()
... except ValueError as e:
... print(e)
Value is NONE.
"""
return self.value
@property
def value(self) -> T:
"""Property version of :py:meth:`unwrap`."""
if self._is_some:
return self._val
raise ValueError('Value is NONE.')
def unwrap_or(self, default: U) -> Union[T, U]:
"""
Returns the contained value or ``default``.
Args:
default: The default value.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``default``.
Notes:
If you wish to use a result of a function call as the default,
it is recommended to use :py:meth:`unwrap_or_else` instead.
Examples:
>>> Some(0).unwrap_or(3)
0
>>> NONE.unwrap_or(0)
0
"""
return self.unwrap_or_else(lambda: default)
def map(self, callback: Callable[[T], U]) -> 'Option[U]':
"""
Applies the ``callback`` with the contained value as its argument or
returns :py:data:`NONE`.
Args:
callback: The callback to apply to the contained value.
Returns:
The ``callback`` result wrapped in an :class:`Option` if the
contained value is ``Some``, otherwise :py:data:`NONE`
Examples:
>>> Some(10).map(lambda x: x * x)
Some(100)
>>> NONE.map(lambda x: x * x)
NONE
"""
return self._type.Some(callback(self._val)) if self._is_some else cast('Option[U]', NONE)
def map_or(self, callback: Callable[[T], U], default: A) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or returns ``default``.
Args:
callback: The callback to apply to the contained value.
default: The default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise ``default``.
Notes:
If you wish to use the result of a function call as ``default``,
it is recommended to use :py:meth:`map_or_else` instead.
Examples:
>>> Some(0).map_or(lambda x: x + 1, 1000)
1
>>> NONE.map_or(lambda x: x * x, 1)
1
"""
return callback(self._val) if self._is_some else default
def map_or_else(self, callback: Callable[[T], U], default: Callable[[], A]) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or computes a default
with ``default``.
Args:
callback: The callback to apply to the contained value.
default: The callback for the default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise the result of ``default``.
Examples:
>>> Some(0).map_or_else(lambda x: x * x, lambda: 1)
0
>>> NONE.map_or_else(lambda x: x * x, lambda: 1)
1
"""
return callback(self._val) if self._is_some else default()
def flatmap(self, callback: 'Callable[[T], Option[U]]') -> 'Option[U]':
"""
Applies the callback to the contained value if the option
is not :py:data:`NONE`.
This is different than :py:meth:`Option.map` because the result
of the callback isn't wrapped in a new :py:class:`Option`
Args:
callback: The callback to apply to the contained value.
Returns:
:py:data:`NONE` if the option is :py:data:`NONE`.
otherwise calls `callback` with the contained value and
returns the result.
Examples:
>>> def square(x): return Some(x * x)
>>> def nope(x): return NONE
>>> Some(2).flatmap(square).flatmap(square)
Some(16)
>>> Some(2).flatmap(square).flatmap(nope)
NONE
>>> Some(2).flatmap(nope).flatmap(square)
NONE
>>> NONE.flatmap(square).flatmap(square)
NONE
"""
return callback(self._val) if self._is_some else cast('Option[U]', NONE)
def filter(self, predicate: Callable[[T], bool]) -> 'Option[T]':
"""
Returns :py:data:`NONE` if the :py:class:`Option` is :py:data:`NONE`,
otherwise filter the contained value by ``predicate``.
Args:
predicate: The filter function.
Returns:
:py:data:`NONE` if the contained value is :py:data:`NONE`, otherwise:
* The option itself if the predicate returns True
* :py:data:`NONE` if the predicate returns False
Examples:
>>> Some(0).filter(lambda x: x % 2 == 1)
NONE
>>> Some(1).filter(lambda x: x % 2 == 1)
Some(1)
>>> NONE.filter(lambda x: True)
NONE
"""
if self._is_some and predicate(self._val):
return self
return cast('Option[T]', NONE)
def get(
self: 'Option[Mapping[K,V]]',
key: K,
default=None
) -> 'Option[V]':
"""
Gets a mapping value by key in the contained value or returns
``default`` if the key doesn't exist.
Args:
key: The mapping key.
default: The default value.
Returns:
* ``Some`` variant of the mapping value if the key exists
and the value is not None.
* ``Some(default)`` if ``default`` is not None.
* :py:data:`NONE` if ``default`` is None.
Examples:
>>> Some({'hi': 1}).get('hi')
Some(1)
>>> Some({}).get('hi', 12)
Some(12)
>>> NONE.get('hi', 12)
Some(12)
>>> NONE.get('hi')
NONE
"""
if self._is_some:
return self._type.maybe(self._val.get(key, default))
return self._type.maybe(default)
def __hash__(self):
return hash((self.__class__, self._is_some, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_some == other._is_some
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_some != other._is_some
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val < other._val if self._is_some else False
else:
return other._is_some
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val <= other._val if self._is_some else True
return other._is_some
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val > other._val if self._is_some else False
else:
return self._is_some
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val >= other._val if self._is_some else True
return self._is_some
return NotImplemented
def __repr__(self):
return 'NONE' if self.is_none else f'Some({self._val!r})'
|
MaT1g3R/option | option/option_.py | Option.map | python | def map(self, callback: Callable[[T], U]) -> 'Option[U]':
return self._type.Some(callback(self._val)) if self._is_some else cast('Option[U]', NONE) | Applies the ``callback`` with the contained value as its argument or
returns :py:data:`NONE`.
Args:
callback: The callback to apply to the contained value.
Returns:
The ``callback`` result wrapped in an :class:`Option` if the
contained value is ``Some``, otherwise :py:data:`NONE`
Examples:
>>> Some(10).map(lambda x: x * x)
Some(100)
>>> NONE.map(lambda x: x * x)
NONE | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/option_.py#L240-L258 | null | class Option(Generic[T]):
"""
:py:class:`Option` represents an optional value. Every :py:class:`Option`
is either ``Some`` and contains a value, or :py:data:`NONE` and
does not.
To create a ``Some`` value, please use :py:meth:`Option.Some` or :py:func:`Some`.
To create a :py:data:`NONE` value, please use :py:meth:`Option.NONE` or import the
constant :py:data:`NONE` directly.
To let :py:class:`Option` guess the type of :py:class:`Option` to create,
please use :py:meth:`Option.maybe` or :py:func:`maybe`.
Calling the ``__init__`` method directly will raise a ``TypeError``.
Examples:
>>> Option.Some(1)
Some(1)
>>> Option.NONE()
NONE
>>> Option.maybe(1)
Some(1)
>>> Option.maybe(None)
NONE
"""
__slots__ = ('_val', '_is_some', '_type')
def __init__(self, value: T, is_some: bool, *, _force: bool = False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = value
self._is_some = is_some
self._type = type(self)
@classmethod
def Some(cls, val: T) -> 'Option[T]':
"""Some value ``val``."""
return cls(val, True, _force=True)
@classmethod
def NONE(cls) -> 'Option[T]':
"""No Value."""
return cast('Option[T]', NONE)
@classmethod
def maybe(cls, val: Optional[T]) -> 'Option[T]':
"""
Shortcut method to return ``Some`` or :py:data:`NONE` based on ``val``.
Args:
val: Some value.
Returns:
``Some(val)`` if the ``val`` is not None, otherwise :py:data:`NONE`.
Examples:
>>> Option.maybe(0)
Some(0)
>>> Option.maybe(None)
NONE
"""
return cast('Option[T]', NONE) if val is None else cls.Some(val)
def __bool__(self):
"""
Returns the truth value of the :py:class:`Option` based on its value.
Returns:
True if the :py:class:`Option` is ``Some`` value, otherwise False.
Examples:
>>> bool(Some(False))
True
>>> bool(NONE)
False
"""
return self._is_some
@property
def is_some(self) -> bool:
"""
Returns ``True`` if the option is a ``Some`` value.
Examples:
>>> Some(0).is_some
True
>>> NONE.is_some
False
"""
return self._is_some
@property
def is_none(self) -> bool:
"""
Returns ``True`` if the option is a :py:data:`NONE` value.
Examples:
>>> Some(0).is_none
False
>>> NONE.is_none
True
"""
return not self._is_some
def expect(self, msg) -> T:
"""
Unwraps the option. Raises an exception if the value is :py:data:`NONE`.
Args:
msg: The exception message.
Returns:
The wrapped value.
Raises:
``ValueError`` with message provided by ``msg`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).expect('sd')
0
>>> try:
... NONE.expect('Oh No!')
... except ValueError as e:
... print(e)
Oh No!
"""
if self._is_some:
return self._val
raise ValueError(msg)
def unwrap(self) -> T:
"""
Returns the value in the :py:class:`Option` if it is ``Some``.
Returns:
The ``Some`` value of the :py:class:`Option`.
Raises:
``ValueError`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).unwrap()
0
>>> try:
... NONE.unwrap()
... except ValueError as e:
... print(e)
Value is NONE.
"""
return self.value
@property
def value(self) -> T:
"""Property version of :py:meth:`unwrap`."""
if self._is_some:
return self._val
raise ValueError('Value is NONE.')
def unwrap_or(self, default: U) -> Union[T, U]:
"""
Returns the contained value or ``default``.
Args:
default: The default value.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``default``.
Notes:
If you wish to use a result of a function call as the default,
it is recommended to use :py:meth:`unwrap_or_else` instead.
Examples:
>>> Some(0).unwrap_or(3)
0
>>> NONE.unwrap_or(0)
0
"""
return self.unwrap_or_else(lambda: default)
def unwrap_or_else(self, callback: Callable[[], U]) -> Union[T, U]:
"""
Returns the contained value or computes it from ``callback``.
Args:
callback: The default callback.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``callback()``.
Examples:
>>> Some(0).unwrap_or_else(lambda: 111)
0
>>> NONE.unwrap_or_else(lambda: 'ha')
'ha'
"""
return self._val if self._is_some else callback()
def map_or(self, callback: Callable[[T], U], default: A) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or returns ``default``.
Args:
callback: The callback to apply to the contained value.
default: The default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise ``default``.
Notes:
If you wish to use the result of a function call as ``default``,
it is recommended to use :py:meth:`map_or_else` instead.
Examples:
>>> Some(0).map_or(lambda x: x + 1, 1000)
1
>>> NONE.map_or(lambda x: x * x, 1)
1
"""
return callback(self._val) if self._is_some else default
def map_or_else(self, callback: Callable[[T], U], default: Callable[[], A]) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or computes a default
with ``default``.
Args:
callback: The callback to apply to the contained value.
default: The callback for the default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise the result of ``default``.
Examples:
>>> Some(0).map_or_else(lambda x: x * x, lambda: 1)
0
>>> NONE.map_or_else(lambda x: x * x, lambda: 1)
1
"""
return callback(self._val) if self._is_some else default()
def flatmap(self, callback: 'Callable[[T], Option[U]]') -> 'Option[U]':
"""
Applies the callback to the contained value if the option
is not :py:data:`NONE`.
This is different than :py:meth:`Option.map` because the result
of the callback isn't wrapped in a new :py:class:`Option`
Args:
callback: The callback to apply to the contained value.
Returns:
:py:data:`NONE` if the option is :py:data:`NONE`.
otherwise calls `callback` with the contained value and
returns the result.
Examples:
>>> def square(x): return Some(x * x)
>>> def nope(x): return NONE
>>> Some(2).flatmap(square).flatmap(square)
Some(16)
>>> Some(2).flatmap(square).flatmap(nope)
NONE
>>> Some(2).flatmap(nope).flatmap(square)
NONE
>>> NONE.flatmap(square).flatmap(square)
NONE
"""
return callback(self._val) if self._is_some else cast('Option[U]', NONE)
def filter(self, predicate: Callable[[T], bool]) -> 'Option[T]':
"""
Returns :py:data:`NONE` if the :py:class:`Option` is :py:data:`NONE`,
otherwise filter the contained value by ``predicate``.
Args:
predicate: The filter function.
Returns:
:py:data:`NONE` if the contained value is :py:data:`NONE`, otherwise:
* The option itself if the predicate returns True
* :py:data:`NONE` if the predicate returns False
Examples:
>>> Some(0).filter(lambda x: x % 2 == 1)
NONE
>>> Some(1).filter(lambda x: x % 2 == 1)
Some(1)
>>> NONE.filter(lambda x: True)
NONE
"""
if self._is_some and predicate(self._val):
return self
return cast('Option[T]', NONE)
def get(
self: 'Option[Mapping[K,V]]',
key: K,
default=None
) -> 'Option[V]':
"""
Gets a mapping value by key in the contained value or returns
``default`` if the key doesn't exist.
Args:
key: The mapping key.
default: The default value.
Returns:
* ``Some`` variant of the mapping value if the key exists
and the value is not None.
* ``Some(default)`` if ``default`` is not None.
* :py:data:`NONE` if ``default`` is None.
Examples:
>>> Some({'hi': 1}).get('hi')
Some(1)
>>> Some({}).get('hi', 12)
Some(12)
>>> NONE.get('hi', 12)
Some(12)
>>> NONE.get('hi')
NONE
"""
if self._is_some:
return self._type.maybe(self._val.get(key, default))
return self._type.maybe(default)
def __hash__(self):
return hash((self.__class__, self._is_some, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_some == other._is_some
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_some != other._is_some
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val < other._val if self._is_some else False
else:
return other._is_some
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val <= other._val if self._is_some else True
return other._is_some
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val > other._val if self._is_some else False
else:
return self._is_some
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val >= other._val if self._is_some else True
return self._is_some
return NotImplemented
def __repr__(self):
return 'NONE' if self.is_none else f'Some({self._val!r})'
|
MaT1g3R/option | option/option_.py | Option.map_or | python | def map_or(self, callback: Callable[[T], U], default: A) -> Union[U, A]:
return callback(self._val) if self._is_some else default | Applies the ``callback`` to the contained value or returns ``default``.
Args:
callback: The callback to apply to the contained value.
default: The default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise ``default``.
Notes:
If you wish to use the result of a function call as ``default``,
it is recommended to use :py:meth:`map_or_else` instead.
Examples:
>>> Some(0).map_or(lambda x: x + 1, 1000)
1
>>> NONE.map_or(lambda x: x * x, 1)
1 | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/option_.py#L260-L282 | null | class Option(Generic[T]):
"""
:py:class:`Option` represents an optional value. Every :py:class:`Option`
is either ``Some`` and contains a value, or :py:data:`NONE` and
does not.
To create a ``Some`` value, please use :py:meth:`Option.Some` or :py:func:`Some`.
To create a :py:data:`NONE` value, please use :py:meth:`Option.NONE` or import the
constant :py:data:`NONE` directly.
To let :py:class:`Option` guess the type of :py:class:`Option` to create,
please use :py:meth:`Option.maybe` or :py:func:`maybe`.
Calling the ``__init__`` method directly will raise a ``TypeError``.
Examples:
>>> Option.Some(1)
Some(1)
>>> Option.NONE()
NONE
>>> Option.maybe(1)
Some(1)
>>> Option.maybe(None)
NONE
"""
__slots__ = ('_val', '_is_some', '_type')
def __init__(self, value: T, is_some: bool, *, _force: bool = False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = value
self._is_some = is_some
self._type = type(self)
@classmethod
def Some(cls, val: T) -> 'Option[T]':
"""Some value ``val``."""
return cls(val, True, _force=True)
@classmethod
def NONE(cls) -> 'Option[T]':
"""No Value."""
return cast('Option[T]', NONE)
@classmethod
def maybe(cls, val: Optional[T]) -> 'Option[T]':
"""
Shortcut method to return ``Some`` or :py:data:`NONE` based on ``val``.
Args:
val: Some value.
Returns:
``Some(val)`` if the ``val`` is not None, otherwise :py:data:`NONE`.
Examples:
>>> Option.maybe(0)
Some(0)
>>> Option.maybe(None)
NONE
"""
return cast('Option[T]', NONE) if val is None else cls.Some(val)
def __bool__(self):
"""
Returns the truth value of the :py:class:`Option` based on its value.
Returns:
True if the :py:class:`Option` is ``Some`` value, otherwise False.
Examples:
>>> bool(Some(False))
True
>>> bool(NONE)
False
"""
return self._is_some
@property
def is_some(self) -> bool:
"""
Returns ``True`` if the option is a ``Some`` value.
Examples:
>>> Some(0).is_some
True
>>> NONE.is_some
False
"""
return self._is_some
@property
def is_none(self) -> bool:
"""
Returns ``True`` if the option is a :py:data:`NONE` value.
Examples:
>>> Some(0).is_none
False
>>> NONE.is_none
True
"""
return not self._is_some
def expect(self, msg) -> T:
"""
Unwraps the option. Raises an exception if the value is :py:data:`NONE`.
Args:
msg: The exception message.
Returns:
The wrapped value.
Raises:
``ValueError`` with message provided by ``msg`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).expect('sd')
0
>>> try:
... NONE.expect('Oh No!')
... except ValueError as e:
... print(e)
Oh No!
"""
if self._is_some:
return self._val
raise ValueError(msg)
def unwrap(self) -> T:
"""
Returns the value in the :py:class:`Option` if it is ``Some``.
Returns:
The ``Some`` value of the :py:class:`Option`.
Raises:
``ValueError`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).unwrap()
0
>>> try:
... NONE.unwrap()
... except ValueError as e:
... print(e)
Value is NONE.
"""
return self.value
@property
def value(self) -> T:
"""Property version of :py:meth:`unwrap`."""
if self._is_some:
return self._val
raise ValueError('Value is NONE.')
def unwrap_or(self, default: U) -> Union[T, U]:
"""
Returns the contained value or ``default``.
Args:
default: The default value.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``default``.
Notes:
If you wish to use a result of a function call as the default,
it is recommended to use :py:meth:`unwrap_or_else` instead.
Examples:
>>> Some(0).unwrap_or(3)
0
>>> NONE.unwrap_or(0)
0
"""
return self.unwrap_or_else(lambda: default)
def unwrap_or_else(self, callback: Callable[[], U]) -> Union[T, U]:
"""
Returns the contained value or computes it from ``callback``.
Args:
callback: The default callback.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``callback()``.
Examples:
>>> Some(0).unwrap_or_else(lambda: 111)
0
>>> NONE.unwrap_or_else(lambda: 'ha')
'ha'
"""
return self._val if self._is_some else callback()
def map(self, callback: Callable[[T], U]) -> 'Option[U]':
"""
Applies the ``callback`` with the contained value as its argument or
returns :py:data:`NONE`.
Args:
callback: The callback to apply to the contained value.
Returns:
The ``callback`` result wrapped in an :class:`Option` if the
contained value is ``Some``, otherwise :py:data:`NONE`
Examples:
>>> Some(10).map(lambda x: x * x)
Some(100)
>>> NONE.map(lambda x: x * x)
NONE
"""
return self._type.Some(callback(self._val)) if self._is_some else cast('Option[U]', NONE)
def map_or_else(self, callback: Callable[[T], U], default: Callable[[], A]) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or computes a default
with ``default``.
Args:
callback: The callback to apply to the contained value.
default: The callback for the default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise the result of ``default``.
Examples:
>>> Some(0).map_or_else(lambda x: x * x, lambda: 1)
0
>>> NONE.map_or_else(lambda x: x * x, lambda: 1)
1
"""
return callback(self._val) if self._is_some else default()
def flatmap(self, callback: 'Callable[[T], Option[U]]') -> 'Option[U]':
"""
Applies the callback to the contained value if the option
is not :py:data:`NONE`.
This is different than :py:meth:`Option.map` because the result
of the callback isn't wrapped in a new :py:class:`Option`
Args:
callback: The callback to apply to the contained value.
Returns:
:py:data:`NONE` if the option is :py:data:`NONE`.
otherwise calls `callback` with the contained value and
returns the result.
Examples:
>>> def square(x): return Some(x * x)
>>> def nope(x): return NONE
>>> Some(2).flatmap(square).flatmap(square)
Some(16)
>>> Some(2).flatmap(square).flatmap(nope)
NONE
>>> Some(2).flatmap(nope).flatmap(square)
NONE
>>> NONE.flatmap(square).flatmap(square)
NONE
"""
return callback(self._val) if self._is_some else cast('Option[U]', NONE)
def filter(self, predicate: Callable[[T], bool]) -> 'Option[T]':
"""
Returns :py:data:`NONE` if the :py:class:`Option` is :py:data:`NONE`,
otherwise filter the contained value by ``predicate``.
Args:
predicate: The fitler function.
Returns:
:py:data:`NONE` if the contained value is :py:data:`NONE`, otherwise:
* The option itself if the predicate returns True
* :py:data:`NONE` if the predicate returns False
Examples:
>>> Some(0).filter(lambda x: x % 2 == 1)
NONE
>>> Some(1).filter(lambda x: x % 2 == 1)
Some(1)
>>> NONE.filter(lambda x: True)
NONE
"""
if self._is_some and predicate(self._val):
return self
return cast('Option[T]', NONE)
def get(
self: 'Option[Mapping[K,V]]',
key: K,
default=None
) -> 'Option[V]':
"""
Gets a mapping value by key in the contained value or returns
``default`` if the key doesn't exist.
Args:
key: The mapping key.
default: The defauilt value.
Returns:
* ``Some`` variant of the mapping value if the key exists
and the value is not None.
* ``Some(default)`` if ``default`` is not None.
* :py:data:`NONE` if ``default`` is None.
Examples:
>>> Some({'hi': 1}).get('hi')
Some(1)
>>> Some({}).get('hi', 12)
Some(12)
>>> NONE.get('hi', 12)
Some(12)
>>> NONE.get('hi')
NONE
"""
if self._is_some:
return self._type.maybe(self._val.get(key, default))
return self._type.maybe(default)
def __hash__(self):
return hash((self.__class__, self._is_some, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_some == other._is_some
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_some != other._is_some
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val < other._val if self._is_some else False
else:
return other._is_some
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val <= other._val if self._is_some else True
return other._is_some
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val > other._val if self._is_some else False
else:
return self._is_some
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val >= other._val if self._is_some else True
return self._is_some
return NotImplemented
def __repr__(self):
return 'NONE' if self.is_none else f'Some({self._val!r})'
|
MaT1g3R/option | option/option_.py | Option.flatmap | python | def flatmap(self, callback: 'Callable[[T], Option[U]]') -> 'Option[U]':
return callback(self._val) if self._is_some else cast('Option[U]', NONE) | Applies the callback to the contained value if the option
is not :py:data:`NONE`.
This is different than :py:meth:`Option.map` because the result
of the callback isn't wrapped in a new :py:class:`Option`
Args:
callback: The callback to apply to the contained value.
Returns:
:py:data:`NONE` if the option is :py:data:`NONE`.
otherwise calls `callback` with the contained value and
returns the result.
Examples:
>>> def square(x): return Some(x * x)
>>> def nope(x): return NONE
>>> Some(2).flatmap(square).flatmap(square)
Some(16)
>>> Some(2).flatmap(square).flatmap(nope)
NONE
>>> Some(2).flatmap(nope).flatmap(square)
NONE
>>> NONE.flatmap(square).flatmap(square)
NONE | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/option_.py#L305-L334 | null | class Option(Generic[T]):
"""
:py:class:`Option` represents an optional value. Every :py:class:`Option`
is either ``Some`` and contains a value, or :py:data:`NONE` and
does not.
To create a ``Some`` value, please use :py:meth:`Option.Some` or :py:func:`Some`.
To create a :py:data:`NONE` value, please use :py:meth:`Option.NONE` or import the
constant :py:data:`NONE` directly.
To let :py:class:`Option` guess the type of :py:class:`Option` to create,
please use :py:meth:`Option.maybe` or :py:func:`maybe`.
Calling the ``__init__`` method directly will raise a ``TypeError``.
Examples:
>>> Option.Some(1)
Some(1)
>>> Option.NONE()
NONE
>>> Option.maybe(1)
Some(1)
>>> Option.maybe(None)
NONE
"""
__slots__ = ('_val', '_is_some', '_type')
def __init__(self, value: T, is_some: bool, *, _force: bool = False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = value
self._is_some = is_some
self._type = type(self)
@classmethod
def Some(cls, val: T) -> 'Option[T]':
"""Some value ``val``."""
return cls(val, True, _force=True)
@classmethod
def NONE(cls) -> 'Option[T]':
"""No Value."""
return cast('Option[T]', NONE)
@classmethod
def maybe(cls, val: Optional[T]) -> 'Option[T]':
"""
Shortcut method to return ``Some`` or :py:data:`NONE` based on ``val``.
Args:
val: Some value.
Returns:
``Some(val)`` if the ``val`` is not None, otherwise :py:data:`NONE`.
Examples:
>>> Option.maybe(0)
Some(0)
>>> Option.maybe(None)
NONE
"""
return cast('Option[T]', NONE) if val is None else cls.Some(val)
def __bool__(self):
"""
Returns the truth value of the :py:class:`Option` based on its value.
Returns:
True if the :py:class:`Option` is ``Some`` value, otherwise False.
Examples:
>>> bool(Some(False))
True
>>> bool(NONE)
False
"""
return self._is_some
@property
def is_some(self) -> bool:
"""
Returns ``True`` if the option is a ``Some`` value.
Examples:
>>> Some(0).is_some
True
>>> NONE.is_some
False
"""
return self._is_some
@property
def is_none(self) -> bool:
"""
Returns ``True`` if the option is a :py:data:`NONE` value.
Examples:
>>> Some(0).is_none
False
>>> NONE.is_none
True
"""
return not self._is_some
def expect(self, msg) -> T:
"""
Unwraps the option. Raises an exception if the value is :py:data:`NONE`.
Args:
msg: The exception message.
Returns:
The wrapped value.
Raises:
``ValueError`` with message provided by ``msg`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).expect('sd')
0
>>> try:
... NONE.expect('Oh No!')
... except ValueError as e:
... print(e)
Oh No!
"""
if self._is_some:
return self._val
raise ValueError(msg)
def unwrap(self) -> T:
"""
Returns the value in the :py:class:`Option` if it is ``Some``.
Returns:
The ```Some`` value of the :py:class:`Option`.
Raises:
``ValueError`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).unwrap()
0
>>> try:
... NONE.unwrap()
... except ValueError as e:
... print(e)
Value is NONE.
"""
return self.value
@property
def value(self) -> T:
"""Property version of :py:meth:`unwrap`."""
if self._is_some:
return self._val
raise ValueError('Value is NONE.')
def unwrap_or(self, default: U) -> Union[T, U]:
"""
Returns the contained value or ``default``.
Args:
default: The default value.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``default``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :py:meth:`unwrap_or_else` instead.
Examples:
>>> Some(0).unwrap_or(3)
0
>>> NONE.unwrap_or(0)
0
"""
return self.unwrap_or_else(lambda: default)
def unwrap_or_else(self, callback: Callable[[], U]) -> Union[T, U]:
"""
Returns the contained value or computes it from ``callback``.
Args:
callback: The the default callback.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``callback()``.
Examples:
>>> Some(0).unwrap_or_else(lambda: 111)
0
>>> NONE.unwrap_or_else(lambda: 'ha')
'ha'
"""
return self._val if self._is_some else callback()
def map(self, callback: Callable[[T], U]) -> 'Option[U]':
"""
Applies the ``callback`` with the contained value as its argument or
returns :py:data:`NONE`.
Args:
callback: The callback to apply to the contained value.
Returns:
The ``callback`` result wrapped in an :class:`Option` if the
contained value is ``Some``, otherwise :py:data:`NONE`
Examples:
>>> Some(10).map(lambda x: x * x)
Some(100)
>>> NONE.map(lambda x: x * x)
NONE
"""
return self._type.Some(callback(self._val)) if self._is_some else cast('Option[U]', NONE)
def map_or(self, callback: Callable[[T], U], default: A) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or returns ``default``.
Args:
callback: The callback to apply to the contained value.
default: The default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise ``default``.
Notes:
If you wish to use the result of a function call as ``default``,
it is recommended to use :py:meth:`map_or_else` instead.
Examples:
>>> Some(0).map_or(lambda x: x + 1, 1000)
1
>>> NONE.map_or(lambda x: x * x, 1)
1
"""
return callback(self._val) if self._is_some else default
def map_or_else(self, callback: Callable[[T], U], default: Callable[[], A]) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or computes a default
with ``default``.
Args:
callback: The callback to apply to the contained value.
default: The callback fot the default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise the result of ``default``.
Examples:
>>> Some(0).map_or_else(lambda x: x * x, lambda: 1)
0
>>> NONE.map_or_else(lambda x: x * x, lambda: 1)
1
"""
return callback(self._val) if self._is_some else default()
def filter(self, predicate: Callable[[T], bool]) -> 'Option[T]':
"""
Returns :py:data:`NONE` if the :py:class:`Option` is :py:data:`NONE`,
otherwise filter the contained value by ``predicate``.
Args:
predicate: The fitler function.
Returns:
:py:data:`NONE` if the contained value is :py:data:`NONE`, otherwise:
* The option itself if the predicate returns True
* :py:data:`NONE` if the predicate returns False
Examples:
>>> Some(0).filter(lambda x: x % 2 == 1)
NONE
>>> Some(1).filter(lambda x: x % 2 == 1)
Some(1)
>>> NONE.filter(lambda x: True)
NONE
"""
if self._is_some and predicate(self._val):
return self
return cast('Option[T]', NONE)
def get(
self: 'Option[Mapping[K,V]]',
key: K,
default=None
) -> 'Option[V]':
"""
Gets a mapping value by key in the contained value or returns
``default`` if the key doesn't exist.
Args:
key: The mapping key.
default: The defauilt value.
Returns:
* ``Some`` variant of the mapping value if the key exists
and the value is not None.
* ``Some(default)`` if ``default`` is not None.
* :py:data:`NONE` if ``default`` is None.
Examples:
>>> Some({'hi': 1}).get('hi')
Some(1)
>>> Some({}).get('hi', 12)
Some(12)
>>> NONE.get('hi', 12)
Some(12)
>>> NONE.get('hi')
NONE
"""
if self._is_some:
return self._type.maybe(self._val.get(key, default))
return self._type.maybe(default)
def __hash__(self):
return hash((self.__class__, self._is_some, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_some == other._is_some
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_some != other._is_some
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val < other._val if self._is_some else False
else:
return other._is_some
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val <= other._val if self._is_some else True
return other._is_some
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val > other._val if self._is_some else False
else:
return self._is_some
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val >= other._val if self._is_some else True
return self._is_some
return NotImplemented
def __repr__(self):
return 'NONE' if self.is_none else f'Some({self._val!r})'
|
MaT1g3R/option | option/option_.py | Option.filter | python | def filter(self, predicate: Callable[[T], bool]) -> 'Option[T]':
if self._is_some and predicate(self._val):
return self
return cast('Option[T]', NONE) | Returns :py:data:`NONE` if the :py:class:`Option` is :py:data:`NONE`,
otherwise filter the contained value by ``predicate``.
Args:
predicate: The fitler function.
Returns:
:py:data:`NONE` if the contained value is :py:data:`NONE`, otherwise:
* The option itself if the predicate returns True
* :py:data:`NONE` if the predicate returns False
Examples:
>>> Some(0).filter(lambda x: x % 2 == 1)
NONE
>>> Some(1).filter(lambda x: x % 2 == 1)
Some(1)
>>> NONE.filter(lambda x: True)
NONE | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/option_.py#L336-L359 | null | class Option(Generic[T]):
"""
:py:class:`Option` represents an optional value. Every :py:class:`Option`
is either ``Some`` and contains a value, or :py:data:`NONE` and
does not.
To create a ``Some`` value, please use :py:meth:`Option.Some` or :py:func:`Some`.
To create a :py:data:`NONE` value, please use :py:meth:`Option.NONE` or import the
constant :py:data:`NONE` directly.
To let :py:class:`Option` guess the type of :py:class:`Option` to create,
please use :py:meth:`Option.maybe` or :py:func:`maybe`.
Calling the ``__init__`` method directly will raise a ``TypeError``.
Examples:
>>> Option.Some(1)
Some(1)
>>> Option.NONE()
NONE
>>> Option.maybe(1)
Some(1)
>>> Option.maybe(None)
NONE
"""
__slots__ = ('_val', '_is_some', '_type')
def __init__(self, value: T, is_some: bool, *, _force: bool = False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = value
self._is_some = is_some
self._type = type(self)
@classmethod
def Some(cls, val: T) -> 'Option[T]':
"""Some value ``val``."""
return cls(val, True, _force=True)
@classmethod
def NONE(cls) -> 'Option[T]':
"""No Value."""
return cast('Option[T]', NONE)
@classmethod
def maybe(cls, val: Optional[T]) -> 'Option[T]':
"""
Shortcut method to return ``Some`` or :py:data:`NONE` based on ``val``.
Args:
val: Some value.
Returns:
``Some(val)`` if the ``val`` is not None, otherwise :py:data:`NONE`.
Examples:
>>> Option.maybe(0)
Some(0)
>>> Option.maybe(None)
NONE
"""
return cast('Option[T]', NONE) if val is None else cls.Some(val)
def __bool__(self):
"""
Returns the truth value of the :py:class:`Option` based on its value.
Returns:
True if the :py:class:`Option` is ``Some`` value, otherwise False.
Examples:
>>> bool(Some(False))
True
>>> bool(NONE)
False
"""
return self._is_some
@property
def is_some(self) -> bool:
"""
Returns ``True`` if the option is a ``Some`` value.
Examples:
>>> Some(0).is_some
True
>>> NONE.is_some
False
"""
return self._is_some
@property
def is_none(self) -> bool:
"""
Returns ``True`` if the option is a :py:data:`NONE` value.
Examples:
>>> Some(0).is_none
False
>>> NONE.is_none
True
"""
return not self._is_some
def expect(self, msg) -> T:
"""
Unwraps the option. Raises an exception if the value is :py:data:`NONE`.
Args:
msg: The exception message.
Returns:
The wrapped value.
Raises:
``ValueError`` with message provided by ``msg`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).expect('sd')
0
>>> try:
... NONE.expect('Oh No!')
... except ValueError as e:
... print(e)
Oh No!
"""
if self._is_some:
return self._val
raise ValueError(msg)
def unwrap(self) -> T:
"""
Returns the value in the :py:class:`Option` if it is ``Some``.
Returns:
The ```Some`` value of the :py:class:`Option`.
Raises:
``ValueError`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).unwrap()
0
>>> try:
... NONE.unwrap()
... except ValueError as e:
... print(e)
Value is NONE.
"""
return self.value
@property
def value(self) -> T:
"""Property version of :py:meth:`unwrap`."""
if self._is_some:
return self._val
raise ValueError('Value is NONE.')
def unwrap_or(self, default: U) -> Union[T, U]:
"""
Returns the contained value or ``default``.
Args:
default: The default value.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``default``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :py:meth:`unwrap_or_else` instead.
Examples:
>>> Some(0).unwrap_or(3)
0
>>> NONE.unwrap_or(0)
0
"""
return self.unwrap_or_else(lambda: default)
def unwrap_or_else(self, callback: Callable[[], U]) -> Union[T, U]:
"""
Returns the contained value or computes it from ``callback``.
Args:
callback: The the default callback.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``callback()``.
Examples:
>>> Some(0).unwrap_or_else(lambda: 111)
0
>>> NONE.unwrap_or_else(lambda: 'ha')
'ha'
"""
return self._val if self._is_some else callback()
def map(self, callback: Callable[[T], U]) -> 'Option[U]':
"""
Applies the ``callback`` with the contained value as its argument or
returns :py:data:`NONE`.
Args:
callback: The callback to apply to the contained value.
Returns:
The ``callback`` result wrapped in an :class:`Option` if the
contained value is ``Some``, otherwise :py:data:`NONE`
Examples:
>>> Some(10).map(lambda x: x * x)
Some(100)
>>> NONE.map(lambda x: x * x)
NONE
"""
return self._type.Some(callback(self._val)) if self._is_some else cast('Option[U]', NONE)
def map_or(self, callback: Callable[[T], U], default: A) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or returns ``default``.
Args:
callback: The callback to apply to the contained value.
default: The default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise ``default``.
Notes:
If you wish to use the result of a function call as ``default``,
it is recommended to use :py:meth:`map_or_else` instead.
Examples:
>>> Some(0).map_or(lambda x: x + 1, 1000)
1
>>> NONE.map_or(lambda x: x * x, 1)
1
"""
return callback(self._val) if self._is_some else default
def map_or_else(self, callback: Callable[[T], U], default: Callable[[], A]) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or computes a default
with ``default``.
Args:
callback: The callback to apply to the contained value.
default: The callback fot the default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise the result of ``default``.
Examples:
>>> Some(0).map_or_else(lambda x: x * x, lambda: 1)
0
>>> NONE.map_or_else(lambda x: x * x, lambda: 1)
1
"""
return callback(self._val) if self._is_some else default()
def flatmap(self, callback: 'Callable[[T], Option[U]]') -> 'Option[U]':
"""
Applies the callback to the contained value if the option
is not :py:data:`NONE`.
This is different than :py:meth:`Option.map` because the result
of the callback isn't wrapped in a new :py:class:`Option`
Args:
callback: The callback to apply to the contained value.
Returns:
:py:data:`NONE` if the option is :py:data:`NONE`.
otherwise calls `callback` with the contained value and
returns the result.
Examples:
>>> def square(x): return Some(x * x)
>>> def nope(x): return NONE
>>> Some(2).flatmap(square).flatmap(square)
Some(16)
>>> Some(2).flatmap(square).flatmap(nope)
NONE
>>> Some(2).flatmap(nope).flatmap(square)
NONE
>>> NONE.flatmap(square).flatmap(square)
NONE
"""
return callback(self._val) if self._is_some else cast('Option[U]', NONE)
def get(
self: 'Option[Mapping[K,V]]',
key: K,
default=None
) -> 'Option[V]':
"""
Gets a mapping value by key in the contained value or returns
``default`` if the key doesn't exist.
Args:
key: The mapping key.
default: The defauilt value.
Returns:
* ``Some`` variant of the mapping value if the key exists
and the value is not None.
* ``Some(default)`` if ``default`` is not None.
* :py:data:`NONE` if ``default`` is None.
Examples:
>>> Some({'hi': 1}).get('hi')
Some(1)
>>> Some({}).get('hi', 12)
Some(12)
>>> NONE.get('hi', 12)
Some(12)
>>> NONE.get('hi')
NONE
"""
if self._is_some:
return self._type.maybe(self._val.get(key, default))
return self._type.maybe(default)
def __hash__(self):
return hash((self.__class__, self._is_some, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_some == other._is_some
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_some != other._is_some
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val < other._val if self._is_some else False
else:
return other._is_some
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val <= other._val if self._is_some else True
return other._is_some
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val > other._val if self._is_some else False
else:
return self._is_some
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val >= other._val if self._is_some else True
return self._is_some
return NotImplemented
def __repr__(self):
return 'NONE' if self.is_none else f'Some({self._val!r})'
|
MaT1g3R/option | option/option_.py | Option.get | python | def get(
self: 'Option[Mapping[K,V]]',
key: K,
default=None
) -> 'Option[V]':
if self._is_some:
return self._type.maybe(self._val.get(key, default))
return self._type.maybe(default) | Gets a mapping value by key in the contained value or returns
``default`` if the key doesn't exist.
Args:
key: The mapping key.
default: The defauilt value.
Returns:
* ``Some`` variant of the mapping value if the key exists
and the value is not None.
* ``Some(default)`` if ``default`` is not None.
* :py:data:`NONE` if ``default`` is None.
Examples:
>>> Some({'hi': 1}).get('hi')
Some(1)
>>> Some({}).get('hi', 12)
Some(12)
>>> NONE.get('hi', 12)
Some(12)
>>> NONE.get('hi')
NONE | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/option_.py#L361-L392 | null | class Option(Generic[T]):
"""
:py:class:`Option` represents an optional value. Every :py:class:`Option`
is either ``Some`` and contains a value, or :py:data:`NONE` and
does not.
To create a ``Some`` value, please use :py:meth:`Option.Some` or :py:func:`Some`.
To create a :py:data:`NONE` value, please use :py:meth:`Option.NONE` or import the
constant :py:data:`NONE` directly.
To let :py:class:`Option` guess the type of :py:class:`Option` to create,
please use :py:meth:`Option.maybe` or :py:func:`maybe`.
Calling the ``__init__`` method directly will raise a ``TypeError``.
Examples:
>>> Option.Some(1)
Some(1)
>>> Option.NONE()
NONE
>>> Option.maybe(1)
Some(1)
>>> Option.maybe(None)
NONE
"""
__slots__ = ('_val', '_is_some', '_type')
def __init__(self, value: T, is_some: bool, *, _force: bool = False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = value
self._is_some = is_some
self._type = type(self)
@classmethod
def Some(cls, val: T) -> 'Option[T]':
"""Some value ``val``."""
return cls(val, True, _force=True)
@classmethod
def NONE(cls) -> 'Option[T]':
"""No Value."""
return cast('Option[T]', NONE)
@classmethod
def maybe(cls, val: Optional[T]) -> 'Option[T]':
"""
Shortcut method to return ``Some`` or :py:data:`NONE` based on ``val``.
Args:
val: Some value.
Returns:
``Some(val)`` if the ``val`` is not None, otherwise :py:data:`NONE`.
Examples:
>>> Option.maybe(0)
Some(0)
>>> Option.maybe(None)
NONE
"""
return cast('Option[T]', NONE) if val is None else cls.Some(val)
def __bool__(self):
"""
Returns the truth value of the :py:class:`Option` based on its value.
Returns:
True if the :py:class:`Option` is ``Some`` value, otherwise False.
Examples:
>>> bool(Some(False))
True
>>> bool(NONE)
False
"""
return self._is_some
@property
def is_some(self) -> bool:
"""
Returns ``True`` if the option is a ``Some`` value.
Examples:
>>> Some(0).is_some
True
>>> NONE.is_some
False
"""
return self._is_some
@property
def is_none(self) -> bool:
"""
Returns ``True`` if the option is a :py:data:`NONE` value.
Examples:
>>> Some(0).is_none
False
>>> NONE.is_none
True
"""
return not self._is_some
def expect(self, msg) -> T:
"""
Unwraps the option. Raises an exception if the value is :py:data:`NONE`.
Args:
msg: The exception message.
Returns:
The wrapped value.
Raises:
``ValueError`` with message provided by ``msg`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).expect('sd')
0
>>> try:
... NONE.expect('Oh No!')
... except ValueError as e:
... print(e)
Oh No!
"""
if self._is_some:
return self._val
raise ValueError(msg)
def unwrap(self) -> T:
"""
Returns the value in the :py:class:`Option` if it is ``Some``.
Returns:
The ```Some`` value of the :py:class:`Option`.
Raises:
``ValueError`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).unwrap()
0
>>> try:
... NONE.unwrap()
... except ValueError as e:
... print(e)
Value is NONE.
"""
return self.value
@property
def value(self) -> T:
"""Property version of :py:meth:`unwrap`."""
if self._is_some:
return self._val
raise ValueError('Value is NONE.')
def unwrap_or(self, default: U) -> Union[T, U]:
"""
Returns the contained value or ``default``.
Args:
default: The default value.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``default``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :py:meth:`unwrap_or_else` instead.
Examples:
>>> Some(0).unwrap_or(3)
0
>>> NONE.unwrap_or(0)
0
"""
return self.unwrap_or_else(lambda: default)
def unwrap_or_else(self, callback: Callable[[], U]) -> Union[T, U]:
"""
Returns the contained value or computes it from ``callback``.
Args:
callback: The the default callback.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``callback()``.
Examples:
>>> Some(0).unwrap_or_else(lambda: 111)
0
>>> NONE.unwrap_or_else(lambda: 'ha')
'ha'
"""
return self._val if self._is_some else callback()
def map(self, callback: Callable[[T], U]) -> 'Option[U]':
"""
Applies the ``callback`` with the contained value as its argument or
returns :py:data:`NONE`.
Args:
callback: The callback to apply to the contained value.
Returns:
The ``callback`` result wrapped in an :class:`Option` if the
contained value is ``Some``, otherwise :py:data:`NONE`
Examples:
>>> Some(10).map(lambda x: x * x)
Some(100)
>>> NONE.map(lambda x: x * x)
NONE
"""
return self._type.Some(callback(self._val)) if self._is_some else cast('Option[U]', NONE)
def map_or(self, callback: Callable[[T], U], default: A) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or returns ``default``.
Args:
callback: The callback to apply to the contained value.
default: The default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise ``default``.
Notes:
If you wish to use the result of a function call as ``default``,
it is recommended to use :py:meth:`map_or_else` instead.
Examples:
>>> Some(0).map_or(lambda x: x + 1, 1000)
1
>>> NONE.map_or(lambda x: x * x, 1)
1
"""
return callback(self._val) if self._is_some else default
def map_or_else(self, callback: Callable[[T], U], default: Callable[[], A]) -> Union[U, A]:
"""
Applies the ``callback`` to the contained value or computes a default
with ``default``.
Args:
callback: The callback to apply to the contained value.
default: The callback fot the default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise the result of ``default``.
Examples:
>>> Some(0).map_or_else(lambda x: x * x, lambda: 1)
0
>>> NONE.map_or_else(lambda x: x * x, lambda: 1)
1
"""
return callback(self._val) if self._is_some else default()
def flatmap(self, callback: 'Callable[[T], Option[U]]') -> 'Option[U]':
"""
Applies the callback to the contained value if the option
is not :py:data:`NONE`.
This is different than :py:meth:`Option.map` because the result
of the callback isn't wrapped in a new :py:class:`Option`
Args:
callback: The callback to apply to the contained value.
Returns:
:py:data:`NONE` if the option is :py:data:`NONE`.
otherwise calls `callback` with the contained value and
returns the result.
Examples:
>>> def square(x): return Some(x * x)
>>> def nope(x): return NONE
>>> Some(2).flatmap(square).flatmap(square)
Some(16)
>>> Some(2).flatmap(square).flatmap(nope)
NONE
>>> Some(2).flatmap(nope).flatmap(square)
NONE
>>> NONE.flatmap(square).flatmap(square)
NONE
"""
return callback(self._val) if self._is_some else cast('Option[U]', NONE)
def filter(self, predicate: Callable[[T], bool]) -> 'Option[T]':
"""
Returns :py:data:`NONE` if the :py:class:`Option` is :py:data:`NONE`,
otherwise filter the contained value by ``predicate``.
Args:
predicate: The fitler function.
Returns:
:py:data:`NONE` if the contained value is :py:data:`NONE`, otherwise:
* The option itself if the predicate returns True
* :py:data:`NONE` if the predicate returns False
Examples:
>>> Some(0).filter(lambda x: x % 2 == 1)
NONE
>>> Some(1).filter(lambda x: x % 2 == 1)
Some(1)
>>> NONE.filter(lambda x: True)
NONE
"""
if self._is_some and predicate(self._val):
return self
return cast('Option[T]', NONE)
def __hash__(self):
return hash((self.__class__, self._is_some, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_some == other._is_some
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_some != other._is_some
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val < other._val if self._is_some else False
else:
return other._is_some
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val <= other._val if self._is_some else True
return other._is_some
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val > other._val if self._is_some else False
else:
return self._is_some
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_some == other._is_some:
return self._val >= other._val if self._is_some else True
return self._is_some
return NotImplemented
def __repr__(self):
return 'NONE' if self.is_none else f'Some({self._val!r})'
|
MaT1g3R/option | option/result.py | Result.ok | python | def ok(self) -> Option[T]:
return Option.Some(cast(T, self._val)) if self._is_ok else cast(Option[T], NONE) | Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [T].
Returns:
:class:`Option` containing the success value if `self` is
:meth:`Result.Ok`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).ok()
Some(1)
>>> Err(1).ok()
NONE | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/result.py#L128-L142 | null | class Result(Generic[T, E]):
"""
:class:`Result` is a type that either success (:meth:`Result.Ok`)
or failure (:meth:`Result.Err`).
To create an Ok value, use :meth:`Result.Ok` or :func:`Ok`.
To create a Err value, use :meth:`Result.Err` or :func:`Err`.
Calling the :class:`Result` constructor directly will raise a ``TypeError``.
Examples:
>>> Result.Ok(1)
Ok(1)
>>> Result.Err('Fail!')
Err('Fail!')
"""
__slots__ = ('_val', '_is_ok', '_type')
def __init__(self, val: Union[T, E], is_ok: bool, *, _force=False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = val
self._is_ok = is_ok
self._type = type(self)
@classmethod
def Ok(cls, val: T) -> 'Result[T, Any]':
"""
Contains the success value.
Args:
val: The success value.
Returns:
The :class:`Result` containing the success value.
Examples:
>>> res = Result.Ok(1)
>>> res
Ok(1)
>>> res.is_ok
True
"""
return cls(val, True, _force=True)
@classmethod
def Err(cls, err: E) -> 'Result[Any, E]':
"""
Contains the error value.
Args:
err: The error value.
Returns:
The :class:`Result` containing the error value.
Examples:
>>> res = Result.Err('Oh No')
>>> res
Err('Oh No')
>>> res.is_err
True
"""
return cls(err, False, _force=True)
def __bool__(self):
return self._is_ok
@property
def is_ok(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Ok`.
Examples:
>>> Ok(1).is_ok
True
>>> Err(1).is_ok
False
"""
return self._is_ok
@property
def is_err(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Err`.
Examples:
>>> Ok(1).is_err
False
>>> Err(1).is_err
True
"""
return not self._is_ok
def err(self) -> Option[E]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [E].
Returns:
:class:`Option` containing the error value if `self` is
:meth:`Result.Err`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).err()
NONE
>>> Err(1).err()
Some(1)
"""
return cast(Option[E], NONE) if self._is_ok else Option.Some(cast(E, self._val))
def map(self, op: Callable[[T], U]) -> 'Union[Result[U, E], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
Args:
op: The function to apply to the :meth:`Result.Ok` value.
Returns:
A :class:`Result` with its success value as the function result
if `self` is an :meth:`Result.Ok` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map(lambda x: x * 2)
Ok(2)
>>> Err(1).map(lambda x: x * 2)
Err(1)
"""
return self._type.Ok(op(cast(T, self._val))) if self._is_ok else self
def flatmap(self, op: 'Callable[[T], Result[U, E]]') -> 'Result[U, E]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
This is different than :meth:`Result.map` because the function
result is not wrapped in a new :class:`Result`.
Args:
op: The function to apply to the contained :meth:`Result.Ok` value.
Returns:
The result of the function if `self` is an :meth:`Result.Ok` value,
otherwise returns `self`.
Examples:
>>> def sq(x): return Ok(x * x)
>>> def err(x): return Err(x)
>>> Ok(2).flatmap(sq).flatmap(sq)
Ok(16)
>>> Ok(2).flatmap(sq).flatmap(err)
Err(4)
>>> Ok(2).flatmap(err).flatmap(sq)
Err(2)
>>> Err(3).flatmap(sq).flatmap(sq)
Err(3)
"""
return op(cast(T, self._val)) if self._is_ok else cast('Result[U, E]', self)
def map_err(self, op: Callable[[E], F]) -> 'Union[Result[T, F], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Err` value.
Args:
op: The function to apply to the :meth:`Result.Err` value.
Returns:
A :class:`Result` with its error value as the function result
if `self` is a :meth:`Result.Err` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map_err(lambda x: x * 2)
Ok(1)
>>> Err(1).map_err(lambda x: x * 2)
Err(2)
"""
return self if self._is_ok else cast(
'Result[T, F]',
self._type.Err(op(cast(E, self._val)))
)
def unwrap(self) -> T:
"""
Returns the success value in the :class:`Result`.
Returns:
The success value in the :class:`Result`.
Raises:
``ValueError`` with the message provided by the error value
if the :class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).unwrap()
1
>>> try:
... Err(1).unwrap()
... except ValueError as e:
... print(e)
1
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(self._val)
def unwrap_or(self, optb: T) -> T:
"""
Returns the success value in the :class:`Result` or ``optb``.
Args:
optb: The default return value.
Returns:
The success value in the :class:`Result` if it is a
:meth:`Result.Ok` value, otherwise ``optb``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :meth:`unwrap_or_else` instead.
Examples:
>>> Ok(1).unwrap_or(2)
1
>>> Err(1).unwrap_or(2)
2
"""
return cast(T, self._val) if self._is_ok else optb
def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:
"""
Returns the sucess value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to computes default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10
"""
return cast(T, self._val) if self._is_ok else op(cast(E, self._val))
def expect(self, msg) -> T:
"""
Returns the success value in the :class:`Result` or raises
a ``ValueError`` with a provided message.
Args:
msg: The error message.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value.
Raises:
``ValueError`` with ``msg`` as the message if the
:class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).expect('no')
1
>>> try:
... Err(1).expect('no')
... except ValueError as e:
... print(e)
no
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(msg)
def unwrap_err(self) -> E:
"""
Returns the error value in a :class:`Result`.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by the success value
if the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).unwrap_err()
... except ValueError as e:
... print(e)
1
>>> Err('Oh No').unwrap_err()
'Oh No'
"""
if self._is_ok:
raise ValueError(self._val)
return cast(E, self._val)
def expect_err(self, msg) -> E:
"""
Returns the error value in a :class:`Result`, or raises a
``ValueError`` with the provided message.
Args:
msg: The error message.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by ``msg`` if
the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).expect_err('Oh No')
... except ValueError as e:
... print(e)
Oh No
>>> Err(1).expect_err('Yes')
1
"""
if self._is_ok:
raise ValueError(msg)
return cast(E, self._val)
def __repr__(self):
return f'Ok({self._val!r})' if self._is_ok else f'Err({self._val!r})'
def __hash__(self):
return hash((self._type, self._is_ok, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_ok == other._is_ok
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_ok != other._is_ok
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val < other._val
return self._is_ok
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val <= other._val
return self._is_ok
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val > other._val
return other._is_ok
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val >= other._val
return other._is_ok
return NotImplemented
|
MaT1g3R/option | option/result.py | Result.err | python | def err(self) -> Option[E]:
return cast(Option[E], NONE) if self._is_ok else Option.Some(cast(E, self._val)) | Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [E].
Returns:
:class:`Option` containing the error value if `self` is
:meth:`Result.Err`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).err()
NONE
>>> Err(1).err()
Some(1) | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/result.py#L144-L158 | null | class Result(Generic[T, E]):
"""
:class:`Result` is a type that either success (:meth:`Result.Ok`)
or failure (:meth:`Result.Err`).
To create an Ok value, use :meth:`Result.Ok` or :func:`Ok`.
To create a Err value, use :meth:`Result.Err` or :func:`Err`.
Calling the :class:`Result` constructor directly will raise a ``TypeError``.
Examples:
>>> Result.Ok(1)
Ok(1)
>>> Result.Err('Fail!')
Err('Fail!')
"""
__slots__ = ('_val', '_is_ok', '_type')
def __init__(self, val: Union[T, E], is_ok: bool, *, _force=False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = val
self._is_ok = is_ok
self._type = type(self)
@classmethod
def Ok(cls, val: T) -> 'Result[T, Any]':
"""
Contains the success value.
Args:
val: The success value.
Returns:
The :class:`Result` containing the success value.
Examples:
>>> res = Result.Ok(1)
>>> res
Ok(1)
>>> res.is_ok
True
"""
return cls(val, True, _force=True)
@classmethod
def Err(cls, err: E) -> 'Result[Any, E]':
"""
Contains the error value.
Args:
err: The error value.
Returns:
The :class:`Result` containing the error value.
Examples:
>>> res = Result.Err('Oh No')
>>> res
Err('Oh No')
>>> res.is_err
True
"""
return cls(err, False, _force=True)
def __bool__(self):
return self._is_ok
@property
def is_ok(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Ok`.
Examples:
>>> Ok(1).is_ok
True
>>> Err(1).is_ok
False
"""
return self._is_ok
@property
def is_err(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Err`.
Examples:
>>> Ok(1).is_err
False
>>> Err(1).is_err
True
"""
return not self._is_ok
def ok(self) -> Option[T]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [T].
Returns:
:class:`Option` containing the success value if `self` is
:meth:`Result.Ok`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).ok()
Some(1)
>>> Err(1).ok()
NONE
"""
return Option.Some(cast(T, self._val)) if self._is_ok else cast(Option[T], NONE)
def map(self, op: Callable[[T], U]) -> 'Union[Result[U, E], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
Args:
op: The function to apply to the :meth:`Result.Ok` value.
Returns:
A :class:`Result` with its success value as the function result
if `self` is an :meth:`Result.Ok` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map(lambda x: x * 2)
Ok(2)
>>> Err(1).map(lambda x: x * 2)
Err(1)
"""
return self._type.Ok(op(cast(T, self._val))) if self._is_ok else self
def flatmap(self, op: 'Callable[[T], Result[U, E]]') -> 'Result[U, E]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
This is different than :meth:`Result.map` because the function
result is not wrapped in a new :class:`Result`.
Args:
op: The function to apply to the contained :meth:`Result.Ok` value.
Returns:
The result of the function if `self` is an :meth:`Result.Ok` value,
otherwise returns `self`.
Examples:
>>> def sq(x): return Ok(x * x)
>>> def err(x): return Err(x)
>>> Ok(2).flatmap(sq).flatmap(sq)
Ok(16)
>>> Ok(2).flatmap(sq).flatmap(err)
Err(4)
>>> Ok(2).flatmap(err).flatmap(sq)
Err(2)
>>> Err(3).flatmap(sq).flatmap(sq)
Err(3)
"""
return op(cast(T, self._val)) if self._is_ok else cast('Result[U, E]', self)
def map_err(self, op: Callable[[E], F]) -> 'Union[Result[T, F], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Err` value.
Args:
op: The function to apply to the :meth:`Result.Err` value.
Returns:
A :class:`Result` with its error value as the function result
if `self` is a :meth:`Result.Err` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map_err(lambda x: x * 2)
Ok(1)
>>> Err(1).map_err(lambda x: x * 2)
Err(2)
"""
return self if self._is_ok else cast(
'Result[T, F]',
self._type.Err(op(cast(E, self._val)))
)
def unwrap(self) -> T:
"""
Returns the success value in the :class:`Result`.
Returns:
The success value in the :class:`Result`.
Raises:
``ValueError`` with the message provided by the error value
if the :class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).unwrap()
1
>>> try:
... Err(1).unwrap()
... except ValueError as e:
... print(e)
1
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(self._val)
def unwrap_or(self, optb: T) -> T:
"""
Returns the success value in the :class:`Result` or ``optb``.
Args:
optb: The default return value.
Returns:
The success value in the :class:`Result` if it is a
:meth:`Result.Ok` value, otherwise ``optb``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :meth:`unwrap_or_else` instead.
Examples:
>>> Ok(1).unwrap_or(2)
1
>>> Err(1).unwrap_or(2)
2
"""
return cast(T, self._val) if self._is_ok else optb
def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:
"""
Returns the sucess value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to computes default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10
"""
return cast(T, self._val) if self._is_ok else op(cast(E, self._val))
def expect(self, msg) -> T:
"""
Returns the success value in the :class:`Result` or raises
a ``ValueError`` with a provided message.
Args:
msg: The error message.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value.
Raises:
``ValueError`` with ``msg`` as the message if the
:class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).expect('no')
1
>>> try:
... Err(1).expect('no')
... except ValueError as e:
... print(e)
no
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(msg)
def unwrap_err(self) -> E:
"""
Returns the error value in a :class:`Result`.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by the success value
if the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).unwrap_err()
... except ValueError as e:
... print(e)
1
>>> Err('Oh No').unwrap_err()
'Oh No'
"""
if self._is_ok:
raise ValueError(self._val)
return cast(E, self._val)
def expect_err(self, msg) -> E:
"""
Returns the error value in a :class:`Result`, or raises a
``ValueError`` with the provided message.
Args:
msg: The error message.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by ``msg`` if
the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).expect_err('Oh No')
... except ValueError as e:
... print(e)
Oh No
>>> Err(1).expect_err('Yes')
1
"""
if self._is_ok:
raise ValueError(msg)
return cast(E, self._val)
def __repr__(self):
return f'Ok({self._val!r})' if self._is_ok else f'Err({self._val!r})'
def __hash__(self):
return hash((self._type, self._is_ok, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_ok == other._is_ok
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_ok != other._is_ok
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val < other._val
return self._is_ok
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val <= other._val
return self._is_ok
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val > other._val
return other._is_ok
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val >= other._val
return other._is_ok
return NotImplemented
|
MaT1g3R/option | option/result.py | Result.map | python | def map(self, op: Callable[[T], U]) -> 'Union[Result[U, E], Result[T, E]]':
return self._type.Ok(op(cast(T, self._val))) if self._is_ok else self | Applies a function to the contained :meth:`Result.Ok` value.
Args:
op: The function to apply to the :meth:`Result.Ok` value.
Returns:
A :class:`Result` with its success value as the function result
if `self` is an :meth:`Result.Ok` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map(lambda x: x * 2)
Ok(2)
>>> Err(1).map(lambda x: x * 2)
Err(1) | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/result.py#L160-L178 | null | class Result(Generic[T, E]):
"""
:class:`Result` is a type that either success (:meth:`Result.Ok`)
or failure (:meth:`Result.Err`).
To create an Ok value, use :meth:`Result.Ok` or :func:`Ok`.
To create a Err value, use :meth:`Result.Err` or :func:`Err`.
Calling the :class:`Result` constructor directly will raise a ``TypeError``.
Examples:
>>> Result.Ok(1)
Ok(1)
>>> Result.Err('Fail!')
Err('Fail!')
"""
__slots__ = ('_val', '_is_ok', '_type')
def __init__(self, val: Union[T, E], is_ok: bool, *, _force=False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = val
self._is_ok = is_ok
self._type = type(self)
@classmethod
def Ok(cls, val: T) -> 'Result[T, Any]':
"""
Contains the success value.
Args:
val: The success value.
Returns:
The :class:`Result` containing the success value.
Examples:
>>> res = Result.Ok(1)
>>> res
Ok(1)
>>> res.is_ok
True
"""
return cls(val, True, _force=True)
@classmethod
def Err(cls, err: E) -> 'Result[Any, E]':
"""
Contains the error value.
Args:
err: The error value.
Returns:
The :class:`Result` containing the error value.
Examples:
>>> res = Result.Err('Oh No')
>>> res
Err('Oh No')
>>> res.is_err
True
"""
return cls(err, False, _force=True)
def __bool__(self):
return self._is_ok
@property
def is_ok(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Ok`.
Examples:
>>> Ok(1).is_ok
True
>>> Err(1).is_ok
False
"""
return self._is_ok
@property
def is_err(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Err`.
Examples:
>>> Ok(1).is_err
False
>>> Err(1).is_err
True
"""
return not self._is_ok
def ok(self) -> Option[T]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [T].
Returns:
:class:`Option` containing the success value if `self` is
:meth:`Result.Ok`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).ok()
Some(1)
>>> Err(1).ok()
NONE
"""
return Option.Some(cast(T, self._val)) if self._is_ok else cast(Option[T], NONE)
def err(self) -> Option[E]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [E].
Returns:
:class:`Option` containing the error value if `self` is
:meth:`Result.Err`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).err()
NONE
>>> Err(1).err()
Some(1)
"""
return cast(Option[E], NONE) if self._is_ok else Option.Some(cast(E, self._val))
def flatmap(self, op: 'Callable[[T], Result[U, E]]') -> 'Result[U, E]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
This is different than :meth:`Result.map` because the function
result is not wrapped in a new :class:`Result`.
Args:
op: The function to apply to the contained :meth:`Result.Ok` value.
Returns:
The result of the function if `self` is an :meth:`Result.Ok` value,
otherwise returns `self`.
Examples:
>>> def sq(x): return Ok(x * x)
>>> def err(x): return Err(x)
>>> Ok(2).flatmap(sq).flatmap(sq)
Ok(16)
>>> Ok(2).flatmap(sq).flatmap(err)
Err(4)
>>> Ok(2).flatmap(err).flatmap(sq)
Err(2)
>>> Err(3).flatmap(sq).flatmap(sq)
Err(3)
"""
return op(cast(T, self._val)) if self._is_ok else cast('Result[U, E]', self)
def map_err(self, op: Callable[[E], F]) -> 'Union[Result[T, F], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Err` value.
Args:
op: The function to apply to the :meth:`Result.Err` value.
Returns:
A :class:`Result` with its error value as the function result
if `self` is a :meth:`Result.Err` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map_err(lambda x: x * 2)
Ok(1)
>>> Err(1).map_err(lambda x: x * 2)
Err(2)
"""
return self if self._is_ok else cast(
'Result[T, F]',
self._type.Err(op(cast(E, self._val)))
)
def unwrap(self) -> T:
"""
Returns the success value in the :class:`Result`.
Returns:
The success value in the :class:`Result`.
Raises:
``ValueError`` with the message provided by the error value
if the :class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).unwrap()
1
>>> try:
... Err(1).unwrap()
... except ValueError as e:
... print(e)
1
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(self._val)
def unwrap_or(self, optb: T) -> T:
"""
Returns the success value in the :class:`Result` or ``optb``.
Args:
optb: The default return value.
Returns:
The success value in the :class:`Result` if it is a
:meth:`Result.Ok` value, otherwise ``optb``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :meth:`unwrap_or_else` instead.
Examples:
>>> Ok(1).unwrap_or(2)
1
>>> Err(1).unwrap_or(2)
2
"""
return cast(T, self._val) if self._is_ok else optb
def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:
"""
Returns the sucess value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to computes default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10
"""
return cast(T, self._val) if self._is_ok else op(cast(E, self._val))
def expect(self, msg) -> T:
"""
Returns the success value in the :class:`Result` or raises
a ``ValueError`` with a provided message.
Args:
msg: The error message.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value.
Raises:
``ValueError`` with ``msg`` as the message if the
:class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).expect('no')
1
>>> try:
... Err(1).expect('no')
... except ValueError as e:
... print(e)
no
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(msg)
def unwrap_err(self) -> E:
"""
Returns the error value in a :class:`Result`.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by the success value
if the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).unwrap_err()
... except ValueError as e:
... print(e)
1
>>> Err('Oh No').unwrap_err()
'Oh No'
"""
if self._is_ok:
raise ValueError(self._val)
return cast(E, self._val)
def expect_err(self, msg) -> E:
"""
Returns the error value in a :class:`Result`, or raises a
``ValueError`` with the provided message.
Args:
msg: The error message.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by ``msg`` if
the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).expect_err('Oh No')
... except ValueError as e:
... print(e)
Oh No
>>> Err(1).expect_err('Yes')
1
"""
if self._is_ok:
raise ValueError(msg)
return cast(E, self._val)
def __repr__(self):
return f'Ok({self._val!r})' if self._is_ok else f'Err({self._val!r})'
def __hash__(self):
return hash((self._type, self._is_ok, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_ok == other._is_ok
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_ok != other._is_ok
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val < other._val
return self._is_ok
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val <= other._val
return self._is_ok
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val > other._val
return other._is_ok
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val >= other._val
return other._is_ok
return NotImplemented
|
MaT1g3R/option | option/result.py | Result.flatmap | python | def flatmap(self, op: 'Callable[[T], Result[U, E]]') -> 'Result[U, E]':
return op(cast(T, self._val)) if self._is_ok else cast('Result[U, E]', self) | Applies a function to the contained :meth:`Result.Ok` value.
This is different than :meth:`Result.map` because the function
result is not wrapped in a new :class:`Result`.
Args:
op: The function to apply to the contained :meth:`Result.Ok` value.
Returns:
The result of the function if `self` is an :meth:`Result.Ok` value,
otherwise returns `self`.
Examples:
>>> def sq(x): return Ok(x * x)
>>> def err(x): return Err(x)
>>> Ok(2).flatmap(sq).flatmap(sq)
Ok(16)
>>> Ok(2).flatmap(sq).flatmap(err)
Err(4)
>>> Ok(2).flatmap(err).flatmap(sq)
Err(2)
>>> Err(3).flatmap(sq).flatmap(sq)
Err(3) | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/result.py#L180-L206 | null | class Result(Generic[T, E]):
"""
:class:`Result` is a type that either success (:meth:`Result.Ok`)
or failure (:meth:`Result.Err`).
To create an Ok value, use :meth:`Result.Ok` or :func:`Ok`.
To create a Err value, use :meth:`Result.Err` or :func:`Err`.
Calling the :class:`Result` constructor directly will raise a ``TypeError``.
Examples:
>>> Result.Ok(1)
Ok(1)
>>> Result.Err('Fail!')
Err('Fail!')
"""
__slots__ = ('_val', '_is_ok', '_type')
def __init__(self, val: Union[T, E], is_ok: bool, *, _force=False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = val
self._is_ok = is_ok
self._type = type(self)
@classmethod
def Ok(cls, val: T) -> 'Result[T, Any]':
"""
Contains the success value.
Args:
val: The success value.
Returns:
The :class:`Result` containing the success value.
Examples:
>>> res = Result.Ok(1)
>>> res
Ok(1)
>>> res.is_ok
True
"""
return cls(val, True, _force=True)
@classmethod
def Err(cls, err: E) -> 'Result[Any, E]':
"""
Contains the error value.
Args:
err: The error value.
Returns:
The :class:`Result` containing the error value.
Examples:
>>> res = Result.Err('Oh No')
>>> res
Err('Oh No')
>>> res.is_err
True
"""
return cls(err, False, _force=True)
def __bool__(self):
return self._is_ok
@property
def is_ok(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Ok`.
Examples:
>>> Ok(1).is_ok
True
>>> Err(1).is_ok
False
"""
return self._is_ok
@property
def is_err(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Err`.
Examples:
>>> Ok(1).is_err
False
>>> Err(1).is_err
True
"""
return not self._is_ok
def ok(self) -> Option[T]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [T].
Returns:
:class:`Option` containing the success value if `self` is
:meth:`Result.Ok`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).ok()
Some(1)
>>> Err(1).ok()
NONE
"""
return Option.Some(cast(T, self._val)) if self._is_ok else cast(Option[T], NONE)
def err(self) -> Option[E]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [E].
Returns:
:class:`Option` containing the error value if `self` is
:meth:`Result.Err`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).err()
NONE
>>> Err(1).err()
Some(1)
"""
return cast(Option[E], NONE) if self._is_ok else Option.Some(cast(E, self._val))
def map(self, op: Callable[[T], U]) -> 'Union[Result[U, E], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
Args:
op: The function to apply to the :meth:`Result.Ok` value.
Returns:
A :class:`Result` with its success value as the function result
if `self` is an :meth:`Result.Ok` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map(lambda x: x * 2)
Ok(2)
>>> Err(1).map(lambda x: x * 2)
Err(1)
"""
return self._type.Ok(op(cast(T, self._val))) if self._is_ok else self
def map_err(self, op: Callable[[E], F]) -> 'Union[Result[T, F], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Err` value.
Args:
op: The function to apply to the :meth:`Result.Err` value.
Returns:
A :class:`Result` with its error value as the function result
if `self` is a :meth:`Result.Err` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map_err(lambda x: x * 2)
Ok(1)
>>> Err(1).map_err(lambda x: x * 2)
Err(2)
"""
return self if self._is_ok else cast(
'Result[T, F]',
self._type.Err(op(cast(E, self._val)))
)
def unwrap(self) -> T:
"""
Returns the success value in the :class:`Result`.
Returns:
The success value in the :class:`Result`.
Raises:
``ValueError`` with the message provided by the error value
if the :class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).unwrap()
1
>>> try:
... Err(1).unwrap()
... except ValueError as e:
... print(e)
1
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(self._val)
def unwrap_or(self, optb: T) -> T:
"""
Returns the success value in the :class:`Result` or ``optb``.
Args:
optb: The default return value.
Returns:
The success value in the :class:`Result` if it is a
:meth:`Result.Ok` value, otherwise ``optb``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :meth:`unwrap_or_else` instead.
Examples:
>>> Ok(1).unwrap_or(2)
1
>>> Err(1).unwrap_or(2)
2
"""
return cast(T, self._val) if self._is_ok else optb
def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:
"""
Returns the sucess value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to computes default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10
"""
return cast(T, self._val) if self._is_ok else op(cast(E, self._val))
def expect(self, msg) -> T:
"""
Returns the success value in the :class:`Result` or raises
a ``ValueError`` with a provided message.
Args:
msg: The error message.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value.
Raises:
``ValueError`` with ``msg`` as the message if the
:class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).expect('no')
1
>>> try:
... Err(1).expect('no')
... except ValueError as e:
... print(e)
no
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(msg)
def unwrap_err(self) -> E:
"""
Returns the error value in a :class:`Result`.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by the success value
if the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).unwrap_err()
... except ValueError as e:
... print(e)
1
>>> Err('Oh No').unwrap_err()
'Oh No'
"""
if self._is_ok:
raise ValueError(self._val)
return cast(E, self._val)
def expect_err(self, msg) -> E:
"""
Returns the error value in a :class:`Result`, or raises a
``ValueError`` with the provided message.
Args:
msg: The error message.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by ``msg`` if
the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).expect_err('Oh No')
... except ValueError as e:
... print(e)
Oh No
>>> Err(1).expect_err('Yes')
1
"""
if self._is_ok:
raise ValueError(msg)
return cast(E, self._val)
def __repr__(self):
return f'Ok({self._val!r})' if self._is_ok else f'Err({self._val!r})'
def __hash__(self):
return hash((self._type, self._is_ok, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_ok == other._is_ok
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_ok != other._is_ok
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val < other._val
return self._is_ok
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val <= other._val
return self._is_ok
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val > other._val
return other._is_ok
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val >= other._val
return other._is_ok
return NotImplemented
|
MaT1g3R/option | option/result.py | Result.map_err | python | def map_err(self, op: Callable[[E], F]) -> 'Union[Result[T, F], Result[T, E]]':
return self if self._is_ok else cast(
'Result[T, F]',
self._type.Err(op(cast(E, self._val)))
) | Applies a function to the contained :meth:`Result.Err` value.
Args:
op: The function to apply to the :meth:`Result.Err` value.
Returns:
A :class:`Result` with its error value as the function result
if `self` is a :meth:`Result.Err` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map_err(lambda x: x * 2)
Ok(1)
>>> Err(1).map_err(lambda x: x * 2)
Err(2) | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/result.py#L208-L229 | null | class Result(Generic[T, E]):
"""
:class:`Result` is a type that either success (:meth:`Result.Ok`)
or failure (:meth:`Result.Err`).
To create an Ok value, use :meth:`Result.Ok` or :func:`Ok`.
To create a Err value, use :meth:`Result.Err` or :func:`Err`.
Calling the :class:`Result` constructor directly will raise a ``TypeError``.
Examples:
>>> Result.Ok(1)
Ok(1)
>>> Result.Err('Fail!')
Err('Fail!')
"""
__slots__ = ('_val', '_is_ok', '_type')
def __init__(self, val: Union[T, E], is_ok: bool, *, _force=False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = val
self._is_ok = is_ok
self._type = type(self)
@classmethod
def Ok(cls, val: T) -> 'Result[T, Any]':
"""
Contains the success value.
Args:
val: The success value.
Returns:
The :class:`Result` containing the success value.
Examples:
>>> res = Result.Ok(1)
>>> res
Ok(1)
>>> res.is_ok
True
"""
return cls(val, True, _force=True)
@classmethod
def Err(cls, err: E) -> 'Result[Any, E]':
"""
Contains the error value.
Args:
err: The error value.
Returns:
The :class:`Result` containing the error value.
Examples:
>>> res = Result.Err('Oh No')
>>> res
Err('Oh No')
>>> res.is_err
True
"""
return cls(err, False, _force=True)
def __bool__(self):
return self._is_ok
@property
def is_ok(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Ok`.
Examples:
>>> Ok(1).is_ok
True
>>> Err(1).is_ok
False
"""
return self._is_ok
@property
def is_err(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Err`.
Examples:
>>> Ok(1).is_err
False
>>> Err(1).is_err
True
"""
return not self._is_ok
def ok(self) -> Option[T]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [T].
Returns:
:class:`Option` containing the success value if `self` is
:meth:`Result.Ok`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).ok()
Some(1)
>>> Err(1).ok()
NONE
"""
return Option.Some(cast(T, self._val)) if self._is_ok else cast(Option[T], NONE)
def err(self) -> Option[E]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [E].
Returns:
:class:`Option` containing the error value if `self` is
:meth:`Result.Err`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).err()
NONE
>>> Err(1).err()
Some(1)
"""
return cast(Option[E], NONE) if self._is_ok else Option.Some(cast(E, self._val))
def map(self, op: Callable[[T], U]) -> 'Union[Result[U, E], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
Args:
op: The function to apply to the :meth:`Result.Ok` value.
Returns:
A :class:`Result` with its success value as the function result
if `self` is an :meth:`Result.Ok` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map(lambda x: x * 2)
Ok(2)
>>> Err(1).map(lambda x: x * 2)
Err(1)
"""
return self._type.Ok(op(cast(T, self._val))) if self._is_ok else self
def flatmap(self, op: 'Callable[[T], Result[U, E]]') -> 'Result[U, E]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
This is different than :meth:`Result.map` because the function
result is not wrapped in a new :class:`Result`.
Args:
op: The function to apply to the contained :meth:`Result.Ok` value.
Returns:
The result of the function if `self` is an :meth:`Result.Ok` value,
otherwise returns `self`.
Examples:
>>> def sq(x): return Ok(x * x)
>>> def err(x): return Err(x)
>>> Ok(2).flatmap(sq).flatmap(sq)
Ok(16)
>>> Ok(2).flatmap(sq).flatmap(err)
Err(4)
>>> Ok(2).flatmap(err).flatmap(sq)
Err(2)
>>> Err(3).flatmap(sq).flatmap(sq)
Err(3)
"""
return op(cast(T, self._val)) if self._is_ok else cast('Result[U, E]', self)
def unwrap(self) -> T:
"""
Returns the success value in the :class:`Result`.
Returns:
The success value in the :class:`Result`.
Raises:
``ValueError`` with the message provided by the error value
if the :class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).unwrap()
1
>>> try:
... Err(1).unwrap()
... except ValueError as e:
... print(e)
1
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(self._val)
def unwrap_or(self, optb: T) -> T:
"""
Returns the success value in the :class:`Result` or ``optb``.
Args:
optb: The default return value.
Returns:
The success value in the :class:`Result` if it is a
:meth:`Result.Ok` value, otherwise ``optb``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :meth:`unwrap_or_else` instead.
Examples:
>>> Ok(1).unwrap_or(2)
1
>>> Err(1).unwrap_or(2)
2
"""
return cast(T, self._val) if self._is_ok else optb
def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:
"""
Returns the sucess value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to computes default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10
"""
return cast(T, self._val) if self._is_ok else op(cast(E, self._val))
def expect(self, msg) -> T:
"""
Returns the success value in the :class:`Result` or raises
a ``ValueError`` with a provided message.
Args:
msg: The error message.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value.
Raises:
``ValueError`` with ``msg`` as the message if the
:class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).expect('no')
1
>>> try:
... Err(1).expect('no')
... except ValueError as e:
... print(e)
no
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(msg)
def unwrap_err(self) -> E:
"""
Returns the error value in a :class:`Result`.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by the success value
if the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).unwrap_err()
... except ValueError as e:
... print(e)
1
>>> Err('Oh No').unwrap_err()
'Oh No'
"""
if self._is_ok:
raise ValueError(self._val)
return cast(E, self._val)
def expect_err(self, msg) -> E:
"""
Returns the error value in a :class:`Result`, or raises a
``ValueError`` with the provided message.
Args:
msg: The error message.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by ``msg`` if
the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).expect_err('Oh No')
... except ValueError as e:
... print(e)
Oh No
>>> Err(1).expect_err('Yes')
1
"""
if self._is_ok:
raise ValueError(msg)
return cast(E, self._val)
def __repr__(self):
return f'Ok({self._val!r})' if self._is_ok else f'Err({self._val!r})'
def __hash__(self):
return hash((self._type, self._is_ok, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_ok == other._is_ok
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_ok != other._is_ok
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val < other._val
return self._is_ok
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val <= other._val
return self._is_ok
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val > other._val
return other._is_ok
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val >= other._val
return other._is_ok
return NotImplemented
|
MaT1g3R/option | option/result.py | Result.unwrap | python | def unwrap(self) -> T:
if self._is_ok:
return cast(T, self._val)
raise ValueError(self._val) | Returns the success value in the :class:`Result`.
Returns:
The success value in the :class:`Result`.
Raises:
``ValueError`` with the message provided by the error value
if the :class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).unwrap()
1
>>> try:
... Err(1).unwrap()
... except ValueError as e:
... print(e)
1 | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/result.py#L231-L253 | null | class Result(Generic[T, E]):
"""
:class:`Result` is a type that either success (:meth:`Result.Ok`)
or failure (:meth:`Result.Err`).
To create an Ok value, use :meth:`Result.Ok` or :func:`Ok`.
To create a Err value, use :meth:`Result.Err` or :func:`Err`.
Calling the :class:`Result` constructor directly will raise a ``TypeError``.
Examples:
>>> Result.Ok(1)
Ok(1)
>>> Result.Err('Fail!')
Err('Fail!')
"""
__slots__ = ('_val', '_is_ok', '_type')
def __init__(self, val: Union[T, E], is_ok: bool, *, _force=False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = val
self._is_ok = is_ok
self._type = type(self)
@classmethod
def Ok(cls, val: T) -> 'Result[T, Any]':
"""
Contains the success value.
Args:
val: The success value.
Returns:
The :class:`Result` containing the success value.
Examples:
>>> res = Result.Ok(1)
>>> res
Ok(1)
>>> res.is_ok
True
"""
return cls(val, True, _force=True)
@classmethod
def Err(cls, err: E) -> 'Result[Any, E]':
"""
Contains the error value.
Args:
err: The error value.
Returns:
The :class:`Result` containing the error value.
Examples:
>>> res = Result.Err('Oh No')
>>> res
Err('Oh No')
>>> res.is_err
True
"""
return cls(err, False, _force=True)
def __bool__(self):
return self._is_ok
@property
def is_ok(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Ok`.
Examples:
>>> Ok(1).is_ok
True
>>> Err(1).is_ok
False
"""
return self._is_ok
@property
def is_err(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Err`.
Examples:
>>> Ok(1).is_err
False
>>> Err(1).is_err
True
"""
return not self._is_ok
def ok(self) -> Option[T]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [T].
Returns:
:class:`Option` containing the success value if `self` is
:meth:`Result.Ok`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).ok()
Some(1)
>>> Err(1).ok()
NONE
"""
return Option.Some(cast(T, self._val)) if self._is_ok else cast(Option[T], NONE)
def err(self) -> Option[E]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [E].
Returns:
:class:`Option` containing the error value if `self` is
:meth:`Result.Err`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).err()
NONE
>>> Err(1).err()
Some(1)
"""
return cast(Option[E], NONE) if self._is_ok else Option.Some(cast(E, self._val))
def map(self, op: Callable[[T], U]) -> 'Union[Result[U, E], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
Args:
op: The function to apply to the :meth:`Result.Ok` value.
Returns:
A :class:`Result` with its success value as the function result
if `self` is an :meth:`Result.Ok` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map(lambda x: x * 2)
Ok(2)
>>> Err(1).map(lambda x: x * 2)
Err(1)
"""
return self._type.Ok(op(cast(T, self._val))) if self._is_ok else self
def flatmap(self, op: 'Callable[[T], Result[U, E]]') -> 'Result[U, E]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
This is different than :meth:`Result.map` because the function
result is not wrapped in a new :class:`Result`.
Args:
op: The function to apply to the contained :meth:`Result.Ok` value.
Returns:
The result of the function if `self` is an :meth:`Result.Ok` value,
otherwise returns `self`.
Examples:
>>> def sq(x): return Ok(x * x)
>>> def err(x): return Err(x)
>>> Ok(2).flatmap(sq).flatmap(sq)
Ok(16)
>>> Ok(2).flatmap(sq).flatmap(err)
Err(4)
>>> Ok(2).flatmap(err).flatmap(sq)
Err(2)
>>> Err(3).flatmap(sq).flatmap(sq)
Err(3)
"""
return op(cast(T, self._val)) if self._is_ok else cast('Result[U, E]', self)
def map_err(self, op: Callable[[E], F]) -> 'Union[Result[T, F], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Err` value.
Args:
op: The function to apply to the :meth:`Result.Err` value.
Returns:
A :class:`Result` with its error value as the function result
if `self` is a :meth:`Result.Err` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map_err(lambda x: x * 2)
Ok(1)
>>> Err(1).map_err(lambda x: x * 2)
Err(2)
"""
return self if self._is_ok else cast(
'Result[T, F]',
self._type.Err(op(cast(E, self._val)))
)
def unwrap_or(self, optb: T) -> T:
"""
Returns the success value in the :class:`Result` or ``optb``.
Args:
optb: The default return value.
Returns:
The success value in the :class:`Result` if it is a
:meth:`Result.Ok` value, otherwise ``optb``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :meth:`unwrap_or_else` instead.
Examples:
>>> Ok(1).unwrap_or(2)
1
>>> Err(1).unwrap_or(2)
2
"""
return cast(T, self._val) if self._is_ok else optb
def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:
"""
Returns the sucess value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to computes default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10
"""
return cast(T, self._val) if self._is_ok else op(cast(E, self._val))
def expect(self, msg) -> T:
"""
Returns the success value in the :class:`Result` or raises
a ``ValueError`` with a provided message.
Args:
msg: The error message.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value.
Raises:
``ValueError`` with ``msg`` as the message if the
:class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).expect('no')
1
>>> try:
... Err(1).expect('no')
... except ValueError as e:
... print(e)
no
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(msg)
def unwrap_err(self) -> E:
"""
Returns the error value in a :class:`Result`.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by the success value
if the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).unwrap_err()
... except ValueError as e:
... print(e)
1
>>> Err('Oh No').unwrap_err()
'Oh No'
"""
if self._is_ok:
raise ValueError(self._val)
return cast(E, self._val)
def expect_err(self, msg) -> E:
"""
Returns the error value in a :class:`Result`, or raises a
``ValueError`` with the provided message.
Args:
msg: The error message.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by ``msg`` if
the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).expect_err('Oh No')
... except ValueError as e:
... print(e)
Oh No
>>> Err(1).expect_err('Yes')
1
"""
if self._is_ok:
raise ValueError(msg)
return cast(E, self._val)
def __repr__(self):
return f'Ok({self._val!r})' if self._is_ok else f'Err({self._val!r})'
def __hash__(self):
return hash((self._type, self._is_ok, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_ok == other._is_ok
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_ok != other._is_ok
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val < other._val
return self._is_ok
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val <= other._val
return self._is_ok
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val > other._val
return other._is_ok
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val >= other._val
return other._is_ok
return NotImplemented
|
MaT1g3R/option | option/result.py | Result.unwrap_or | python | def unwrap_or(self, optb: T) -> T:
return cast(T, self._val) if self._is_ok else optb | Returns the success value in the :class:`Result` or ``optb``.
Args:
optb: The default return value.
Returns:
The success value in the :class:`Result` if it is a
:meth:`Result.Ok` value, otherwise ``optb``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :meth:`unwrap_or_else` instead.
Examples:
>>> Ok(1).unwrap_or(2)
1
>>> Err(1).unwrap_or(2)
2 | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/result.py#L255-L276 | null | class Result(Generic[T, E]):
"""
:class:`Result` is a type that either success (:meth:`Result.Ok`)
or failure (:meth:`Result.Err`).
To create an Ok value, use :meth:`Result.Ok` or :func:`Ok`.
To create a Err value, use :meth:`Result.Err` or :func:`Err`.
Calling the :class:`Result` constructor directly will raise a ``TypeError``.
Examples:
>>> Result.Ok(1)
Ok(1)
>>> Result.Err('Fail!')
Err('Fail!')
"""
__slots__ = ('_val', '_is_ok', '_type')
def __init__(self, val: Union[T, E], is_ok: bool, *, _force=False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = val
self._is_ok = is_ok
self._type = type(self)
@classmethod
def Ok(cls, val: T) -> 'Result[T, Any]':
"""
Contains the success value.
Args:
val: The success value.
Returns:
The :class:`Result` containing the success value.
Examples:
>>> res = Result.Ok(1)
>>> res
Ok(1)
>>> res.is_ok
True
"""
return cls(val, True, _force=True)
@classmethod
def Err(cls, err: E) -> 'Result[Any, E]':
"""
Contains the error value.
Args:
err: The error value.
Returns:
The :class:`Result` containing the error value.
Examples:
>>> res = Result.Err('Oh No')
>>> res
Err('Oh No')
>>> res.is_err
True
"""
return cls(err, False, _force=True)
def __bool__(self):
return self._is_ok
@property
def is_ok(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Ok`.
Examples:
>>> Ok(1).is_ok
True
>>> Err(1).is_ok
False
"""
return self._is_ok
@property
def is_err(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Err`.
Examples:
>>> Ok(1).is_err
False
>>> Err(1).is_err
True
"""
return not self._is_ok
def ok(self) -> Option[T]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [T].
Returns:
:class:`Option` containing the success value if `self` is
:meth:`Result.Ok`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).ok()
Some(1)
>>> Err(1).ok()
NONE
"""
return Option.Some(cast(T, self._val)) if self._is_ok else cast(Option[T], NONE)
def err(self) -> Option[E]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [E].
Returns:
:class:`Option` containing the error value if `self` is
:meth:`Result.Err`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).err()
NONE
>>> Err(1).err()
Some(1)
"""
return cast(Option[E], NONE) if self._is_ok else Option.Some(cast(E, self._val))
def map(self, op: Callable[[T], U]) -> 'Union[Result[U, E], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
Args:
op: The function to apply to the :meth:`Result.Ok` value.
Returns:
A :class:`Result` with its success value as the function result
if `self` is an :meth:`Result.Ok` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map(lambda x: x * 2)
Ok(2)
>>> Err(1).map(lambda x: x * 2)
Err(1)
"""
return self._type.Ok(op(cast(T, self._val))) if self._is_ok else self
def flatmap(self, op: 'Callable[[T], Result[U, E]]') -> 'Result[U, E]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
This is different than :meth:`Result.map` because the function
result is not wrapped in a new :class:`Result`.
Args:
op: The function to apply to the contained :meth:`Result.Ok` value.
Returns:
The result of the function if `self` is an :meth:`Result.Ok` value,
otherwise returns `self`.
Examples:
>>> def sq(x): return Ok(x * x)
>>> def err(x): return Err(x)
>>> Ok(2).flatmap(sq).flatmap(sq)
Ok(16)
>>> Ok(2).flatmap(sq).flatmap(err)
Err(4)
>>> Ok(2).flatmap(err).flatmap(sq)
Err(2)
>>> Err(3).flatmap(sq).flatmap(sq)
Err(3)
"""
return op(cast(T, self._val)) if self._is_ok else cast('Result[U, E]', self)
def map_err(self, op: Callable[[E], F]) -> 'Union[Result[T, F], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Err` value.
Args:
op: The function to apply to the :meth:`Result.Err` value.
Returns:
A :class:`Result` with its error value as the function result
if `self` is a :meth:`Result.Err` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map_err(lambda x: x * 2)
Ok(1)
>>> Err(1).map_err(lambda x: x * 2)
Err(2)
"""
return self if self._is_ok else cast(
'Result[T, F]',
self._type.Err(op(cast(E, self._val)))
)
def unwrap(self) -> T:
"""
Returns the success value in the :class:`Result`.
Returns:
The success value in the :class:`Result`.
Raises:
``ValueError`` with the message provided by the error value
if the :class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).unwrap()
1
>>> try:
... Err(1).unwrap()
... except ValueError as e:
... print(e)
1
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(self._val)
def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:
"""
Returns the sucess value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to computes default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10
"""
return cast(T, self._val) if self._is_ok else op(cast(E, self._val))
def expect(self, msg) -> T:
"""
Returns the success value in the :class:`Result` or raises
a ``ValueError`` with a provided message.
Args:
msg: The error message.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value.
Raises:
``ValueError`` with ``msg`` as the message if the
:class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).expect('no')
1
>>> try:
... Err(1).expect('no')
... except ValueError as e:
... print(e)
no
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(msg)
def unwrap_err(self) -> E:
"""
Returns the error value in a :class:`Result`.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by the success value
if the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).unwrap_err()
... except ValueError as e:
... print(e)
1
>>> Err('Oh No').unwrap_err()
'Oh No'
"""
if self._is_ok:
raise ValueError(self._val)
return cast(E, self._val)
def expect_err(self, msg) -> E:
"""
Returns the error value in a :class:`Result`, or raises a
``ValueError`` with the provided message.
Args:
msg: The error message.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by ``msg`` if
the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).expect_err('Oh No')
... except ValueError as e:
... print(e)
Oh No
>>> Err(1).expect_err('Yes')
1
"""
if self._is_ok:
raise ValueError(msg)
return cast(E, self._val)
def __repr__(self):
return f'Ok({self._val!r})' if self._is_ok else f'Err({self._val!r})'
def __hash__(self):
return hash((self._type, self._is_ok, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_ok == other._is_ok
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_ok != other._is_ok
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val < other._val
return self._is_ok
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val <= other._val
return self._is_ok
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val > other._val
return other._is_ok
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val >= other._val
return other._is_ok
return NotImplemented
|
MaT1g3R/option | option/result.py | Result.unwrap_or_else | python | def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:
return cast(T, self._val) if self._is_ok else op(cast(E, self._val)) | Returns the sucess value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to computes default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10 | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/result.py#L278-L296 | null | class Result(Generic[T, E]):
"""
:class:`Result` is a type that either success (:meth:`Result.Ok`)
or failure (:meth:`Result.Err`).
To create an Ok value, use :meth:`Result.Ok` or :func:`Ok`.
To create a Err value, use :meth:`Result.Err` or :func:`Err`.
Calling the :class:`Result` constructor directly will raise a ``TypeError``.
Examples:
>>> Result.Ok(1)
Ok(1)
>>> Result.Err('Fail!')
Err('Fail!')
"""
__slots__ = ('_val', '_is_ok', '_type')
def __init__(self, val: Union[T, E], is_ok: bool, *, _force=False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = val
self._is_ok = is_ok
self._type = type(self)
@classmethod
def Ok(cls, val: T) -> 'Result[T, Any]':
"""
Contains the success value.
Args:
val: The success value.
Returns:
The :class:`Result` containing the success value.
Examples:
>>> res = Result.Ok(1)
>>> res
Ok(1)
>>> res.is_ok
True
"""
return cls(val, True, _force=True)
@classmethod
def Err(cls, err: E) -> 'Result[Any, E]':
"""
Contains the error value.
Args:
err: The error value.
Returns:
The :class:`Result` containing the error value.
Examples:
>>> res = Result.Err('Oh No')
>>> res
Err('Oh No')
>>> res.is_err
True
"""
return cls(err, False, _force=True)
def __bool__(self):
return self._is_ok
@property
def is_ok(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Ok`.
Examples:
>>> Ok(1).is_ok
True
>>> Err(1).is_ok
False
"""
return self._is_ok
@property
def is_err(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Err`.
Examples:
>>> Ok(1).is_err
False
>>> Err(1).is_err
True
"""
return not self._is_ok
def ok(self) -> Option[T]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [T].
Returns:
:class:`Option` containing the success value if `self` is
:meth:`Result.Ok`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).ok()
Some(1)
>>> Err(1).ok()
NONE
"""
return Option.Some(cast(T, self._val)) if self._is_ok else cast(Option[T], NONE)
def err(self) -> Option[E]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [E].
Returns:
:class:`Option` containing the error value if `self` is
:meth:`Result.Err`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).err()
NONE
>>> Err(1).err()
Some(1)
"""
return cast(Option[E], NONE) if self._is_ok else Option.Some(cast(E, self._val))
def map(self, op: Callable[[T], U]) -> 'Union[Result[U, E], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
Args:
op: The function to apply to the :meth:`Result.Ok` value.
Returns:
A :class:`Result` with its success value as the function result
if `self` is an :meth:`Result.Ok` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map(lambda x: x * 2)
Ok(2)
>>> Err(1).map(lambda x: x * 2)
Err(1)
"""
return self._type.Ok(op(cast(T, self._val))) if self._is_ok else self
def flatmap(self, op: 'Callable[[T], Result[U, E]]') -> 'Result[U, E]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
This is different than :meth:`Result.map` because the function
result is not wrapped in a new :class:`Result`.
Args:
op: The function to apply to the contained :meth:`Result.Ok` value.
Returns:
The result of the function if `self` is an :meth:`Result.Ok` value,
otherwise returns `self`.
Examples:
>>> def sq(x): return Ok(x * x)
>>> def err(x): return Err(x)
>>> Ok(2).flatmap(sq).flatmap(sq)
Ok(16)
>>> Ok(2).flatmap(sq).flatmap(err)
Err(4)
>>> Ok(2).flatmap(err).flatmap(sq)
Err(2)
>>> Err(3).flatmap(sq).flatmap(sq)
Err(3)
"""
return op(cast(T, self._val)) if self._is_ok else cast('Result[U, E]', self)
def map_err(self, op: Callable[[E], F]) -> 'Union[Result[T, F], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Err` value.
Args:
op: The function to apply to the :meth:`Result.Err` value.
Returns:
A :class:`Result` with its error value as the function result
if `self` is a :meth:`Result.Err` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map_err(lambda x: x * 2)
Ok(1)
>>> Err(1).map_err(lambda x: x * 2)
Err(2)
"""
return self if self._is_ok else cast(
'Result[T, F]',
self._type.Err(op(cast(E, self._val)))
)
def unwrap(self) -> T:
"""
Returns the success value in the :class:`Result`.
Returns:
The success value in the :class:`Result`.
Raises:
``ValueError`` with the message provided by the error value
if the :class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).unwrap()
1
>>> try:
... Err(1).unwrap()
... except ValueError as e:
... print(e)
1
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(self._val)
def unwrap_or(self, optb: T) -> T:
"""
Returns the success value in the :class:`Result` or ``optb``.
Args:
optb: The default return value.
Returns:
The success value in the :class:`Result` if it is a
:meth:`Result.Ok` value, otherwise ``optb``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :meth:`unwrap_or_else` instead.
Examples:
>>> Ok(1).unwrap_or(2)
1
>>> Err(1).unwrap_or(2)
2
"""
return cast(T, self._val) if self._is_ok else optb
def expect(self, msg) -> T:
"""
Returns the success value in the :class:`Result` or raises
a ``ValueError`` with a provided message.
Args:
msg: The error message.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value.
Raises:
``ValueError`` with ``msg`` as the message if the
:class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).expect('no')
1
>>> try:
... Err(1).expect('no')
... except ValueError as e:
... print(e)
no
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(msg)
def unwrap_err(self) -> E:
"""
Returns the error value in a :class:`Result`.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by the success value
if the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).unwrap_err()
... except ValueError as e:
... print(e)
1
>>> Err('Oh No').unwrap_err()
'Oh No'
"""
if self._is_ok:
raise ValueError(self._val)
return cast(E, self._val)
def expect_err(self, msg) -> E:
"""
Returns the error value in a :class:`Result`, or raises a
``ValueError`` with the provided message.
Args:
msg: The error message.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by ``msg`` if
the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).expect_err('Oh No')
... except ValueError as e:
... print(e)
Oh No
>>> Err(1).expect_err('Yes')
1
"""
if self._is_ok:
raise ValueError(msg)
return cast(E, self._val)
def __repr__(self):
return f'Ok({self._val!r})' if self._is_ok else f'Err({self._val!r})'
def __hash__(self):
return hash((self._type, self._is_ok, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_ok == other._is_ok
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_ok != other._is_ok
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val < other._val
return self._is_ok
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val <= other._val
return self._is_ok
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val > other._val
return other._is_ok
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val >= other._val
return other._is_ok
return NotImplemented
|
MaT1g3R/option | option/result.py | Result.expect | python | def expect(self, msg) -> T:
if self._is_ok:
return cast(T, self._val)
raise ValueError(msg) | Returns the success value in the :class:`Result` or raises
a ``ValueError`` with a provided message.
Args:
msg: The error message.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value.
Raises:
``ValueError`` with ``msg`` as the message if the
:class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).expect('no')
1
>>> try:
... Err(1).expect('no')
... except ValueError as e:
... print(e)
no | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/result.py#L298-L325 | null | class Result(Generic[T, E]):
"""
:class:`Result` is a type that either success (:meth:`Result.Ok`)
or failure (:meth:`Result.Err`).
To create an Ok value, use :meth:`Result.Ok` or :func:`Ok`.
To create a Err value, use :meth:`Result.Err` or :func:`Err`.
Calling the :class:`Result` constructor directly will raise a ``TypeError``.
Examples:
>>> Result.Ok(1)
Ok(1)
>>> Result.Err('Fail!')
Err('Fail!')
"""
__slots__ = ('_val', '_is_ok', '_type')
def __init__(self, val: Union[T, E], is_ok: bool, *, _force=False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = val
self._is_ok = is_ok
self._type = type(self)
@classmethod
def Ok(cls, val: T) -> 'Result[T, Any]':
"""
Contains the success value.
Args:
val: The success value.
Returns:
The :class:`Result` containing the success value.
Examples:
>>> res = Result.Ok(1)
>>> res
Ok(1)
>>> res.is_ok
True
"""
return cls(val, True, _force=True)
@classmethod
def Err(cls, err: E) -> 'Result[Any, E]':
"""
Contains the error value.
Args:
err: The error value.
Returns:
The :class:`Result` containing the error value.
Examples:
>>> res = Result.Err('Oh No')
>>> res
Err('Oh No')
>>> res.is_err
True
"""
return cls(err, False, _force=True)
def __bool__(self):
return self._is_ok
@property
def is_ok(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Ok`.
Examples:
>>> Ok(1).is_ok
True
>>> Err(1).is_ok
False
"""
return self._is_ok
@property
def is_err(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Err`.
Examples:
>>> Ok(1).is_err
False
>>> Err(1).is_err
True
"""
return not self._is_ok
def ok(self) -> Option[T]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [T].
Returns:
:class:`Option` containing the success value if `self` is
:meth:`Result.Ok`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).ok()
Some(1)
>>> Err(1).ok()
NONE
"""
return Option.Some(cast(T, self._val)) if self._is_ok else cast(Option[T], NONE)
def err(self) -> Option[E]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [E].
Returns:
:class:`Option` containing the error value if `self` is
:meth:`Result.Err`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).err()
NONE
>>> Err(1).err()
Some(1)
"""
return cast(Option[E], NONE) if self._is_ok else Option.Some(cast(E, self._val))
def map(self, op: Callable[[T], U]) -> 'Union[Result[U, E], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
Args:
op: The function to apply to the :meth:`Result.Ok` value.
Returns:
A :class:`Result` with its success value as the function result
if `self` is an :meth:`Result.Ok` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map(lambda x: x * 2)
Ok(2)
>>> Err(1).map(lambda x: x * 2)
Err(1)
"""
return self._type.Ok(op(cast(T, self._val))) if self._is_ok else self
def flatmap(self, op: 'Callable[[T], Result[U, E]]') -> 'Result[U, E]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
This is different than :meth:`Result.map` because the function
result is not wrapped in a new :class:`Result`.
Args:
op: The function to apply to the contained :meth:`Result.Ok` value.
Returns:
The result of the function if `self` is an :meth:`Result.Ok` value,
otherwise returns `self`.
Examples:
>>> def sq(x): return Ok(x * x)
>>> def err(x): return Err(x)
>>> Ok(2).flatmap(sq).flatmap(sq)
Ok(16)
>>> Ok(2).flatmap(sq).flatmap(err)
Err(4)
>>> Ok(2).flatmap(err).flatmap(sq)
Err(2)
>>> Err(3).flatmap(sq).flatmap(sq)
Err(3)
"""
return op(cast(T, self._val)) if self._is_ok else cast('Result[U, E]', self)
def map_err(self, op: Callable[[E], F]) -> 'Union[Result[T, F], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Err` value.
Args:
op: The function to apply to the :meth:`Result.Err` value.
Returns:
A :class:`Result` with its error value as the function result
if `self` is a :meth:`Result.Err` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map_err(lambda x: x * 2)
Ok(1)
>>> Err(1).map_err(lambda x: x * 2)
Err(2)
"""
return self if self._is_ok else cast(
'Result[T, F]',
self._type.Err(op(cast(E, self._val)))
)
def unwrap(self) -> T:
"""
Returns the success value in the :class:`Result`.
Returns:
The success value in the :class:`Result`.
Raises:
``ValueError`` with the message provided by the error value
if the :class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).unwrap()
1
>>> try:
... Err(1).unwrap()
... except ValueError as e:
... print(e)
1
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(self._val)
def unwrap_or(self, optb: T) -> T:
"""
Returns the success value in the :class:`Result` or ``optb``.
Args:
optb: The default return value.
Returns:
The success value in the :class:`Result` if it is a
:meth:`Result.Ok` value, otherwise ``optb``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :meth:`unwrap_or_else` instead.
Examples:
>>> Ok(1).unwrap_or(2)
1
>>> Err(1).unwrap_or(2)
2
"""
return cast(T, self._val) if self._is_ok else optb
def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:
"""
Returns the sucess value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to computes default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10
"""
return cast(T, self._val) if self._is_ok else op(cast(E, self._val))
def unwrap_err(self) -> E:
"""
Returns the error value in a :class:`Result`.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by the success value
if the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).unwrap_err()
... except ValueError as e:
... print(e)
1
>>> Err('Oh No').unwrap_err()
'Oh No'
"""
if self._is_ok:
raise ValueError(self._val)
return cast(E, self._val)
def expect_err(self, msg) -> E:
"""
Returns the error value in a :class:`Result`, or raises a
``ValueError`` with the provided message.
Args:
msg: The error message.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by ``msg`` if
the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).expect_err('Oh No')
... except ValueError as e:
... print(e)
Oh No
>>> Err(1).expect_err('Yes')
1
"""
if self._is_ok:
raise ValueError(msg)
return cast(E, self._val)
def __repr__(self):
return f'Ok({self._val!r})' if self._is_ok else f'Err({self._val!r})'
def __hash__(self):
return hash((self._type, self._is_ok, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_ok == other._is_ok
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_ok != other._is_ok
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val < other._val
return self._is_ok
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val <= other._val
return self._is_ok
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val > other._val
return other._is_ok
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val >= other._val
return other._is_ok
return NotImplemented
|
MaT1g3R/option | option/result.py | Result.unwrap_err | python | def unwrap_err(self) -> E:
if self._is_ok:
raise ValueError(self._val)
return cast(E, self._val) | Returns the error value in a :class:`Result`.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by the success value
if the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).unwrap_err()
... except ValueError as e:
... print(e)
1
>>> Err('Oh No').unwrap_err()
'Oh No' | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/result.py#L327-L350 | null | class Result(Generic[T, E]):
"""
:class:`Result` is a type that either success (:meth:`Result.Ok`)
or failure (:meth:`Result.Err`).
To create an Ok value, use :meth:`Result.Ok` or :func:`Ok`.
To create a Err value, use :meth:`Result.Err` or :func:`Err`.
Calling the :class:`Result` constructor directly will raise a ``TypeError``.
Examples:
>>> Result.Ok(1)
Ok(1)
>>> Result.Err('Fail!')
Err('Fail!')
"""
__slots__ = ('_val', '_is_ok', '_type')
def __init__(self, val: Union[T, E], is_ok: bool, *, _force=False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = val
self._is_ok = is_ok
self._type = type(self)
@classmethod
def Ok(cls, val: T) -> 'Result[T, Any]':
"""
Contains the success value.
Args:
val: The success value.
Returns:
The :class:`Result` containing the success value.
Examples:
>>> res = Result.Ok(1)
>>> res
Ok(1)
>>> res.is_ok
True
"""
return cls(val, True, _force=True)
@classmethod
def Err(cls, err: E) -> 'Result[Any, E]':
"""
Contains the error value.
Args:
err: The error value.
Returns:
The :class:`Result` containing the error value.
Examples:
>>> res = Result.Err('Oh No')
>>> res
Err('Oh No')
>>> res.is_err
True
"""
return cls(err, False, _force=True)
def __bool__(self):
return self._is_ok
@property
def is_ok(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Ok`.
Examples:
>>> Ok(1).is_ok
True
>>> Err(1).is_ok
False
"""
return self._is_ok
@property
def is_err(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Err`.
Examples:
>>> Ok(1).is_err
False
>>> Err(1).is_err
True
"""
return not self._is_ok
def ok(self) -> Option[T]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [T].
Returns:
:class:`Option` containing the success value if `self` is
:meth:`Result.Ok`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).ok()
Some(1)
>>> Err(1).ok()
NONE
"""
return Option.Some(cast(T, self._val)) if self._is_ok else cast(Option[T], NONE)
def err(self) -> Option[E]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [E].
Returns:
:class:`Option` containing the error value if `self` is
:meth:`Result.Err`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).err()
NONE
>>> Err(1).err()
Some(1)
"""
return cast(Option[E], NONE) if self._is_ok else Option.Some(cast(E, self._val))
def map(self, op: Callable[[T], U]) -> 'Union[Result[U, E], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
Args:
op: The function to apply to the :meth:`Result.Ok` value.
Returns:
A :class:`Result` with its success value as the function result
if `self` is an :meth:`Result.Ok` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map(lambda x: x * 2)
Ok(2)
>>> Err(1).map(lambda x: x * 2)
Err(1)
"""
return self._type.Ok(op(cast(T, self._val))) if self._is_ok else self
def flatmap(self, op: 'Callable[[T], Result[U, E]]') -> 'Result[U, E]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
This is different than :meth:`Result.map` because the function
result is not wrapped in a new :class:`Result`.
Args:
op: The function to apply to the contained :meth:`Result.Ok` value.
Returns:
The result of the function if `self` is an :meth:`Result.Ok` value,
otherwise returns `self`.
Examples:
>>> def sq(x): return Ok(x * x)
>>> def err(x): return Err(x)
>>> Ok(2).flatmap(sq).flatmap(sq)
Ok(16)
>>> Ok(2).flatmap(sq).flatmap(err)
Err(4)
>>> Ok(2).flatmap(err).flatmap(sq)
Err(2)
>>> Err(3).flatmap(sq).flatmap(sq)
Err(3)
"""
return op(cast(T, self._val)) if self._is_ok else cast('Result[U, E]', self)
def map_err(self, op: Callable[[E], F]) -> 'Union[Result[T, F], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Err` value.
Args:
op: The function to apply to the :meth:`Result.Err` value.
Returns:
A :class:`Result` with its error value as the function result
if `self` is a :meth:`Result.Err` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map_err(lambda x: x * 2)
Ok(1)
>>> Err(1).map_err(lambda x: x * 2)
Err(2)
"""
return self if self._is_ok else cast(
'Result[T, F]',
self._type.Err(op(cast(E, self._val)))
)
def unwrap(self) -> T:
"""
Returns the success value in the :class:`Result`.
Returns:
The success value in the :class:`Result`.
Raises:
``ValueError`` with the message provided by the error value
if the :class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).unwrap()
1
>>> try:
... Err(1).unwrap()
... except ValueError as e:
... print(e)
1
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(self._val)
def unwrap_or(self, optb: T) -> T:
"""
Returns the success value in the :class:`Result` or ``optb``.
Args:
optb: The default return value.
Returns:
The success value in the :class:`Result` if it is a
:meth:`Result.Ok` value, otherwise ``optb``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :meth:`unwrap_or_else` instead.
Examples:
>>> Ok(1).unwrap_or(2)
1
>>> Err(1).unwrap_or(2)
2
"""
return cast(T, self._val) if self._is_ok else optb
def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:
"""
Returns the sucess value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to computes default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10
"""
return cast(T, self._val) if self._is_ok else op(cast(E, self._val))
def expect(self, msg) -> T:
"""
Returns the success value in the :class:`Result` or raises
a ``ValueError`` with a provided message.
Args:
msg: The error message.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value.
Raises:
``ValueError`` with ``msg`` as the message if the
:class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).expect('no')
1
>>> try:
... Err(1).expect('no')
... except ValueError as e:
... print(e)
no
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(msg)
def expect_err(self, msg) -> E:
"""
Returns the error value in a :class:`Result`, or raises a
``ValueError`` with the provided message.
Args:
msg: The error message.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by ``msg`` if
the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).expect_err('Oh No')
... except ValueError as e:
... print(e)
Oh No
>>> Err(1).expect_err('Yes')
1
"""
if self._is_ok:
raise ValueError(msg)
return cast(E, self._val)
def __repr__(self):
return f'Ok({self._val!r})' if self._is_ok else f'Err({self._val!r})'
def __hash__(self):
return hash((self._type, self._is_ok, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_ok == other._is_ok
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_ok != other._is_ok
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val < other._val
return self._is_ok
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val <= other._val
return self._is_ok
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val > other._val
return other._is_ok
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val >= other._val
return other._is_ok
return NotImplemented
|
MaT1g3R/option | option/result.py | Result.expect_err | python | def expect_err(self, msg) -> E:
if self._is_ok:
raise ValueError(msg)
return cast(E, self._val) | Returns the error value in a :class:`Result`, or raises a
``ValueError`` with the provided message.
Args:
msg: The error message.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by ``msg`` if
the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).expect_err('Oh No')
... except ValueError as e:
... print(e)
Oh No
>>> Err(1).expect_err('Yes')
1 | train | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/result.py#L352-L379 | null | class Result(Generic[T, E]):
"""
:class:`Result` is a type that either success (:meth:`Result.Ok`)
or failure (:meth:`Result.Err`).
To create an Ok value, use :meth:`Result.Ok` or :func:`Ok`.
To create a Err value, use :meth:`Result.Err` or :func:`Err`.
Calling the :class:`Result` constructor directly will raise a ``TypeError``.
Examples:
>>> Result.Ok(1)
Ok(1)
>>> Result.Err('Fail!')
Err('Fail!')
"""
__slots__ = ('_val', '_is_ok', '_type')
def __init__(self, val: Union[T, E], is_ok: bool, *, _force=False) -> None:
if not _force:
raise TypeError(
'Cannot directly initialize, '
'please use one of the factory functions instead.'
)
self._val = val
self._is_ok = is_ok
self._type = type(self)
@classmethod
def Ok(cls, val: T) -> 'Result[T, Any]':
"""
Contains the success value.
Args:
val: The success value.
Returns:
The :class:`Result` containing the success value.
Examples:
>>> res = Result.Ok(1)
>>> res
Ok(1)
>>> res.is_ok
True
"""
return cls(val, True, _force=True)
@classmethod
def Err(cls, err: E) -> 'Result[Any, E]':
"""
Contains the error value.
Args:
err: The error value.
Returns:
The :class:`Result` containing the error value.
Examples:
>>> res = Result.Err('Oh No')
>>> res
Err('Oh No')
>>> res.is_err
True
"""
return cls(err, False, _force=True)
def __bool__(self):
return self._is_ok
@property
def is_ok(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Ok`.
Examples:
>>> Ok(1).is_ok
True
>>> Err(1).is_ok
False
"""
return self._is_ok
@property
def is_err(self) -> bool:
"""
Returns `True` if the result is :meth:`Result.Err`.
Examples:
>>> Ok(1).is_err
False
>>> Err(1).is_err
True
"""
return not self._is_ok
def ok(self) -> Option[T]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [T].
Returns:
:class:`Option` containing the success value if `self` is
:meth:`Result.Ok`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).ok()
Some(1)
>>> Err(1).ok()
NONE
"""
return Option.Some(cast(T, self._val)) if self._is_ok else cast(Option[T], NONE)
def err(self) -> Option[E]:
"""
Converts from :class:`Result` [T, E] to :class:`option.option_.Option` [E].
Returns:
:class:`Option` containing the error value if `self` is
:meth:`Result.Err`, otherwise :data:`option.option_.NONE`.
Examples:
>>> Ok(1).err()
NONE
>>> Err(1).err()
Some(1)
"""
return cast(Option[E], NONE) if self._is_ok else Option.Some(cast(E, self._val))
def map(self, op: Callable[[T], U]) -> 'Union[Result[U, E], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
Args:
op: The function to apply to the :meth:`Result.Ok` value.
Returns:
A :class:`Result` with its success value as the function result
if `self` is an :meth:`Result.Ok` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map(lambda x: x * 2)
Ok(2)
>>> Err(1).map(lambda x: x * 2)
Err(1)
"""
return self._type.Ok(op(cast(T, self._val))) if self._is_ok else self
def flatmap(self, op: 'Callable[[T], Result[U, E]]') -> 'Result[U, E]':
"""
Applies a function to the contained :meth:`Result.Ok` value.
This is different than :meth:`Result.map` because the function
result is not wrapped in a new :class:`Result`.
Args:
op: The function to apply to the contained :meth:`Result.Ok` value.
Returns:
The result of the function if `self` is an :meth:`Result.Ok` value,
otherwise returns `self`.
Examples:
>>> def sq(x): return Ok(x * x)
>>> def err(x): return Err(x)
>>> Ok(2).flatmap(sq).flatmap(sq)
Ok(16)
>>> Ok(2).flatmap(sq).flatmap(err)
Err(4)
>>> Ok(2).flatmap(err).flatmap(sq)
Err(2)
>>> Err(3).flatmap(sq).flatmap(sq)
Err(3)
"""
return op(cast(T, self._val)) if self._is_ok else cast('Result[U, E]', self)
def map_err(self, op: Callable[[E], F]) -> 'Union[Result[T, F], Result[T, E]]':
"""
Applies a function to the contained :meth:`Result.Err` value.
Args:
op: The function to apply to the :meth:`Result.Err` value.
Returns:
A :class:`Result` with its error value as the function result
if `self` is a :meth:`Result.Err` value, otherwise returns
`self`.
Examples:
>>> Ok(1).map_err(lambda x: x * 2)
Ok(1)
>>> Err(1).map_err(lambda x: x * 2)
Err(2)
"""
return self if self._is_ok else cast(
'Result[T, F]',
self._type.Err(op(cast(E, self._val)))
)
def unwrap(self) -> T:
"""
Returns the success value in the :class:`Result`.
Returns:
The success value in the :class:`Result`.
Raises:
``ValueError`` with the message provided by the error value
if the :class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).unwrap()
1
>>> try:
... Err(1).unwrap()
... except ValueError as e:
... print(e)
1
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(self._val)
def unwrap_or(self, optb: T) -> T:
"""
Returns the success value in the :class:`Result` or ``optb``.
Args:
optb: The default return value.
Returns:
The success value in the :class:`Result` if it is a
:meth:`Result.Ok` value, otherwise ``optb``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :meth:`unwrap_or_else` instead.
Examples:
>>> Ok(1).unwrap_or(2)
1
>>> Err(1).unwrap_or(2)
2
"""
return cast(T, self._val) if self._is_ok else optb
def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:
"""
Returns the sucess value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to computes default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10
"""
return cast(T, self._val) if self._is_ok else op(cast(E, self._val))
def expect(self, msg) -> T:
"""
Returns the success value in the :class:`Result` or raises
a ``ValueError`` with a provided message.
Args:
msg: The error message.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value.
Raises:
``ValueError`` with ``msg`` as the message if the
:class:`Result` is a :meth:`Result.Err` value.
Examples:
>>> Ok(1).expect('no')
1
>>> try:
... Err(1).expect('no')
... except ValueError as e:
... print(e)
no
"""
if self._is_ok:
return cast(T, self._val)
raise ValueError(msg)
def unwrap_err(self) -> E:
"""
Returns the error value in a :class:`Result`.
Returns:
The error value in the :class:`Result` if it is a
:meth:`Result.Err` value.
Raises:
``ValueError`` with the message provided by the success value
if the :class:`Result` is a :meth:`Result.Ok` value.
Examples:
>>> try:
... Ok(1).unwrap_err()
... except ValueError as e:
... print(e)
1
>>> Err('Oh No').unwrap_err()
'Oh No'
"""
if self._is_ok:
raise ValueError(self._val)
return cast(E, self._val)
def __repr__(self):
return f'Ok({self._val!r})' if self._is_ok else f'Err({self._val!r})'
def __hash__(self):
return hash((self._type, self._is_ok, self._val))
def __eq__(self, other):
return (isinstance(other, self._type)
and self._is_ok == other._is_ok
and self._val == other._val)
def __ne__(self, other):
return (not isinstance(other, self._type)
or self._is_ok != other._is_ok
or self._val != other._val)
def __lt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val < other._val
return self._is_ok
return NotImplemented
def __le__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val <= other._val
return self._is_ok
return NotImplemented
def __gt__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val > other._val
return other._is_ok
return NotImplemented
def __ge__(self, other):
if isinstance(other, self._type):
if self._is_ok == other._is_ok:
return self._val >= other._val
return other._is_ok
return NotImplemented
|
tjomasc/snekbol | snekbol/document.py | Document.add_component_definition | python | def add_component_definition(self, definition):
# definition.identity = self._to_uri_from_namespace(definition.identity)
if definition.identity not in self._components.keys():
self._components[definition.identity] = definition
else:
raise ValueError("{} has already been defined".format(definition.identity)) | Add a ComponentDefinition to the document | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L79-L87 | null | class Document(object):
"""
Provides a base for creating SBOL documents
"""
def __init__(self,
namespace,
validate=True):
# Don't access directly: use function getter/setters
self._components = {}
self._sequences = {}
self._namespaces = {}
self._models = {}
self._modules = {}
self._collections = {}
self._annotations = {}
# Used for looking up functional components when reading
# in data from a file
self._functional_component_store = {}
self._collection_store = {}
if validators.url(namespace):
self.document_namespace = namespace
else:
raise Exception('Invalid namespace URI')
self.validate = validate
# Create a document namesspace for use in RDF serialization
self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
"""
Add a namespace to the document
"""
self._namespaces[prefix] = namespace
def get_namespaces(self):
"""
Get all namespaces in the document (inc default)
"""
return XML_NS + self._namespaces
def _to_uri_from_namespace(self, value):
"""
Take a value and make a URL using the document namespace
"""
return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
"""
Take an existing URI and append more data to it
"""
return urljoin(uri, value)
def remove_component_definition(self, identity):
"""
Remove a ComponentDefinition from the document
"""
try:
self._components.pop(identity)
except KeyError:
pass
def get_component_definition(self, uri):
"""
Get a ComponentDefintion from the document
"""
try:
definition = self._components[uri]
except KeyError:
return None
return definition
def list_components(self):
"""
List of all ComponentDefinitions in the document
"""
return self._components.values()
def assemble_component(self, into_component, using_components):
"""
Assemble a list of already defined components into a structual hirearchy
"""
if not isinstance(using_components, list) or len(using_components) == 0:
raise Exception('Must supply list of ComponentDefinitions')
components = []
sequence_annotations = []
seq_elements = ''
for k, c in enumerate(using_components):
try:
self._components[c.identity]
except KeyError:
raise Exception('Must already have defined ComponentDefinition in document')
else:
identity = into_component.identity + '/' + c.identity
# All components are initially public, this can be changed later
component = Component(identity,
c,
'public',
display_id=c.identity)
components.append(component)
# If there is a sequence on the ComponentDefinition use the first element
if len(c.sequences) > 0:
# Add the sequence to the document
self._add_sequence(c.sequences[0])
# Get start/end points of sequence
start = len(seq_elements) + 1 # The sequence is usually 1 indexed
end = start + len(c.sequences[0].elements)
# Add to the component sequence element
seq_elements += c.sequences[0].elements
# Create a Range object to hold seq range
range_identity = identity + '_sequence_annotation/range'
seq_range = Range(range_identity, start, end, display_id='range')
# Create a SequenceAnnotation object to hold the range
annot_identity = identity + '_sequence_annotation'
seq_annot = SequenceAnnotation(annot_identity,
component=component,
locations=[seq_range],
display_id=c.identity + '_sequence_annotation')
sequence_annotations.append(seq_annot)
if seq_elements != '':
seq_encoding = using_components[0].sequences[0].encoding
seq_identity = '{}_sequence'.format(into_component.identity)
seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
self._add_sequence(seq)
into_component.sequences.append(seq)
into_component.components = components
into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
"""
Add a Sequence to the document
"""
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity))
def add_model(self, model):
    """
    Add a model to the document.

    :param model: Model object with a unique ``identity``
    :raises ValueError: if a model with the same identity already exists
    """
    # Membership test directly on the dict -- ``.keys()`` was redundant
    if model.identity not in self._models:
        self._models[model.identity] = model
    else:
        raise ValueError("{} has already been defined".format(model.identity))
def remove_model(self, identity):
    """
    Remove a Model from the document.

    Unknown identities are silently ignored, matching the previous
    try/except KeyError behaviour.

    :param identity: identity URI of the model to remove
    """
    self._models.pop(identity, None)
def get_model(self, uri):
    """
    Get a Model for the document.

    :param uri: identity URI of the model to look up
    :returns: not yet implemented (always None)
    """
    # TODO: unimplemented stub -- presumably should look up self._models[uri]
    pass
def add_module_definition(self, module_definition):
    """
    Add a ModuleDefinition to the document.

    :param module_definition: ModuleDefinition with a unique ``identity``
    :raises ValueError: if the identity has already been defined
    """
    # Fix: previously stored into self._module_definitions, an attribute
    # __init__ never creates (always AttributeError).  The module store is
    # self._modules -- see __init__ and _read_module_definitions.
    if module_definition.identity not in self._modules:
        self._modules[module_definition.identity] = module_definition
    else:
        raise ValueError("{} has already been defined".format(module_definition.identity))
def remove_module_definition(self, identity):
    """
    Remove a ModuleDefinition from the document.

    Unknown identities are silently ignored.

    :param identity: identity URI of the module definition to remove
    """
    # Fix: self._module_definitions never exists (see add_module_definition);
    # the store is self._modules.  pop with a default replaces try/except.
    self._modules.pop(identity, None)
def get_module_definition(self, uri):
    """
    Get a ModuleDefinition from the document.

    :param uri: identity URI of the module definition to look up
    :returns: not yet implemented (always None)
    """
    # TODO: unimplemented stub -- presumably should look up self._modules[uri]
    pass
def find(self, uri):
    """
    Recursively search document for URI.

    :param uri: identity URI to locate anywhere in the document
    :returns: not yet implemented (always None)
    """
    # TODO: unimplemented stub
    pass
def get_components(self, uri):
    """
    Return the Components of a ComponentDefinition in sequence order.

    Ordering follows each SequenceAnnotation's ``first_location`` attribute.

    :param uri: identity URI of the component definition
    :returns: ordered list of Component objects, or False for an unknown URI
    """
    if uri not in self._components:
        return False
    definition = self._components[uri]
    ordered = sorted(definition.sequence_annotations,
                     key=attrgetter('first_location'))
    return [annotation.component for annotation in ordered]
def clear_document(self):
    """
    Reset the document to an empty state by clearing every internal store.
    """
    stores = (
        self._components,
        self._sequences,
        self._namespaces,
        self._models,
        self._modules,
        self._collections,
        self._annotations,
        self._functional_component_store,
        self._collection_store,
    )
    for store in stores:
        store.clear()
def _get_elements(self, graph, element_type):
    # Yield every (subject, RDF.type, element_type) triple, i.e. all nodes
    # declared in the graph to be of the given SBOL type.
    return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
"""
Get a value from an RDF triple
"""
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values
def _get_rdf_identified(self, graph, identity):
    """
    Extract the common SBOL "Identified" properties for *identity* from the
    graph and return them as a dict suitable for passing as **kwargs to the
    model constructors (identity, display_id, version, name, description,
    was_derived_from, annotations).

    Triples on *identity* whose prefixed predicate is not listed in
    VALID_ENTITIES are collected as custom Annotation objects.
    """
    c = {}
    # identity may arrive as an rdflib term or a plain string
    c['identity'] = identity.toPython() if type(identity) is not str else identity
    c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
    c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
    c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
    c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
    c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
    # Invert prefix -> namespace so a namespace can be mapped back to a prefix
    flipped_namespaces = {v: k for k, v in self._namespaces.items()}
    # Get annotations (non top level)
    c['annotations'] = []
    for triple in graph.triples((identity, None, None)):
        namespace, obj = split_uri(triple[1])
        # NOTE(review): raises KeyError if a predicate namespace was never
        # registered via the document namespaces -- confirm that is intended
        prefix = flipped_namespaces[namespace]
        as_string = '{}:{}'.format(prefix, obj)
        if as_string not in VALID_ENTITIES:
            # Unknown predicate: preserve it as a custom annotation
            q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
            if isinstance(triple[2], URIRef):
                value = AnnotationValue(uri=triple[2].toPython())
            elif isinstance(triple[2], Literal):
                value = AnnotationValue(literal=triple[2].toPython())
            else:
                value = None
            c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
    return c
def _read_sequences(self, graph):
    """
    Read graph and add sequences to document.

    Populates self._sequences and self._collection_store (the latter is
    consulted by _read_collections to resolve collection members).
    """
    for e in self._get_elements(graph, SBOL.Sequence):
        identity = e[0]  # subject of the (s, RDF.type, SBOL.Sequence) triple
        c = self._get_rdf_identified(graph, identity)
        c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
        c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
        seq = Sequence(**c)
        self._sequences[identity.toPython()] = seq
        self._collection_store[identity.toPython()] = seq
def _read_component_definitions(self, graph):
    """
    Read graph and add component defintions to document.

    Only the definitions themselves are created here; sub-components,
    annotations and constraints are attached afterwards by
    _extend_component_definitions.
    """
    for e in self._get_elements(graph, SBOL.ComponentDefinition):
        identity = e[0]
        # Store component values in dict
        c = self._get_rdf_identified(graph, identity)
        c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
        c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
        obj = ComponentDefinition(**c)
        self._components[identity.toPython()] = obj
        self._collection_store[identity.toPython()] = obj
def _extend_component_definitions(self, graph):
    """
    Read graph and update component definitions with related elements:
    sub-components, sequence annotations and sequence constraints.

    Must run after _read_component_definitions so every definition a
    Component points at already exists in self._components.
    """
    for def_uri, comp_def in self._components.items():
        # Store created components indexed for later lookup
        component_index = {}
        identity = URIRef(def_uri)
        # Get components
        for comp in graph.triples((identity, SBOL.component, None)):
            comp_identity = comp[2]
            ci = self._get_rdf_identified(graph, comp_identity)
            ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
            ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
            component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
            ci['definition'] = self._components[component_comp_def]
            c = Component(**ci)
            component_index[ci['identity']] = c
        self._components[def_uri].components = list(component_index.values())
        # Get sequence annotations
        # NOTE(review): the capitalised SBOL.SequenceAnnotation fallback
        # presumably tolerates non-conforming writers -- confirm.
        if (identity, SBOL.sequenceAnnotation, None) in graph:
            find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
        else:
            find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
        sequence_annotations = []
        for seq_annot in graph.triples(find_annotation_using):
            seq_identity = seq_annot[2]
            sa = self._get_rdf_identified(graph, seq_identity)
            component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
            sa['component'] = component_index[component_to_use]
            sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
            locations = []
            for loc in graph.triples((seq_identity, SBOL.location, None)):
                loc_identity = loc[2]
                location = self._get_rdf_identified(graph, loc_identity)
                location['orientation'] = self._get_triplet_value(graph, loc_identity,
                                                                  SBOL.orientation)
                # Dispatch on the concrete Location subtype declared in RDF
                location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
                if location_type == SBOL.Range:
                    location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
                    location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
                    locations.append(Range(**location))
                elif location_type == SBOL.Cut:
                    location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
                    locations.append(Cut(**location))
                else:
                    locations.append(GenericLocation(**location))
            sa_obj = SequenceAnnotation(locations=locations, **sa)
            sequence_annotations.append(sa_obj)
        self._components[def_uri].sequence_annotations = sequence_annotations
        # Get sequence constraints
        if (identity, SBOL.sequenceConstraint, None) in graph:
            find_constraint_using = (identity, SBOL.sequenceConstraint, None)
        else:
            find_constraint_using = (identity, SBOL.SequenceConstraint, None)
        sequence_constraints = []
        for seq_constraint in graph.triples(find_constraint_using):
            seq_identity = seq_constraint[2]
            sc = self._get_rdf_identified(graph, seq_identity)
            sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
            subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
            sc['subject'] = component_index[subject_id]
            object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
            # Object is a reserved word so call it obj to prevent clashes
            sc['obj'] = component_index[object_id]
            sc_obj = SequenceConstraint(**sc)
            sequence_constraints.append(sc_obj)
        self._components[def_uri].sequence_constraints = sequence_constraints
def _read_models(self, graph):
    """
    Read graph and add models to document.

    Populates self._models and self._collection_store.
    """
    for e in self._get_elements(graph, SBOL.Model):
        identity = e[0]
        m = self._get_rdf_identified(graph, identity)
        m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
        m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
        m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
        obj = Model(**m)
        self._models[identity.toPython()] = obj
        self._collection_store[identity.toPython()] = obj
def _read_module_definitions(self, graph):
    """
    Read graph and add module defintions to document.

    Builds each ModuleDefinition's FunctionalComponents first, since the
    Participations created for its Interactions reference them by identity.
    Also fills self._functional_component_store, which
    _extend_module_definitions later uses to resolve MapsTo endpoints.
    """
    for e in self._get_elements(graph, SBOL.ModuleDefinition):
        identity = e[0]
        m = self._get_rdf_identified(graph, identity)
        m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
        functional_components = {}
        for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
            func_identity = func_comp[2]
            fc = self._get_rdf_identified(graph, func_identity)
            definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
            fc['definition'] = self._components[definition]
            fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
            fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
            functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
            self._functional_component_store[func_identity.toPython()] = \
                functional_components[func_identity.toPython()]
        interactions = []
        for inter in graph.triples((identity, SBOL.interaction, None)):
            inter_identity = inter[2]
            it = self._get_rdf_identified(graph, inter_identity)
            # NOTE(review): SBOL.types looks inconsistent with SBOL.type used
            # in _read_component_definitions -- confirm against the SBOL
            # vocabulary; a wrong predicate would leave types always empty.
            it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
            participations = []
            for p in graph.triples((inter_identity, SBOL.participation, None)):
                pc = self._get_rdf_identified(graph, p[2])
                roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
                # Need to use one of the functional component created above
                participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
                participant = functional_components[participant_id]
                participations.append(Participation(roles=roles, participant=participant, **pc))
            interactions.append(Interaction(participations=participations, **it))
        obj = ModuleDefinition(functional_components=functional_components.values(),
                               interactions=interactions,
                               **m)
        self._modules[identity.toPython()] = obj
        self._collection_store[identity.toPython()] = obj
def _extend_module_definitions(self, graph):
    """
    Using collected module definitions extend linkages: attach sub-Modules
    and their MapsTo mappings to each ModuleDefinition.

    Must run after _read_module_definitions so self._modules and
    self._functional_component_store are populated.
    """
    for mod_id in self._modules:
        # NOTE(review): looking up SBOL.module on mod_id and then querying
        # SBOL.module again on the *result* looks suspicious (double
        # indirection) -- confirm against sample SBOL files.
        mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
        modules = []
        for mod in graph.triples((mod_identity, SBOL.module, None)):
            md = self._get_rdf_identified(graph, mod[2])
            definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
            md['definition'] = self._modules[definition_id]
            maps_to = []
            for m in graph.triples((mod[2], SBOL.mapsTo, None)):
                mt = self._get_rdf_identified(graph, m[2])
                mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
                local_id = self._get_triplet_value(graph, m[2], SBOL.local)
                remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
                mt['local'] = self._functional_component_store[local_id]
                mt['remote'] = self._functional_component_store[remote_id]
                maps_to.append(MapsTo(**mt))
            modules.append(Module(maps_to=maps_to, **md))
        self._modules[mod_id].modules = modules
def _read_annotations(self, graph):
    """
    Find any non-defined elements at TopLevel and create annotations.

    Any subject whose RDF type is not a known SBOL entity (per
    VALID_ENTITIES) becomes a GenericTopLevel stored in self._annotations
    and self._collection_store.
    """
    # Invert prefix -> namespace so a namespace can be mapped to its prefix
    flipped_namespaces = {v: k for k, v in self._namespaces.items()}
    for triple in graph.triples((None, RDF.type, None)):
        namespace, obj = split_uri(triple[2])
        prefix = flipped_namespaces[namespace]
        as_string = '{}:{}'.format(prefix, obj)
        if as_string not in VALID_ENTITIES:
            identity = triple[0]
            gt = self._get_rdf_identified(graph, identity)
            q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
            gt['rdf_type'] = q_name
            gt_obj = GenericTopLevel(**gt)
            self._annotations[identity.toPython()] = gt_obj
            self._collection_store[identity.toPython()] = gt_obj
def _read_collections(self, graph):
    """
    Read graph and add collections to document.

    Must run last of the _read_* methods: members are resolved through
    self._collection_store, which every other reader fills first.
    """
    for e in self._get_elements(graph, SBOL.Collection):
        identity = e[0]
        c = self._get_rdf_identified(graph, identity)
        members = []
        # Need to handle other non-standard TopLevel objects first
        for m in graph.triples((identity, SBOL.member, None)):
            members.append(self._collection_store[m[2].toPython()])
        obj = Collection(members=members, **c)
        self._collections[identity.toPython()] = obj
def read(self, f):
    """
    Read in an SBOL file, replacing current document contents.

    :param f: file path or file-like object accepted by rdflib's
        ``Graph.parse`` (RDF/XML format)
    """
    self.clear_document()
    g = Graph()
    g.parse(f, format='xml')
    for n in g.namespaces():
        ns = n[1].toPython()
        # Normalise namespaces so they always end in a separator character
        if not ns.endswith(('#', '/', ':')):
            ns = ns + '/'
        self._namespaces[n[0]] = ns
        # Extend the existing namespaces available
        XML_NS[n[0]] = ns
    # Order matters: definitions are created before being cross-linked by
    # the _extend_* passes, and collections are read last (they resolve
    # members through self._collection_store filled by the other readers).
    self._read_sequences(g)
    self._read_component_definitions(g)
    self._extend_component_definitions(g)
    self._read_models(g)
    self._read_module_definitions(g)
    self._extend_module_definitions(g)
    self._read_annotations(g)
    # Last as this needs all other top level objects created
    self._read_collections(g)
def append(self, f):
    """
    Read an SBOL file and append contents to current document.

    :param f: file path or file-like object
    """
    # TODO: unimplemented stub -- presumably like read() but without the
    # initial clear_document() call
    pass
def _add_to_root(self, root_node, elements):
for item in elements:
elem = item._as_rdf_xml(self.ns)
root_node.append(elem)
def write(self, f):
    """
    Write an SBOL file from current document contents.

    Each category of top-level object is serialized in identity-sorted
    order so the output is deterministic between runs.

    :param f: binary file-like object opened for writing
    """
    rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
    # TODO: TopLevel Annotations
    sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, sequence_values)
    component_values = sorted(self._components.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, component_values)
    model_values = sorted(self._models.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, model_values)
    module_values = sorted(self._modules.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, module_values)
    collection_values = sorted(self._collections.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, collection_values)
    annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, annotation_values)
    f.write(ET.tostring(rdf,
                        pretty_print=True,
                        xml_declaration=True,
                        encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document.assemble_component | python | def assemble_component(self, into_component, using_components):
if not isinstance(using_components, list) or len(using_components) == 0:
raise Exception('Must supply list of ComponentDefinitions')
components = []
sequence_annotations = []
seq_elements = ''
for k, c in enumerate(using_components):
try:
self._components[c.identity]
except KeyError:
raise Exception('Must already have defined ComponentDefinition in document')
else:
identity = into_component.identity + '/' + c.identity
# All components are initially public, this can be changed later
component = Component(identity,
c,
'public',
display_id=c.identity)
components.append(component)
# If there is a sequence on the ComponentDefinition use the first element
if len(c.sequences) > 0:
# Add the sequence to the document
self._add_sequence(c.sequences[0])
# Get start/end points of sequence
start = len(seq_elements) + 1 # The sequence is usually 1 indexed
end = start + len(c.sequences[0].elements)
# Add to the component sequence element
seq_elements += c.sequences[0].elements
# Create a Range object to hold seq range
range_identity = identity + '_sequence_annotation/range'
seq_range = Range(range_identity, start, end, display_id='range')
# Create a SequenceAnnotation object to hold the range
annot_identity = identity + '_sequence_annotation'
seq_annot = SequenceAnnotation(annot_identity,
component=component,
locations=[seq_range],
display_id=c.identity + '_sequence_annotation')
sequence_annotations.append(seq_annot)
if seq_elements != '':
seq_encoding = using_components[0].sequences[0].encoding
seq_identity = '{}_sequence'.format(into_component.identity)
seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
self._add_sequence(seq)
into_component.sequences.append(seq)
into_component.components = components
into_component.sequence_annotations = sequence_annotations | Assemble a list of already defined components into a structual hirearchy | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L114-L168 | [
"def _add_sequence(self, sequence):\n \"\"\"\n Add a Sequence to the document\n \"\"\"\n if sequence.identity not in self._sequences.keys():\n self._sequences[sequence.identity] = sequence\n else:\n raise ValueError(\"{} has already been defined\".format(sequence.identity))\n"
] | class Document(object):
"""
Provides a base for creating SBOL documents
"""
def __init__(self,
             namespace,
             validate=True):
    """
    Create an empty SBOL document rooted at *namespace*.

    :param namespace: base URI for all identities in this document
    :param validate: whether validation should be applied (stored only)
    :raises Exception: if *namespace* is not a valid URL
    """
    # Don't access directly: use function getter/setters
    self._components = {}
    self._sequences = {}
    self._namespaces = {}
    self._models = {}
    self._modules = {}
    self._collections = {}
    self._annotations = {}
    # Used for looking up functional components when reading
    # in data from a file
    self._functional_component_store = {}
    self._collection_store = {}
    if validators.url(namespace):
        self.document_namespace = namespace
    else:
        raise Exception('Invalid namespace URI')
    self.validate = validate
    # Create a document namespace for use in RDF serialization
    self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
    """
    Add a namespace to the document.

    :param namespace: namespace URI
    :param prefix: short prefix the namespace is registered under
        (an existing registration for the prefix is overwritten)
    """
    self._namespaces[prefix] = namespace
def get_namespaces(self):
    """
    Get all namespaces in the document (inc default).

    :returns: dict mapping prefix -> namespace URI
    """
    # Fix: XML_NS and self._namespaces are both dicts (XML_NS is assigned
    # into with XML_NS[n[0]] = ns in read() and passed as nsmap), so the
    # old ``XML_NS + self._namespaces`` raised TypeError.  Merge instead;
    # document-local prefixes override the defaults.
    return {**XML_NS, **self._namespaces}
def _to_uri_from_namespace(self, value):
"""
Take a value and make a URL using the document namespace
"""
return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
"""
Take an existing URI and append more data to it
"""
return urljoin(uri, value)
def add_component_definition(self, definition):
    """
    Add a ComponentDefinition to the document.

    :param definition: ComponentDefinition with a unique ``identity``
    :raises ValueError: if the identity has already been defined
    """
    # Membership test directly on the dict -- ``.keys()`` was redundant;
    # stale commented-out identity rewriting removed.
    if definition.identity not in self._components:
        self._components[definition.identity] = definition
    else:
        raise ValueError("{} has already been defined".format(definition.identity))
def remove_component_definition(self, identity):
    """
    Remove a ComponentDefinition from the document.

    Unknown identities are silently ignored, matching the previous
    try/except KeyError behaviour.

    :param identity: identity URI of the definition to remove
    """
    self._components.pop(identity, None)
def get_component_definition(self, uri):
    """
    Get a ComponentDefintion from the document.

    :param uri: identity URI of the definition
    :returns: the ComponentDefinition, or None when *uri* is unknown
    """
    return self._components.get(uri)
def list_components(self):
    """
    List of all ComponentDefinitions in the document.

    :returns: a dict view over the stored ComponentDefinition objects
    """
    return self._components.values()
def _add_sequence(self, sequence):
"""
Add a Sequence to the document
"""
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity))
def add_model(self, model):
"""
Add a model to the document
"""
if model.identity not in self._models.keys():
self._models[model.identity] = model
else:
raise ValueError("{} has already been defined".format(model.identity))
def remove_model(self, identity):
"""
Remove a Model from the document
"""
try:
self._models.pop(identity)
except KeyError:
pass
def get_model(self, uri):
"""
Get a Model for the document
"""
pass
def add_module_definition(self, module_definition):
"""
Add a ModuleDefinition to the document
"""
if module_definition.identity not in self._module_definitions.keys():
self._module_definitions[module_definition.identity] = module_definition
else:
raise ValueError("{} has already been defined".format(module_definition.identity))
def remove_module_definition(self, identity):
"""
Remove a ModuleDefinition from the document
"""
try:
self._module_definitions.pop(identity)
except KeyError:
pass
def get_module_definition(self, uri):
"""
Get a ModuleDefinition from the document
"""
pass
def find(self, uri):
"""
Recursivly search document for URI
"""
pass
def get_components(self, uri):
"""
Get components from a component definition in order
"""
try:
component_definition = self._components[uri]
except KeyError:
return False
sorted_sequences = sorted(component_definition.sequence_annotations,
key=attrgetter('first_location'))
return [c.component for c in sorted_sequences]
def clear_document(self):
"""
Clears ALL items from document, reseting it to clean
"""
self._components.clear()
self._sequences.clear()
self._namespaces.clear()
self._models.clear()
self._modules.clear()
self._collections.clear()
self._annotations.clear()
self._functional_component_store.clear()
self._collection_store.clear()
def _get_elements(self, graph, element_type):
return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
"""
Get a value from an RDF triple
"""
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values
def _get_rdf_identified(self, graph, identity):
c = {}
c['identity'] = identity.toPython() if type(identity) is not str else identity
c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
# Get annotations (non top level)
c['annotations'] = []
for triple in graph.triples((identity, None, None)):
namespace, obj = split_uri(triple[1])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
if isinstance(triple[2], URIRef):
value = AnnotationValue(uri=triple[2].toPython())
elif isinstance(triple[2], Literal):
value = AnnotationValue(literal=triple[2].toPython())
else:
value = None
c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
return c
def _read_sequences(self, graph):
"""
Read graph and add sequences to document
"""
for e in self._get_elements(graph, SBOL.Sequence):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
seq = Sequence(**c)
self._sequences[identity.toPython()] = seq
self._collection_store[identity.toPython()] = seq
def _read_component_definitions(self, graph):
"""
Read graph and add component defintions to document
"""
for e in self._get_elements(graph, SBOL.ComponentDefinition):
identity = e[0]
# Store component values in dict
c = self._get_rdf_identified(graph, identity)
c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
obj = ComponentDefinition(**c)
self._components[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_component_definitions(self, graph):
"""
Read graph and update component definitions with related elements
"""
for def_uri, comp_def in self._components.items():
# Store created components indexed for later lookup
component_index = {}
identity = URIRef(def_uri)
# Get components
for comp in graph.triples((identity, SBOL.component, None)):
comp_identity = comp[2]
ci = self._get_rdf_identified(graph, comp_identity)
ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
ci['definition'] = self._components[component_comp_def]
c = Component(**ci)
component_index[ci['identity']] = c
self._components[def_uri].components = list(component_index.values())
# Get sequence annotations
if (identity, SBOL.sequenceAnnotation, None) in graph:
find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
else:
find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
sequence_annotations = []
for seq_annot in graph.triples(find_annotation_using):
seq_identity = seq_annot[2]
sa = self._get_rdf_identified(graph, seq_identity)
component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
sa['component'] = component_index[component_to_use]
sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
locations = []
for loc in graph.triples((seq_identity, SBOL.location, None)):
loc_identity = loc[2]
location = self._get_rdf_identified(graph, loc_identity)
location['orientation'] = self._get_triplet_value(graph, loc_identity,
SBOL.orientation)
location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
if location_type == SBOL.Range:
location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
locations.append(Range(**location))
elif location_type == SBOL.Cut:
location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
locations.append(Cut(**location))
else:
locations.append(GenericLocation(**location))
sa_obj = SequenceAnnotation(locations=locations, **sa)
sequence_annotations.append(sa_obj)
self._components[def_uri].sequence_annotations = sequence_annotations
# Get sequence constraints
if (identity, SBOL.sequenceConstraint, None) in graph:
find_constraint_using = (identity, SBOL.sequenceConstraint, None)
else:
find_constraint_using = (identity, SBOL.SequenceConstraint, None)
sequence_constraints = []
for seq_constraint in graph.triples(find_constraint_using):
seq_identity = seq_constraint[2]
sc = self._get_rdf_identified(graph, seq_identity)
sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
sc['subject'] = component_index[subject_id]
object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
# Object is a reserved word so call it obj to prevent clashes
sc['obj'] = component_index[object_id]
sc_obj = SequenceConstraint(**sc)
sequence_constraints.append(sc_obj)
self._components[def_uri].sequence_constraints = sequence_constraints
def _read_models(self, graph):
"""
Read graph and add models to document
"""
for e in self._get_elements(graph, SBOL.Model):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
obj = Model(**m)
self._models[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _read_module_definitions(self, graph):
"""
Read graph and add module defintions to document
"""
for e in self._get_elements(graph, SBOL.ModuleDefinition):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
functional_components = {}
for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
func_identity = func_comp[2]
fc = self._get_rdf_identified(graph, func_identity)
definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
fc['definition'] = self._components[definition]
fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
self._functional_component_store[func_identity.toPython()] = \
functional_components[func_identity.toPython()]
interactions = []
for inter in graph.triples((identity, SBOL.interaction, None)):
inter_identity = inter[2]
it = self._get_rdf_identified(graph, inter_identity)
it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
participations = []
for p in graph.triples((inter_identity, SBOL.participation, None)):
pc = self._get_rdf_identified(graph, p[2])
roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
# Need to use one of the functional component created above
participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
participant = functional_components[participant_id]
participations.append(Participation(roles=roles, participant=participant, **pc))
interactions.append(Interaction(participations=participations, **it))
obj = ModuleDefinition(functional_components=functional_components.values(),
interactions=interactions,
**m)
self._modules[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_module_definitions(self, graph):
"""
Using collected module definitions extend linkages
"""
for mod_id in self._modules:
mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
modules = []
for mod in graph.triples((mod_identity, SBOL.module, None)):
md = self._get_rdf_identified(graph, mod[2])
definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
md['definition'] = self._modules[definition_id]
maps_to = []
for m in graph.triples((mod[2], SBOL.mapsTo, None)):
mt = self._get_rdf_identified(graph, m[2])
mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
local_id = self._get_triplet_value(graph, m[2], SBOL.local)
remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
mt['local'] = self._functional_component_store[local_id]
mt['remote'] = self._functional_component_store[remote_id]
maps_to.append(MapsTo(**mt))
modules.append(Module(maps_to=maps_to, **md))
self._modules[mod_id].modules = modules
def _read_annotations(self, graph):
"""
Find any non-defined elements at TopLevel and create annotations
"""
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
for triple in graph.triples((None, RDF.type, None)):
namespace, obj = split_uri(triple[2])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
identity = triple[0]
gt = self._get_rdf_identified(graph, identity)
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
gt['rdf_type'] = q_name
gt_obj = GenericTopLevel(**gt)
self._annotations[identity.toPython()] = gt_obj
self._collection_store[identity.toPython()] = gt_obj
def _read_collections(self, graph):
"""
Read graph and add collections to document
"""
for e in self._get_elements(graph, SBOL.Collection):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
members = []
# Need to handle other non-standard TopLevel objects first
for m in graph.triples((identity, SBOL.member, None)):
members.append(self._collection_store[m[2].toPython()])
obj = Collection(members=members, **c)
self._collections[identity.toPython()] = obj
def read(self, f):
"""
Read in an SBOL file, replacing current document contents
"""
self.clear_document()
g = Graph()
g.parse(f, format='xml')
for n in g.namespaces():
ns = n[1].toPython()
if not ns.endswith(('#', '/', ':')):
ns = ns + '/'
self._namespaces[n[0]] = ns
# Extend the existing namespaces available
XML_NS[n[0]] = ns
self._read_sequences(g)
self._read_component_definitions(g)
self._extend_component_definitions(g)
self._read_models(g)
self._read_module_definitions(g)
self._extend_module_definitions(g)
self._read_annotations(g)
# Last as this needs all other top level objects created
self._read_collections(g)
def append(self, f):
"""
Read an SBOL file and append contents to current document
"""
pass
def _add_to_root(self, root_node, elements):
for item in elements:
elem = item._as_rdf_xml(self.ns)
root_node.append(elem)
def write(self, f):
"""
Write an SBOL file from current document contents
"""
rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
# TODO: TopLevel Annotations
sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity)
self._add_to_root(rdf, sequence_values)
component_values = sorted(self._components.values(), key=lambda x: x.identity)
self._add_to_root(rdf, component_values)
model_values = sorted(self._models.values(), key=lambda x: x.identity)
self._add_to_root(rdf, model_values)
module_values = sorted(self._modules.values(), key=lambda x: x.identity)
self._add_to_root(rdf, module_values)
collection_values = sorted(self._collections.values(), key=lambda x: x.identity)
self._add_to_root(rdf, collection_values)
annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity)
self._add_to_root(rdf, annotation_values)
f.write(ET.tostring(rdf,
pretty_print=True,
xml_declaration=True,
encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document._add_sequence | python | def _add_sequence(self, sequence):
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity)) | Add a Sequence to the document | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L170-L177 | null | class Document(object):
"""
Provides a base for creating SBOL documents
"""
def __init__(self,
namespace,
validate=True):
# Don't access directly: use function getter/setters
self._components = {}
self._sequences = {}
self._namespaces = {}
self._models = {}
self._modules = {}
self._collections = {}
self._annotations = {}
# Used for looking up functional components when reading
# in data from a file
self._functional_component_store = {}
self._collection_store = {}
if validators.url(namespace):
self.document_namespace = namespace
else:
raise Exception('Invalid namespace URI')
self.validate = validate
# Create a document namesspace for use in RDF serialization
self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
"""
Add a namespace to the document
"""
self._namespaces[prefix] = namespace
def get_namespaces(self):
"""
Get all namespaces in the document (inc default)
"""
return XML_NS + self._namespaces
def _to_uri_from_namespace(self, value):
"""
Take a value and make a URL using the document namespace
"""
return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
"""
Take an existing URI and append more data to it
"""
return urljoin(uri, value)
def add_component_definition(self, definition):
"""
Add a ComponentDefinition to the document
"""
# definition.identity = self._to_uri_from_namespace(definition.identity)
if definition.identity not in self._components.keys():
self._components[definition.identity] = definition
else:
raise ValueError("{} has already been defined".format(definition.identity))
def remove_component_definition(self, identity):
"""
Remove a ComponentDefinition from the document
"""
try:
self._components.pop(identity)
except KeyError:
pass
def get_component_definition(self, uri):
"""
Get a ComponentDefintion from the document
"""
try:
definition = self._components[uri]
except KeyError:
return None
return definition
def list_components(self):
"""
List of all ComponentDefinitions in the document
"""
return self._components.values()
def assemble_component(self, into_component, using_components):
"""
Assemble a list of already defined components into a structual hirearchy
"""
if not isinstance(using_components, list) or len(using_components) == 0:
raise Exception('Must supply list of ComponentDefinitions')
components = []
sequence_annotations = []
seq_elements = ''
for k, c in enumerate(using_components):
try:
self._components[c.identity]
except KeyError:
raise Exception('Must already have defined ComponentDefinition in document')
else:
identity = into_component.identity + '/' + c.identity
# All components are initially public, this can be changed later
component = Component(identity,
c,
'public',
display_id=c.identity)
components.append(component)
# If there is a sequence on the ComponentDefinition use the first element
if len(c.sequences) > 0:
# Add the sequence to the document
self._add_sequence(c.sequences[0])
# Get start/end points of sequence
start = len(seq_elements) + 1 # The sequence is usually 1 indexed
end = start + len(c.sequences[0].elements)
# Add to the component sequence element
seq_elements += c.sequences[0].elements
# Create a Range object to hold seq range
range_identity = identity + '_sequence_annotation/range'
seq_range = Range(range_identity, start, end, display_id='range')
# Create a SequenceAnnotation object to hold the range
annot_identity = identity + '_sequence_annotation'
seq_annot = SequenceAnnotation(annot_identity,
component=component,
locations=[seq_range],
display_id=c.identity + '_sequence_annotation')
sequence_annotations.append(seq_annot)
if seq_elements != '':
seq_encoding = using_components[0].sequences[0].encoding
seq_identity = '{}_sequence'.format(into_component.identity)
seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
self._add_sequence(seq)
into_component.sequences.append(seq)
into_component.components = components
into_component.sequence_annotations = sequence_annotations
def add_model(self, model):
"""
Add a model to the document
"""
if model.identity not in self._models.keys():
self._models[model.identity] = model
else:
raise ValueError("{} has already been defined".format(model.identity))
def remove_model(self, identity):
"""
Remove a Model from the document
"""
try:
self._models.pop(identity)
except KeyError:
pass
def get_model(self, uri):
"""
Get a Model for the document
"""
pass
def add_module_definition(self, module_definition):
"""
Add a ModuleDefinition to the document
"""
if module_definition.identity not in self._module_definitions.keys():
self._module_definitions[module_definition.identity] = module_definition
else:
raise ValueError("{} has already been defined".format(module_definition.identity))
def remove_module_definition(self, identity):
"""
Remove a ModuleDefinition from the document
"""
try:
self._module_definitions.pop(identity)
except KeyError:
pass
def get_module_definition(self, uri):
"""
Get a ModuleDefinition from the document
"""
pass
def find(self, uri):
"""
Recursivly search document for URI
"""
pass
def get_components(self, uri):
"""
Get components from a component definition in order
"""
try:
component_definition = self._components[uri]
except KeyError:
return False
sorted_sequences = sorted(component_definition.sequence_annotations,
key=attrgetter('first_location'))
return [c.component for c in sorted_sequences]
def clear_document(self):
"""
Clears ALL items from document, reseting it to clean
"""
self._components.clear()
self._sequences.clear()
self._namespaces.clear()
self._models.clear()
self._modules.clear()
self._collections.clear()
self._annotations.clear()
self._functional_component_store.clear()
self._collection_store.clear()
def _get_elements(self, graph, element_type):
return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
"""
Get a value from an RDF triple
"""
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values
def _get_rdf_identified(self, graph, identity):
c = {}
c['identity'] = identity.toPython() if type(identity) is not str else identity
c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
# Get annotations (non top level)
c['annotations'] = []
for triple in graph.triples((identity, None, None)):
namespace, obj = split_uri(triple[1])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
if isinstance(triple[2], URIRef):
value = AnnotationValue(uri=triple[2].toPython())
elif isinstance(triple[2], Literal):
value = AnnotationValue(literal=triple[2].toPython())
else:
value = None
c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
return c
def _read_sequences(self, graph):
"""
Read graph and add sequences to document
"""
for e in self._get_elements(graph, SBOL.Sequence):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
seq = Sequence(**c)
self._sequences[identity.toPython()] = seq
self._collection_store[identity.toPython()] = seq
def _read_component_definitions(self, graph):
"""
Read graph and add component defintions to document
"""
for e in self._get_elements(graph, SBOL.ComponentDefinition):
identity = e[0]
# Store component values in dict
c = self._get_rdf_identified(graph, identity)
c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
obj = ComponentDefinition(**c)
self._components[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_component_definitions(self, graph):
"""
Read graph and update component definitions with related elements
"""
for def_uri, comp_def in self._components.items():
# Store created components indexed for later lookup
component_index = {}
identity = URIRef(def_uri)
# Get components
for comp in graph.triples((identity, SBOL.component, None)):
comp_identity = comp[2]
ci = self._get_rdf_identified(graph, comp_identity)
ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
ci['definition'] = self._components[component_comp_def]
c = Component(**ci)
component_index[ci['identity']] = c
self._components[def_uri].components = list(component_index.values())
# Get sequence annotations
if (identity, SBOL.sequenceAnnotation, None) in graph:
find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
else:
find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
sequence_annotations = []
for seq_annot in graph.triples(find_annotation_using):
seq_identity = seq_annot[2]
sa = self._get_rdf_identified(graph, seq_identity)
component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
sa['component'] = component_index[component_to_use]
sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
locations = []
for loc in graph.triples((seq_identity, SBOL.location, None)):
loc_identity = loc[2]
location = self._get_rdf_identified(graph, loc_identity)
location['orientation'] = self._get_triplet_value(graph, loc_identity,
SBOL.orientation)
location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
if location_type == SBOL.Range:
location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
locations.append(Range(**location))
elif location_type == SBOL.Cut:
location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
locations.append(Cut(**location))
else:
locations.append(GenericLocation(**location))
sa_obj = SequenceAnnotation(locations=locations, **sa)
sequence_annotations.append(sa_obj)
self._components[def_uri].sequence_annotations = sequence_annotations
# Get sequence constraints
if (identity, SBOL.sequenceConstraint, None) in graph:
find_constraint_using = (identity, SBOL.sequenceConstraint, None)
else:
find_constraint_using = (identity, SBOL.SequenceConstraint, None)
sequence_constraints = []
for seq_constraint in graph.triples(find_constraint_using):
seq_identity = seq_constraint[2]
sc = self._get_rdf_identified(graph, seq_identity)
sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
sc['subject'] = component_index[subject_id]
object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
# Object is a reserved word so call it obj to prevent clashes
sc['obj'] = component_index[object_id]
sc_obj = SequenceConstraint(**sc)
sequence_constraints.append(sc_obj)
self._components[def_uri].sequence_constraints = sequence_constraints
def _read_models(self, graph):
"""
Read graph and add models to document
"""
for e in self._get_elements(graph, SBOL.Model):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
obj = Model(**m)
self._models[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _read_module_definitions(self, graph):
"""
Read graph and add module defintions to document
"""
for e in self._get_elements(graph, SBOL.ModuleDefinition):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
functional_components = {}
for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
func_identity = func_comp[2]
fc = self._get_rdf_identified(graph, func_identity)
definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
fc['definition'] = self._components[definition]
fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
self._functional_component_store[func_identity.toPython()] = \
functional_components[func_identity.toPython()]
interactions = []
for inter in graph.triples((identity, SBOL.interaction, None)):
inter_identity = inter[2]
it = self._get_rdf_identified(graph, inter_identity)
it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
participations = []
for p in graph.triples((inter_identity, SBOL.participation, None)):
pc = self._get_rdf_identified(graph, p[2])
roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
# Need to use one of the functional component created above
participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
participant = functional_components[participant_id]
participations.append(Participation(roles=roles, participant=participant, **pc))
interactions.append(Interaction(participations=participations, **it))
obj = ModuleDefinition(functional_components=functional_components.values(),
interactions=interactions,
**m)
self._modules[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_module_definitions(self, graph):
"""
Using collected module definitions extend linkages
"""
for mod_id in self._modules:
mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
modules = []
for mod in graph.triples((mod_identity, SBOL.module, None)):
md = self._get_rdf_identified(graph, mod[2])
definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
md['definition'] = self._modules[definition_id]
maps_to = []
for m in graph.triples((mod[2], SBOL.mapsTo, None)):
mt = self._get_rdf_identified(graph, m[2])
mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
local_id = self._get_triplet_value(graph, m[2], SBOL.local)
remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
mt['local'] = self._functional_component_store[local_id]
mt['remote'] = self._functional_component_store[remote_id]
maps_to.append(MapsTo(**mt))
modules.append(Module(maps_to=maps_to, **md))
self._modules[mod_id].modules = modules
def _read_annotations(self, graph):
"""
Find any non-defined elements at TopLevel and create annotations
"""
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
for triple in graph.triples((None, RDF.type, None)):
namespace, obj = split_uri(triple[2])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
identity = triple[0]
gt = self._get_rdf_identified(graph, identity)
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
gt['rdf_type'] = q_name
gt_obj = GenericTopLevel(**gt)
self._annotations[identity.toPython()] = gt_obj
self._collection_store[identity.toPython()] = gt_obj
def _read_collections(self, graph):
"""
Read graph and add collections to document
"""
for e in self._get_elements(graph, SBOL.Collection):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
members = []
# Need to handle other non-standard TopLevel objects first
for m in graph.triples((identity, SBOL.member, None)):
members.append(self._collection_store[m[2].toPython()])
obj = Collection(members=members, **c)
self._collections[identity.toPython()] = obj
def read(self, f):
"""
Read in an SBOL file, replacing current document contents
"""
self.clear_document()
g = Graph()
g.parse(f, format='xml')
for n in g.namespaces():
ns = n[1].toPython()
if not ns.endswith(('#', '/', ':')):
ns = ns + '/'
self._namespaces[n[0]] = ns
# Extend the existing namespaces available
XML_NS[n[0]] = ns
self._read_sequences(g)
self._read_component_definitions(g)
self._extend_component_definitions(g)
self._read_models(g)
self._read_module_definitions(g)
self._extend_module_definitions(g)
self._read_annotations(g)
# Last as this needs all other top level objects created
self._read_collections(g)
def append(self, f):
"""
Read an SBOL file and append contents to current document
"""
pass
def _add_to_root(self, root_node, elements):
for item in elements:
elem = item._as_rdf_xml(self.ns)
root_node.append(elem)
def write(self, f):
"""
Write an SBOL file from current document contents
"""
rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
# TODO: TopLevel Annotations
sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity)
self._add_to_root(rdf, sequence_values)
component_values = sorted(self._components.values(), key=lambda x: x.identity)
self._add_to_root(rdf, component_values)
model_values = sorted(self._models.values(), key=lambda x: x.identity)
self._add_to_root(rdf, model_values)
module_values = sorted(self._modules.values(), key=lambda x: x.identity)
self._add_to_root(rdf, module_values)
collection_values = sorted(self._collections.values(), key=lambda x: x.identity)
self._add_to_root(rdf, collection_values)
annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity)
self._add_to_root(rdf, annotation_values)
f.write(ET.tostring(rdf,
pretty_print=True,
xml_declaration=True,
encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document.add_model | python | def add_model(self, model):
if model.identity not in self._models.keys():
self._models[model.identity] = model
else:
raise ValueError("{} has already been defined".format(model.identity)) | Add a model to the document | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L179-L186 | null | class Document(object):
"""
Provides a base for creating SBOL documents
"""
def __init__(self,
namespace,
validate=True):
# Don't access directly: use function getter/setters
self._components = {}
self._sequences = {}
self._namespaces = {}
self._models = {}
self._modules = {}
self._collections = {}
self._annotations = {}
# Used for looking up functional components when reading
# in data from a file
self._functional_component_store = {}
self._collection_store = {}
if validators.url(namespace):
self.document_namespace = namespace
else:
raise Exception('Invalid namespace URI')
self.validate = validate
# Create a document namesspace for use in RDF serialization
self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
"""
Add a namespace to the document
"""
self._namespaces[prefix] = namespace
def get_namespaces(self):
"""
Get all namespaces in the document (inc default)
"""
return XML_NS + self._namespaces
def _to_uri_from_namespace(self, value):
"""
Take a value and make a URL using the document namespace
"""
return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
"""
Take an existing URI and append more data to it
"""
return urljoin(uri, value)
def add_component_definition(self, definition):
"""
Add a ComponentDefinition to the document
"""
# definition.identity = self._to_uri_from_namespace(definition.identity)
if definition.identity not in self._components.keys():
self._components[definition.identity] = definition
else:
raise ValueError("{} has already been defined".format(definition.identity))
def remove_component_definition(self, identity):
"""
Remove a ComponentDefinition from the document
"""
try:
self._components.pop(identity)
except KeyError:
pass
def get_component_definition(self, uri):
"""
Get a ComponentDefintion from the document
"""
try:
definition = self._components[uri]
except KeyError:
return None
return definition
def list_components(self):
"""
List of all ComponentDefinitions in the document
"""
return self._components.values()
def assemble_component(self, into_component, using_components):
"""
Assemble a list of already defined components into a structual hirearchy
"""
if not isinstance(using_components, list) or len(using_components) == 0:
raise Exception('Must supply list of ComponentDefinitions')
components = []
sequence_annotations = []
seq_elements = ''
for k, c in enumerate(using_components):
try:
self._components[c.identity]
except KeyError:
raise Exception('Must already have defined ComponentDefinition in document')
else:
identity = into_component.identity + '/' + c.identity
# All components are initially public, this can be changed later
component = Component(identity,
c,
'public',
display_id=c.identity)
components.append(component)
# If there is a sequence on the ComponentDefinition use the first element
if len(c.sequences) > 0:
# Add the sequence to the document
self._add_sequence(c.sequences[0])
# Get start/end points of sequence
start = len(seq_elements) + 1 # The sequence is usually 1 indexed
end = start + len(c.sequences[0].elements)
# Add to the component sequence element
seq_elements += c.sequences[0].elements
# Create a Range object to hold seq range
range_identity = identity + '_sequence_annotation/range'
seq_range = Range(range_identity, start, end, display_id='range')
# Create a SequenceAnnotation object to hold the range
annot_identity = identity + '_sequence_annotation'
seq_annot = SequenceAnnotation(annot_identity,
component=component,
locations=[seq_range],
display_id=c.identity + '_sequence_annotation')
sequence_annotations.append(seq_annot)
if seq_elements != '':
seq_encoding = using_components[0].sequences[0].encoding
seq_identity = '{}_sequence'.format(into_component.identity)
seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
self._add_sequence(seq)
into_component.sequences.append(seq)
into_component.components = components
into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
"""
Add a Sequence to the document
"""
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity))
def remove_model(self, identity):
"""
Remove a Model from the document
"""
try:
self._models.pop(identity)
except KeyError:
pass
def get_model(self, uri):
"""
Get a Model for the document
"""
pass
def add_module_definition(self, module_definition):
"""
Add a ModuleDefinition to the document
"""
if module_definition.identity not in self._module_definitions.keys():
self._module_definitions[module_definition.identity] = module_definition
else:
raise ValueError("{} has already been defined".format(module_definition.identity))
def remove_module_definition(self, identity):
"""
Remove a ModuleDefinition from the document
"""
try:
self._module_definitions.pop(identity)
except KeyError:
pass
def get_module_definition(self, uri):
"""
Get a ModuleDefinition from the document
"""
pass
def find(self, uri):
"""
Recursivly search document for URI
"""
pass
def get_components(self, uri):
"""
Get components from a component definition in order
"""
try:
component_definition = self._components[uri]
except KeyError:
return False
sorted_sequences = sorted(component_definition.sequence_annotations,
key=attrgetter('first_location'))
return [c.component for c in sorted_sequences]
def clear_document(self):
"""
Clears ALL items from document, reseting it to clean
"""
self._components.clear()
self._sequences.clear()
self._namespaces.clear()
self._models.clear()
self._modules.clear()
self._collections.clear()
self._annotations.clear()
self._functional_component_store.clear()
self._collection_store.clear()
def _get_elements(self, graph, element_type):
return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
"""
Get a value from an RDF triple
"""
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values
def _get_rdf_identified(self, graph, identity):
c = {}
c['identity'] = identity.toPython() if type(identity) is not str else identity
c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
# Get annotations (non top level)
c['annotations'] = []
for triple in graph.triples((identity, None, None)):
namespace, obj = split_uri(triple[1])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
if isinstance(triple[2], URIRef):
value = AnnotationValue(uri=triple[2].toPython())
elif isinstance(triple[2], Literal):
value = AnnotationValue(literal=triple[2].toPython())
else:
value = None
c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
return c
def _read_sequences(self, graph):
"""
Read graph and add sequences to document
"""
for e in self._get_elements(graph, SBOL.Sequence):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
seq = Sequence(**c)
self._sequences[identity.toPython()] = seq
self._collection_store[identity.toPython()] = seq
def _read_component_definitions(self, graph):
"""
Read graph and add component defintions to document
"""
for e in self._get_elements(graph, SBOL.ComponentDefinition):
identity = e[0]
# Store component values in dict
c = self._get_rdf_identified(graph, identity)
c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
obj = ComponentDefinition(**c)
self._components[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_component_definitions(self, graph):
"""
Read graph and update component definitions with related elements
"""
for def_uri, comp_def in self._components.items():
# Store created components indexed for later lookup
component_index = {}
identity = URIRef(def_uri)
# Get components
for comp in graph.triples((identity, SBOL.component, None)):
comp_identity = comp[2]
ci = self._get_rdf_identified(graph, comp_identity)
ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
ci['definition'] = self._components[component_comp_def]
c = Component(**ci)
component_index[ci['identity']] = c
self._components[def_uri].components = list(component_index.values())
# Get sequence annotations
if (identity, SBOL.sequenceAnnotation, None) in graph:
find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
else:
find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
sequence_annotations = []
for seq_annot in graph.triples(find_annotation_using):
seq_identity = seq_annot[2]
sa = self._get_rdf_identified(graph, seq_identity)
component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
sa['component'] = component_index[component_to_use]
sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
locations = []
for loc in graph.triples((seq_identity, SBOL.location, None)):
loc_identity = loc[2]
location = self._get_rdf_identified(graph, loc_identity)
location['orientation'] = self._get_triplet_value(graph, loc_identity,
SBOL.orientation)
location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
if location_type == SBOL.Range:
location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
locations.append(Range(**location))
elif location_type == SBOL.Cut:
location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
locations.append(Cut(**location))
else:
locations.append(GenericLocation(**location))
sa_obj = SequenceAnnotation(locations=locations, **sa)
sequence_annotations.append(sa_obj)
self._components[def_uri].sequence_annotations = sequence_annotations
# Get sequence constraints
if (identity, SBOL.sequenceConstraint, None) in graph:
find_constraint_using = (identity, SBOL.sequenceConstraint, None)
else:
find_constraint_using = (identity, SBOL.SequenceConstraint, None)
sequence_constraints = []
for seq_constraint in graph.triples(find_constraint_using):
seq_identity = seq_constraint[2]
sc = self._get_rdf_identified(graph, seq_identity)
sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
sc['subject'] = component_index[subject_id]
object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
# Object is a reserved word so call it obj to prevent clashes
sc['obj'] = component_index[object_id]
sc_obj = SequenceConstraint(**sc)
sequence_constraints.append(sc_obj)
self._components[def_uri].sequence_constraints = sequence_constraints
def _read_models(self, graph):
"""
Read graph and add models to document
"""
for e in self._get_elements(graph, SBOL.Model):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
obj = Model(**m)
self._models[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _read_module_definitions(self, graph):
"""
Read graph and add module defintions to document
"""
for e in self._get_elements(graph, SBOL.ModuleDefinition):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
functional_components = {}
for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
func_identity = func_comp[2]
fc = self._get_rdf_identified(graph, func_identity)
definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
fc['definition'] = self._components[definition]
fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
self._functional_component_store[func_identity.toPython()] = \
functional_components[func_identity.toPython()]
interactions = []
for inter in graph.triples((identity, SBOL.interaction, None)):
inter_identity = inter[2]
it = self._get_rdf_identified(graph, inter_identity)
it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
participations = []
for p in graph.triples((inter_identity, SBOL.participation, None)):
pc = self._get_rdf_identified(graph, p[2])
roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
# Need to use one of the functional component created above
participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
participant = functional_components[participant_id]
participations.append(Participation(roles=roles, participant=participant, **pc))
interactions.append(Interaction(participations=participations, **it))
obj = ModuleDefinition(functional_components=functional_components.values(),
interactions=interactions,
**m)
self._modules[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_module_definitions(self, graph):
"""
Using collected module definitions extend linkages
"""
for mod_id in self._modules:
mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
modules = []
for mod in graph.triples((mod_identity, SBOL.module, None)):
md = self._get_rdf_identified(graph, mod[2])
definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
md['definition'] = self._modules[definition_id]
maps_to = []
for m in graph.triples((mod[2], SBOL.mapsTo, None)):
mt = self._get_rdf_identified(graph, m[2])
mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
local_id = self._get_triplet_value(graph, m[2], SBOL.local)
remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
mt['local'] = self._functional_component_store[local_id]
mt['remote'] = self._functional_component_store[remote_id]
maps_to.append(MapsTo(**mt))
modules.append(Module(maps_to=maps_to, **md))
self._modules[mod_id].modules = modules
def _read_annotations(self, graph):
"""
Find any non-defined elements at TopLevel and create annotations
"""
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
for triple in graph.triples((None, RDF.type, None)):
namespace, obj = split_uri(triple[2])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
identity = triple[0]
gt = self._get_rdf_identified(graph, identity)
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
gt['rdf_type'] = q_name
gt_obj = GenericTopLevel(**gt)
self._annotations[identity.toPython()] = gt_obj
self._collection_store[identity.toPython()] = gt_obj
def _read_collections(self, graph):
"""
Read graph and add collections to document
"""
for e in self._get_elements(graph, SBOL.Collection):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
members = []
# Need to handle other non-standard TopLevel objects first
for m in graph.triples((identity, SBOL.member, None)):
members.append(self._collection_store[m[2].toPython()])
obj = Collection(members=members, **c)
self._collections[identity.toPython()] = obj
def read(self, f):
"""
Read in an SBOL file, replacing current document contents
"""
self.clear_document()
g = Graph()
g.parse(f, format='xml')
for n in g.namespaces():
ns = n[1].toPython()
if not ns.endswith(('#', '/', ':')):
ns = ns + '/'
self._namespaces[n[0]] = ns
# Extend the existing namespaces available
XML_NS[n[0]] = ns
self._read_sequences(g)
self._read_component_definitions(g)
self._extend_component_definitions(g)
self._read_models(g)
self._read_module_definitions(g)
self._extend_module_definitions(g)
self._read_annotations(g)
# Last as this needs all other top level objects created
self._read_collections(g)
def append(self, f):
"""
Read an SBOL file and append contents to current document
"""
pass
def _add_to_root(self, root_node, elements):
for item in elements:
elem = item._as_rdf_xml(self.ns)
root_node.append(elem)
def write(self, f):
"""
Write an SBOL file from current document contents
"""
rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
# TODO: TopLevel Annotations
sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity)
self._add_to_root(rdf, sequence_values)
component_values = sorted(self._components.values(), key=lambda x: x.identity)
self._add_to_root(rdf, component_values)
model_values = sorted(self._models.values(), key=lambda x: x.identity)
self._add_to_root(rdf, model_values)
module_values = sorted(self._modules.values(), key=lambda x: x.identity)
self._add_to_root(rdf, module_values)
collection_values = sorted(self._collections.values(), key=lambda x: x.identity)
self._add_to_root(rdf, collection_values)
annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity)
self._add_to_root(rdf, annotation_values)
f.write(ET.tostring(rdf,
pretty_print=True,
xml_declaration=True,
encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document.add_module_definition | python | def add_module_definition(self, module_definition):
if module_definition.identity not in self._module_definitions.keys():
self._module_definitions[module_definition.identity] = module_definition
else:
raise ValueError("{} has already been defined".format(module_definition.identity)) | Add a ModuleDefinition to the document | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L203-L210 | null | class Document(object):
"""
Provides a base for creating SBOL documents
"""
    def __init__(self,
                 namespace,
                 validate=True):
        """
        Create an empty SBOL document rooted at *namespace*.

        Args:
            namespace: base URI of the document; must be a valid URL.
            validate: stored on the instance; validation use not visible
                here — TODO confirm where it is consulted.

        Raises:
            Exception: if *namespace* is not a valid URL.
        """
        # Don't access directly: use function getter/setters
        self._components = {}
        self._sequences = {}
        self._namespaces = {}
        self._models = {}
        self._modules = {}
        self._collections = {}
        self._annotations = {}
        # Used for looking up functional components when reading
        # in data from a file
        self._functional_component_store = {}
        self._collection_store = {}
        # NOTE(review): some methods reference self._module_definitions,
        # which is never created here — confirm the intended store is
        # self._modules (used by _read_module_definitions/clear_document).
        if validators.url(namespace):
            self.document_namespace = namespace
        else:
            raise Exception('Invalid namespace URI')
        self.validate = validate
        # Create a document namespace for use in RDF serialization
        self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
"""
Add a namespace to the document
"""
self._namespaces[prefix] = namespace
def get_namespaces(self):
"""
Get all namespaces in the document (inc default)
"""
return XML_NS + self._namespaces
def _to_uri_from_namespace(self, value):
"""
Take a value and make a URL using the document namespace
"""
return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
"""
Take an existing URI and append more data to it
"""
return urljoin(uri, value)
def add_component_definition(self, definition):
"""
Add a ComponentDefinition to the document
"""
# definition.identity = self._to_uri_from_namespace(definition.identity)
if definition.identity not in self._components.keys():
self._components[definition.identity] = definition
else:
raise ValueError("{} has already been defined".format(definition.identity))
def remove_component_definition(self, identity):
"""
Remove a ComponentDefinition from the document
"""
try:
self._components.pop(identity)
except KeyError:
pass
def get_component_definition(self, uri):
"""
Get a ComponentDefintion from the document
"""
try:
definition = self._components[uri]
except KeyError:
return None
return definition
def list_components(self):
"""
List of all ComponentDefinitions in the document
"""
return self._components.values()
    def assemble_component(self, into_component, using_components):
        """
        Assemble already-defined components into a structural hierarchy.

        Wraps each ComponentDefinition in *using_components* in a Component
        owned by *into_component*, concatenates their first sequences into a
        single document-level Sequence, and records per-component Range
        annotations over that concatenation.

        Args:
            into_component: ComponentDefinition that receives the assembly;
                its components/sequence_annotations/sequences are overwritten
                or appended.
            using_components: non-empty list of ComponentDefinitions that must
                already be present in the document.

        Raises:
            Exception: if *using_components* is not a non-empty list, or any
                member is not already defined in the document.
        """
        if not isinstance(using_components, list) or len(using_components) == 0:
            raise Exception('Must supply list of ComponentDefinitions')
        components = []
        sequence_annotations = []
        seq_elements = ''
        # NOTE: loop index ``k`` is unused; kept for byte-compatibility.
        for k, c in enumerate(using_components):
            try:
                self._components[c.identity]
            except KeyError:
                raise Exception('Must already have defined ComponentDefinition in document')
            else:
                identity = into_component.identity + '/' + c.identity
                # All components are initially public, this can be changed later
                component = Component(identity,
                                      c,
                                      'public',
                                      display_id=c.identity)
                components.append(component)
                # If there is a sequence on the ComponentDefinition use the first element
                if len(c.sequences) > 0:
                    # Add the sequence to the document
                    self._add_sequence(c.sequences[0])
                    # Get start/end points of sequence
                    start = len(seq_elements) + 1  # The sequence is usually 1 indexed
                    end = start + len(c.sequences[0].elements)
                    # Add to the component sequence element
                    seq_elements += c.sequences[0].elements
                    # Create a Range object to hold seq range
                    range_identity = identity + '_sequence_annotation/range'
                    seq_range = Range(range_identity, start, end, display_id='range')
                    # Create a SequenceAnnotation object to hold the range
                    annot_identity = identity + '_sequence_annotation'
                    seq_annot = SequenceAnnotation(annot_identity,
                                                   component=component,
                                                   locations=[seq_range],
                                                   display_id=c.identity + '_sequence_annotation')
                    sequence_annotations.append(seq_annot)
        if seq_elements != '':
            # NOTE(review): encoding is taken from the FIRST component's first
            # sequence — IndexError if using_components[0] has no sequences
            # while a later one does; confirm intended behaviour.
            seq_encoding = using_components[0].sequences[0].encoding
            seq_identity = '{}_sequence'.format(into_component.identity)
            seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
            self._add_sequence(seq)
            into_component.sequences.append(seq)
        into_component.components = components
        into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
"""
Add a Sequence to the document
"""
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity))
def add_model(self, model):
"""
Add a model to the document
"""
if model.identity not in self._models.keys():
self._models[model.identity] = model
else:
raise ValueError("{} has already been defined".format(model.identity))
def remove_model(self, identity):
"""
Remove a Model from the document
"""
try:
self._models.pop(identity)
except KeyError:
pass
    def get_model(self, uri):
        """
        Get a Model for the document.

        NOTE(review): placeholder — not implemented; always returns None.
        """
        pass
def remove_module_definition(self, identity):
"""
Remove a ModuleDefinition from the document
"""
try:
self._module_definitions.pop(identity)
except KeyError:
pass
    def get_module_definition(self, uri):
        """
        Get a ModuleDefinition from the document.

        NOTE(review): placeholder — not implemented; always returns None.
        """
        pass
    def find(self, uri):
        """
        Recursively search the document for *uri*.

        NOTE(review): placeholder — not implemented; always returns None.
        """
        pass
def get_components(self, uri):
"""
Get components from a component definition in order
"""
try:
component_definition = self._components[uri]
except KeyError:
return False
sorted_sequences = sorted(component_definition.sequence_annotations,
key=attrgetter('first_location'))
return [c.component for c in sorted_sequences]
def clear_document(self):
"""
Clears ALL items from document, reseting it to clean
"""
self._components.clear()
self._sequences.clear()
self._namespaces.clear()
self._models.clear()
self._modules.clear()
self._collections.clear()
self._annotations.clear()
self._functional_component_store.clear()
self._collection_store.clear()
    def _get_elements(self, graph, element_type):
        """Return an iterator of (subject, RDF.type, element_type) triples,
        i.e. every node in *graph* declared to be of *element_type*."""
        return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
"""
Get a value from an RDF triple
"""
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values
    def _get_rdf_identified(self, graph, identity):
        """
        Collect the common "identified" fields of an SBOL node into a dict.

        Returns a dict with keys identity/display_id/was_derived_from/
        version/description/name plus an ``annotations`` list holding any
        predicates on *identity* that are not known SBOL/core entities.
        The result is used as keyword arguments for the model classes.
        """
        c = {}
        # URIRef identities are converted to plain strings; str passes through.
        c['identity'] = identity.toPython() if type(identity) is not str else identity
        c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
        c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
        c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
        c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
        c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
        # Map namespace URI -> prefix for qualifying annotation predicates.
        flipped_namespaces = {v: k for k, v in self._namespaces.items()}
        # Get annotations (non top level)
        c['annotations'] = []
        for triple in graph.triples((identity, None, None)):
            namespace, obj = split_uri(triple[1])
            # NOTE(review): raises KeyError if a predicate's namespace was
            # never registered in self._namespaces — confirm acceptable.
            prefix = flipped_namespaces[namespace]
            as_string = '{}:{}'.format(prefix, obj)
            if as_string not in VALID_ENTITIES:
                q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
                # URIRef objects become URI annotations, literals become
                # literal annotations; anything else carries no value.
                if isinstance(triple[2], URIRef):
                    value = AnnotationValue(uri=triple[2].toPython())
                elif isinstance(triple[2], Literal):
                    value = AnnotationValue(literal=triple[2].toPython())
                else:
                    value = None
                c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
        return c
    def _read_sequences(self, graph):
        """
        Read *graph* and add every sbol:Sequence node to the document.

        Sequences are keyed by identity in ``_sequences`` and also recorded
        in ``_collection_store`` so collections can reference them later.
        """
        for e in self._get_elements(graph, SBOL.Sequence):
            identity = e[0]
            c = self._get_rdf_identified(graph, identity)
            c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
            c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
            seq = Sequence(**c)
            self._sequences[identity.toPython()] = seq
            self._collection_store[identity.toPython()] = seq
    def _read_component_definitions(self, graph):
        """
        Read *graph* and add every sbol:ComponentDefinition to the document.

        Only the definitions themselves are created here; their components,
        annotations and constraints are linked afterwards by
        ``_extend_component_definitions``.
        """
        for e in self._get_elements(graph, SBOL.ComponentDefinition):
            identity = e[0]
            # Store component values in dict
            c = self._get_rdf_identified(graph, identity)
            c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
            c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
            obj = ComponentDefinition(**c)
            self._components[identity.toPython()] = obj
            self._collection_store[identity.toPython()] = obj
    def _extend_component_definitions(self, graph):
        """
        Second pass over *graph*: attach components, sequence annotations and
        sequence constraints to the ComponentDefinitions created by
        ``_read_component_definitions``.

        Must run after all definitions exist, since sub-components and
        constraint subjects/objects are resolved against ``self._components``.
        """
        for def_uri, comp_def in self._components.items():
            # Store created components indexed for later lookup
            component_index = {}
            identity = URIRef(def_uri)
            # Get components
            for comp in graph.triples((identity, SBOL.component, None)):
                comp_identity = comp[2]
                ci = self._get_rdf_identified(graph, comp_identity)
                ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
                ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
                component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
                # Resolve the referenced definition object created in pass one.
                ci['definition'] = self._components[component_comp_def]
                c = Component(**ci)
                component_index[ci['identity']] = c
            self._components[def_uri].components = list(component_index.values())
            # Get sequence annotations — some files use a lower-case
            # predicate, others upper-case; probe for the former first.
            if (identity, SBOL.sequenceAnnotation, None) in graph:
                find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
            else:
                find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
            sequence_annotations = []
            for seq_annot in graph.triples(find_annotation_using):
                seq_identity = seq_annot[2]
                sa = self._get_rdf_identified(graph, seq_identity)
                component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
                sa['component'] = component_index[component_to_use]
                sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
                locations = []
                for loc in graph.triples((seq_identity, SBOL.location, None)):
                    loc_identity = loc[2]
                    location = self._get_rdf_identified(graph, loc_identity)
                    location['orientation'] = self._get_triplet_value(graph, loc_identity,
                                                                      SBOL.orientation)
                    location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
                    # Dispatch on the concrete Location subtype.
                    if location_type == SBOL.Range:
                        location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
                        location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
                        locations.append(Range(**location))
                    elif location_type == SBOL.Cut:
                        location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
                        locations.append(Cut(**location))
                    else:
                        locations.append(GenericLocation(**location))
                sa_obj = SequenceAnnotation(locations=locations, **sa)
                sequence_annotations.append(sa_obj)
            self._components[def_uri].sequence_annotations = sequence_annotations
            # Get sequence constraints (same predicate-case probing as above)
            if (identity, SBOL.sequenceConstraint, None) in graph:
                find_constraint_using = (identity, SBOL.sequenceConstraint, None)
            else:
                find_constraint_using = (identity, SBOL.SequenceConstraint, None)
            sequence_constraints = []
            for seq_constraint in graph.triples(find_constraint_using):
                seq_identity = seq_constraint[2]
                sc = self._get_rdf_identified(graph, seq_identity)
                sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
                subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
                sc['subject'] = component_index[subject_id]
                object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
                # Object is a reserved word so call it obj to prevent clashes
                sc['obj'] = component_index[object_id]
                sc_obj = SequenceConstraint(**sc)
                sequence_constraints.append(sc_obj)
            self._components[def_uri].sequence_constraints = sequence_constraints
    def _read_models(self, graph):
        """
        Read *graph* and add every sbol:Model node to the document.

        Models are keyed by identity in ``_models`` and also recorded in
        ``_collection_store`` for later membership resolution.
        """
        for e in self._get_elements(graph, SBOL.Model):
            identity = e[0]
            m = self._get_rdf_identified(graph, identity)
            m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
            m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
            m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
            obj = Model(**m)
            self._models[identity.toPython()] = obj
            self._collection_store[identity.toPython()] = obj
    def _read_module_definitions(self, graph):
        """
        Read *graph* and add every sbol:ModuleDefinition to the document.

        For each definition this builds its FunctionalComponents (also cached
        in ``_functional_component_store`` for cross-module MapsTo lookups)
        and its Interactions with their Participations. Requires component
        definitions to have been read already (``fc['definition']`` lookup).
        """
        for e in self._get_elements(graph, SBOL.ModuleDefinition):
            identity = e[0]
            m = self._get_rdf_identified(graph, identity)
            m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
            functional_components = {}
            for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
                func_identity = func_comp[2]
                fc = self._get_rdf_identified(graph, func_identity)
                definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
                fc['definition'] = self._components[definition]
                fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
                fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
                functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
                self._functional_component_store[func_identity.toPython()] = \
                    functional_components[func_identity.toPython()]
            interactions = []
            for inter in graph.triples((identity, SBOL.interaction, None)):
                inter_identity = inter[2]
                it = self._get_rdf_identified(graph, inter_identity)
                # NOTE(review): uses SBOL.types (plural) here while component
                # reading uses SBOL.type — confirm the predicate name.
                it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
                participations = []
                for p in graph.triples((inter_identity, SBOL.participation, None)):
                    pc = self._get_rdf_identified(graph, p[2])
                    roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
                    # Need to use one of the functional component created above
                    participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
                    participant = functional_components[participant_id]
                    participations.append(Participation(roles=roles, participant=participant, **pc))
                interactions.append(Interaction(participations=participations, **it))
            obj = ModuleDefinition(functional_components=functional_components.values(),
                                   interactions=interactions,
                                   **m)
            self._modules[identity.toPython()] = obj
            self._collection_store[identity.toPython()] = obj
    def _extend_module_definitions(self, graph):
        """
        Second pass: attach sub-Modules (with their MapsTo links) to each
        ModuleDefinition collected by ``_read_module_definitions``.
        """
        for mod_id in self._modules:
            # NOTE(review): this takes the OBJECT of the first
            # (mod_id, sbol:module, ?) triple and then queries for triples
            # whose SUBJECT is that value — verify this is intended rather
            # than iterating graph.triples((URIRef(mod_id), SBOL.module, None)).
            mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
            modules = []
            for mod in graph.triples((mod_identity, SBOL.module, None)):
                md = self._get_rdf_identified(graph, mod[2])
                definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
                md['definition'] = self._modules[definition_id]
                maps_to = []
                for m in graph.triples((mod[2], SBOL.mapsTo, None)):
                    mt = self._get_rdf_identified(graph, m[2])
                    mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
                    local_id = self._get_triplet_value(graph, m[2], SBOL.local)
                    remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
                    # Resolve against functional components cached during
                    # _read_module_definitions.
                    mt['local'] = self._functional_component_store[local_id]
                    mt['remote'] = self._functional_component_store[remote_id]
                    maps_to.append(MapsTo(**mt))
                modules.append(Module(maps_to=maps_to, **md))
            self._modules[mod_id].modules = modules
    def _read_annotations(self, graph):
        """
        Create GenericTopLevel annotations for any rdf:type in *graph* that is
        not one of the known SBOL/core entity types (VALID_ENTITIES).
        """
        # Map namespace URI -> prefix for qualifying the type names.
        flipped_namespaces = {v: k for k, v in self._namespaces.items()}
        for triple in graph.triples((None, RDF.type, None)):
            namespace, obj = split_uri(triple[2])
            prefix = flipped_namespaces[namespace]
            as_string = '{}:{}'.format(prefix, obj)
            if as_string not in VALID_ENTITIES:
                identity = triple[0]
                gt = self._get_rdf_identified(graph, identity)
                q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
                gt['rdf_type'] = q_name
                gt_obj = GenericTopLevel(**gt)
                self._annotations[identity.toPython()] = gt_obj
                self._collection_store[identity.toPython()] = gt_obj
    def _read_collections(self, graph):
        """
        Read *graph* and add every sbol:Collection to the document.

        Members are resolved through ``_collection_store``, so every other
        top-level reader must have run first (this is the last pass in
        ``read``).
        """
        for e in self._get_elements(graph, SBOL.Collection):
            identity = e[0]
            c = self._get_rdf_identified(graph, identity)
            members = []
            # Need to handle other non-standard TopLevel objects first
            for m in graph.triples((identity, SBOL.member, None)):
                members.append(self._collection_store[m[2].toPython()])
            obj = Collection(members=members, **c)
            self._collections[identity.toPython()] = obj
    def read(self, f):
        """
        Read an SBOL (RDF/XML) file, replacing current document contents.

        Args:
            f: file path or file-like object accepted by rdflib's
                ``Graph.parse``.
        """
        self.clear_document()
        g = Graph()
        g.parse(f, format='xml')
        for n in g.namespaces():
            ns = n[1].toPython()
            # Normalise namespaces so they can be prefixed to local names.
            if not ns.endswith(('#', '/', ':')):
                ns = ns + '/'
            self._namespaces[n[0]] = ns
            # Extend the existing namespaces available
            # NOTE(review): this mutates the module-level XML_NS, a global
            # side effect shared across all Document instances.
            XML_NS[n[0]] = ns
        # Pass order matters: definitions before the "extend" linking passes,
        # and collections last (they reference every other top-level object).
        self._read_sequences(g)
        self._read_component_definitions(g)
        self._extend_component_definitions(g)
        self._read_models(g)
        self._read_module_definitions(g)
        self._extend_module_definitions(g)
        self._read_annotations(g)
        # Last as this needs all other top level objects created
        self._read_collections(g)
    def append(self, f):
        """
        Read an SBOL file and append contents to the current document.

        NOTE(review): placeholder — not implemented; *f* is ignored.
        """
        pass
def _add_to_root(self, root_node, elements):
for item in elements:
elem = item._as_rdf_xml(self.ns)
root_node.append(elem)
    def write(self, f):
        """
        Write the current document contents to *f* as RDF/XML.

        Args:
            f: writable file-like object. ``ET.tostring`` returns bytes
                (encoding='utf-8'), so *f* should be opened in binary mode —
                TODO confirm caller convention.
        """
        rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
        # TODO: TopLevel Annotations
        # Each category is emitted sorted by identity for deterministic output.
        sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity)
        self._add_to_root(rdf, sequence_values)
        component_values = sorted(self._components.values(), key=lambda x: x.identity)
        self._add_to_root(rdf, component_values)
        model_values = sorted(self._models.values(), key=lambda x: x.identity)
        self._add_to_root(rdf, model_values)
        module_values = sorted(self._modules.values(), key=lambda x: x.identity)
        self._add_to_root(rdf, module_values)
        collection_values = sorted(self._collections.values(), key=lambda x: x.identity)
        self._add_to_root(rdf, collection_values)
        annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity)
        self._add_to_root(rdf, annotation_values)
        f.write(ET.tostring(rdf,
                            pretty_print=True,
                            xml_declaration=True,
                            encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document.get_components | python | def get_components(self, uri):
try:
component_definition = self._components[uri]
except KeyError:
return False
sorted_sequences = sorted(component_definition.sequence_annotations,
key=attrgetter('first_location'))
return [c.component for c in sorted_sequences] | Get components from a component definition in order | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L233-L244 | null | class Document(object):
"""
Provides a base for creating SBOL documents
"""
def __init__(self,
namespace,
validate=True):
# Don't access directly: use function getter/setters
self._components = {}
self._sequences = {}
self._namespaces = {}
self._models = {}
self._modules = {}
self._collections = {}
self._annotations = {}
# Used for looking up functional components when reading
# in data from a file
self._functional_component_store = {}
self._collection_store = {}
if validators.url(namespace):
self.document_namespace = namespace
else:
raise Exception('Invalid namespace URI')
self.validate = validate
# Create a document namesspace for use in RDF serialization
self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
"""
Add a namespace to the document
"""
self._namespaces[prefix] = namespace
def get_namespaces(self):
"""
Get all namespaces in the document (inc default)
"""
return XML_NS + self._namespaces
def _to_uri_from_namespace(self, value):
"""
Take a value and make a URL using the document namespace
"""
return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
"""
Take an existing URI and append more data to it
"""
return urljoin(uri, value)
def add_component_definition(self, definition):
"""
Add a ComponentDefinition to the document
"""
# definition.identity = self._to_uri_from_namespace(definition.identity)
if definition.identity not in self._components.keys():
self._components[definition.identity] = definition
else:
raise ValueError("{} has already been defined".format(definition.identity))
def remove_component_definition(self, identity):
"""
Remove a ComponentDefinition from the document
"""
try:
self._components.pop(identity)
except KeyError:
pass
def get_component_definition(self, uri):
"""
Get a ComponentDefintion from the document
"""
try:
definition = self._components[uri]
except KeyError:
return None
return definition
def list_components(self):
"""
List of all ComponentDefinitions in the document
"""
return self._components.values()
def assemble_component(self, into_component, using_components):
"""
Assemble a list of already defined components into a structual hirearchy
"""
if not isinstance(using_components, list) or len(using_components) == 0:
raise Exception('Must supply list of ComponentDefinitions')
components = []
sequence_annotations = []
seq_elements = ''
for k, c in enumerate(using_components):
try:
self._components[c.identity]
except KeyError:
raise Exception('Must already have defined ComponentDefinition in document')
else:
identity = into_component.identity + '/' + c.identity
# All components are initially public, this can be changed later
component = Component(identity,
c,
'public',
display_id=c.identity)
components.append(component)
# If there is a sequence on the ComponentDefinition use the first element
if len(c.sequences) > 0:
# Add the sequence to the document
self._add_sequence(c.sequences[0])
# Get start/end points of sequence
start = len(seq_elements) + 1 # The sequence is usually 1 indexed
end = start + len(c.sequences[0].elements)
# Add to the component sequence element
seq_elements += c.sequences[0].elements
# Create a Range object to hold seq range
range_identity = identity + '_sequence_annotation/range'
seq_range = Range(range_identity, start, end, display_id='range')
# Create a SequenceAnnotation object to hold the range
annot_identity = identity + '_sequence_annotation'
seq_annot = SequenceAnnotation(annot_identity,
component=component,
locations=[seq_range],
display_id=c.identity + '_sequence_annotation')
sequence_annotations.append(seq_annot)
if seq_elements != '':
seq_encoding = using_components[0].sequences[0].encoding
seq_identity = '{}_sequence'.format(into_component.identity)
seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
self._add_sequence(seq)
into_component.sequences.append(seq)
into_component.components = components
into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
"""
Add a Sequence to the document
"""
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity))
def add_model(self, model):
"""
Add a model to the document
"""
if model.identity not in self._models.keys():
self._models[model.identity] = model
else:
raise ValueError("{} has already been defined".format(model.identity))
def remove_model(self, identity):
"""
Remove a Model from the document
"""
try:
self._models.pop(identity)
except KeyError:
pass
def get_model(self, uri):
"""
Get a Model for the document
"""
pass
def add_module_definition(self, module_definition):
"""
Add a ModuleDefinition to the document
"""
if module_definition.identity not in self._module_definitions.keys():
self._module_definitions[module_definition.identity] = module_definition
else:
raise ValueError("{} has already been defined".format(module_definition.identity))
def remove_module_definition(self, identity):
"""
Remove a ModuleDefinition from the document
"""
try:
self._module_definitions.pop(identity)
except KeyError:
pass
def get_module_definition(self, uri):
"""
Get a ModuleDefinition from the document
"""
pass
def find(self, uri):
"""
Recursivly search document for URI
"""
pass
def clear_document(self):
"""
Clears ALL items from document, reseting it to clean
"""
self._components.clear()
self._sequences.clear()
self._namespaces.clear()
self._models.clear()
self._modules.clear()
self._collections.clear()
self._annotations.clear()
self._functional_component_store.clear()
self._collection_store.clear()
def _get_elements(self, graph, element_type):
return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
"""
Get a value from an RDF triple
"""
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values
def _get_rdf_identified(self, graph, identity):
c = {}
c['identity'] = identity.toPython() if type(identity) is not str else identity
c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
# Get annotations (non top level)
c['annotations'] = []
for triple in graph.triples((identity, None, None)):
namespace, obj = split_uri(triple[1])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
if isinstance(triple[2], URIRef):
value = AnnotationValue(uri=triple[2].toPython())
elif isinstance(triple[2], Literal):
value = AnnotationValue(literal=triple[2].toPython())
else:
value = None
c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
return c
def _read_sequences(self, graph):
"""
Read graph and add sequences to document
"""
for e in self._get_elements(graph, SBOL.Sequence):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
seq = Sequence(**c)
self._sequences[identity.toPython()] = seq
self._collection_store[identity.toPython()] = seq
def _read_component_definitions(self, graph):
"""
Read graph and add component defintions to document
"""
for e in self._get_elements(graph, SBOL.ComponentDefinition):
identity = e[0]
# Store component values in dict
c = self._get_rdf_identified(graph, identity)
c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
obj = ComponentDefinition(**c)
self._components[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_component_definitions(self, graph):
"""
Read graph and update component definitions with related elements
"""
for def_uri, comp_def in self._components.items():
# Store created components indexed for later lookup
component_index = {}
identity = URIRef(def_uri)
# Get components
for comp in graph.triples((identity, SBOL.component, None)):
comp_identity = comp[2]
ci = self._get_rdf_identified(graph, comp_identity)
ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
ci['definition'] = self._components[component_comp_def]
c = Component(**ci)
component_index[ci['identity']] = c
self._components[def_uri].components = list(component_index.values())
# Get sequence annotations
if (identity, SBOL.sequenceAnnotation, None) in graph:
find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
else:
find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
sequence_annotations = []
for seq_annot in graph.triples(find_annotation_using):
seq_identity = seq_annot[2]
sa = self._get_rdf_identified(graph, seq_identity)
component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
sa['component'] = component_index[component_to_use]
sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
locations = []
for loc in graph.triples((seq_identity, SBOL.location, None)):
loc_identity = loc[2]
location = self._get_rdf_identified(graph, loc_identity)
location['orientation'] = self._get_triplet_value(graph, loc_identity,
SBOL.orientation)
location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
if location_type == SBOL.Range:
location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
locations.append(Range(**location))
elif location_type == SBOL.Cut:
location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
locations.append(Cut(**location))
else:
locations.append(GenericLocation(**location))
sa_obj = SequenceAnnotation(locations=locations, **sa)
sequence_annotations.append(sa_obj)
self._components[def_uri].sequence_annotations = sequence_annotations
# Get sequence constraints
if (identity, SBOL.sequenceConstraint, None) in graph:
find_constraint_using = (identity, SBOL.sequenceConstraint, None)
else:
find_constraint_using = (identity, SBOL.SequenceConstraint, None)
sequence_constraints = []
for seq_constraint in graph.triples(find_constraint_using):
seq_identity = seq_constraint[2]
sc = self._get_rdf_identified(graph, seq_identity)
sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
sc['subject'] = component_index[subject_id]
object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
# Object is a reserved word so call it obj to prevent clashes
sc['obj'] = component_index[object_id]
sc_obj = SequenceConstraint(**sc)
sequence_constraints.append(sc_obj)
self._components[def_uri].sequence_constraints = sequence_constraints
def _read_models(self, graph):
"""
Read graph and add models to document
"""
for e in self._get_elements(graph, SBOL.Model):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
obj = Model(**m)
self._models[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _read_module_definitions(self, graph):
"""
Read graph and add module defintions to document
"""
for e in self._get_elements(graph, SBOL.ModuleDefinition):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
functional_components = {}
for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
func_identity = func_comp[2]
fc = self._get_rdf_identified(graph, func_identity)
definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
fc['definition'] = self._components[definition]
fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
self._functional_component_store[func_identity.toPython()] = \
functional_components[func_identity.toPython()]
interactions = []
for inter in graph.triples((identity, SBOL.interaction, None)):
inter_identity = inter[2]
it = self._get_rdf_identified(graph, inter_identity)
it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
participations = []
for p in graph.triples((inter_identity, SBOL.participation, None)):
pc = self._get_rdf_identified(graph, p[2])
roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
# Need to use one of the functional component created above
participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
participant = functional_components[participant_id]
participations.append(Participation(roles=roles, participant=participant, **pc))
interactions.append(Interaction(participations=participations, **it))
obj = ModuleDefinition(functional_components=functional_components.values(),
interactions=interactions,
**m)
self._modules[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_module_definitions(self, graph):
"""
Using collected module definitions extend linkages
"""
for mod_id in self._modules:
mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
modules = []
for mod in graph.triples((mod_identity, SBOL.module, None)):
md = self._get_rdf_identified(graph, mod[2])
definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
md['definition'] = self._modules[definition_id]
maps_to = []
for m in graph.triples((mod[2], SBOL.mapsTo, None)):
mt = self._get_rdf_identified(graph, m[2])
mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
local_id = self._get_triplet_value(graph, m[2], SBOL.local)
remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
mt['local'] = self._functional_component_store[local_id]
mt['remote'] = self._functional_component_store[remote_id]
maps_to.append(MapsTo(**mt))
modules.append(Module(maps_to=maps_to, **md))
self._modules[mod_id].modules = modules
def _read_annotations(self, graph):
"""
Find any non-defined elements at TopLevel and create annotations
"""
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
for triple in graph.triples((None, RDF.type, None)):
namespace, obj = split_uri(triple[2])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
identity = triple[0]
gt = self._get_rdf_identified(graph, identity)
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
gt['rdf_type'] = q_name
gt_obj = GenericTopLevel(**gt)
self._annotations[identity.toPython()] = gt_obj
self._collection_store[identity.toPython()] = gt_obj
def _read_collections(self, graph):
"""
Read graph and add collections to document
"""
for e in self._get_elements(graph, SBOL.Collection):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
members = []
# Need to handle other non-standard TopLevel objects first
for m in graph.triples((identity, SBOL.member, None)):
members.append(self._collection_store[m[2].toPython()])
obj = Collection(members=members, **c)
self._collections[identity.toPython()] = obj
def read(self, f):
"""
Read in an SBOL file, replacing current document contents
"""
self.clear_document()
g = Graph()
g.parse(f, format='xml')
for n in g.namespaces():
ns = n[1].toPython()
if not ns.endswith(('#', '/', ':')):
ns = ns + '/'
self._namespaces[n[0]] = ns
# Extend the existing namespaces available
XML_NS[n[0]] = ns
self._read_sequences(g)
self._read_component_definitions(g)
self._extend_component_definitions(g)
self._read_models(g)
self._read_module_definitions(g)
self._extend_module_definitions(g)
self._read_annotations(g)
# Last as this needs all other top level objects created
self._read_collections(g)
def append(self, f):
"""
Read an SBOL file and append contents to current document
"""
pass
def _add_to_root(self, root_node, elements):
for item in elements:
elem = item._as_rdf_xml(self.ns)
root_node.append(elem)
def write(self, f):
"""
Write an SBOL file from current document contents
"""
rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
# TODO: TopLevel Annotations
sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity)
self._add_to_root(rdf, sequence_values)
component_values = sorted(self._components.values(), key=lambda x: x.identity)
self._add_to_root(rdf, component_values)
model_values = sorted(self._models.values(), key=lambda x: x.identity)
self._add_to_root(rdf, model_values)
module_values = sorted(self._modules.values(), key=lambda x: x.identity)
self._add_to_root(rdf, module_values)
collection_values = sorted(self._collections.values(), key=lambda x: x.identity)
self._add_to_root(rdf, collection_values)
annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity)
self._add_to_root(rdf, annotation_values)
f.write(ET.tostring(rdf,
pretty_print=True,
xml_declaration=True,
encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document.clear_document | python | def clear_document(self):
self._components.clear()
self._sequences.clear()
self._namespaces.clear()
self._models.clear()
self._modules.clear()
self._collections.clear()
self._annotations.clear()
self._functional_component_store.clear()
self._collection_store.clear() | Clears ALL items from document, reseting it to clean | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L246-L258 | null | class Document(object):
"""
Provides a base for creating SBOL documents
"""
def __init__(self,
namespace,
validate=True):
# Don't access directly: use function getter/setters
self._components = {}
self._sequences = {}
self._namespaces = {}
self._models = {}
self._modules = {}
self._collections = {}
self._annotations = {}
# Used for looking up functional components when reading
# in data from a file
self._functional_component_store = {}
self._collection_store = {}
if validators.url(namespace):
self.document_namespace = namespace
else:
raise Exception('Invalid namespace URI')
self.validate = validate
# Create a document namesspace for use in RDF serialization
self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
"""
Add a namespace to the document
"""
self._namespaces[prefix] = namespace
def get_namespaces(self):
"""
Get all namespaces in the document (inc default)
"""
return XML_NS + self._namespaces
def _to_uri_from_namespace(self, value):
"""
Take a value and make a URL using the document namespace
"""
return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
"""
Take an existing URI and append more data to it
"""
return urljoin(uri, value)
def add_component_definition(self, definition):
"""
Add a ComponentDefinition to the document
"""
# definition.identity = self._to_uri_from_namespace(definition.identity)
if definition.identity not in self._components.keys():
self._components[definition.identity] = definition
else:
raise ValueError("{} has already been defined".format(definition.identity))
def remove_component_definition(self, identity):
"""
Remove a ComponentDefinition from the document
"""
try:
self._components.pop(identity)
except KeyError:
pass
def get_component_definition(self, uri):
"""
Get a ComponentDefintion from the document
"""
try:
definition = self._components[uri]
except KeyError:
return None
return definition
def list_components(self):
"""
List of all ComponentDefinitions in the document
"""
return self._components.values()
def assemble_component(self, into_component, using_components):
"""
Assemble a list of already defined components into a structual hirearchy
"""
if not isinstance(using_components, list) or len(using_components) == 0:
raise Exception('Must supply list of ComponentDefinitions')
components = []
sequence_annotations = []
seq_elements = ''
for k, c in enumerate(using_components):
try:
self._components[c.identity]
except KeyError:
raise Exception('Must already have defined ComponentDefinition in document')
else:
identity = into_component.identity + '/' + c.identity
# All components are initially public, this can be changed later
component = Component(identity,
c,
'public',
display_id=c.identity)
components.append(component)
# If there is a sequence on the ComponentDefinition use the first element
if len(c.sequences) > 0:
# Add the sequence to the document
self._add_sequence(c.sequences[0])
# Get start/end points of sequence
start = len(seq_elements) + 1 # The sequence is usually 1 indexed
end = start + len(c.sequences[0].elements)
# Add to the component sequence element
seq_elements += c.sequences[0].elements
# Create a Range object to hold seq range
range_identity = identity + '_sequence_annotation/range'
seq_range = Range(range_identity, start, end, display_id='range')
# Create a SequenceAnnotation object to hold the range
annot_identity = identity + '_sequence_annotation'
seq_annot = SequenceAnnotation(annot_identity,
component=component,
locations=[seq_range],
display_id=c.identity + '_sequence_annotation')
sequence_annotations.append(seq_annot)
if seq_elements != '':
seq_encoding = using_components[0].sequences[0].encoding
seq_identity = '{}_sequence'.format(into_component.identity)
seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
self._add_sequence(seq)
into_component.sequences.append(seq)
into_component.components = components
into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
"""
Add a Sequence to the document
"""
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity))
def add_model(self, model):
"""
Add a model to the document
"""
if model.identity not in self._models.keys():
self._models[model.identity] = model
else:
raise ValueError("{} has already been defined".format(model.identity))
def remove_model(self, identity):
"""
Remove a Model from the document
"""
try:
self._models.pop(identity)
except KeyError:
pass
def get_model(self, uri):
"""
Get a Model for the document
"""
pass
def add_module_definition(self, module_definition):
"""
Add a ModuleDefinition to the document
"""
if module_definition.identity not in self._module_definitions.keys():
self._module_definitions[module_definition.identity] = module_definition
else:
raise ValueError("{} has already been defined".format(module_definition.identity))
def remove_module_definition(self, identity):
"""
Remove a ModuleDefinition from the document
"""
try:
self._module_definitions.pop(identity)
except KeyError:
pass
def get_module_definition(self, uri):
"""
Get a ModuleDefinition from the document
"""
pass
def find(self, uri):
"""
Recursivly search document for URI
"""
pass
def get_components(self, uri):
"""
Get components from a component definition in order
"""
try:
component_definition = self._components[uri]
except KeyError:
return False
sorted_sequences = sorted(component_definition.sequence_annotations,
key=attrgetter('first_location'))
return [c.component for c in sorted_sequences]
def _get_elements(self, graph, element_type):
return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
"""
Get a value from an RDF triple
"""
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values
def _get_rdf_identified(self, graph, identity):
c = {}
c['identity'] = identity.toPython() if type(identity) is not str else identity
c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
# Get annotations (non top level)
c['annotations'] = []
for triple in graph.triples((identity, None, None)):
namespace, obj = split_uri(triple[1])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
if isinstance(triple[2], URIRef):
value = AnnotationValue(uri=triple[2].toPython())
elif isinstance(triple[2], Literal):
value = AnnotationValue(literal=triple[2].toPython())
else:
value = None
c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
return c
def _read_sequences(self, graph):
"""
Read graph and add sequences to document
"""
for e in self._get_elements(graph, SBOL.Sequence):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
seq = Sequence(**c)
self._sequences[identity.toPython()] = seq
self._collection_store[identity.toPython()] = seq
def _read_component_definitions(self, graph):
"""
Read graph and add component defintions to document
"""
for e in self._get_elements(graph, SBOL.ComponentDefinition):
identity = e[0]
# Store component values in dict
c = self._get_rdf_identified(graph, identity)
c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
obj = ComponentDefinition(**c)
self._components[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_component_definitions(self, graph):
"""
Read graph and update component definitions with related elements
"""
for def_uri, comp_def in self._components.items():
# Store created components indexed for later lookup
component_index = {}
identity = URIRef(def_uri)
# Get components
for comp in graph.triples((identity, SBOL.component, None)):
comp_identity = comp[2]
ci = self._get_rdf_identified(graph, comp_identity)
ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
ci['definition'] = self._components[component_comp_def]
c = Component(**ci)
component_index[ci['identity']] = c
self._components[def_uri].components = list(component_index.values())
# Get sequence annotations
if (identity, SBOL.sequenceAnnotation, None) in graph:
find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
else:
find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
sequence_annotations = []
for seq_annot in graph.triples(find_annotation_using):
seq_identity = seq_annot[2]
sa = self._get_rdf_identified(graph, seq_identity)
component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
sa['component'] = component_index[component_to_use]
sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
locations = []
for loc in graph.triples((seq_identity, SBOL.location, None)):
loc_identity = loc[2]
location = self._get_rdf_identified(graph, loc_identity)
location['orientation'] = self._get_triplet_value(graph, loc_identity,
SBOL.orientation)
location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
if location_type == SBOL.Range:
location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
locations.append(Range(**location))
elif location_type == SBOL.Cut:
location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
locations.append(Cut(**location))
else:
locations.append(GenericLocation(**location))
sa_obj = SequenceAnnotation(locations=locations, **sa)
sequence_annotations.append(sa_obj)
self._components[def_uri].sequence_annotations = sequence_annotations
# Get sequence constraints
if (identity, SBOL.sequenceConstraint, None) in graph:
find_constraint_using = (identity, SBOL.sequenceConstraint, None)
else:
find_constraint_using = (identity, SBOL.SequenceConstraint, None)
sequence_constraints = []
for seq_constraint in graph.triples(find_constraint_using):
seq_identity = seq_constraint[2]
sc = self._get_rdf_identified(graph, seq_identity)
sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
sc['subject'] = component_index[subject_id]
object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
# Object is a reserved word so call it obj to prevent clashes
sc['obj'] = component_index[object_id]
sc_obj = SequenceConstraint(**sc)
sequence_constraints.append(sc_obj)
self._components[def_uri].sequence_constraints = sequence_constraints
def _read_models(self, graph):
"""
Read graph and add models to document
"""
for e in self._get_elements(graph, SBOL.Model):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
obj = Model(**m)
self._models[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _read_module_definitions(self, graph):
"""
Read graph and add module defintions to document
"""
for e in self._get_elements(graph, SBOL.ModuleDefinition):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
functional_components = {}
for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
func_identity = func_comp[2]
fc = self._get_rdf_identified(graph, func_identity)
definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
fc['definition'] = self._components[definition]
fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
self._functional_component_store[func_identity.toPython()] = \
functional_components[func_identity.toPython()]
interactions = []
for inter in graph.triples((identity, SBOL.interaction, None)):
inter_identity = inter[2]
it = self._get_rdf_identified(graph, inter_identity)
it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
participations = []
for p in graph.triples((inter_identity, SBOL.participation, None)):
pc = self._get_rdf_identified(graph, p[2])
roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
# Need to use one of the functional component created above
participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
participant = functional_components[participant_id]
participations.append(Participation(roles=roles, participant=participant, **pc))
interactions.append(Interaction(participations=participations, **it))
obj = ModuleDefinition(functional_components=functional_components.values(),
interactions=interactions,
**m)
self._modules[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_module_definitions(self, graph):
"""
Using collected module definitions extend linkages
"""
for mod_id in self._modules:
mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
modules = []
for mod in graph.triples((mod_identity, SBOL.module, None)):
md = self._get_rdf_identified(graph, mod[2])
definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
md['definition'] = self._modules[definition_id]
maps_to = []
for m in graph.triples((mod[2], SBOL.mapsTo, None)):
mt = self._get_rdf_identified(graph, m[2])
mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
local_id = self._get_triplet_value(graph, m[2], SBOL.local)
remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
mt['local'] = self._functional_component_store[local_id]
mt['remote'] = self._functional_component_store[remote_id]
maps_to.append(MapsTo(**mt))
modules.append(Module(maps_to=maps_to, **md))
self._modules[mod_id].modules = modules
def _read_annotations(self, graph):
"""
Find any non-defined elements at TopLevel and create annotations
"""
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
for triple in graph.triples((None, RDF.type, None)):
namespace, obj = split_uri(triple[2])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
identity = triple[0]
gt = self._get_rdf_identified(graph, identity)
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
gt['rdf_type'] = q_name
gt_obj = GenericTopLevel(**gt)
self._annotations[identity.toPython()] = gt_obj
self._collection_store[identity.toPython()] = gt_obj
def _read_collections(self, graph):
"""
Read graph and add collections to document
"""
for e in self._get_elements(graph, SBOL.Collection):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
members = []
# Need to handle other non-standard TopLevel objects first
for m in graph.triples((identity, SBOL.member, None)):
members.append(self._collection_store[m[2].toPython()])
obj = Collection(members=members, **c)
self._collections[identity.toPython()] = obj
def read(self, f):
"""
Read in an SBOL file, replacing current document contents
"""
self.clear_document()
g = Graph()
g.parse(f, format='xml')
for n in g.namespaces():
ns = n[1].toPython()
if not ns.endswith(('#', '/', ':')):
ns = ns + '/'
self._namespaces[n[0]] = ns
# Extend the existing namespaces available
XML_NS[n[0]] = ns
self._read_sequences(g)
self._read_component_definitions(g)
self._extend_component_definitions(g)
self._read_models(g)
self._read_module_definitions(g)
self._extend_module_definitions(g)
self._read_annotations(g)
# Last as this needs all other top level objects created
self._read_collections(g)
def append(self, f):
"""
Read an SBOL file and append contents to current document
"""
pass
def _add_to_root(self, root_node, elements):
for item in elements:
elem = item._as_rdf_xml(self.ns)
root_node.append(elem)
def write(self, f):
"""
Write an SBOL file from current document contents
"""
rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
# TODO: TopLevel Annotations
sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity)
self._add_to_root(rdf, sequence_values)
component_values = sorted(self._components.values(), key=lambda x: x.identity)
self._add_to_root(rdf, component_values)
model_values = sorted(self._models.values(), key=lambda x: x.identity)
self._add_to_root(rdf, model_values)
module_values = sorted(self._modules.values(), key=lambda x: x.identity)
self._add_to_root(rdf, module_values)
collection_values = sorted(self._collections.values(), key=lambda x: x.identity)
self._add_to_root(rdf, collection_values)
annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity)
self._add_to_root(rdf, annotation_values)
f.write(ET.tostring(rdf,
pretty_print=True,
xml_declaration=True,
encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document._get_triplet_value | python | def _get_triplet_value(self, graph, identity, rdf_type):
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value | Get a value from an RDF triple | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L263-L268 | null | class Document(object):
"""
Provides a base for creating SBOL documents
"""
def __init__(self,
namespace,
validate=True):
# Don't access directly: use function getter/setters
self._components = {}
self._sequences = {}
self._namespaces = {}
self._models = {}
self._modules = {}
self._collections = {}
self._annotations = {}
# Used for looking up functional components when reading
# in data from a file
self._functional_component_store = {}
self._collection_store = {}
if validators.url(namespace):
self.document_namespace = namespace
else:
raise Exception('Invalid namespace URI')
self.validate = validate
# Create a document namesspace for use in RDF serialization
self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
"""
Add a namespace to the document
"""
self._namespaces[prefix] = namespace
def get_namespaces(self):
"""
Get all namespaces in the document (inc default)
"""
return XML_NS + self._namespaces
def _to_uri_from_namespace(self, value):
"""
Take a value and make a URL using the document namespace
"""
return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
"""
Take an existing URI and append more data to it
"""
return urljoin(uri, value)
def add_component_definition(self, definition):
"""
Add a ComponentDefinition to the document
"""
# definition.identity = self._to_uri_from_namespace(definition.identity)
if definition.identity not in self._components.keys():
self._components[definition.identity] = definition
else:
raise ValueError("{} has already been defined".format(definition.identity))
def remove_component_definition(self, identity):
"""
Remove a ComponentDefinition from the document
"""
try:
self._components.pop(identity)
except KeyError:
pass
def get_component_definition(self, uri):
"""
Get a ComponentDefintion from the document
"""
try:
definition = self._components[uri]
except KeyError:
return None
return definition
def list_components(self):
"""
List of all ComponentDefinitions in the document
"""
return self._components.values()
def assemble_component(self, into_component, using_components):
"""
Assemble a list of already defined components into a structual hirearchy
"""
if not isinstance(using_components, list) or len(using_components) == 0:
raise Exception('Must supply list of ComponentDefinitions')
components = []
sequence_annotations = []
seq_elements = ''
for k, c in enumerate(using_components):
try:
self._components[c.identity]
except KeyError:
raise Exception('Must already have defined ComponentDefinition in document')
else:
identity = into_component.identity + '/' + c.identity
# All components are initially public, this can be changed later
component = Component(identity,
c,
'public',
display_id=c.identity)
components.append(component)
# If there is a sequence on the ComponentDefinition use the first element
if len(c.sequences) > 0:
# Add the sequence to the document
self._add_sequence(c.sequences[0])
# Get start/end points of sequence
start = len(seq_elements) + 1 # The sequence is usually 1 indexed
end = start + len(c.sequences[0].elements)
# Add to the component sequence element
seq_elements += c.sequences[0].elements
# Create a Range object to hold seq range
range_identity = identity + '_sequence_annotation/range'
seq_range = Range(range_identity, start, end, display_id='range')
# Create a SequenceAnnotation object to hold the range
annot_identity = identity + '_sequence_annotation'
seq_annot = SequenceAnnotation(annot_identity,
component=component,
locations=[seq_range],
display_id=c.identity + '_sequence_annotation')
sequence_annotations.append(seq_annot)
if seq_elements != '':
seq_encoding = using_components[0].sequences[0].encoding
seq_identity = '{}_sequence'.format(into_component.identity)
seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
self._add_sequence(seq)
into_component.sequences.append(seq)
into_component.components = components
into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
"""
Add a Sequence to the document
"""
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity))
def add_model(self, model):
"""
Add a model to the document
"""
if model.identity not in self._models.keys():
self._models[model.identity] = model
else:
raise ValueError("{} has already been defined".format(model.identity))
def remove_model(self, identity):
"""
Remove a Model from the document
"""
try:
self._models.pop(identity)
except KeyError:
pass
def get_model(self, uri):
"""
Get a Model for the document
"""
pass
def add_module_definition(self, module_definition):
    """
    Add a ModuleDefinition to the document.

    Raises:
        ValueError: if a module with the same identity already exists.
    """
    # BUG FIX: __init__ creates self._modules (and read() fills it);
    # self._module_definitions never exists, so the original raised
    # AttributeError on every call.
    if module_definition.identity not in self._modules:
        self._modules[module_definition.identity] = module_definition
    else:
        raise ValueError("{} has already been defined".format(module_definition.identity))
def remove_module_definition(self, identity):
    """
    Remove a ModuleDefinition from the document.

    Unknown identities are silently ignored, matching the original
    try/except KeyError intent.
    """
    # BUG FIX: the module store is self._modules (see __init__);
    # self._module_definitions does not exist and raised AttributeError.
    self._modules.pop(identity, None)
def get_module_definition(self, uri):
"""
Get a ModuleDefinition from the document
"""
pass
def find(self, uri):
"""
Recursivly search document for URI
"""
pass
def get_components(self, uri):
"""
Get components from a component definition in order
"""
try:
component_definition = self._components[uri]
except KeyError:
return False
sorted_sequences = sorted(component_definition.sequence_annotations,
key=attrgetter('first_location'))
return [c.component for c in sorted_sequences]
def clear_document(self):
"""
Clears ALL items from document, reseting it to clean
"""
self._components.clear()
self._sequences.clear()
self._namespaces.clear()
self._models.clear()
self._modules.clear()
self._collections.clear()
self._annotations.clear()
self._functional_component_store.clear()
self._collection_store.clear()
def _get_elements(self, graph, element_type):
return graph.triples((None, RDF.type, element_type))
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values
def _get_rdf_identified(self, graph, identity):
c = {}
c['identity'] = identity.toPython() if type(identity) is not str else identity
c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
# Get annotations (non top level)
c['annotations'] = []
for triple in graph.triples((identity, None, None)):
namespace, obj = split_uri(triple[1])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
if isinstance(triple[2], URIRef):
value = AnnotationValue(uri=triple[2].toPython())
elif isinstance(triple[2], Literal):
value = AnnotationValue(literal=triple[2].toPython())
else:
value = None
c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
return c
def _read_sequences(self, graph):
"""
Read graph and add sequences to document
"""
for e in self._get_elements(graph, SBOL.Sequence):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
seq = Sequence(**c)
self._sequences[identity.toPython()] = seq
self._collection_store[identity.toPython()] = seq
def _read_component_definitions(self, graph):
"""
Read graph and add component defintions to document
"""
for e in self._get_elements(graph, SBOL.ComponentDefinition):
identity = e[0]
# Store component values in dict
c = self._get_rdf_identified(graph, identity)
c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
obj = ComponentDefinition(**c)
self._components[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_component_definitions(self, graph):
"""
Read graph and update component definitions with related elements
"""
for def_uri, comp_def in self._components.items():
# Store created components indexed for later lookup
component_index = {}
identity = URIRef(def_uri)
# Get components
for comp in graph.triples((identity, SBOL.component, None)):
comp_identity = comp[2]
ci = self._get_rdf_identified(graph, comp_identity)
ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
ci['definition'] = self._components[component_comp_def]
c = Component(**ci)
component_index[ci['identity']] = c
self._components[def_uri].components = list(component_index.values())
# Get sequence annotations
if (identity, SBOL.sequenceAnnotation, None) in graph:
find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
else:
find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
sequence_annotations = []
for seq_annot in graph.triples(find_annotation_using):
seq_identity = seq_annot[2]
sa = self._get_rdf_identified(graph, seq_identity)
component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
sa['component'] = component_index[component_to_use]
sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
locations = []
for loc in graph.triples((seq_identity, SBOL.location, None)):
loc_identity = loc[2]
location = self._get_rdf_identified(graph, loc_identity)
location['orientation'] = self._get_triplet_value(graph, loc_identity,
SBOL.orientation)
location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
if location_type == SBOL.Range:
location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
locations.append(Range(**location))
elif location_type == SBOL.Cut:
location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
locations.append(Cut(**location))
else:
locations.append(GenericLocation(**location))
sa_obj = SequenceAnnotation(locations=locations, **sa)
sequence_annotations.append(sa_obj)
self._components[def_uri].sequence_annotations = sequence_annotations
# Get sequence constraints
if (identity, SBOL.sequenceConstraint, None) in graph:
find_constraint_using = (identity, SBOL.sequenceConstraint, None)
else:
find_constraint_using = (identity, SBOL.SequenceConstraint, None)
sequence_constraints = []
for seq_constraint in graph.triples(find_constraint_using):
seq_identity = seq_constraint[2]
sc = self._get_rdf_identified(graph, seq_identity)
sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
sc['subject'] = component_index[subject_id]
object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
# Object is a reserved word so call it obj to prevent clashes
sc['obj'] = component_index[object_id]
sc_obj = SequenceConstraint(**sc)
sequence_constraints.append(sc_obj)
self._components[def_uri].sequence_constraints = sequence_constraints
def _read_models(self, graph):
"""
Read graph and add models to document
"""
for e in self._get_elements(graph, SBOL.Model):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
obj = Model(**m)
self._models[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _read_module_definitions(self, graph):
"""
Read graph and add module defintions to document
"""
for e in self._get_elements(graph, SBOL.ModuleDefinition):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
functional_components = {}
for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
func_identity = func_comp[2]
fc = self._get_rdf_identified(graph, func_identity)
definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
fc['definition'] = self._components[definition]
fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
self._functional_component_store[func_identity.toPython()] = \
functional_components[func_identity.toPython()]
interactions = []
for inter in graph.triples((identity, SBOL.interaction, None)):
inter_identity = inter[2]
it = self._get_rdf_identified(graph, inter_identity)
it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
participations = []
for p in graph.triples((inter_identity, SBOL.participation, None)):
pc = self._get_rdf_identified(graph, p[2])
roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
# Need to use one of the functional component created above
participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
participant = functional_components[participant_id]
participations.append(Participation(roles=roles, participant=participant, **pc))
interactions.append(Interaction(participations=participations, **it))
obj = ModuleDefinition(functional_components=functional_components.values(),
interactions=interactions,
**m)
self._modules[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_module_definitions(self, graph):
"""
Using collected module definitions extend linkages
"""
for mod_id in self._modules:
mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
modules = []
for mod in graph.triples((mod_identity, SBOL.module, None)):
md = self._get_rdf_identified(graph, mod[2])
definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
md['definition'] = self._modules[definition_id]
maps_to = []
for m in graph.triples((mod[2], SBOL.mapsTo, None)):
mt = self._get_rdf_identified(graph, m[2])
mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
local_id = self._get_triplet_value(graph, m[2], SBOL.local)
remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
mt['local'] = self._functional_component_store[local_id]
mt['remote'] = self._functional_component_store[remote_id]
maps_to.append(MapsTo(**mt))
modules.append(Module(maps_to=maps_to, **md))
self._modules[mod_id].modules = modules
def _read_annotations(self, graph):
"""
Find any non-defined elements at TopLevel and create annotations
"""
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
for triple in graph.triples((None, RDF.type, None)):
namespace, obj = split_uri(triple[2])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
identity = triple[0]
gt = self._get_rdf_identified(graph, identity)
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
gt['rdf_type'] = q_name
gt_obj = GenericTopLevel(**gt)
self._annotations[identity.toPython()] = gt_obj
self._collection_store[identity.toPython()] = gt_obj
def _read_collections(self, graph):
"""
Read graph and add collections to document
"""
for e in self._get_elements(graph, SBOL.Collection):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
members = []
# Need to handle other non-standard TopLevel objects first
for m in graph.triples((identity, SBOL.member, None)):
members.append(self._collection_store[m[2].toPython()])
obj = Collection(members=members, **c)
self._collections[identity.toPython()] = obj
def read(self, f):
"""
Read in an SBOL file, replacing current document contents
"""
self.clear_document()
g = Graph()
g.parse(f, format='xml')
for n in g.namespaces():
ns = n[1].toPython()
if not ns.endswith(('#', '/', ':')):
ns = ns + '/'
self._namespaces[n[0]] = ns
# Extend the existing namespaces available
XML_NS[n[0]] = ns
self._read_sequences(g)
self._read_component_definitions(g)
self._extend_component_definitions(g)
self._read_models(g)
self._read_module_definitions(g)
self._extend_module_definitions(g)
self._read_annotations(g)
# Last as this needs all other top level objects created
self._read_collections(g)
def append(self, f):
"""
Read an SBOL file and append contents to current document
"""
pass
def _add_to_root(self, root_node, elements):
for item in elements:
elem = item._as_rdf_xml(self.ns)
root_node.append(elem)
def write(self, f):
    """
    Write an SBOL file from current document contents.

    Serializes every top-level store, each sorted by identity, into a
    single RDF/XML tree and writes it to *f* (a binary file object).
    """
    rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
    # TODO: TopLevel Annotations
    # Order matters for output stability: sequences, components,
    # models, modules, collections, annotations (same as before).
    for store in (self._sequences, self._components, self._models,
                  self._modules, self._collections, self._annotations):
        self._add_to_root(rdf, sorted(store.values(), key=attrgetter('identity')))
    f.write(ET.tostring(rdf,
                        pretty_print=True,
                        xml_declaration=True,
                        encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document._get_triplet_value_list | python | def _get_triplet_value_list(self, graph, identity, rdf_type):
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values | Get a list of values from RDF triples when more than one may be present | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L270-L277 | null | class Document(object):
"""
Provides a base for creating SBOL documents
"""
def __init__(self,
             namespace,
             validate=True):
    """
    Create an SBOL document rooted at *namespace*.

    Args:
        namespace (str): base URI for the document; must be a valid URL.
        validate (bool): stored on the instance; not enforced here.

    Raises:
        Exception: if *namespace* is not a valid URL.
    """
    # Don't access directly: use function getter/setters
    self._components = {}
    self._sequences = {}
    self._namespaces = {}
    self._models = {}
    self._modules = {}
    self._collections = {}
    self._annotations = {}
    # Used for looking up functional components when reading
    # in data from a file
    self._functional_component_store = {}
    self._collection_store = {}
    if validators.url(namespace):
        self.document_namespace = namespace
    else:
        raise Exception('Invalid namespace URI')
    self.validate = validate
    # Create a document namespace object for use in RDF serialization
    self.ns = Namespace(self.document_namespace)
def __str__(self):
    """Return a readable label: the document namespace wrapped in braces."""
    return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
    """
    Register *namespace* under *prefix* on this document.

    A later call with the same prefix overwrites the earlier entry.
    """
    self._namespaces.update({prefix: namespace})
def get_namespaces(self):
    """
    Return all namespaces known to the document, including defaults.

    Returns:
        dict: prefix -> namespace URI, combining the module-level
        XML_NS defaults with namespaces added to this document.
        Document-level entries win on prefix collision.
    """
    # BUG FIX: dicts do not support `+`; the original
    # `XML_NS + self._namespaces` raised TypeError on every call.
    merged = dict(XML_NS)
    merged.update(self._namespaces)
    return merged
def _to_uri_from_namespace(self, value):
"""
Take a value and make a URL using the document namespace
"""
return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
    """
    Take an existing URI and append more data to it.

    Uses urljoin semantics: *value* is resolved relative to *uri*, so a
    trailing-slash base gets *value* appended, while a non-slash final
    path segment is replaced.
    """
    return urljoin(uri, value)
def add_component_definition(self, definition):
    """
    Add a ComponentDefinition to the document.

    Raises:
        ValueError: if a definition with the same identity exists.
    """
    key = definition.identity
    if key in self._components:
        raise ValueError("{} has already been defined".format(key))
    self._components[key] = definition
def remove_component_definition(self, identity):
    """
    Remove a ComponentDefinition from the document.

    Unknown identities are silently ignored.
    """
    # pop with a default matches the original try/except KeyError.
    self._components.pop(identity, None)
def get_component_definition(self, uri):
    """
    Look up a ComponentDefinition by URI.

    Returns:
        The definition, or None when *uri* is unknown.
    """
    # dict.get already yields None for missing keys, matching the
    # original try/except KeyError behaviour.
    return self._components.get(uri)
def list_components(self):
"""
List of all ComponentDefinitions in the document
"""
return self._components.values()
def assemble_component(self, into_component, using_components):
"""
Assemble a list of already defined components into a structual hirearchy
"""
if not isinstance(using_components, list) or len(using_components) == 0:
raise Exception('Must supply list of ComponentDefinitions')
components = []
sequence_annotations = []
seq_elements = ''
for k, c in enumerate(using_components):
try:
self._components[c.identity]
except KeyError:
raise Exception('Must already have defined ComponentDefinition in document')
else:
identity = into_component.identity + '/' + c.identity
# All components are initially public, this can be changed later
component = Component(identity,
c,
'public',
display_id=c.identity)
components.append(component)
# If there is a sequence on the ComponentDefinition use the first element
if len(c.sequences) > 0:
# Add the sequence to the document
self._add_sequence(c.sequences[0])
# Get start/end points of sequence
start = len(seq_elements) + 1 # The sequence is usually 1 indexed
end = start + len(c.sequences[0].elements)
# Add to the component sequence element
seq_elements += c.sequences[0].elements
# Create a Range object to hold seq range
range_identity = identity + '_sequence_annotation/range'
seq_range = Range(range_identity, start, end, display_id='range')
# Create a SequenceAnnotation object to hold the range
annot_identity = identity + '_sequence_annotation'
seq_annot = SequenceAnnotation(annot_identity,
component=component,
locations=[seq_range],
display_id=c.identity + '_sequence_annotation')
sequence_annotations.append(seq_annot)
if seq_elements != '':
seq_encoding = using_components[0].sequences[0].encoding
seq_identity = '{}_sequence'.format(into_component.identity)
seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
self._add_sequence(seq)
into_component.sequences.append(seq)
into_component.components = components
into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
"""
Add a Sequence to the document
"""
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity))
def add_model(self, model):
    """
    Add a Model to the document.

    Raises:
        ValueError: if a model with the same identity exists.
    """
    key = model.identity
    if key in self._models:
        raise ValueError("{} has already been defined".format(key))
    self._models[key] = model
def remove_model(self, identity):
    """
    Remove a Model from the document.

    Unknown identities are silently ignored.
    """
    # pop with a default matches the original try/except KeyError.
    self._models.pop(identity, None)
def get_model(self, uri):
    """
    Get a Model for the document.

    TODO: not implemented yet; currently always returns None.
    """
    pass
def add_module_definition(self, module_definition):
    """
    Add a ModuleDefinition to the document.

    Raises:
        ValueError: if a module with the same identity already exists.
    """
    # BUG FIX: __init__ creates self._modules (and read() fills it);
    # self._module_definitions never exists, so the original raised
    # AttributeError on every call.
    if module_definition.identity not in self._modules:
        self._modules[module_definition.identity] = module_definition
    else:
        raise ValueError("{} has already been defined".format(module_definition.identity))
def remove_module_definition(self, identity):
    """
    Remove a ModuleDefinition from the document.

    Unknown identities are silently ignored, matching the original
    try/except KeyError intent.
    """
    # BUG FIX: the module store is self._modules (see __init__);
    # self._module_definitions does not exist and raised AttributeError.
    self._modules.pop(identity, None)
def get_module_definition(self, uri):
    """
    Get a ModuleDefinition from the document.

    TODO: not implemented yet; currently always returns None.
    """
    pass
def find(self, uri):
    """
    Recursively search the document for *uri*.

    TODO: not implemented yet; currently always returns None.
    """
    pass
def get_components(self, uri):
    """
    Return the components of a ComponentDefinition in sequence order.

    Components are ordered by their annotation's first_location.
    Returns False when *uri* is not a known component definition.
    """
    if uri not in self._components:
        return False
    definition = self._components[uri]
    ordered = sorted(definition.sequence_annotations,
                     key=attrgetter('first_location'))
    return [annotation.component for annotation in ordered]
def clear_document(self):
    """
    Clear ALL items from the document, resetting it to a clean state.
    """
    stores = (self._components, self._sequences, self._namespaces,
              self._models, self._modules, self._collections,
              self._annotations, self._functional_component_store,
              self._collection_store)
    for store in stores:
        store.clear()
def _get_elements(self, graph, element_type):
    """Yield all (subject, RDF.type, element_type) triples from *graph*."""
    return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
"""
Get a value from an RDF triple
"""
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value
def _get_rdf_identified(self, graph, identity):
c = {}
c['identity'] = identity.toPython() if type(identity) is not str else identity
c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
# Get annotations (non top level)
c['annotations'] = []
for triple in graph.triples((identity, None, None)):
namespace, obj = split_uri(triple[1])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
if isinstance(triple[2], URIRef):
value = AnnotationValue(uri=triple[2].toPython())
elif isinstance(triple[2], Literal):
value = AnnotationValue(literal=triple[2].toPython())
else:
value = None
c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
return c
def _read_sequences(self, graph):
"""
Read graph and add sequences to document
"""
for e in self._get_elements(graph, SBOL.Sequence):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
seq = Sequence(**c)
self._sequences[identity.toPython()] = seq
self._collection_store[identity.toPython()] = seq
def _read_component_definitions(self, graph):
"""
Read graph and add component defintions to document
"""
for e in self._get_elements(graph, SBOL.ComponentDefinition):
identity = e[0]
# Store component values in dict
c = self._get_rdf_identified(graph, identity)
c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
obj = ComponentDefinition(**c)
self._components[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_component_definitions(self, graph):
"""
Read graph and update component definitions with related elements
"""
for def_uri, comp_def in self._components.items():
# Store created components indexed for later lookup
component_index = {}
identity = URIRef(def_uri)
# Get components
for comp in graph.triples((identity, SBOL.component, None)):
comp_identity = comp[2]
ci = self._get_rdf_identified(graph, comp_identity)
ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
ci['definition'] = self._components[component_comp_def]
c = Component(**ci)
component_index[ci['identity']] = c
self._components[def_uri].components = list(component_index.values())
# Get sequence annotations
if (identity, SBOL.sequenceAnnotation, None) in graph:
find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
else:
find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
sequence_annotations = []
for seq_annot in graph.triples(find_annotation_using):
seq_identity = seq_annot[2]
sa = self._get_rdf_identified(graph, seq_identity)
component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
sa['component'] = component_index[component_to_use]
sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
locations = []
for loc in graph.triples((seq_identity, SBOL.location, None)):
loc_identity = loc[2]
location = self._get_rdf_identified(graph, loc_identity)
location['orientation'] = self._get_triplet_value(graph, loc_identity,
SBOL.orientation)
location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
if location_type == SBOL.Range:
location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
locations.append(Range(**location))
elif location_type == SBOL.Cut:
location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
locations.append(Cut(**location))
else:
locations.append(GenericLocation(**location))
sa_obj = SequenceAnnotation(locations=locations, **sa)
sequence_annotations.append(sa_obj)
self._components[def_uri].sequence_annotations = sequence_annotations
# Get sequence constraints
if (identity, SBOL.sequenceConstraint, None) in graph:
find_constraint_using = (identity, SBOL.sequenceConstraint, None)
else:
find_constraint_using = (identity, SBOL.SequenceConstraint, None)
sequence_constraints = []
for seq_constraint in graph.triples(find_constraint_using):
seq_identity = seq_constraint[2]
sc = self._get_rdf_identified(graph, seq_identity)
sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
sc['subject'] = component_index[subject_id]
object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
# Object is a reserved word so call it obj to prevent clashes
sc['obj'] = component_index[object_id]
sc_obj = SequenceConstraint(**sc)
sequence_constraints.append(sc_obj)
self._components[def_uri].sequence_constraints = sequence_constraints
def _read_models(self, graph):
"""
Read graph and add models to document
"""
for e in self._get_elements(graph, SBOL.Model):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
obj = Model(**m)
self._models[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _read_module_definitions(self, graph):
    """
    Read the graph and add ModuleDefinitions to the document.

    For each SBOL ModuleDefinition subject: builds its
    FunctionalComponents (also registered in
    self._functional_component_store for later cross-linking) and its
    Interactions/Participations, then stores the resulting object in
    self._modules and self._collection_store keyed by identity.
    """
    for e in self._get_elements(graph, SBOL.ModuleDefinition):
        identity = e[0]
        m = self._get_rdf_identified(graph, identity)
        m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
        functional_components = {}
        for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
            func_identity = func_comp[2]
            fc = self._get_rdf_identified(graph, func_identity)
            definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
            # Component definitions must already have been read
            fc['definition'] = self._components[definition]
            fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
            fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
            functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
            self._functional_component_store[func_identity.toPython()] = \
                functional_components[func_identity.toPython()]
        interactions = []
        for inter in graph.triples((identity, SBOL.interaction, None)):
            inter_identity = inter[2]
            it = self._get_rdf_identified(graph, inter_identity)
            # NOTE(review): component definitions read their types with
            # SBOL.type (singular); SBOL.types here looks like a typo --
            # confirm against the SBOL vocabulary before changing.
            it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
            participations = []
            for p in graph.triples((inter_identity, SBOL.participation, None)):
                pc = self._get_rdf_identified(graph, p[2])
                roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
                # Need to use one of the functional component created above
                participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
                participant = functional_components[participant_id]
                participations.append(Participation(roles=roles, participant=participant, **pc))
            interactions.append(Interaction(participations=participations, **it))
        obj = ModuleDefinition(functional_components=functional_components.values(),
                               interactions=interactions,
                               **m)
        self._modules[identity.toPython()] = obj
        self._collection_store[identity.toPython()] = obj
def _extend_module_definitions(self, graph):
    """
    Link already-read ModuleDefinitions to their sub-Modules and MapsTo
    relations using the graph.
    """
    for mod_id in self._modules:
        # NOTE(review): _get_triplet_value returns a plain Python value
        # (via toPython()), but it is used below as the *subject* of a
        # graph.triples match, which normally expects an rdflib term --
        # presumably this should be wrapped in URIRef; confirm against
        # rdflib's matching semantics.
        mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
        modules = []
        for mod in graph.triples((mod_identity, SBOL.module, None)):
            md = self._get_rdf_identified(graph, mod[2])
            definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
            # Link back to the ModuleDefinition object built earlier
            md['definition'] = self._modules[definition_id]
            maps_to = []
            for m in graph.triples((mod[2], SBOL.mapsTo, None)):
                mt = self._get_rdf_identified(graph, m[2])
                mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
                local_id = self._get_triplet_value(graph, m[2], SBOL.local)
                remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
                # FunctionalComponents were registered while reading
                # module definitions (_read_module_definitions)
                mt['local'] = self._functional_component_store[local_id]
                mt['remote'] = self._functional_component_store[remote_id]
                maps_to.append(MapsTo(**mt))
            modules.append(Module(maps_to=maps_to, **md))
        self._modules[mod_id].modules = modules
def _read_annotations(self, graph):
"""
Find any non-defined elements at TopLevel and create annotations
"""
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
for triple in graph.triples((None, RDF.type, None)):
namespace, obj = split_uri(triple[2])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
identity = triple[0]
gt = self._get_rdf_identified(graph, identity)
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
gt['rdf_type'] = q_name
gt_obj = GenericTopLevel(**gt)
self._annotations[identity.toPython()] = gt_obj
self._collection_store[identity.toPython()] = gt_obj
    def _read_collections(self, graph):
        """
        Read graph and add collections to document

        Must run after every other top-level reader: members are resolved
        through _collection_store, which those readers populate.
        """
        for e in self._get_elements(graph, SBOL.Collection):
            identity = e[0]
            c = self._get_rdf_identified(graph, identity)
            members = []
            # Need to handle other non-standard TopLevel objects first
            for m in graph.triples((identity, SBOL.member, None)):
                members.append(self._collection_store[m[2].toPython()])
            obj = Collection(members=members, **c)
            # NOTE(review): unlike the other readers this does not register the
            # Collection in _collection_store — confirm nested collections are
            # not expected as members.
            self._collections[identity.toPython()] = obj
    def read(self, f):
        """
        Read in an SBOL file, replacing current document contents

        Args:
            f: file path or file-like object accepted by rdflib's Graph.parse.

        The reader order matters: sequences/components/models first, then
        module definitions, then the linkage passes, and collections last
        because they resolve members against everything read before them.
        """
        self.clear_document()
        g = Graph()
        g.parse(f, format='xml')
        for n in g.namespaces():
            ns = n[1].toPython()
            # Normalise namespaces so they always end with a separator.
            if not ns.endswith(('#', '/', ':')):
                ns = ns + '/'
            self._namespaces[n[0]] = ns
            # Extend the existing namespaces available
            # NOTE(review): this mutates the module-global XML_NS, so parsed
            # namespaces leak across Document instances — confirm intended.
            XML_NS[n[0]] = ns
        self._read_sequences(g)
        self._read_component_definitions(g)
        self._extend_component_definitions(g)
        self._read_models(g)
        self._read_module_definitions(g)
        self._extend_module_definitions(g)
        self._read_annotations(g)
        # Last as this needs all other top level objects created
        self._read_collections(g)
    def append(self, f):
        """
        Read an SBOL file and append contents to current document

        NOTE(review): not yet implemented; calling this is currently a no-op.
        """
        pass
def _add_to_root(self, root_node, elements):
for item in elements:
elem = item._as_rdf_xml(self.ns)
root_node.append(elem)
def write(self, f):
"""
Write an SBOL file from current document contents
"""
rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
# TODO: TopLevel Annotations
sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity)
self._add_to_root(rdf, sequence_values)
component_values = sorted(self._components.values(), key=lambda x: x.identity)
self._add_to_root(rdf, component_values)
model_values = sorted(self._models.values(), key=lambda x: x.identity)
self._add_to_root(rdf, model_values)
module_values = sorted(self._modules.values(), key=lambda x: x.identity)
self._add_to_root(rdf, module_values)
collection_values = sorted(self._collections.values(), key=lambda x: x.identity)
self._add_to_root(rdf, collection_values)
annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity)
self._add_to_root(rdf, annotation_values)
f.write(ET.tostring(rdf,
pretty_print=True,
xml_declaration=True,
encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document._read_sequences | python | def _read_sequences(self, graph):
for e in self._get_elements(graph, SBOL.Sequence):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
seq = Sequence(**c)
self._sequences[identity.toPython()] = seq
self._collection_store[identity.toPython()] = seq | Read graph and add sequences to document | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L306-L317 | null | class Document(object):
"""
Provides a base for creating SBOL documents
"""
    def __init__(self,
                 namespace,
                 validate=True):
        """
        Create an empty SBOL document rooted at *namespace*.

        Args:
            namespace: base URI for the document; must be a valid URL.
            validate: flag stored on the instance (not consulted in __init__).

        Raises:
            Exception: if *namespace* is not a valid URL.
        """
        # Don't access directly: use function getter/setters
        self._components = {}
        self._sequences = {}
        self._namespaces = {}
        self._models = {}
        self._modules = {}
        self._collections = {}
        self._annotations = {}
        # NOTE(review): add_module_definition/remove_module_definition use
        # self._module_definitions, which is never created here — confirm
        # whether they should operate on self._modules instead.
        # Used for looking up functional components when reading
        # in data from a file
        self._functional_component_store = {}
        self._collection_store = {}
        if validators.url(namespace):
            self.document_namespace = namespace
        else:
            raise Exception('Invalid namespace URI')
        self.validate = validate
        # Create a document namespace for use in RDF serialization
        self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
"""
Add a namespace to the document
"""
self._namespaces[prefix] = namespace
def get_namespaces(self):
"""
Get all namespaces in the document (inc default)
"""
return XML_NS + self._namespaces
def _to_uri_from_namespace(self, value):
"""
Take a value and make a URL using the document namespace
"""
return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
"""
Take an existing URI and append more data to it
"""
return urljoin(uri, value)
def add_component_definition(self, definition):
"""
Add a ComponentDefinition to the document
"""
# definition.identity = self._to_uri_from_namespace(definition.identity)
if definition.identity not in self._components.keys():
self._components[definition.identity] = definition
else:
raise ValueError("{} has already been defined".format(definition.identity))
def remove_component_definition(self, identity):
"""
Remove a ComponentDefinition from the document
"""
try:
self._components.pop(identity)
except KeyError:
pass
def get_component_definition(self, uri):
"""
Get a ComponentDefintion from the document
"""
try:
definition = self._components[uri]
except KeyError:
return None
return definition
    def list_components(self):
        """
        List of all ComponentDefinitions in the document

        Returns:
            A dict values view of the stored ComponentDefinition objects
            (not a list, despite the name).
        """
        return self._components.values()
def assemble_component(self, into_component, using_components):
"""
Assemble a list of already defined components into a structual hirearchy
"""
if not isinstance(using_components, list) or len(using_components) == 0:
raise Exception('Must supply list of ComponentDefinitions')
components = []
sequence_annotations = []
seq_elements = ''
for k, c in enumerate(using_components):
try:
self._components[c.identity]
except KeyError:
raise Exception('Must already have defined ComponentDefinition in document')
else:
identity = into_component.identity + '/' + c.identity
# All components are initially public, this can be changed later
component = Component(identity,
c,
'public',
display_id=c.identity)
components.append(component)
# If there is a sequence on the ComponentDefinition use the first element
if len(c.sequences) > 0:
# Add the sequence to the document
self._add_sequence(c.sequences[0])
# Get start/end points of sequence
start = len(seq_elements) + 1 # The sequence is usually 1 indexed
end = start + len(c.sequences[0].elements)
# Add to the component sequence element
seq_elements += c.sequences[0].elements
# Create a Range object to hold seq range
range_identity = identity + '_sequence_annotation/range'
seq_range = Range(range_identity, start, end, display_id='range')
# Create a SequenceAnnotation object to hold the range
annot_identity = identity + '_sequence_annotation'
seq_annot = SequenceAnnotation(annot_identity,
component=component,
locations=[seq_range],
display_id=c.identity + '_sequence_annotation')
sequence_annotations.append(seq_annot)
if seq_elements != '':
seq_encoding = using_components[0].sequences[0].encoding
seq_identity = '{}_sequence'.format(into_component.identity)
seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
self._add_sequence(seq)
into_component.sequences.append(seq)
into_component.components = components
into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
"""
Add a Sequence to the document
"""
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity))
def add_model(self, model):
"""
Add a model to the document
"""
if model.identity not in self._models.keys():
self._models[model.identity] = model
else:
raise ValueError("{} has already been defined".format(model.identity))
def remove_model(self, identity):
"""
Remove a Model from the document
"""
try:
self._models.pop(identity)
except KeyError:
pass
    def get_model(self, uri):
        """
        Get a Model for the document

        NOTE(review): not yet implemented; currently returns None for any uri.
        """
        pass
def add_module_definition(self, module_definition):
"""
Add a ModuleDefinition to the document
"""
if module_definition.identity not in self._module_definitions.keys():
self._module_definitions[module_definition.identity] = module_definition
else:
raise ValueError("{} has already been defined".format(module_definition.identity))
def remove_module_definition(self, identity):
"""
Remove a ModuleDefinition from the document
"""
try:
self._module_definitions.pop(identity)
except KeyError:
pass
    def get_module_definition(self, uri):
        """
        Get a ModuleDefinition from the document

        NOTE(review): not yet implemented; currently returns None for any uri.
        """
        pass
    def find(self, uri):
        """
        Recursivly search document for URI

        NOTE(review): not yet implemented; currently returns None for any uri.
        """
        pass
    def get_components(self, uri):
        """
        Get components from a component definition in order

        Components are ordered by the first location of the definition's
        sequence annotations.

        Returns:
            list of Component objects, or False when *uri* is unknown.
            NOTE(review): get_component_definition returns None in the same
            situation — confirm which sentinel callers actually expect.
        """
        try:
            component_definition = self._components[uri]
        except KeyError:
            return False
        # Sort annotations by where each component starts in the sequence.
        sorted_sequences = sorted(component_definition.sequence_annotations,
                                  key=attrgetter('first_location'))
        return [c.component for c in sorted_sequences]
def clear_document(self):
"""
Clears ALL items from document, reseting it to clean
"""
self._components.clear()
self._sequences.clear()
self._namespaces.clear()
self._models.clear()
self._modules.clear()
self._collections.clear()
self._annotations.clear()
self._functional_component_store.clear()
self._collection_store.clear()
    def _get_elements(self, graph, element_type):
        """Return an iterator of (s, p, o) triples whose rdf:type is *element_type*."""
        return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
"""
Get a value from an RDF triple
"""
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values
    def _get_rdf_identified(self, graph, identity):
        """
        Collect the common 'Identified' properties of *identity* into a dict.

        Returns a dict with keys identity/display_id/was_derived_from/version/
        description/name plus an 'annotations' list of non-SBOL properties,
        suitable for passing as **kwargs to the model constructors.
        """
        c = {}
        c['identity'] = identity.toPython() if type(identity) is not str else identity
        c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
        c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
        c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
        c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
        c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
        # Reverse map namespace URI -> prefix for qualifying predicate names.
        flipped_namespaces = {v: k for k, v in self._namespaces.items()}
        # Get annotations (non top level)
        c['annotations'] = []
        for triple in graph.triples((identity, None, None)):
            namespace, obj = split_uri(triple[1])
            # NOTE(review): raises KeyError for a predicate in an unregistered
            # namespace — confirm that is the intended behaviour.
            prefix = flipped_namespaces[namespace]
            as_string = '{}:{}'.format(prefix, obj)
            if as_string not in VALID_ENTITIES:
                q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
                # URI-valued annotations and literal-valued annotations are
                # wrapped differently; anything else carries no value.
                if isinstance(triple[2], URIRef):
                    value = AnnotationValue(uri=triple[2].toPython())
                elif isinstance(triple[2], Literal):
                    value = AnnotationValue(literal=triple[2].toPython())
                else:
                    value = None
                c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
        return c
    def _read_component_definitions(self, graph):
        """
        Read graph and add component definitions to document

        Creates a ComponentDefinition per sbol:ComponentDefinition subject and
        indexes it in both _components and _collection_store (the latter so
        collections can later resolve members by URI).
        """
        for e in self._get_elements(graph, SBOL.ComponentDefinition):
            identity = e[0]
            # Store component values in dict
            c = self._get_rdf_identified(graph, identity)
            c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
            c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
            obj = ComponentDefinition(**c)
            self._components[identity.toPython()] = obj
            self._collection_store[identity.toPython()] = obj
    def _extend_component_definitions(self, graph):
        """
        Read graph and update component definitions with related elements

        Second pass over every stored ComponentDefinition: attach its
        sub-Components, SequenceAnnotations (with Range/Cut/GenericLocation
        locations) and SequenceConstraints, resolving references through the
        definitions read in the first pass.
        """
        for def_uri, comp_def in self._components.items():
            # Store created components indexed for later lookup
            component_index = {}
            identity = URIRef(def_uri)
            # Get components
            for comp in graph.triples((identity, SBOL.component, None)):
                comp_identity = comp[2]
                ci = self._get_rdf_identified(graph, comp_identity)
                # NOTE(review): SBOL.mapTo here vs SBOL.mapsTo used elsewhere
                # in this module — confirm the predicate name against the spec.
                ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
                ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
                component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
                ci['definition'] = self._components[component_comp_def]
                c = Component(**ci)
                component_index[ci['identity']] = c
            self._components[def_uri].components = list(component_index.values())
            # Get sequence annotations
            # Fall back to the capitalised predicate when the camelCase one is
            # absent (tolerates both spellings in input files).
            if (identity, SBOL.sequenceAnnotation, None) in graph:
                find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
            else:
                find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
            sequence_annotations = []
            for seq_annot in graph.triples(find_annotation_using):
                seq_identity = seq_annot[2]
                sa = self._get_rdf_identified(graph, seq_identity)
                component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
                sa['component'] = component_index[component_to_use]
                sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
                locations = []
                for loc in graph.triples((seq_identity, SBOL.location, None)):
                    loc_identity = loc[2]
                    location = self._get_rdf_identified(graph, loc_identity)
                    location['orientation'] = self._get_triplet_value(graph, loc_identity,
                                                                      SBOL.orientation)
                    # Dispatch on the location's rdf:type.
                    location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
                    if location_type == SBOL.Range:
                        location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
                        location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
                        locations.append(Range(**location))
                    elif location_type == SBOL.Cut:
                        location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
                        locations.append(Cut(**location))
                    else:
                        locations.append(GenericLocation(**location))
                sa_obj = SequenceAnnotation(locations=locations, **sa)
                sequence_annotations.append(sa_obj)
            self._components[def_uri].sequence_annotations = sequence_annotations
            # Get sequence constraints
            if (identity, SBOL.sequenceConstraint, None) in graph:
                find_constraint_using = (identity, SBOL.sequenceConstraint, None)
            else:
                find_constraint_using = (identity, SBOL.SequenceConstraint, None)
            sequence_constraints = []
            for seq_constraint in graph.triples(find_constraint_using):
                seq_identity = seq_constraint[2]
                sc = self._get_rdf_identified(graph, seq_identity)
                sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
                subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
                sc['subject'] = component_index[subject_id]
                object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
                # Object is a reserved word so call it obj to prevent clashes
                sc['obj'] = component_index[object_id]
                sc_obj = SequenceConstraint(**sc)
                sequence_constraints.append(sc_obj)
            self._components[def_uri].sequence_constraints = sequence_constraints
    def _read_models(self, graph):
        """
        Read graph and add models to document

        Creates a Model per sbol:Model subject and indexes it in both _models
        and _collection_store.
        """
        for e in self._get_elements(graph, SBOL.Model):
            identity = e[0]
            m = self._get_rdf_identified(graph, identity)
            m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
            m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
            m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
            obj = Model(**m)
            self._models[identity.toPython()] = obj
            self._collection_store[identity.toPython()] = obj
    def _read_module_definitions(self, graph):
        """
        Read graph and add module definitions to document

        For each sbol:ModuleDefinition, build its FunctionalComponents (also
        recorded in _functional_component_store for later MapsTo resolution),
        then its Interactions with their Participations.
        """
        for e in self._get_elements(graph, SBOL.ModuleDefinition):
            identity = e[0]
            m = self._get_rdf_identified(graph, identity)
            m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
            functional_components = {}
            for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
                func_identity = func_comp[2]
                fc = self._get_rdf_identified(graph, func_identity)
                # Resolve the referenced ComponentDefinition (must already be read).
                definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
                fc['definition'] = self._components[definition]
                fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
                fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
                functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
                self._functional_component_store[func_identity.toPython()] = \
                    functional_components[func_identity.toPython()]
            interactions = []
            for inter in graph.triples((identity, SBOL.interaction, None)):
                inter_identity = inter[2]
                it = self._get_rdf_identified(graph, inter_identity)
                # NOTE(review): SBOL.types here vs SBOL.type used for component
                # types above — confirm the predicate name against the spec.
                it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
                participations = []
                for p in graph.triples((inter_identity, SBOL.participation, None)):
                    pc = self._get_rdf_identified(graph, p[2])
                    roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
                    # Need to use one of the functional component created above
                    participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
                    participant = functional_components[participant_id]
                    participations.append(Participation(roles=roles, participant=participant, **pc))
                interactions.append(Interaction(participations=participations, **it))
            obj = ModuleDefinition(functional_components=functional_components.values(),
                                   interactions=interactions,
                                   **m)
            self._modules[identity.toPython()] = obj
            self._collection_store[identity.toPython()] = obj
    def _extend_module_definitions(self, graph):
        """
        Using collected module definitions extend linkages

        Second pass over ModuleDefinitions: now that every definition exists
        in self._modules, build each one's sub-Module list together with the
        MapsTo refinements that wire local functional components to remote ones.
        """
        for mod_id in self._modules:
            # NOTE(review): _get_triplet_value returns node.toPython() (a plain
            # Python value, not a URIRef); using that as the subject of
            # graph.triples() below may never match — confirm against rdflib.
            mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
            modules = []
            for mod in graph.triples((mod_identity, SBOL.module, None)):
                md = self._get_rdf_identified(graph, mod[2])
                # Link the sub-module to its already-built ModuleDefinition.
                definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
                md['definition'] = self._modules[definition_id]
                maps_to = []
                for m in graph.triples((mod[2], SBOL.mapsTo, None)):
                    mt = self._get_rdf_identified(graph, m[2])
                    mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
                    # local/remote resolve through the store populated while
                    # reading module definitions.
                    local_id = self._get_triplet_value(graph, m[2], SBOL.local)
                    remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
                    mt['local'] = self._functional_component_store[local_id]
                    mt['remote'] = self._functional_component_store[remote_id]
                    maps_to.append(MapsTo(**mt))
                modules.append(Module(maps_to=maps_to, **md))
            self._modules[mod_id].modules = modules
    def _read_annotations(self, graph):
        """
        Find any non-defined elements at TopLevel and create annotations

        Any rdf:type that is not one of the recognised SBOL entities becomes a
        GenericTopLevel, indexed in both _annotations and _collection_store so
        collections can later reference it by URI.
        """
        # Reverse map namespace URI -> prefix so types can be qualified.
        flipped_namespaces = {v: k for k, v in self._namespaces.items()}
        for triple in graph.triples((None, RDF.type, None)):
            namespace, obj = split_uri(triple[2])
            # NOTE(review): raises KeyError for a type in an unregistered
            # namespace — confirm that is the intended behaviour.
            prefix = flipped_namespaces[namespace]
            as_string = '{}:{}'.format(prefix, obj)
            if as_string not in VALID_ENTITIES:
                identity = triple[0]
                gt = self._get_rdf_identified(graph, identity)
                q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
                gt['rdf_type'] = q_name
                gt_obj = GenericTopLevel(**gt)
                self._annotations[identity.toPython()] = gt_obj
                self._collection_store[identity.toPython()] = gt_obj
    def _read_collections(self, graph):
        """
        Read graph and add collections to document

        Must run after every other top-level reader: members are resolved
        through _collection_store, which those readers populate.
        """
        for e in self._get_elements(graph, SBOL.Collection):
            identity = e[0]
            c = self._get_rdf_identified(graph, identity)
            members = []
            # Need to handle other non-standard TopLevel objects first
            for m in graph.triples((identity, SBOL.member, None)):
                members.append(self._collection_store[m[2].toPython()])
            obj = Collection(members=members, **c)
            # NOTE(review): unlike the other readers this does not register the
            # Collection in _collection_store — confirm nested collections are
            # not expected as members.
            self._collections[identity.toPython()] = obj
    def read(self, f):
        """
        Read in an SBOL file, replacing current document contents

        Args:
            f: file path or file-like object accepted by rdflib's Graph.parse.

        The reader order matters: sequences/components/models first, then
        module definitions, then the linkage passes, and collections last
        because they resolve members against everything read before them.
        """
        self.clear_document()
        g = Graph()
        g.parse(f, format='xml')
        for n in g.namespaces():
            ns = n[1].toPython()
            # Normalise namespaces so they always end with a separator.
            if not ns.endswith(('#', '/', ':')):
                ns = ns + '/'
            self._namespaces[n[0]] = ns
            # Extend the existing namespaces available
            # NOTE(review): this mutates the module-global XML_NS, so parsed
            # namespaces leak across Document instances — confirm intended.
            XML_NS[n[0]] = ns
        self._read_sequences(g)
        self._read_component_definitions(g)
        self._extend_component_definitions(g)
        self._read_models(g)
        self._read_module_definitions(g)
        self._extend_module_definitions(g)
        self._read_annotations(g)
        # Last as this needs all other top level objects created
        self._read_collections(g)
    def append(self, f):
        """
        Read an SBOL file and append contents to current document

        NOTE(review): not yet implemented; calling this is currently a no-op.
        """
        pass
def _add_to_root(self, root_node, elements):
for item in elements:
elem = item._as_rdf_xml(self.ns)
root_node.append(elem)
def write(self, f):
"""
Write an SBOL file from current document contents
"""
rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
# TODO: TopLevel Annotations
sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity)
self._add_to_root(rdf, sequence_values)
component_values = sorted(self._components.values(), key=lambda x: x.identity)
self._add_to_root(rdf, component_values)
model_values = sorted(self._models.values(), key=lambda x: x.identity)
self._add_to_root(rdf, model_values)
module_values = sorted(self._modules.values(), key=lambda x: x.identity)
self._add_to_root(rdf, module_values)
collection_values = sorted(self._collections.values(), key=lambda x: x.identity)
self._add_to_root(rdf, collection_values)
annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity)
self._add_to_root(rdf, annotation_values)
f.write(ET.tostring(rdf,
pretty_print=True,
xml_declaration=True,
encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document._read_component_definitions | python | def _read_component_definitions(self, graph):
for e in self._get_elements(graph, SBOL.ComponentDefinition):
identity = e[0]
# Store component values in dict
c = self._get_rdf_identified(graph, identity)
c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
obj = ComponentDefinition(**c)
self._components[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj | Read graph and add component defintions to document | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L319-L331 | null | class Document(object):
"""
Provides a base for creating SBOL documents
"""
def __init__(self,
namespace,
validate=True):
# Don't access directly: use function getter/setters
self._components = {}
self._sequences = {}
self._namespaces = {}
self._models = {}
self._modules = {}
self._collections = {}
self._annotations = {}
# Used for looking up functional components when reading
# in data from a file
self._functional_component_store = {}
self._collection_store = {}
if validators.url(namespace):
self.document_namespace = namespace
else:
raise Exception('Invalid namespace URI')
self.validate = validate
# Create a document namesspace for use in RDF serialization
self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
"""
Add a namespace to the document
"""
self._namespaces[prefix] = namespace
def get_namespaces(self):
"""
Get all namespaces in the document (inc default)
"""
return XML_NS + self._namespaces
def _to_uri_from_namespace(self, value):
"""
Take a value and make a URL using the document namespace
"""
return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
"""
Take an existing URI and append more data to it
"""
return urljoin(uri, value)
def add_component_definition(self, definition):
"""
Add a ComponentDefinition to the document
"""
# definition.identity = self._to_uri_from_namespace(definition.identity)
if definition.identity not in self._components.keys():
self._components[definition.identity] = definition
else:
raise ValueError("{} has already been defined".format(definition.identity))
def remove_component_definition(self, identity):
"""
Remove a ComponentDefinition from the document
"""
try:
self._components.pop(identity)
except KeyError:
pass
def get_component_definition(self, uri):
"""
Get a ComponentDefintion from the document
"""
try:
definition = self._components[uri]
except KeyError:
return None
return definition
def list_components(self):
"""
List of all ComponentDefinitions in the document
"""
return self._components.values()
def assemble_component(self, into_component, using_components):
"""
Assemble a list of already defined components into a structual hirearchy
"""
if not isinstance(using_components, list) or len(using_components) == 0:
raise Exception('Must supply list of ComponentDefinitions')
components = []
sequence_annotations = []
seq_elements = ''
for k, c in enumerate(using_components):
try:
self._components[c.identity]
except KeyError:
raise Exception('Must already have defined ComponentDefinition in document')
else:
identity = into_component.identity + '/' + c.identity
# All components are initially public, this can be changed later
component = Component(identity,
c,
'public',
display_id=c.identity)
components.append(component)
# If there is a sequence on the ComponentDefinition use the first element
if len(c.sequences) > 0:
# Add the sequence to the document
self._add_sequence(c.sequences[0])
# Get start/end points of sequence
start = len(seq_elements) + 1 # The sequence is usually 1 indexed
end = start + len(c.sequences[0].elements)
# Add to the component sequence element
seq_elements += c.sequences[0].elements
# Create a Range object to hold seq range
range_identity = identity + '_sequence_annotation/range'
seq_range = Range(range_identity, start, end, display_id='range')
# Create a SequenceAnnotation object to hold the range
annot_identity = identity + '_sequence_annotation'
seq_annot = SequenceAnnotation(annot_identity,
component=component,
locations=[seq_range],
display_id=c.identity + '_sequence_annotation')
sequence_annotations.append(seq_annot)
if seq_elements != '':
seq_encoding = using_components[0].sequences[0].encoding
seq_identity = '{}_sequence'.format(into_component.identity)
seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
self._add_sequence(seq)
into_component.sequences.append(seq)
into_component.components = components
into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
"""
Add a Sequence to the document
"""
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity))
def add_model(self, model):
"""
Add a model to the document
"""
if model.identity not in self._models.keys():
self._models[model.identity] = model
else:
raise ValueError("{} has already been defined".format(model.identity))
def remove_model(self, identity):
"""
Remove a Model from the document
"""
try:
self._models.pop(identity)
except KeyError:
pass
def get_model(self, uri):
"""
Get a Model for the document
"""
pass
def add_module_definition(self, module_definition):
"""
Add a ModuleDefinition to the document
"""
if module_definition.identity not in self._module_definitions.keys():
self._module_definitions[module_definition.identity] = module_definition
else:
raise ValueError("{} has already been defined".format(module_definition.identity))
def remove_module_definition(self, identity):
"""
Remove a ModuleDefinition from the document
"""
try:
self._module_definitions.pop(identity)
except KeyError:
pass
def get_module_definition(self, uri):
"""
Get a ModuleDefinition from the document
"""
pass
def find(self, uri):
"""
Recursivly search document for URI
"""
pass
def get_components(self, uri):
"""
Get components from a component definition in order
"""
try:
component_definition = self._components[uri]
except KeyError:
return False
sorted_sequences = sorted(component_definition.sequence_annotations,
key=attrgetter('first_location'))
return [c.component for c in sorted_sequences]
def clear_document(self):
"""
Clears ALL items from document, reseting it to clean
"""
self._components.clear()
self._sequences.clear()
self._namespaces.clear()
self._models.clear()
self._modules.clear()
self._collections.clear()
self._annotations.clear()
self._functional_component_store.clear()
self._collection_store.clear()
def _get_elements(self, graph, element_type):
return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
"""
Get a value from an RDF triple
"""
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values
def _get_rdf_identified(self, graph, identity):
c = {}
c['identity'] = identity.toPython() if type(identity) is not str else identity
c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
# Get annotations (non top level)
c['annotations'] = []
for triple in graph.triples((identity, None, None)):
namespace, obj = split_uri(triple[1])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
if isinstance(triple[2], URIRef):
value = AnnotationValue(uri=triple[2].toPython())
elif isinstance(triple[2], Literal):
value = AnnotationValue(literal=triple[2].toPython())
else:
value = None
c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
return c
def _read_sequences(self, graph):
"""
Read graph and add sequences to document
"""
for e in self._get_elements(graph, SBOL.Sequence):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
seq = Sequence(**c)
self._sequences[identity.toPython()] = seq
self._collection_store[identity.toPython()] = seq
def _extend_component_definitions(self, graph):
"""
Read graph and update component definitions with related elements
"""
for def_uri, comp_def in self._components.items():
# Store created components indexed for later lookup
component_index = {}
identity = URIRef(def_uri)
# Get components
for comp in graph.triples((identity, SBOL.component, None)):
comp_identity = comp[2]
ci = self._get_rdf_identified(graph, comp_identity)
ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
ci['definition'] = self._components[component_comp_def]
c = Component(**ci)
component_index[ci['identity']] = c
self._components[def_uri].components = list(component_index.values())
# Get sequence annotations
if (identity, SBOL.sequenceAnnotation, None) in graph:
find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
else:
find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
sequence_annotations = []
for seq_annot in graph.triples(find_annotation_using):
seq_identity = seq_annot[2]
sa = self._get_rdf_identified(graph, seq_identity)
component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
sa['component'] = component_index[component_to_use]
sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
locations = []
for loc in graph.triples((seq_identity, SBOL.location, None)):
loc_identity = loc[2]
location = self._get_rdf_identified(graph, loc_identity)
location['orientation'] = self._get_triplet_value(graph, loc_identity,
SBOL.orientation)
location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
if location_type == SBOL.Range:
location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
locations.append(Range(**location))
elif location_type == SBOL.Cut:
location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
locations.append(Cut(**location))
else:
locations.append(GenericLocation(**location))
sa_obj = SequenceAnnotation(locations=locations, **sa)
sequence_annotations.append(sa_obj)
self._components[def_uri].sequence_annotations = sequence_annotations
# Get sequence constraints
if (identity, SBOL.sequenceConstraint, None) in graph:
find_constraint_using = (identity, SBOL.sequenceConstraint, None)
else:
find_constraint_using = (identity, SBOL.SequenceConstraint, None)
sequence_constraints = []
for seq_constraint in graph.triples(find_constraint_using):
seq_identity = seq_constraint[2]
sc = self._get_rdf_identified(graph, seq_identity)
sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
sc['subject'] = component_index[subject_id]
object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
# Object is a reserved word so call it obj to prevent clashes
sc['obj'] = component_index[object_id]
sc_obj = SequenceConstraint(**sc)
sequence_constraints.append(sc_obj)
self._components[def_uri].sequence_constraints = sequence_constraints
def _read_models(self, graph):
    """Read graph and add models to the document.

    Every SBOL Model node found in *graph* is turned into a Model object
    and indexed by its identity in both the model store and the shared
    collection store.
    """
    for triple in self._get_elements(graph, SBOL.Model):
        model_id = triple[0]
        kwargs = self._get_rdf_identified(graph, model_id)
        kwargs['source'] = self._get_triplet_value(graph, model_id, SBOL.source)
        kwargs['language'] = self._get_triplet_value(graph, model_id, SBOL.language)
        kwargs['framework'] = self._get_triplet_value(graph, model_id, SBOL.framework)
        model = Model(**kwargs)
        key = model_id.toPython()
        self._models[key] = model
        self._collection_store[key] = model
def _read_module_definitions(self, graph):
    """
    Read graph and add module defintions to document

    For every SBOL ModuleDefinition node, collects its functional
    components and interactions (with participations), then stores the
    resulting ModuleDefinition in self._modules and self._collection_store
    keyed by identity.
    """
    for e in self._get_elements(graph, SBOL.ModuleDefinition):
        identity = e[0]
        m = self._get_rdf_identified(graph, identity)
        m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
        # Functional components, keyed by identity so that participations
        # below can refer back to them.
        functional_components = {}
        for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
            func_identity = func_comp[2]
            fc = self._get_rdf_identified(graph, func_identity)
            definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
            # Component definitions must already have been read into self._components.
            fc['definition'] = self._components[definition]
            fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
            fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
            functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
            # Also indexed globally so _extend_module_definitions can resolve
            # maps-to links later.
            self._functional_component_store[func_identity.toPython()] = \
                functional_components[func_identity.toPython()]
        interactions = []
        for inter in graph.triples((identity, SBOL.interaction, None)):
            inter_identity = inter[2]
            it = self._get_rdf_identified(graph, inter_identity)
            it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
            participations = []
            for p in graph.triples((inter_identity, SBOL.participation, None)):
                pc = self._get_rdf_identified(graph, p[2])
                roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
                # Need to use one of the functional component created above
                participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
                participant = functional_components[participant_id]
                participations.append(Participation(roles=roles, participant=participant, **pc))
            interactions.append(Interaction(participations=participations, **it))
        obj = ModuleDefinition(functional_components=functional_components.values(),
                               interactions=interactions,
                               **m)
        self._modules[identity.toPython()] = obj
        self._collection_store[identity.toPython()] = obj
def _extend_module_definitions(self, graph):
    """
    Using collected module definitions extend linkages

    Second pass over ModuleDefinitions: resolves sub-module references and
    their maps-to links, which can only be done once every ModuleDefinition
    and FunctionalComponent has been read.
    """
    for mod_id in self._modules:
        # NOTE(review): this resolves SBOL.module on the definition and then
        # iterates SBOL.module triples of that value again; confirm this
        # matches the serialized structure for nested modules.
        mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
        modules = []
        for mod in graph.triples((mod_identity, SBOL.module, None)):
            md = self._get_rdf_identified(graph, mod[2])
            definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
            md['definition'] = self._modules[definition_id]
            maps_to = []
            for m in graph.triples((mod[2], SBOL.mapsTo, None)):
                mt = self._get_rdf_identified(graph, m[2])
                mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
                local_id = self._get_triplet_value(graph, m[2], SBOL.local)
                remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
                # Resolve against components indexed by _read_module_definitions.
                mt['local'] = self._functional_component_store[local_id]
                mt['remote'] = self._functional_component_store[remote_id]
                maps_to.append(MapsTo(**mt))
            modules.append(Module(maps_to=maps_to, **md))
        self._modules[mod_id].modules = modules
def _read_annotations(self, graph):
    """
    Find any non-defined elements at TopLevel and create annotations

    Any rdf:type whose "prefix:local" name is not listed in VALID_ENTITIES
    is kept as a GenericTopLevel annotation object, indexed by identity.
    """
    # Map namespace URI -> prefix for building qualified names.
    flipped_namespaces = {v: k for k, v in self._namespaces.items()}
    for triple in graph.triples((None, RDF.type, None)):
        namespace, obj = split_uri(triple[2])
        prefix = flipped_namespaces[namespace]
        as_string = '{}:{}'.format(prefix, obj)
        if as_string not in VALID_ENTITIES:
            identity = triple[0]
            gt = self._get_rdf_identified(graph, identity)
            q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
            gt['rdf_type'] = q_name
            gt_obj = GenericTopLevel(**gt)
            self._annotations[identity.toPython()] = gt_obj
            self._collection_store[identity.toPython()] = gt_obj
def _read_collections(self, graph):
    """Read graph and add collections to the document.

    Members are looked up in the shared collection store, so this must run
    after every other kind of top-level object has been read.
    """
    for triple in self._get_elements(graph, SBOL.Collection):
        coll_id = triple[0]
        fields = self._get_rdf_identified(graph, coll_id)
        members = [self._collection_store[m[2].toPython()]
                   for m in graph.triples((coll_id, SBOL.member, None))]
        self._collections[coll_id.toPython()] = Collection(members=members, **fields)
def read(self, f):
    """
    Read in an SBOL file, replacing current document contents

    Parameters:
        f: file path or file-like object accepted by rdflib Graph.parse.
    """
    self.clear_document()
    g = Graph()
    g.parse(f, format='xml')
    for n in g.namespaces():
        ns = n[1].toPython()
        # Normalise namespace URIs so joined names stay well formed.
        if not ns.endswith(('#', '/', ':')):
            ns = ns + '/'
        self._namespaces[n[0]] = ns
        # Extend the existing namespaces available
        XML_NS[n[0]] = ns
    # Order matters: definitions are read first, then the passes that
    # link them together.
    self._read_sequences(g)
    self._read_component_definitions(g)
    self._extend_component_definitions(g)
    self._read_models(g)
    self._read_module_definitions(g)
    self._extend_module_definitions(g)
    self._read_annotations(g)
    # Last as this needs all other top level objects created
    self._read_collections(g)
def append(self, f):
    """
    Read an SBOL file and append contents to current document

    NOTE: not implemented yet; currently a no-op stub.
    """
    pass
def _add_to_root(self, root_node, elements):
    """Serialize each element to RDF/XML and append it to *root_node*."""
    for entry in elements:
        root_node.append(entry._as_rdf_xml(self.ns))
def write(self, f):
    """
    Write an SBOL file from current document contents

    Serialises every stored top-level object (sorted by identity for a
    deterministic output) under a single rdf:RDF root and writes the
    pretty-printed XML to *f*.

    Parameters:
        f: binary file-like object (bytes are written).
    """
    rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
    # TODO: TopLevel Annotations
    sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, sequence_values)
    component_values = sorted(self._components.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, component_values)
    model_values = sorted(self._models.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, model_values)
    module_values = sorted(self._modules.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, module_values)
    collection_values = sorted(self._collections.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, collection_values)
    annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, annotation_values)
    f.write(ET.tostring(rdf,
                        pretty_print=True,
                        xml_declaration=True,
                        encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document._extend_component_definitions | python | def _extend_component_definitions(self, graph):
for def_uri, comp_def in self._components.items():
# Store created components indexed for later lookup
component_index = {}
identity = URIRef(def_uri)
# Get components
for comp in graph.triples((identity, SBOL.component, None)):
comp_identity = comp[2]
ci = self._get_rdf_identified(graph, comp_identity)
ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
ci['definition'] = self._components[component_comp_def]
c = Component(**ci)
component_index[ci['identity']] = c
self._components[def_uri].components = list(component_index.values())
# Get sequence annotations
if (identity, SBOL.sequenceAnnotation, None) in graph:
find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
else:
find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
sequence_annotations = []
for seq_annot in graph.triples(find_annotation_using):
seq_identity = seq_annot[2]
sa = self._get_rdf_identified(graph, seq_identity)
component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
sa['component'] = component_index[component_to_use]
sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
locations = []
for loc in graph.triples((seq_identity, SBOL.location, None)):
loc_identity = loc[2]
location = self._get_rdf_identified(graph, loc_identity)
location['orientation'] = self._get_triplet_value(graph, loc_identity,
SBOL.orientation)
location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
if location_type == SBOL.Range:
location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
locations.append(Range(**location))
elif location_type == SBOL.Cut:
location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
locations.append(Cut(**location))
else:
locations.append(GenericLocation(**location))
sa_obj = SequenceAnnotation(locations=locations, **sa)
sequence_annotations.append(sa_obj)
self._components[def_uri].sequence_annotations = sequence_annotations
# Get sequence constraints
if (identity, SBOL.sequenceConstraint, None) in graph:
find_constraint_using = (identity, SBOL.sequenceConstraint, None)
else:
find_constraint_using = (identity, SBOL.SequenceConstraint, None)
sequence_constraints = []
for seq_constraint in graph.triples(find_constraint_using):
seq_identity = seq_constraint[2]
sc = self._get_rdf_identified(graph, seq_identity)
sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
sc['subject'] = component_index[subject_id]
object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
# Object is a reserved word so call it obj to prevent clashes
sc['obj'] = component_index[object_id]
sc_obj = SequenceConstraint(**sc)
sequence_constraints.append(sc_obj)
self._components[def_uri].sequence_constraints = sequence_constraints | Read graph and update component definitions with related elements | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L333-L405 | null | class Document(object):
"""
Provides a base for creating SBOL documents
"""
def __init__(self,
             namespace,
             validate=True):
    """Create an SBOL document rooted at *namespace*.

    Parameters:
        namespace: base URI for the document; must be a valid URL.
        validate: flag stored on the instance for later validation use.

    Raises:
        Exception: if *namespace* is not a valid URL.
    """
    # Don't access directly: use function getter/setters
    self._components = {}
    self._sequences = {}
    self._namespaces = {}
    self._models = {}
    self._modules = {}
    self._collections = {}
    self._annotations = {}
    # Used for looking up functional components when reading
    # in data from a file
    self._functional_component_store = {}
    self._collection_store = {}
    if validators.url(namespace):
        self.document_namespace = namespace
    else:
        raise Exception('Invalid namespace URI')
    self.validate = validate
    # Create a document namespace for use in RDF serialization
    self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
"""
Add a namespace to the document
"""
self._namespaces[prefix] = namespace
def get_namespaces(self):
    """Return all namespaces known to the document (inc. defaults).

    Merges the global default namespaces (XML_NS) with the namespaces
    registered on this document; document-local prefixes win on clashes.

    Returns:
        dict: mapping of prefix -> namespace URI.
    """
    # BUG FIX: dicts do not support `+`; the original
    # `XML_NS + self._namespaces` raised TypeError whenever called.
    merged = dict(XML_NS)
    merged.update(self._namespaces)
    return merged
def _to_uri_from_namespace(self, value):
"""
Take a value and make a URL using the document namespace
"""
return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
"""
Take an existing URI and append more data to it
"""
return urljoin(uri, value)
def add_component_definition(self, definition):
"""
Add a ComponentDefinition to the document
"""
# definition.identity = self._to_uri_from_namespace(definition.identity)
if definition.identity not in self._components.keys():
self._components[definition.identity] = definition
else:
raise ValueError("{} has already been defined".format(definition.identity))
def remove_component_definition(self, identity):
"""
Remove a ComponentDefinition from the document
"""
try:
self._components.pop(identity)
except KeyError:
pass
def get_component_definition(self, uri):
"""
Get a ComponentDefintion from the document
"""
try:
definition = self._components[uri]
except KeyError:
return None
return definition
def list_components(self):
"""
List of all ComponentDefinitions in the document
"""
return self._components.values()
def assemble_component(self, into_component, using_components):
    """
    Assemble a list of already defined components into a structural hierarchy.

    Each entry of *using_components* becomes a public Component of
    *into_component*; their sequences (first sequence only, if present) are
    concatenated into a single new Sequence with per-component Range
    annotations.

    Parameters:
        into_component: ComponentDefinition that receives the children.
        using_components: non-empty list of ComponentDefinitions already
            added to this document.

    Raises:
        Exception: if *using_components* is not a non-empty list, or if any
            entry was not previously added to the document.
    """
    if not isinstance(using_components, list) or len(using_components) == 0:
        raise Exception('Must supply list of ComponentDefinitions')
    components = []
    sequence_annotations = []
    seq_elements = ''
    for k, c in enumerate(using_components):
        try:
            self._components[c.identity]
        except KeyError:
            raise Exception('Must already have defined ComponentDefinition in document')
        else:
            identity = into_component.identity + '/' + c.identity
            # All components are initially public, this can be changed later
            component = Component(identity,
                                  c,
                                  'public',
                                  display_id=c.identity)
            components.append(component)
            # If there is a sequence on the ComponentDefinition use the first element
            if len(c.sequences) > 0:
                # Add the sequence to the document
                self._add_sequence(c.sequences[0])
                # Get start/end points of sequence
                start = len(seq_elements) + 1  # The sequence is usually 1 indexed
                end = start + len(c.sequences[0].elements)
                # Add to the component sequence element
                seq_elements += c.sequences[0].elements
                # Create a Range object to hold seq range
                range_identity = identity + '_sequence_annotation/range'
                seq_range = Range(range_identity, start, end, display_id='range')
                # Create a SequenceAnnotation object to hold the range
                annot_identity = identity + '_sequence_annotation'
                seq_annot = SequenceAnnotation(annot_identity,
                                               component=component,
                                               locations=[seq_range],
                                               display_id=c.identity + '_sequence_annotation')
                sequence_annotations.append(seq_annot)
    if seq_elements != '':
        # Encoding is taken from the first child; assumes all children share
        # the same encoding -- TODO confirm.
        seq_encoding = using_components[0].sequences[0].encoding
        seq_identity = '{}_sequence'.format(into_component.identity)
        seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
        self._add_sequence(seq)
        into_component.sequences.append(seq)
    into_component.components = components
    into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
"""
Add a Sequence to the document
"""
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity))
def add_model(self, model):
"""
Add a model to the document
"""
if model.identity not in self._models.keys():
self._models[model.identity] = model
else:
raise ValueError("{} has already been defined".format(model.identity))
def remove_model(self, identity):
"""
Remove a Model from the document
"""
try:
self._models.pop(identity)
except KeyError:
pass
def get_model(self, uri):
"""
Get a Model for the document
"""
pass
def add_module_definition(self, module_definition):
"""
Add a ModuleDefinition to the document
"""
if module_definition.identity not in self._module_definitions.keys():
self._module_definitions[module_definition.identity] = module_definition
else:
raise ValueError("{} has already been defined".format(module_definition.identity))
def remove_module_definition(self, identity):
"""
Remove a ModuleDefinition from the document
"""
try:
self._module_definitions.pop(identity)
except KeyError:
pass
def get_module_definition(self, uri):
"""
Get a ModuleDefinition from the document
"""
pass
def find(self, uri):
"""
Recursivly search document for URI
"""
pass
def get_components(self, uri):
"""
Get components from a component definition in order
"""
try:
component_definition = self._components[uri]
except KeyError:
return False
sorted_sequences = sorted(component_definition.sequence_annotations,
key=attrgetter('first_location'))
return [c.component for c in sorted_sequences]
def clear_document(self):
"""
Clears ALL items from document, reseting it to clean
"""
self._components.clear()
self._sequences.clear()
self._namespaces.clear()
self._models.clear()
self._modules.clear()
self._collections.clear()
self._annotations.clear()
self._functional_component_store.clear()
self._collection_store.clear()
def _get_elements(self, graph, element_type):
return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
"""
Get a value from an RDF triple
"""
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values
def _get_rdf_identified(self, graph, identity):
    """Collect the common "Identified" fields for *identity* from *graph*.

    Returns a dict with identity/display_id/was_derived_from/version/
    description/name plus any non-standard predicates wrapped as Annotation
    objects, suitable for passing as keyword arguments to model classes.
    """
    c = {}
    c['identity'] = identity.toPython() if type(identity) is not str else identity
    c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
    c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
    c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
    c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
    c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
    # Map namespace URI -> prefix for building qualified names.
    flipped_namespaces = {v: k for k, v in self._namespaces.items()}
    # Get annotations (non top level)
    c['annotations'] = []
    for triple in graph.triples((identity, None, None)):
        namespace, obj = split_uri(triple[1])
        prefix = flipped_namespaces[namespace]
        as_string = '{}:{}'.format(prefix, obj)
        if as_string not in VALID_ENTITIES:
            q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
            # URI-valued and literal-valued annotations are wrapped
            # differently; anything else is kept with a None value.
            if isinstance(triple[2], URIRef):
                value = AnnotationValue(uri=triple[2].toPython())
            elif isinstance(triple[2], Literal):
                value = AnnotationValue(literal=triple[2].toPython())
            else:
                value = None
            c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
    return c
def _read_sequences(self, graph):
"""
Read graph and add sequences to document
"""
for e in self._get_elements(graph, SBOL.Sequence):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
seq = Sequence(**c)
self._sequences[identity.toPython()] = seq
self._collection_store[identity.toPython()] = seq
def _read_component_definitions(self, graph):
"""
Read graph and add component defintions to document
"""
for e in self._get_elements(graph, SBOL.ComponentDefinition):
identity = e[0]
# Store component values in dict
c = self._get_rdf_identified(graph, identity)
c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
obj = ComponentDefinition(**c)
self._components[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _read_models(self, graph):
"""
Read graph and add models to document
"""
for e in self._get_elements(graph, SBOL.Model):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
obj = Model(**m)
self._models[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _read_module_definitions(self, graph):
"""
Read graph and add module defintions to document
"""
for e in self._get_elements(graph, SBOL.ModuleDefinition):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
functional_components = {}
for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
func_identity = func_comp[2]
fc = self._get_rdf_identified(graph, func_identity)
definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
fc['definition'] = self._components[definition]
fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
self._functional_component_store[func_identity.toPython()] = \
functional_components[func_identity.toPython()]
interactions = []
for inter in graph.triples((identity, SBOL.interaction, None)):
inter_identity = inter[2]
it = self._get_rdf_identified(graph, inter_identity)
it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
participations = []
for p in graph.triples((inter_identity, SBOL.participation, None)):
pc = self._get_rdf_identified(graph, p[2])
roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
# Need to use one of the functional component created above
participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
participant = functional_components[participant_id]
participations.append(Participation(roles=roles, participant=participant, **pc))
interactions.append(Interaction(participations=participations, **it))
obj = ModuleDefinition(functional_components=functional_components.values(),
interactions=interactions,
**m)
self._modules[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_module_definitions(self, graph):
"""
Using collected module definitions extend linkages
"""
for mod_id in self._modules:
mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
modules = []
for mod in graph.triples((mod_identity, SBOL.module, None)):
md = self._get_rdf_identified(graph, mod[2])
definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
md['definition'] = self._modules[definition_id]
maps_to = []
for m in graph.triples((mod[2], SBOL.mapsTo, None)):
mt = self._get_rdf_identified(graph, m[2])
mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
local_id = self._get_triplet_value(graph, m[2], SBOL.local)
remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
mt['local'] = self._functional_component_store[local_id]
mt['remote'] = self._functional_component_store[remote_id]
maps_to.append(MapsTo(**mt))
modules.append(Module(maps_to=maps_to, **md))
self._modules[mod_id].modules = modules
def _read_annotations(self, graph):
"""
Find any non-defined elements at TopLevel and create annotations
"""
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
for triple in graph.triples((None, RDF.type, None)):
namespace, obj = split_uri(triple[2])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
identity = triple[0]
gt = self._get_rdf_identified(graph, identity)
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
gt['rdf_type'] = q_name
gt_obj = GenericTopLevel(**gt)
self._annotations[identity.toPython()] = gt_obj
self._collection_store[identity.toPython()] = gt_obj
def _read_collections(self, graph):
"""
Read graph and add collections to document
"""
for e in self._get_elements(graph, SBOL.Collection):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
members = []
# Need to handle other non-standard TopLevel objects first
for m in graph.triples((identity, SBOL.member, None)):
members.append(self._collection_store[m[2].toPython()])
obj = Collection(members=members, **c)
self._collections[identity.toPython()] = obj
def read(self, f):
"""
Read in an SBOL file, replacing current document contents
"""
self.clear_document()
g = Graph()
g.parse(f, format='xml')
for n in g.namespaces():
ns = n[1].toPython()
if not ns.endswith(('#', '/', ':')):
ns = ns + '/'
self._namespaces[n[0]] = ns
# Extend the existing namespaces available
XML_NS[n[0]] = ns
self._read_sequences(g)
self._read_component_definitions(g)
self._extend_component_definitions(g)
self._read_models(g)
self._read_module_definitions(g)
self._extend_module_definitions(g)
self._read_annotations(g)
# Last as this needs all other top level objects created
self._read_collections(g)
def append(self, f):
"""
Read an SBOL file and append contents to current document
"""
pass
def _add_to_root(self, root_node, elements):
for item in elements:
elem = item._as_rdf_xml(self.ns)
root_node.append(elem)
def write(self, f):
"""
Write an SBOL file from current document contents
"""
rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
# TODO: TopLevel Annotations
sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity)
self._add_to_root(rdf, sequence_values)
component_values = sorted(self._components.values(), key=lambda x: x.identity)
self._add_to_root(rdf, component_values)
model_values = sorted(self._models.values(), key=lambda x: x.identity)
self._add_to_root(rdf, model_values)
module_values = sorted(self._modules.values(), key=lambda x: x.identity)
self._add_to_root(rdf, module_values)
collection_values = sorted(self._collections.values(), key=lambda x: x.identity)
self._add_to_root(rdf, collection_values)
annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity)
self._add_to_root(rdf, annotation_values)
f.write(ET.tostring(rdf,
pretty_print=True,
xml_declaration=True,
encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document._read_models | python | def _read_models(self, graph):
for e in self._get_elements(graph, SBOL.Model):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
obj = Model(**m)
self._models[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj | Read graph and add models to document | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L407-L419 | null | class Document(object):
"""
Provides a base for creating SBOL documents
"""
def __init__(self,
namespace,
validate=True):
# Don't access directly: use function getter/setters
self._components = {}
self._sequences = {}
self._namespaces = {}
self._models = {}
self._modules = {}
self._collections = {}
self._annotations = {}
# Used for looking up functional components when reading
# in data from a file
self._functional_component_store = {}
self._collection_store = {}
if validators.url(namespace):
self.document_namespace = namespace
else:
raise Exception('Invalid namespace URI')
self.validate = validate
# Create a document namesspace for use in RDF serialization
self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
"""
Add a namespace to the document
"""
self._namespaces[prefix] = namespace
def get_namespaces(self):
    """Return all namespaces known to the document (inc. defaults).

    Merges the global default namespaces (XML_NS) with the namespaces
    registered on this document; document-local prefixes win on clashes.

    Returns:
        dict: mapping of prefix -> namespace URI.
    """
    # BUG FIX: dicts do not support `+`; the original
    # `XML_NS + self._namespaces` raised TypeError whenever called.
    merged = dict(XML_NS)
    merged.update(self._namespaces)
    return merged
def _to_uri_from_namespace(self, value):
"""
Take a value and make a URL using the document namespace
"""
return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
"""
Take an existing URI and append more data to it
"""
return urljoin(uri, value)
def add_component_definition(self, definition):
"""
Add a ComponentDefinition to the document
"""
# definition.identity = self._to_uri_from_namespace(definition.identity)
if definition.identity not in self._components.keys():
self._components[definition.identity] = definition
else:
raise ValueError("{} has already been defined".format(definition.identity))
def remove_component_definition(self, identity):
"""
Remove a ComponentDefinition from the document
"""
try:
self._components.pop(identity)
except KeyError:
pass
def get_component_definition(self, uri):
"""
Get a ComponentDefintion from the document
"""
try:
definition = self._components[uri]
except KeyError:
return None
return definition
def list_components(self):
"""
List of all ComponentDefinitions in the document
"""
return self._components.values()
def assemble_component(self, into_component, using_components):
"""
Assemble a list of already defined components into a structual hirearchy
"""
if not isinstance(using_components, list) or len(using_components) == 0:
raise Exception('Must supply list of ComponentDefinitions')
components = []
sequence_annotations = []
seq_elements = ''
for k, c in enumerate(using_components):
try:
self._components[c.identity]
except KeyError:
raise Exception('Must already have defined ComponentDefinition in document')
else:
identity = into_component.identity + '/' + c.identity
# All components are initially public, this can be changed later
component = Component(identity,
c,
'public',
display_id=c.identity)
components.append(component)
# If there is a sequence on the ComponentDefinition use the first element
if len(c.sequences) > 0:
# Add the sequence to the document
self._add_sequence(c.sequences[0])
# Get start/end points of sequence
start = len(seq_elements) + 1 # The sequence is usually 1 indexed
end = start + len(c.sequences[0].elements)
# Add to the component sequence element
seq_elements += c.sequences[0].elements
# Create a Range object to hold seq range
range_identity = identity + '_sequence_annotation/range'
seq_range = Range(range_identity, start, end, display_id='range')
# Create a SequenceAnnotation object to hold the range
annot_identity = identity + '_sequence_annotation'
seq_annot = SequenceAnnotation(annot_identity,
component=component,
locations=[seq_range],
display_id=c.identity + '_sequence_annotation')
sequence_annotations.append(seq_annot)
if seq_elements != '':
seq_encoding = using_components[0].sequences[0].encoding
seq_identity = '{}_sequence'.format(into_component.identity)
seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
self._add_sequence(seq)
into_component.sequences.append(seq)
into_component.components = components
into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
"""
Add a Sequence to the document
"""
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity))
def add_model(self, model):
"""
Add a model to the document
"""
if model.identity not in self._models.keys():
self._models[model.identity] = model
else:
raise ValueError("{} has already been defined".format(model.identity))
def remove_model(self, identity):
"""
Remove a Model from the document
"""
try:
self._models.pop(identity)
except KeyError:
pass
def get_model(self, uri):
"""
Get a Model for the document
"""
pass
def add_module_definition(self, module_definition):
"""
Add a ModuleDefinition to the document
"""
if module_definition.identity not in self._module_definitions.keys():
self._module_definitions[module_definition.identity] = module_definition
else:
raise ValueError("{} has already been defined".format(module_definition.identity))
def remove_module_definition(self, identity):
"""
Remove a ModuleDefinition from the document
"""
try:
self._module_definitions.pop(identity)
except KeyError:
pass
def get_module_definition(self, uri):
"""
Get a ModuleDefinition from the document
"""
pass
def find(self, uri):
"""
Recursivly search document for URI
"""
pass
def get_components(self, uri):
"""
Get components from a component definition in order
"""
try:
component_definition = self._components[uri]
except KeyError:
return False
sorted_sequences = sorted(component_definition.sequence_annotations,
key=attrgetter('first_location'))
return [c.component for c in sorted_sequences]
def clear_document(self):
"""
Clears ALL items from document, reseting it to clean
"""
self._components.clear()
self._sequences.clear()
self._namespaces.clear()
self._models.clear()
self._modules.clear()
self._collections.clear()
self._annotations.clear()
self._functional_component_store.clear()
self._collection_store.clear()
def _get_elements(self, graph, element_type):
return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
"""
Get a value from an RDF triple
"""
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values
def _get_rdf_identified(self, graph, identity):
    """
    Collect the common SBOL "Identified" fields for *identity*.

    Returns a dict with identity / display_id / was_derived_from /
    version / description / name keys plus any non-SBOL annotations on
    the subject, suitable for passing as **kwargs to the model classes.
    """
    c = {}
    # identity may arrive as an RDF term or a plain string.
    c['identity'] = identity.toPython() if type(identity) is not str else identity
    c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
    c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
    c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
    c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
    c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
    # Map namespace URI -> prefix so predicates can be rendered as
    # prefix:local strings for the VALID_ENTITIES membership test.
    flipped_namespaces = {v: k for k, v in self._namespaces.items()}
    # Get annotations (non top level): any predicate on this subject
    # that is not a known SBOL entity becomes a custom Annotation.
    c['annotations'] = []
    for triple in graph.triples((identity, None, None)):
        namespace, obj = split_uri(triple[1])
        prefix = flipped_namespaces[namespace]
        as_string = '{}:{}'.format(prefix, obj)
        if as_string not in VALID_ENTITIES:
            q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
            if isinstance(triple[2], URIRef):
                value = AnnotationValue(uri=triple[2].toPython())
            elif isinstance(triple[2], Literal):
                value = AnnotationValue(literal=triple[2].toPython())
            else:
                # Neither URI nor literal (e.g. a blank node): store an
                # empty annotation value.
                value = None
            c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
    return c
def _read_sequences(self, graph):
    """
    Read the graph and add Sequence objects to the document.

    Each sequence is indexed both in _sequences and in the shared
    _collection_store used later by _read_collections.
    """
    for subject, _, _ in self._get_elements(graph, SBOL.Sequence):
        fields = self._get_rdf_identified(graph, subject)
        fields['elements'] = self._get_triplet_value(graph, subject, SBOL.elements)
        fields['encoding'] = self._get_triplet_value(graph, subject, SBOL.encoding)
        sequence = Sequence(**fields)
        key = subject.toPython()
        self._sequences[key] = sequence
        self._collection_store[key] = sequence
def _read_component_definitions(self, graph):
    """
    Read the graph and add ComponentDefinition objects to the document.

    Definitions are indexed both in _components and in the shared
    _collection_store used later by _read_collections.
    """
    for subject, _, _ in self._get_elements(graph, SBOL.ComponentDefinition):
        fields = self._get_rdf_identified(graph, subject)
        fields['roles'] = self._get_triplet_value_list(graph, subject, SBOL.role)
        fields['types'] = self._get_triplet_value_list(graph, subject, SBOL.type)
        definition = ComponentDefinition(**fields)
        key = subject.toPython()
        self._components[key] = definition
        self._collection_store[key] = definition
def _extend_component_definitions(self, graph):
    """
    Read graph and update component definitions with related elements.

    Second pass over already-created ComponentDefinitions: attaches
    sub-Components, SequenceAnnotations (with Range/Cut/GenericLocation
    locations) and SequenceConstraints. Must run after
    _read_component_definitions, since component references are
    resolved through self._components.
    """
    for def_uri, comp_def in self._components.items():
        # Store created components indexed for later lookup
        component_index = {}
        identity = URIRef(def_uri)
        # Get components
        for comp in graph.triples((identity, SBOL.component, None)):
            comp_identity = comp[2]
            ci = self._get_rdf_identified(graph, comp_identity)
            # NOTE(review): predicate here is SBOL.mapTo while the
            # module reader uses SBOL.mapsTo — confirm which is right.
            ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
            ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
            component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
            ci['definition'] = self._components[component_comp_def]
            c = Component(**ci)
            component_index[ci['identity']] = c
        self._components[def_uri].components = list(component_index.values())
        # Get sequence annotations; fall back to the capitalised
        # predicate spelling when the camel-case one is absent.
        if (identity, SBOL.sequenceAnnotation, None) in graph:
            find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
        else:
            find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
        sequence_annotations = []
        for seq_annot in graph.triples(find_annotation_using):
            seq_identity = seq_annot[2]
            sa = self._get_rdf_identified(graph, seq_identity)
            component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
            sa['component'] = component_index[component_to_use]
            sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
            locations = []
            for loc in graph.triples((seq_identity, SBOL.location, None)):
                loc_identity = loc[2]
                location = self._get_rdf_identified(graph, loc_identity)
                location['orientation'] = self._get_triplet_value(graph, loc_identity,
                                                                  SBOL.orientation)
                # Dispatch on the location's rdf:type to pick the
                # concrete location class.
                location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
                if location_type == SBOL.Range:
                    location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
                    location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
                    locations.append(Range(**location))
                elif location_type == SBOL.Cut:
                    location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
                    locations.append(Cut(**location))
                else:
                    locations.append(GenericLocation(**location))
            sa_obj = SequenceAnnotation(locations=locations, **sa)
            sequence_annotations.append(sa_obj)
        self._components[def_uri].sequence_annotations = sequence_annotations
    # Get sequence constraints; same predicate-spelling fallback.
        if (identity, SBOL.sequenceConstraint, None) in graph:
            find_constraint_using = (identity, SBOL.sequenceConstraint, None)
        else:
            find_constraint_using = (identity, SBOL.SequenceConstraint, None)
        sequence_constraints = []
        for seq_constraint in graph.triples(find_constraint_using):
            seq_identity = seq_constraint[2]
            sc = self._get_rdf_identified(graph, seq_identity)
            sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
            subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
            sc['subject'] = component_index[subject_id]
            object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
            # Object is a reserved word so call it obj to prevent clashes
            sc['obj'] = component_index[object_id]
            sc_obj = SequenceConstraint(**sc)
            sequence_constraints.append(sc_obj)
        self._components[def_uri].sequence_constraints = sequence_constraints
def _read_module_definitions(self, graph):
    """
    Read graph and add ModuleDefinition objects to the document.

    For each definition this builds its FunctionalComponents (also
    registered in _functional_component_store for later MapsTo
    resolution) and its Interactions with their Participations.
    Requires _read_component_definitions to have run: functional
    component definitions are looked up in self._components.
    """
    for e in self._get_elements(graph, SBOL.ModuleDefinition):
        identity = e[0]
        m = self._get_rdf_identified(graph, identity)
        m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
        functional_components = {}
        for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
            func_identity = func_comp[2]
            fc = self._get_rdf_identified(graph, func_identity)
            definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
            fc['definition'] = self._components[definition]
            fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
            fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
            functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
            # Shared store lets _extend_module_definitions resolve
            # MapsTo local/remote references across definitions.
            self._functional_component_store[func_identity.toPython()] = \
                functional_components[func_identity.toPython()]
        interactions = []
        for inter in graph.triples((identity, SBOL.interaction, None)):
            inter_identity = inter[2]
            it = self._get_rdf_identified(graph, inter_identity)
            # NOTE(review): predicate SBOL.types here vs SBOL.type used
            # for component definitions — verify the predicate name.
            it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
            participations = []
            for p in graph.triples((inter_identity, SBOL.participation, None)):
                pc = self._get_rdf_identified(graph, p[2])
                roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
                # Need to use one of the functional component created above
                participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
                participant = functional_components[participant_id]
                participations.append(Participation(roles=roles, participant=participant, **pc))
            interactions.append(Interaction(participations=participations, **it))
        obj = ModuleDefinition(functional_components=functional_components.values(),
                               interactions=interactions,
                               **m)
        self._modules[identity.toPython()] = obj
        self._collection_store[identity.toPython()] = obj
def _extend_module_definitions(self, graph):
    """
    Using collected module definitions extend linkages.

    Second pass over ModuleDefinitions: resolves sub-Module references
    and their MapsTo local/remote FunctionalComponents, which requires
    every ModuleDefinition and FunctionalComponent to exist already.
    """
    for mod_id in self._modules:
        # NOTE(review): _get_triplet_value returns a plain Python value
        # (via toPython), so mod_identity is not an RDF term when used
        # as the subject in graph.triples below — confirm this matches
        # triples as intended.
        mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
        modules = []
        for mod in graph.triples((mod_identity, SBOL.module, None)):
            md = self._get_rdf_identified(graph, mod[2])
            definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
            md['definition'] = self._modules[definition_id]
            maps_to = []
            for m in graph.triples((mod[2], SBOL.mapsTo, None)):
                mt = self._get_rdf_identified(graph, m[2])
                mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
                local_id = self._get_triplet_value(graph, m[2], SBOL.local)
                remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
                mt['local'] = self._functional_component_store[local_id]
                mt['remote'] = self._functional_component_store[remote_id]
                maps_to.append(MapsTo(**mt))
            modules.append(Module(maps_to=maps_to, **md))
        self._modules[mod_id].modules = modules
def _read_annotations(self, graph):
    """
    Find any non-defined elements at TopLevel and create annotations.

    Any subject whose rdf:type is not a recognised entity (per
    VALID_ENTITIES) becomes a GenericTopLevel so custom data survives a
    round trip.
    """
    # Map namespace URI -> prefix to build prefix:local type strings.
    flipped_namespaces = {v: k for k, v in self._namespaces.items()}
    for triple in graph.triples((None, RDF.type, None)):
        namespace, obj = split_uri(triple[2])
        prefix = flipped_namespaces[namespace]
        as_string = '{}:{}'.format(prefix, obj)
        if as_string not in VALID_ENTITIES:
            identity = triple[0]
            gt = self._get_rdf_identified(graph, identity)
            q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
            gt['rdf_type'] = q_name
            gt_obj = GenericTopLevel(**gt)
            self._annotations[identity.toPython()] = gt_obj
            self._collection_store[identity.toPython()] = gt_obj
def _read_collections(self, graph):
    """
    Read graph and add collections to document.

    Must run after every other top-level reader: members are resolved
    through self._collection_store, which those readers populate.
    """
    for e in self._get_elements(graph, SBOL.Collection):
        identity = e[0]
        c = self._get_rdf_identified(graph, identity)
        members = []
        # Need to handle other non-standard TopLevel objects first
        for m in graph.triples((identity, SBOL.member, None)):
            members.append(self._collection_store[m[2].toPython()])
        obj = Collection(members=members, **c)
        self._collections[identity.toPython()] = obj
def read(self, f):
    """
    Read in an SBOL file, replacing current document contents.

    Parses *f* as RDF/XML, records its namespaces, then builds the
    document's top-level objects in dependency order.
    """
    self.clear_document()
    g = Graph()
    g.parse(f, format='xml')
    for n in g.namespaces():
        ns = n[1].toPython()
        # Normalise namespaces that lack a trailing separator.
        if not ns.endswith(('#', '/', ':')):
            ns = ns + '/'
        self._namespaces[n[0]] = ns
        # Extend the existing namespaces available
        # NOTE(review): this mutates the module-level XML_NS mapping as
        # a side effect, so namespaces leak between Document instances.
        XML_NS[n[0]] = ns
    self._read_sequences(g)
    self._read_component_definitions(g)
    self._extend_component_definitions(g)
    self._read_models(g)
    self._read_module_definitions(g)
    self._extend_module_definitions(g)
    self._read_annotations(g)
    # Last as this needs all other top level objects created
    self._read_collections(g)
def append(self, f):
    """
    Read an SBOL file and append contents to current document.

    NOTE(review): not implemented yet; currently does nothing.
    """
    pass
def _add_to_root(self, root_node, elements):
for item in elements:
elem = item._as_rdf_xml(self.ns)
root_node.append(elem)
def write(self, f):
    """
    Write an SBOL file from current document contents.

    Serializes every stored top-level object to RDF/XML, sorted by
    identity within each category for deterministic output, and writes
    the encoded XML to *f* (tostring with encoding='utf-8' —
    presumably lxml — yields bytes, so *f* should be binary-mode;
    verify against callers).
    """
    rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
    # TODO: TopLevel Annotations
    sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, sequence_values)
    component_values = sorted(self._components.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, component_values)
    model_values = sorted(self._models.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, model_values)
    module_values = sorted(self._modules.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, module_values)
    collection_values = sorted(self._collections.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, collection_values)
    annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity)
    self._add_to_root(rdf, annotation_values)
    f.write(ET.tostring(rdf,
                        pretty_print=True,
                        xml_declaration=True,
                        encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document._read_module_definitions | python | def _read_module_definitions(self, graph):
for e in self._get_elements(graph, SBOL.ModuleDefinition):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
functional_components = {}
for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
func_identity = func_comp[2]
fc = self._get_rdf_identified(graph, func_identity)
definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
fc['definition'] = self._components[definition]
fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
self._functional_component_store[func_identity.toPython()] = \
functional_components[func_identity.toPython()]
interactions = []
for inter in graph.triples((identity, SBOL.interaction, None)):
inter_identity = inter[2]
it = self._get_rdf_identified(graph, inter_identity)
it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
participations = []
for p in graph.triples((inter_identity, SBOL.participation, None)):
pc = self._get_rdf_identified(graph, p[2])
roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
# Need to use one of the functional component created above
participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
participant = functional_components[participant_id]
participations.append(Participation(roles=roles, participant=participant, **pc))
interactions.append(Interaction(participations=participations, **it))
obj = ModuleDefinition(functional_components=functional_components.values(),
interactions=interactions,
**m)
self._modules[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj | Read graph and add module defintions to document | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L421-L458 | null | class Document(object):
"""
Provides a base for creating SBOL documents
"""
def __init__(self,
namespace,
validate=True):
# Don't access directly: use function getter/setters
self._components = {}
self._sequences = {}
self._namespaces = {}
self._models = {}
self._modules = {}
self._collections = {}
self._annotations = {}
# Used for looking up functional components when reading
# in data from a file
self._functional_component_store = {}
self._collection_store = {}
if validators.url(namespace):
self.document_namespace = namespace
else:
raise Exception('Invalid namespace URI')
self.validate = validate
# Create a document namesspace for use in RDF serialization
self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
"""
Add a namespace to the document
"""
self._namespaces[prefix] = namespace
def get_namespaces(self):
"""
Get all namespaces in the document (inc default)
"""
return XML_NS + self._namespaces
def _to_uri_from_namespace(self, value):
"""
Take a value and make a URL using the document namespace
"""
return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
"""
Take an existing URI and append more data to it
"""
return urljoin(uri, value)
def add_component_definition(self, definition):
"""
Add a ComponentDefinition to the document
"""
# definition.identity = self._to_uri_from_namespace(definition.identity)
if definition.identity not in self._components.keys():
self._components[definition.identity] = definition
else:
raise ValueError("{} has already been defined".format(definition.identity))
def remove_component_definition(self, identity):
"""
Remove a ComponentDefinition from the document
"""
try:
self._components.pop(identity)
except KeyError:
pass
def get_component_definition(self, uri):
"""
Get a ComponentDefintion from the document
"""
try:
definition = self._components[uri]
except KeyError:
return None
return definition
def list_components(self):
"""
List of all ComponentDefinitions in the document
"""
return self._components.values()
def assemble_component(self, into_component, using_components):
"""
Assemble a list of already defined components into a structual hirearchy
"""
if not isinstance(using_components, list) or len(using_components) == 0:
raise Exception('Must supply list of ComponentDefinitions')
components = []
sequence_annotations = []
seq_elements = ''
for k, c in enumerate(using_components):
try:
self._components[c.identity]
except KeyError:
raise Exception('Must already have defined ComponentDefinition in document')
else:
identity = into_component.identity + '/' + c.identity
# All components are initially public, this can be changed later
component = Component(identity,
c,
'public',
display_id=c.identity)
components.append(component)
# If there is a sequence on the ComponentDefinition use the first element
if len(c.sequences) > 0:
# Add the sequence to the document
self._add_sequence(c.sequences[0])
# Get start/end points of sequence
start = len(seq_elements) + 1 # The sequence is usually 1 indexed
end = start + len(c.sequences[0].elements)
# Add to the component sequence element
seq_elements += c.sequences[0].elements
# Create a Range object to hold seq range
range_identity = identity + '_sequence_annotation/range'
seq_range = Range(range_identity, start, end, display_id='range')
# Create a SequenceAnnotation object to hold the range
annot_identity = identity + '_sequence_annotation'
seq_annot = SequenceAnnotation(annot_identity,
component=component,
locations=[seq_range],
display_id=c.identity + '_sequence_annotation')
sequence_annotations.append(seq_annot)
if seq_elements != '':
seq_encoding = using_components[0].sequences[0].encoding
seq_identity = '{}_sequence'.format(into_component.identity)
seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
self._add_sequence(seq)
into_component.sequences.append(seq)
into_component.components = components
into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
"""
Add a Sequence to the document
"""
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity))
def add_model(self, model):
"""
Add a model to the document
"""
if model.identity not in self._models.keys():
self._models[model.identity] = model
else:
raise ValueError("{} has already been defined".format(model.identity))
def remove_model(self, identity):
"""
Remove a Model from the document
"""
try:
self._models.pop(identity)
except KeyError:
pass
def get_model(self, uri):
"""
Get a Model for the document
"""
pass
def add_module_definition(self, module_definition):
"""
Add a ModuleDefinition to the document
"""
if module_definition.identity not in self._module_definitions.keys():
self._module_definitions[module_definition.identity] = module_definition
else:
raise ValueError("{} has already been defined".format(module_definition.identity))
def remove_module_definition(self, identity):
"""
Remove a ModuleDefinition from the document
"""
try:
self._module_definitions.pop(identity)
except KeyError:
pass
def get_module_definition(self, uri):
"""
Get a ModuleDefinition from the document
"""
pass
def find(self, uri):
"""
Recursivly search document for URI
"""
pass
def get_components(self, uri):
"""
Get components from a component definition in order
"""
try:
component_definition = self._components[uri]
except KeyError:
return False
sorted_sequences = sorted(component_definition.sequence_annotations,
key=attrgetter('first_location'))
return [c.component for c in sorted_sequences]
def clear_document(self):
"""
Clears ALL items from document, reseting it to clean
"""
self._components.clear()
self._sequences.clear()
self._namespaces.clear()
self._models.clear()
self._modules.clear()
self._collections.clear()
self._annotations.clear()
self._functional_component_store.clear()
self._collection_store.clear()
def _get_elements(self, graph, element_type):
return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
"""
Get a value from an RDF triple
"""
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values
def _get_rdf_identified(self, graph, identity):
c = {}
c['identity'] = identity.toPython() if type(identity) is not str else identity
c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
# Get annotations (non top level)
c['annotations'] = []
for triple in graph.triples((identity, None, None)):
namespace, obj = split_uri(triple[1])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
if isinstance(triple[2], URIRef):
value = AnnotationValue(uri=triple[2].toPython())
elif isinstance(triple[2], Literal):
value = AnnotationValue(literal=triple[2].toPython())
else:
value = None
c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
return c
def _read_sequences(self, graph):
"""
Read graph and add sequences to document
"""
for e in self._get_elements(graph, SBOL.Sequence):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
seq = Sequence(**c)
self._sequences[identity.toPython()] = seq
self._collection_store[identity.toPython()] = seq
def _read_component_definitions(self, graph):
"""
Read graph and add component defintions to document
"""
for e in self._get_elements(graph, SBOL.ComponentDefinition):
identity = e[0]
# Store component values in dict
c = self._get_rdf_identified(graph, identity)
c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
obj = ComponentDefinition(**c)
self._components[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_component_definitions(self, graph):
"""
Read graph and update component definitions with related elements
"""
for def_uri, comp_def in self._components.items():
# Store created components indexed for later lookup
component_index = {}
identity = URIRef(def_uri)
# Get components
for comp in graph.triples((identity, SBOL.component, None)):
comp_identity = comp[2]
ci = self._get_rdf_identified(graph, comp_identity)
ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
ci['definition'] = self._components[component_comp_def]
c = Component(**ci)
component_index[ci['identity']] = c
self._components[def_uri].components = list(component_index.values())
# Get sequence annotations
if (identity, SBOL.sequenceAnnotation, None) in graph:
find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
else:
find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
sequence_annotations = []
for seq_annot in graph.triples(find_annotation_using):
seq_identity = seq_annot[2]
sa = self._get_rdf_identified(graph, seq_identity)
component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
sa['component'] = component_index[component_to_use]
sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
locations = []
for loc in graph.triples((seq_identity, SBOL.location, None)):
loc_identity = loc[2]
location = self._get_rdf_identified(graph, loc_identity)
location['orientation'] = self._get_triplet_value(graph, loc_identity,
SBOL.orientation)
location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
if location_type == SBOL.Range:
location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
locations.append(Range(**location))
elif location_type == SBOL.Cut:
location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
locations.append(Cut(**location))
else:
locations.append(GenericLocation(**location))
sa_obj = SequenceAnnotation(locations=locations, **sa)
sequence_annotations.append(sa_obj)
self._components[def_uri].sequence_annotations = sequence_annotations
# Get sequence constraints
if (identity, SBOL.sequenceConstraint, None) in graph:
find_constraint_using = (identity, SBOL.sequenceConstraint, None)
else:
find_constraint_using = (identity, SBOL.SequenceConstraint, None)
sequence_constraints = []
for seq_constraint in graph.triples(find_constraint_using):
seq_identity = seq_constraint[2]
sc = self._get_rdf_identified(graph, seq_identity)
sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
sc['subject'] = component_index[subject_id]
object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
# Object is a reserved word so call it obj to prevent clashes
sc['obj'] = component_index[object_id]
sc_obj = SequenceConstraint(**sc)
sequence_constraints.append(sc_obj)
self._components[def_uri].sequence_constraints = sequence_constraints
def _read_models(self, graph):
"""
Read graph and add models to document
"""
for e in self._get_elements(graph, SBOL.Model):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
obj = Model(**m)
self._models[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_module_definitions(self, graph):
"""
Using collected module definitions extend linkages
"""
for mod_id in self._modules:
mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
modules = []
for mod in graph.triples((mod_identity, SBOL.module, None)):
md = self._get_rdf_identified(graph, mod[2])
definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
md['definition'] = self._modules[definition_id]
maps_to = []
for m in graph.triples((mod[2], SBOL.mapsTo, None)):
mt = self._get_rdf_identified(graph, m[2])
mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
local_id = self._get_triplet_value(graph, m[2], SBOL.local)
remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
mt['local'] = self._functional_component_store[local_id]
mt['remote'] = self._functional_component_store[remote_id]
maps_to.append(MapsTo(**mt))
modules.append(Module(maps_to=maps_to, **md))
self._modules[mod_id].modules = modules
def _read_annotations(self, graph):
"""
Find any non-defined elements at TopLevel and create annotations
"""
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
for triple in graph.triples((None, RDF.type, None)):
namespace, obj = split_uri(triple[2])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
identity = triple[0]
gt = self._get_rdf_identified(graph, identity)
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
gt['rdf_type'] = q_name
gt_obj = GenericTopLevel(**gt)
self._annotations[identity.toPython()] = gt_obj
self._collection_store[identity.toPython()] = gt_obj
def _read_collections(self, graph):
"""
Read graph and add collections to document
"""
for e in self._get_elements(graph, SBOL.Collection):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
members = []
# Need to handle other non-standard TopLevel objects first
for m in graph.triples((identity, SBOL.member, None)):
members.append(self._collection_store[m[2].toPython()])
obj = Collection(members=members, **c)
self._collections[identity.toPython()] = obj
def read(self, f):
"""
Read in an SBOL file, replacing current document contents
"""
self.clear_document()
g = Graph()
g.parse(f, format='xml')
for n in g.namespaces():
ns = n[1].toPython()
if not ns.endswith(('#', '/', ':')):
ns = ns + '/'
self._namespaces[n[0]] = ns
# Extend the existing namespaces available
XML_NS[n[0]] = ns
self._read_sequences(g)
self._read_component_definitions(g)
self._extend_component_definitions(g)
self._read_models(g)
self._read_module_definitions(g)
self._extend_module_definitions(g)
self._read_annotations(g)
# Last as this needs all other top level objects created
self._read_collections(g)
def append(self, f):
"""
Read an SBOL file and append contents to current document
"""
pass
def _add_to_root(self, root_node, elements):
for item in elements:
elem = item._as_rdf_xml(self.ns)
root_node.append(elem)
def write(self, f):
"""
Write an SBOL file from current document contents
"""
rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
# TODO: TopLevel Annotations
sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity)
self._add_to_root(rdf, sequence_values)
component_values = sorted(self._components.values(), key=lambda x: x.identity)
self._add_to_root(rdf, component_values)
model_values = sorted(self._models.values(), key=lambda x: x.identity)
self._add_to_root(rdf, model_values)
module_values = sorted(self._modules.values(), key=lambda x: x.identity)
self._add_to_root(rdf, module_values)
collection_values = sorted(self._collections.values(), key=lambda x: x.identity)
self._add_to_root(rdf, collection_values)
annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity)
self._add_to_root(rdf, annotation_values)
f.write(ET.tostring(rdf,
pretty_print=True,
xml_declaration=True,
encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document._extend_module_definitions | python | def _extend_module_definitions(self, graph):
for mod_id in self._modules:
mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
modules = []
for mod in graph.triples((mod_identity, SBOL.module, None)):
md = self._get_rdf_identified(graph, mod[2])
definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
md['definition'] = self._modules[definition_id]
maps_to = []
for m in graph.triples((mod[2], SBOL.mapsTo, None)):
mt = self._get_rdf_identified(graph, m[2])
mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
local_id = self._get_triplet_value(graph, m[2], SBOL.local)
remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
mt['local'] = self._functional_component_store[local_id]
mt['remote'] = self._functional_component_store[remote_id]
maps_to.append(MapsTo(**mt))
modules.append(Module(maps_to=maps_to, **md))
self._modules[mod_id].modules = modules | Using collected module definitions extend linkages | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L460-L481 | null | class Document(object):
"""
Provides a base for creating SBOL documents
"""
def __init__(self,
namespace,
validate=True):
# Don't access directly: use function getter/setters
self._components = {}
self._sequences = {}
self._namespaces = {}
self._models = {}
self._modules = {}
self._collections = {}
self._annotations = {}
# Used for looking up functional components when reading
# in data from a file
self._functional_component_store = {}
self._collection_store = {}
if validators.url(namespace):
self.document_namespace = namespace
else:
raise Exception('Invalid namespace URI')
self.validate = validate
# Create a document namesspace for use in RDF serialization
self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
"""
Add a namespace to the document
"""
self._namespaces[prefix] = namespace
def get_namespaces(self):
    """
    Get all namespaces in the document (defaults plus any added).

    :returns: dict mapping prefix -> namespace URI; document-level
        prefixes override the defaults on collision.
    """
    # Fix: XML_NS is a mapping (read() assigns into it with XML_NS[k] = v),
    # and dict + dict raises TypeError -- merge the two mappings instead.
    merged = dict(XML_NS)
    merged.update(self._namespaces)
    return merged
def _to_uri_from_namespace(self, value):
    """
    Resolve *value* against the document namespace.

    NOTE(review): this is ``urljoin`` (RFC 3986) resolution, not plain
    concatenation -- *value* replaces the last path segment unless the
    namespace ends with '/'.
    """
    return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
    """
    Resolve *value* relative to an existing *uri*.

    NOTE(review): despite the name, ``urljoin`` replaces the last path
    segment of *uri* rather than appending to it.
    """
    return urljoin(uri, value)
def add_component_definition(self, definition):
    """
    Add a ComponentDefinition to the document.

    :raises ValueError: if a definition with the same identity exists.
    """
    key = definition.identity
    if key in self._components:
        raise ValueError("{} has already been defined".format(key))
    self._components[key] = definition
def remove_component_definition(self, identity):
    """Remove a ComponentDefinition; unknown identities are ignored."""
    self._components.pop(identity, None)
def get_component_definition(self, uri):
    """Return the ComponentDefinition stored under *uri*, or None."""
    return self._components.get(uri)
def list_components(self):
    """Return a view over every ComponentDefinition in the document."""
    definitions = self._components
    return definitions.values()
def assemble_component(self, into_component, using_components):
    """
    Assemble already-defined components into a structural hierarchy.

    Wraps each ComponentDefinition in *using_components* as a Component
    of *into_component*, concatenates their first sequences (if any) into
    one new Sequence, and records a Range-based SequenceAnnotation per
    component. Mutates *into_component* in place.

    :param into_component: ComponentDefinition receiving the children.
    :param using_components: non-empty list of ComponentDefinitions that
        must already be registered in this document.
    :raises Exception: on bad argument type or unregistered definitions.
    """
    if not isinstance(using_components, list) or len(using_components) == 0:
        raise Exception('Must supply list of ComponentDefinitions')
    components = []
    sequence_annotations = []
    seq_elements = ''
    for k, c in enumerate(using_components):
        try:
            # Membership check only -- raises KeyError if not registered
            self._components[c.identity]
        except KeyError:
            raise Exception('Must already have defined ComponentDefinition in document')
        else:
            identity = into_component.identity + '/' + c.identity
            # All components are initially public, this can be changed later
            component = Component(identity,
                                  c,
                                  'public',
                                  display_id=c.identity)
            components.append(component)
            # If there is a sequence on the ComponentDefinition use the first element
            # (any additional sequences on c are ignored)
            if len(c.sequences) > 0:
                # Add the sequence to the document
                self._add_sequence(c.sequences[0])
                # Get start/end points of sequence
                start = len(seq_elements) + 1  # The sequence is usually 1 indexed
                end = start + len(c.sequences[0].elements)
                # Add to the component sequence element
                seq_elements += c.sequences[0].elements
                # Create a Range object to hold seq range
                range_identity = identity + '_sequence_annotation/range'
                seq_range = Range(range_identity, start, end, display_id='range')
                # Create a SequenceAnnotation object to hold the range
                annot_identity = identity + '_sequence_annotation'
                seq_annot = SequenceAnnotation(annot_identity,
                                               component=component,
                                               locations=[seq_range],
                                               display_id=c.identity + '_sequence_annotation')
                sequence_annotations.append(seq_annot)
    if seq_elements != '':
        # Encoding is taken from the first child's first sequence --
        # assumes all children share one encoding (TODO confirm)
        seq_encoding = using_components[0].sequences[0].encoding
        seq_identity = '{}_sequence'.format(into_component.identity)
        seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
        self._add_sequence(seq)
        into_component.sequences.append(seq)
    into_component.components = components
    into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
    """
    Add a Sequence to the document.

    :raises ValueError: if a sequence with the same identity exists.
    """
    key = sequence.identity
    if key in self._sequences:
        raise ValueError("{} has already been defined".format(key))
    self._sequences[key] = sequence
def add_model(self, model):
    """
    Add a Model to the document.

    :raises ValueError: if a model with the same identity exists.
    """
    key = model.identity
    if key in self._models:
        raise ValueError("{} has already been defined".format(key))
    self._models[key] = model
def remove_model(self, identity):
    """Remove a Model; unknown identities are ignored."""
    self._models.pop(identity, None)
def get_model(self, uri):
    """
    Get a Model for the document.

    Not implemented; currently returns None regardless of *uri*.
    """
    pass
def add_module_definition(self, module_definition):
    """
    Add a ModuleDefinition to the document.

    :raises ValueError: if a definition with the same identity exists.
    """
    # Fix: store in self._modules. self._module_definitions is never
    # initialised anywhere (__init__, clear_document and write all use
    # self._modules), so the old attribute access raised AttributeError.
    key = module_definition.identity
    if key in self._modules:
        raise ValueError("{} has already been defined".format(key))
    self._modules[key] = module_definition
def remove_module_definition(self, identity):
    """Remove a ModuleDefinition; unknown identities are ignored."""
    # Fix: operate on self._modules. self._module_definitions is never
    # initialised, so this method always raised AttributeError (which is
    # not caught by the old ``except KeyError``).
    self._modules.pop(identity, None)
def get_module_definition(self, uri):
    """
    Get a ModuleDefinition from the document.

    Not implemented; currently returns None regardless of *uri*.
    """
    pass
def find(self, uri):
    """
    Recursively search the document for *uri*.

    Not implemented; currently returns None regardless of *uri*.
    """
    pass
def get_components(self, uri):
    """
    Return the Components of a ComponentDefinition, ordered by the first
    location of their sequence annotations.

    :returns: ordered list of components, or ``False`` (not None) when
        *uri* is not a known ComponentDefinition.
    """
    if uri not in self._components:
        return False
    definition = self._components[uri]
    ordered = sorted(definition.sequence_annotations,
                     key=attrgetter('first_location'))
    return [annotation.component for annotation in ordered]
def clear_document(self):
    """Reset the document by emptying every internal store in place."""
    stores = (self._components, self._sequences, self._namespaces,
              self._models, self._modules, self._collections,
              self._annotations, self._functional_component_store,
              self._collection_store)
    for store in stores:
        store.clear()
def _get_elements(self, graph, element_type):
    """Return an iterator of all triples in *graph* typed as *element_type*."""
    pattern = (None, RDF.type, element_type)
    return graph.triples(pattern)
def _get_triplet_value(self, graph, identity, rdf_type):
    """
    Get a single value from an RDF triple.

    :returns: the native Python value of the matching object, or None
        when *identity* has no *rdf_type* predicate.
    """
    value = graph.value(subject=identity, predicate=rdf_type)
    if value is None:
        return None
    return value.toPython()
def _get_triplet_value_list(self, graph, identity, rdf_type):
    """
    Get a list of values from RDF triples when more than one may be present.

    :returns: list of native Python values (possibly empty).
    """
    # Comprehension replaces the manual append loop (same order, same values).
    return [elem.toPython() for elem in graph.objects(identity, rdf_type)]
def _get_rdf_identified(self, graph, identity):
    """
    Collect the common 'Identified' fields of an SBOL entity into a dict.

    Gathers identity/display_id/was_derived_from/version/description/name
    plus any non-SBOL predicates as Annotation objects. Suitable for
    splatting into the constructors of the model classes.

    NOTE(review): predicates whose namespace is not in self._namespaces
    will raise KeyError on the flipped-namespace lookup -- confirm callers
    only feed graphs whose namespaces were registered by read().
    """
    c = {}
    c['identity'] = identity.toPython() if type(identity) is not str else identity
    c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
    c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
    c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
    c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
    c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
    # Invert prefix->namespace so namespaces can be mapped back to prefixes
    flipped_namespaces = {v: k for k, v in self._namespaces.items()}
    # Get annotations (non top level)
    c['annotations'] = []
    for triple in graph.triples((identity, None, None)):
        namespace, obj = split_uri(triple[1])
        prefix = flipped_namespaces[namespace]
        as_string = '{}:{}'.format(prefix, obj)
        # Anything that is not a known SBOL/Dublin Core/etc. entity is
        # preserved as a generic annotation
        if as_string not in VALID_ENTITIES:
            q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
            if isinstance(triple[2], URIRef):
                value = AnnotationValue(uri=triple[2].toPython())
            elif isinstance(triple[2], Literal):
                value = AnnotationValue(literal=triple[2].toPython())
            else:
                value = None
            c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
    return c
def _read_sequences(self, graph):
    """
    Read *graph* and add every SBOL Sequence to the document.

    Each sequence is registered both in self._sequences and in
    self._collection_store (used later by _read_collections).
    """
    for e in self._get_elements(graph, SBOL.Sequence):
        identity = e[0]
        c = self._get_rdf_identified(graph, identity)
        c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
        c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
        seq = Sequence(**c)
        self._sequences[identity.toPython()] = seq
        self._collection_store[identity.toPython()] = seq
def _read_component_definitions(self, graph):
    """
    Read *graph* and add every ComponentDefinition to the document.

    Only the top-level fields are populated here; nested components,
    sequence annotations and constraints are attached afterwards by
    _extend_component_definitions.
    """
    for e in self._get_elements(graph, SBOL.ComponentDefinition):
        identity = e[0]
        # Store component values in dict
        c = self._get_rdf_identified(graph, identity)
        c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
        c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
        obj = ComponentDefinition(**c)
        self._components[identity.toPython()] = obj
        self._collection_store[identity.toPython()] = obj
def _extend_component_definitions(self, graph):
    """
    Second pass over *graph*: attach components, sequence annotations and
    sequence constraints to the ComponentDefinitions created by
    _read_component_definitions (requires all definitions to exist so
    cross-references can be resolved).
    """
    for def_uri, comp_def in self._components.items():
        # Store created components indexed for later lookup
        component_index = {}
        identity = URIRef(def_uri)
        # Get components
        for comp in graph.triples((identity, SBOL.component, None)):
            comp_identity = comp[2]
            ci = self._get_rdf_identified(graph, comp_identity)
            # NOTE(review): predicate is SBOL.mapTo here but SBOL.mapsTo in
            # _extend_module_definitions -- confirm which spelling the
            # vocabulary actually defines.
            ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
            ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
            component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
            ci['definition'] = self._components[component_comp_def]
            c = Component(**ci)
            component_index[ci['identity']] = c
        self._components[def_uri].components = list(component_index.values())
        # Get sequence annotations -- tolerate both lowercase and
        # capitalised predicate spellings
        if (identity, SBOL.sequenceAnnotation, None) in graph:
            find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
        else:
            find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
        sequence_annotations = []
        for seq_annot in graph.triples(find_annotation_using):
            seq_identity = seq_annot[2]
            sa = self._get_rdf_identified(graph, seq_identity)
            component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
            sa['component'] = component_index[component_to_use]
            sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
            locations = []
            for loc in graph.triples((seq_identity, SBOL.location, None)):
                loc_identity = loc[2]
                location = self._get_rdf_identified(graph, loc_identity)
                location['orientation'] = self._get_triplet_value(graph, loc_identity,
                                                                  SBOL.orientation)
                # Dispatch on the concrete location subtype
                location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
                if location_type == SBOL.Range:
                    location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
                    location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
                    locations.append(Range(**location))
                elif location_type == SBOL.Cut:
                    location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
                    locations.append(Cut(**location))
                else:
                    locations.append(GenericLocation(**location))
            sa_obj = SequenceAnnotation(locations=locations, **sa)
            sequence_annotations.append(sa_obj)
        self._components[def_uri].sequence_annotations = sequence_annotations
        # Get sequence constraints -- same dual-spelling tolerance as above
        if (identity, SBOL.sequenceConstraint, None) in graph:
            find_constraint_using = (identity, SBOL.sequenceConstraint, None)
        else:
            find_constraint_using = (identity, SBOL.SequenceConstraint, None)
        sequence_constraints = []
        for seq_constraint in graph.triples(find_constraint_using):
            seq_identity = seq_constraint[2]
            sc = self._get_rdf_identified(graph, seq_identity)
            sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
            subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
            sc['subject'] = component_index[subject_id]
            object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
            # Object is a reserved word so call it obj to prevent clashes
            sc['obj'] = component_index[object_id]
            sc_obj = SequenceConstraint(**sc)
            sequence_constraints.append(sc_obj)
        self._components[def_uri].sequence_constraints = sequence_constraints
def _read_models(self, graph):
    """
    Read *graph* and add every SBOL Model to the document
    (registered in self._models and self._collection_store).
    """
    for e in self._get_elements(graph, SBOL.Model):
        identity = e[0]
        m = self._get_rdf_identified(graph, identity)
        m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
        m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
        m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
        obj = Model(**m)
        self._models[identity.toPython()] = obj
        self._collection_store[identity.toPython()] = obj
def _read_module_definitions(self, graph):
    """
    Read *graph* and add every ModuleDefinition to the document,
    including its FunctionalComponents and Interactions. Requires
    _read_component_definitions to have run (definitions are resolved
    through self._components).
    """
    for e in self._get_elements(graph, SBOL.ModuleDefinition):
        identity = e[0]
        m = self._get_rdf_identified(graph, identity)
        m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
        functional_components = {}
        for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
            func_identity = func_comp[2]
            fc = self._get_rdf_identified(graph, func_identity)
            definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
            fc['definition'] = self._components[definition]
            fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
            fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
            functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
            # Shared store so _extend_module_definitions can resolve
            # local/remote references across modules
            self._functional_component_store[func_identity.toPython()] = \
                functional_components[func_identity.toPython()]
        interactions = []
        for inter in graph.triples((identity, SBOL.interaction, None)):
            inter_identity = inter[2]
            it = self._get_rdf_identified(graph, inter_identity)
            # NOTE(review): predicate SBOL.types (plural) differs from the
            # singular SBOL.type used for component definitions -- confirm.
            it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
            participations = []
            for p in graph.triples((inter_identity, SBOL.participation, None)):
                pc = self._get_rdf_identified(graph, p[2])
                roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
                # Need to use one of the functional component created above
                participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
                participant = functional_components[participant_id]
                participations.append(Participation(roles=roles, participant=participant, **pc))
            interactions.append(Interaction(participations=participations, **it))
        obj = ModuleDefinition(functional_components=functional_components.values(),
                               interactions=interactions,
                               **m)
        self._modules[identity.toPython()] = obj
        self._collection_store[identity.toPython()] = obj
def _read_annotations(self, graph):
    """
    Find any top-level subjects in *graph* whose RDF type is not a known
    SBOL entity and preserve them as GenericTopLevel annotations
    (registered in self._annotations and self._collection_store).
    """
    # Invert prefix->namespace so namespaces can be mapped back to prefixes
    flipped_namespaces = {v: k for k, v in self._namespaces.items()}
    for triple in graph.triples((None, RDF.type, None)):
        namespace, obj = split_uri(triple[2])
        prefix = flipped_namespaces[namespace]
        as_string = '{}:{}'.format(prefix, obj)
        if as_string not in VALID_ENTITIES:
            identity = triple[0]
            gt = self._get_rdf_identified(graph, identity)
            q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
            gt['rdf_type'] = q_name
            gt_obj = GenericTopLevel(**gt)
            self._annotations[identity.toPython()] = gt_obj
            self._collection_store[identity.toPython()] = gt_obj
def _read_collections(self, graph):
    """
    Read *graph* and add every SBOL Collection to the document.

    Must run last: members are resolved through self._collection_store,
    which is filled by all the other _read_* passes.
    """
    for e in self._get_elements(graph, SBOL.Collection):
        identity = e[0]
        c = self._get_rdf_identified(graph, identity)
        members = []
        # Need to handle other non-standard TopLevel objects first
        for m in graph.triples((identity, SBOL.member, None)):
            members.append(self._collection_store[m[2].toPython()])
        obj = Collection(members=members, **c)
        self._collections[identity.toPython()] = obj
def read(self, f):
    """
    Read in an SBOL (RDF/XML) file, replacing current document contents.

    :param f: file path or file-like object accepted by rdflib's
        ``Graph.parse``.

    NOTE(review): this mutates the module-level XML_NS mapping, so
    namespaces read here leak into every other Document in the process.
    """
    self.clear_document()
    g = Graph()
    g.parse(f, format='xml')
    for n in g.namespaces():
        ns = n[1].toPython()
        # Normalise namespaces so they always end in a separator
        if not ns.endswith(('#', '/', ':')):
            ns = ns + '/'
        self._namespaces[n[0]] = ns
        # Extend the existing namespaces available
        XML_NS[n[0]] = ns
    # Pass order matters: definitions before the passes that cross-link them
    self._read_sequences(g)
    self._read_component_definitions(g)
    self._extend_component_definitions(g)
    self._read_models(g)
    self._read_module_definitions(g)
    self._extend_module_definitions(g)
    self._read_annotations(g)
    # Last as this needs all other top level objects created
    self._read_collections(g)
def append(self, f):
    """
    Read an SBOL file and append contents to the current document.

    Not implemented; currently a no-op returning None.
    """
    pass
def _add_to_root(self, root_node, elements):
    """Append the RDF/XML serialisation of each element to *root_node*."""
    for item in elements:
        elem = item._as_rdf_xml(self.ns)
        root_node.append(elem)
def write(self, f):
    """
    Serialise the current document contents to *f* as SBOL (RDF/XML).

    :param f: binary file-like object open for writing (output is
        utf-8 encoded bytes with an XML declaration).
    """
    rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
    # TODO: TopLevel Annotations
    # One loop replaces six copy-pasted stanzas; emission order and the
    # per-store sort by identity are unchanged (deterministic output).
    stores = (self._sequences, self._components, self._models,
              self._modules, self._collections, self._annotations)
    for store in stores:
        self._add_to_root(rdf, sorted(store.values(), key=attrgetter('identity')))
    f.write(ET.tostring(rdf,
                        pretty_print=True,
                        xml_declaration=True,
                        encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document._read_annotations | python | def _read_annotations(self, graph):
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
for triple in graph.triples((None, RDF.type, None)):
namespace, obj = split_uri(triple[2])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
identity = triple[0]
gt = self._get_rdf_identified(graph, identity)
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
gt['rdf_type'] = q_name
gt_obj = GenericTopLevel(**gt)
self._annotations[identity.toPython()] = gt_obj
self._collection_store[identity.toPython()] = gt_obj | Find any non-defined elements at TopLevel and create annotations | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L483-L499 | null | class Document(object):
"""
Provides a base for creating SBOL documents
"""
def __init__(self,
namespace,
validate=True):
# Don't access directly: use function getter/setters
self._components = {}
self._sequences = {}
self._namespaces = {}
self._models = {}
self._modules = {}
self._collections = {}
self._annotations = {}
# Used for looking up functional components when reading
# in data from a file
self._functional_component_store = {}
self._collection_store = {}
if validators.url(namespace):
self.document_namespace = namespace
else:
raise Exception('Invalid namespace URI')
self.validate = validate
# Create a document namesspace for use in RDF serialization
self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
"""
Add a namespace to the document
"""
self._namespaces[prefix] = namespace
def get_namespaces(self):
"""
Get all namespaces in the document (inc default)
"""
return XML_NS + self._namespaces
def _to_uri_from_namespace(self, value):
"""
Take a value and make a URL using the document namespace
"""
return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
"""
Take an existing URI and append more data to it
"""
return urljoin(uri, value)
def add_component_definition(self, definition):
"""
Add a ComponentDefinition to the document
"""
# definition.identity = self._to_uri_from_namespace(definition.identity)
if definition.identity not in self._components.keys():
self._components[definition.identity] = definition
else:
raise ValueError("{} has already been defined".format(definition.identity))
def remove_component_definition(self, identity):
"""
Remove a ComponentDefinition from the document
"""
try:
self._components.pop(identity)
except KeyError:
pass
def get_component_definition(self, uri):
"""
Get a ComponentDefintion from the document
"""
try:
definition = self._components[uri]
except KeyError:
return None
return definition
def list_components(self):
"""
List of all ComponentDefinitions in the document
"""
return self._components.values()
def assemble_component(self, into_component, using_components):
"""
Assemble a list of already defined components into a structual hirearchy
"""
if not isinstance(using_components, list) or len(using_components) == 0:
raise Exception('Must supply list of ComponentDefinitions')
components = []
sequence_annotations = []
seq_elements = ''
for k, c in enumerate(using_components):
try:
self._components[c.identity]
except KeyError:
raise Exception('Must already have defined ComponentDefinition in document')
else:
identity = into_component.identity + '/' + c.identity
# All components are initially public, this can be changed later
component = Component(identity,
c,
'public',
display_id=c.identity)
components.append(component)
# If there is a sequence on the ComponentDefinition use the first element
if len(c.sequences) > 0:
# Add the sequence to the document
self._add_sequence(c.sequences[0])
# Get start/end points of sequence
start = len(seq_elements) + 1 # The sequence is usually 1 indexed
end = start + len(c.sequences[0].elements)
# Add to the component sequence element
seq_elements += c.sequences[0].elements
# Create a Range object to hold seq range
range_identity = identity + '_sequence_annotation/range'
seq_range = Range(range_identity, start, end, display_id='range')
# Create a SequenceAnnotation object to hold the range
annot_identity = identity + '_sequence_annotation'
seq_annot = SequenceAnnotation(annot_identity,
component=component,
locations=[seq_range],
display_id=c.identity + '_sequence_annotation')
sequence_annotations.append(seq_annot)
if seq_elements != '':
seq_encoding = using_components[0].sequences[0].encoding
seq_identity = '{}_sequence'.format(into_component.identity)
seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
self._add_sequence(seq)
into_component.sequences.append(seq)
into_component.components = components
into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
"""
Add a Sequence to the document
"""
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity))
def add_model(self, model):
"""
Add a model to the document
"""
if model.identity not in self._models.keys():
self._models[model.identity] = model
else:
raise ValueError("{} has already been defined".format(model.identity))
def remove_model(self, identity):
"""
Remove a Model from the document
"""
try:
self._models.pop(identity)
except KeyError:
pass
def get_model(self, uri):
"""
Get a Model for the document
"""
pass
def add_module_definition(self, module_definition):
"""
Add a ModuleDefinition to the document
"""
if module_definition.identity not in self._module_definitions.keys():
self._module_definitions[module_definition.identity] = module_definition
else:
raise ValueError("{} has already been defined".format(module_definition.identity))
def remove_module_definition(self, identity):
"""
Remove a ModuleDefinition from the document
"""
try:
self._module_definitions.pop(identity)
except KeyError:
pass
def get_module_definition(self, uri):
"""
Get a ModuleDefinition from the document
"""
pass
def find(self, uri):
"""
Recursivly search document for URI
"""
pass
def get_components(self, uri):
"""
Get components from a component definition in order
"""
try:
component_definition = self._components[uri]
except KeyError:
return False
sorted_sequences = sorted(component_definition.sequence_annotations,
key=attrgetter('first_location'))
return [c.component for c in sorted_sequences]
def clear_document(self):
"""
Clears ALL items from document, reseting it to clean
"""
self._components.clear()
self._sequences.clear()
self._namespaces.clear()
self._models.clear()
self._modules.clear()
self._collections.clear()
self._annotations.clear()
self._functional_component_store.clear()
self._collection_store.clear()
def _get_elements(self, graph, element_type):
return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
"""
Get a value from an RDF triple
"""
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values
def _get_rdf_identified(self, graph, identity):
c = {}
c['identity'] = identity.toPython() if type(identity) is not str else identity
c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
# Get annotations (non top level)
c['annotations'] = []
for triple in graph.triples((identity, None, None)):
namespace, obj = split_uri(triple[1])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
if isinstance(triple[2], URIRef):
value = AnnotationValue(uri=triple[2].toPython())
elif isinstance(triple[2], Literal):
value = AnnotationValue(literal=triple[2].toPython())
else:
value = None
c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
return c
def _read_sequences(self, graph):
"""
Read graph and add sequences to document
"""
for e in self._get_elements(graph, SBOL.Sequence):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
seq = Sequence(**c)
self._sequences[identity.toPython()] = seq
self._collection_store[identity.toPython()] = seq
def _read_component_definitions(self, graph):
"""
Read graph and add component defintions to document
"""
for e in self._get_elements(graph, SBOL.ComponentDefinition):
identity = e[0]
# Store component values in dict
c = self._get_rdf_identified(graph, identity)
c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
obj = ComponentDefinition(**c)
self._components[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_component_definitions(self, graph):
"""
Read graph and update component definitions with related elements
"""
for def_uri, comp_def in self._components.items():
# Store created components indexed for later lookup
component_index = {}
identity = URIRef(def_uri)
# Get components
for comp in graph.triples((identity, SBOL.component, None)):
comp_identity = comp[2]
ci = self._get_rdf_identified(graph, comp_identity)
ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
ci['definition'] = self._components[component_comp_def]
c = Component(**ci)
component_index[ci['identity']] = c
self._components[def_uri].components = list(component_index.values())
# Get sequence annotations
if (identity, SBOL.sequenceAnnotation, None) in graph:
find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
else:
find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
sequence_annotations = []
for seq_annot in graph.triples(find_annotation_using):
seq_identity = seq_annot[2]
sa = self._get_rdf_identified(graph, seq_identity)
component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
sa['component'] = component_index[component_to_use]
sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
locations = []
for loc in graph.triples((seq_identity, SBOL.location, None)):
loc_identity = loc[2]
location = self._get_rdf_identified(graph, loc_identity)
location['orientation'] = self._get_triplet_value(graph, loc_identity,
SBOL.orientation)
location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
if location_type == SBOL.Range:
location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
locations.append(Range(**location))
elif location_type == SBOL.Cut:
location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
locations.append(Cut(**location))
else:
locations.append(GenericLocation(**location))
sa_obj = SequenceAnnotation(locations=locations, **sa)
sequence_annotations.append(sa_obj)
self._components[def_uri].sequence_annotations = sequence_annotations
# Get sequence constraints
if (identity, SBOL.sequenceConstraint, None) in graph:
find_constraint_using = (identity, SBOL.sequenceConstraint, None)
else:
find_constraint_using = (identity, SBOL.SequenceConstraint, None)
sequence_constraints = []
for seq_constraint in graph.triples(find_constraint_using):
seq_identity = seq_constraint[2]
sc = self._get_rdf_identified(graph, seq_identity)
sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
sc['subject'] = component_index[subject_id]
object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
# Object is a reserved word so call it obj to prevent clashes
sc['obj'] = component_index[object_id]
sc_obj = SequenceConstraint(**sc)
sequence_constraints.append(sc_obj)
self._components[def_uri].sequence_constraints = sequence_constraints
def _read_models(self, graph):
"""
Read graph and add models to document
"""
for e in self._get_elements(graph, SBOL.Model):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
obj = Model(**m)
self._models[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _read_module_definitions(self, graph):
"""
Read graph and add module defintions to document
"""
for e in self._get_elements(graph, SBOL.ModuleDefinition):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
functional_components = {}
for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
func_identity = func_comp[2]
fc = self._get_rdf_identified(graph, func_identity)
definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
fc['definition'] = self._components[definition]
fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
self._functional_component_store[func_identity.toPython()] = \
functional_components[func_identity.toPython()]
interactions = []
for inter in graph.triples((identity, SBOL.interaction, None)):
inter_identity = inter[2]
it = self._get_rdf_identified(graph, inter_identity)
it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
participations = []
for p in graph.triples((inter_identity, SBOL.participation, None)):
pc = self._get_rdf_identified(graph, p[2])
roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
# Need to use one of the functional component created above
participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
participant = functional_components[participant_id]
participations.append(Participation(roles=roles, participant=participant, **pc))
interactions.append(Interaction(participations=participations, **it))
obj = ModuleDefinition(functional_components=functional_components.values(),
interactions=interactions,
**m)
self._modules[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
    def _extend_module_definitions(self, graph):
        """
        Using the collected module definitions, link sub-Modules and their
        mapsTo references between the already-loaded ModuleDefinitions.
        """
        for mod_id in self._modules:
            # NOTE(review): _get_triplet_value returns .toPython() (a plain
            # str), yet the value is reused below as a triple *subject*.
            # rdflib matches subjects against URIRef nodes, so this inner
            # lookup may never match a triple — confirm the intent here.
            mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
            modules = []
            for mod in graph.triples((mod_identity, SBOL.module, None)):
                md = self._get_rdf_identified(graph, mod[2])
                definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
                md['definition'] = self._modules[definition_id]
                maps_to = []
                for m in graph.triples((mod[2], SBOL.mapsTo, None)):
                    mt = self._get_rdf_identified(graph, m[2])
                    mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
                    local_id = self._get_triplet_value(graph, m[2], SBOL.local)
                    remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
                    # local/remote resolve through the global functional
                    # component index built in _read_module_definitions.
                    mt['local'] = self._functional_component_store[local_id]
                    mt['remote'] = self._functional_component_store[remote_id]
                    maps_to.append(MapsTo(**mt))
                modules.append(Module(maps_to=maps_to, **md))
            self._modules[mod_id].modules = modules
def _read_collections(self, graph):
"""
Read graph and add collections to document
"""
for e in self._get_elements(graph, SBOL.Collection):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
members = []
# Need to handle other non-standard TopLevel objects first
for m in graph.triples((identity, SBOL.member, None)):
members.append(self._collection_store[m[2].toPython()])
obj = Collection(members=members, **c)
self._collections[identity.toPython()] = obj
    def read(self, f):
        """
        Read in an SBOL file, replacing current document contents.

        Parsing order matters: sequences and component definitions must exist
        before modules reference them, and collections are read last because
        they may point at any other top-level object.

        :param f: file path, file object or data accepted by rdflib's parse().
        """
        self.clear_document()
        g = Graph()
        g.parse(f, format='xml')
        for n in g.namespaces():
            ns = n[1].toPython()
            # Normalise namespaces so later prefix joins have a separator.
            if not ns.endswith(('#', '/', ':')):
                ns = ns + '/'
            self._namespaces[n[0]] = ns
            # Extend the existing namespaces available
            # NOTE(review): this mutates the module-level XML_NS mapping, so
            # namespaces leak across Document instances — confirm intended.
            XML_NS[n[0]] = ns
        self._read_sequences(g)
        self._read_component_definitions(g)
        self._extend_component_definitions(g)
        self._read_models(g)
        self._read_module_definitions(g)
        self._extend_module_definitions(g)
        self._read_annotations(g)
        # Last as this needs all other top level objects created
        self._read_collections(g)
    def append(self, f):
        """
        Read an SBOL file and append contents to current document.

        Not yet implemented; currently a no-op placeholder.
        """
        pass
def _add_to_root(self, root_node, elements):
for item in elements:
elem = item._as_rdf_xml(self.ns)
root_node.append(elem)
def write(self, f):
"""
Write an SBOL file from current document contents
"""
rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
# TODO: TopLevel Annotations
sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity)
self._add_to_root(rdf, sequence_values)
component_values = sorted(self._components.values(), key=lambda x: x.identity)
self._add_to_root(rdf, component_values)
model_values = sorted(self._models.values(), key=lambda x: x.identity)
self._add_to_root(rdf, model_values)
module_values = sorted(self._modules.values(), key=lambda x: x.identity)
self._add_to_root(rdf, module_values)
collection_values = sorted(self._collections.values(), key=lambda x: x.identity)
self._add_to_root(rdf, collection_values)
annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity)
self._add_to_root(rdf, annotation_values)
f.write(ET.tostring(rdf,
pretty_print=True,
xml_declaration=True,
encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document._read_collections | python | def _read_collections(self, graph):
for e in self._get_elements(graph, SBOL.Collection):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
members = []
# Need to handle other non-standard TopLevel objects first
for m in graph.triples((identity, SBOL.member, None)):
members.append(self._collection_store[m[2].toPython()])
obj = Collection(members=members, **c)
self._collections[identity.toPython()] = obj | Read graph and add collections to document | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L501-L513 | null | class Document(object):
"""
Provides a base for creating SBOL documents
"""
    def __init__(self,
                 namespace: str,
                 validate: bool = True):
        """
        Create an empty SBOL document rooted at *namespace*.

        :param namespace: base URI of the document; must be a valid URL.
        :param validate: validation flag (stored on the instance).
        :raises Exception: when *namespace* is not a valid URL.
        """
        # Don't access directly: use function getter/setters
        self._components = {}  # identity -> ComponentDefinition
        self._sequences = {}  # identity -> Sequence
        self._namespaces = {}  # prefix -> namespace URI
        self._models = {}  # identity -> Model
        self._modules = {}  # identity -> ModuleDefinition
        self._collections = {}  # identity -> Collection
        self._annotations = {}  # identity -> GenericTopLevel
        # Used for looking up functional components when reading
        # in data from a file
        self._functional_component_store = {}
        self._collection_store = {}  # every top-level object, for Collection members
        if validators.url(namespace):
            self.document_namespace = namespace
        else:
            raise Exception('Invalid namespace URI')
        self.validate = validate
        # Create a document namespace for use in RDF serialization
        self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
"""
Add a namespace to the document
"""
self._namespaces[prefix] = namespace
def get_namespaces(self):
"""
Get all namespaces in the document (inc default)
"""
return XML_NS + self._namespaces
    def _to_uri_from_namespace(self, value):
        """
        Resolve *value* against the document namespace and return a URL.

        Uses urljoin semantics: an absolute *value* replaces the base.
        """
        return urljoin(self.document_namespace, value)
    def _append_to_uri(self, uri, value):
        """
        Take an existing URI and append more data to it.

        Uses urljoin semantics, so *value* replaces the last path segment
        unless *uri* ends with '/'.
        """
        return urljoin(uri, value)
def add_component_definition(self, definition):
"""
Add a ComponentDefinition to the document
"""
# definition.identity = self._to_uri_from_namespace(definition.identity)
if definition.identity not in self._components.keys():
self._components[definition.identity] = definition
else:
raise ValueError("{} has already been defined".format(definition.identity))
def remove_component_definition(self, identity):
"""
Remove a ComponentDefinition from the document
"""
try:
self._components.pop(identity)
except KeyError:
pass
def get_component_definition(self, uri):
"""
Get a ComponentDefintion from the document
"""
try:
definition = self._components[uri]
except KeyError:
return None
return definition
    def list_components(self):
        """
        List of all ComponentDefinitions in the document.

        Returns the live dict view, not a copy.
        """
        return self._components.values()
    def assemble_component(self, into_component, using_components):
        """
        Assemble a list of already defined components into a structural
        hierarchy.

        Each member of *using_components* becomes a child Component of
        *into_component*; their first sequences are concatenated into one
        combined Sequence with a SequenceAnnotation/Range per part.

        :param into_component: ComponentDefinition receiving the children.
        :param using_components: non-empty list of ComponentDefinitions that
            are already registered in this document.
        :raises Exception: on an empty/non-list argument or an unregistered
            component.
        """
        if not isinstance(using_components, list) or len(using_components) == 0:
            raise Exception('Must supply list of ComponentDefinitions')
        components = []
        sequence_annotations = []
        seq_elements = ''
        # NOTE(review): loop index k is never used — candidate for removal.
        for k, c in enumerate(using_components):
            try:
                self._components[c.identity]
            except KeyError:
                raise Exception('Must already have defined ComponentDefinition in document')
            else:
                identity = into_component.identity + '/' + c.identity
                # All components are initially public, this can be changed later
                component = Component(identity,
                                      c,
                                      'public',
                                      display_id=c.identity)
                components.append(component)
                # If there is a sequence on the ComponentDefinition use the first element
                if len(c.sequences) > 0:
                    # Add the sequence to the document
                    self._add_sequence(c.sequences[0])
                    # Get start/end points of sequence
                    start = len(seq_elements) + 1  # The sequence is usually 1 indexed
                    # NOTE(review): end = start + len(elements); if the SBOL
                    # Range end is inclusive this overshoots the final base by
                    # one — confirm against the Range semantics used elsewhere.
                    end = start + len(c.sequences[0].elements)
                    # Add to the component sequence element
                    seq_elements += c.sequences[0].elements
                    # Create a Range object to hold seq range
                    range_identity = identity + '_sequence_annotation/range'
                    seq_range = Range(range_identity, start, end, display_id='range')
                    # Create a SequenceAnnotation object to hold the range
                    annot_identity = identity + '_sequence_annotation'
                    seq_annot = SequenceAnnotation(annot_identity,
                                                   component=component,
                                                   locations=[seq_range],
                                                   display_id=c.identity + '_sequence_annotation')
                    sequence_annotations.append(seq_annot)
        if seq_elements != '':
            # Encoding is taken from the first part only — assumes all parts
            # share one encoding; TODO confirm.
            seq_encoding = using_components[0].sequences[0].encoding
            seq_identity = '{}_sequence'.format(into_component.identity)
            seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
            self._add_sequence(seq)
            into_component.sequences.append(seq)
        into_component.components = components
        into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
"""
Add a Sequence to the document
"""
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity))
def add_model(self, model):
"""
Add a model to the document
"""
if model.identity not in self._models.keys():
self._models[model.identity] = model
else:
raise ValueError("{} has already been defined".format(model.identity))
def remove_model(self, identity):
"""
Remove a Model from the document
"""
try:
self._models.pop(identity)
except KeyError:
pass
    def get_model(self, uri):
        """
        Get a Model for the document.

        Not yet implemented; currently returns None unconditionally.
        """
        pass
def add_module_definition(self, module_definition):
"""
Add a ModuleDefinition to the document
"""
if module_definition.identity not in self._module_definitions.keys():
self._module_definitions[module_definition.identity] = module_definition
else:
raise ValueError("{} has already been defined".format(module_definition.identity))
def remove_module_definition(self, identity):
"""
Remove a ModuleDefinition from the document
"""
try:
self._module_definitions.pop(identity)
except KeyError:
pass
    def get_module_definition(self, uri):
        """
        Get a ModuleDefinition from the document.

        Not yet implemented; currently returns None unconditionally.
        """
        pass
    def find(self, uri):
        """
        Recursively search document for URI.

        Not yet implemented; currently returns None unconditionally.
        """
        pass
def get_components(self, uri):
"""
Get components from a component definition in order
"""
try:
component_definition = self._components[uri]
except KeyError:
return False
sorted_sequences = sorted(component_definition.sequence_annotations,
key=attrgetter('first_location'))
return [c.component for c in sorted_sequences]
def clear_document(self):
"""
Clears ALL items from document, reseting it to clean
"""
self._components.clear()
self._sequences.clear()
self._namespaces.clear()
self._models.clear()
self._modules.clear()
self._collections.clear()
self._annotations.clear()
self._functional_component_store.clear()
self._collection_store.clear()
    def _get_elements(self, graph, element_type):
        """
        Return an iterator of (subject, predicate, object) triples for every
        node in *graph* whose rdf:type is *element_type*.
        """
        return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
"""
Get a value from an RDF triple
"""
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values
    def _get_rdf_identified(self, graph, identity):
        """
        Collect the properties shared by all SBOL "identified" objects.

        :returns: dict with identity / display_id / was_derived_from /
            version / description / name plus any non-SBOL ``annotations``
            attached to the node, ready to splat into a model constructor.
        """
        c = {}
        c['identity'] = identity.toPython() if type(identity) is not str else identity
        c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
        c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
        c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
        c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
        c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
        # Map namespace URI -> prefix for qualified-name reconstruction.
        flipped_namespaces = {v: k for k, v in self._namespaces.items()}
        # Get annotations (non top level)
        c['annotations'] = []
        for triple in graph.triples((identity, None, None)):
            namespace, obj = split_uri(triple[1])
            # NOTE(review): raises KeyError when a predicate uses a namespace
            # absent from self._namespaces — confirm inputs are pre-registered.
            prefix = flipped_namespaces[namespace]
            as_string = '{}:{}'.format(prefix, obj)
            if as_string not in VALID_ENTITIES:
                q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
                if isinstance(triple[2], URIRef):
                    value = AnnotationValue(uri=triple[2].toPython())
                elif isinstance(triple[2], Literal):
                    value = AnnotationValue(literal=triple[2].toPython())
                else:
                    # Neither URI nor literal (e.g. a blank node) — dropped.
                    value = None
                c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
        return c
def _read_sequences(self, graph):
"""
Read graph and add sequences to document
"""
for e in self._get_elements(graph, SBOL.Sequence):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
seq = Sequence(**c)
self._sequences[identity.toPython()] = seq
self._collection_store[identity.toPython()] = seq
def _read_component_definitions(self, graph):
"""
Read graph and add component defintions to document
"""
for e in self._get_elements(graph, SBOL.ComponentDefinition):
identity = e[0]
# Store component values in dict
c = self._get_rdf_identified(graph, identity)
c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
obj = ComponentDefinition(**c)
self._components[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
    def _extend_component_definitions(self, graph):
        """
        Read graph and update component definitions with related elements:
        sub-Components, SequenceAnnotations and SequenceConstraints.
        """
        for def_uri, comp_def in self._components.items():
            # Store created components indexed for later lookup
            component_index = {}
            identity = URIRef(def_uri)
            # Get components
            for comp in graph.triples((identity, SBOL.component, None)):
                comp_identity = comp[2]
                ci = self._get_rdf_identified(graph, comp_identity)
                # NOTE(review): reads SBOL.mapTo, while _extend_module_definitions
                # reads SBOL.mapsTo — confirm which predicate name is correct.
                ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
                ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
                component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
                ci['definition'] = self._components[component_comp_def]
                c = Component(**ci)
                component_index[ci['identity']] = c
            self._components[def_uri].components = list(component_index.values())
            # Get sequence annotations
            # Accept either predicate capitalisation found in the wild.
            if (identity, SBOL.sequenceAnnotation, None) in graph:
                find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
            else:
                find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
            sequence_annotations = []
            for seq_annot in graph.triples(find_annotation_using):
                seq_identity = seq_annot[2]
                sa = self._get_rdf_identified(graph, seq_identity)
                component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
                sa['component'] = component_index[component_to_use]
                sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
                locations = []
                for loc in graph.triples((seq_identity, SBOL.location, None)):
                    loc_identity = loc[2]
                    location = self._get_rdf_identified(graph, loc_identity)
                    location['orientation'] = self._get_triplet_value(graph, loc_identity,
                                                                      SBOL.orientation)
                    # Dispatch on the location subtype: Range, Cut or generic.
                    location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
                    if location_type == SBOL.Range:
                        location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
                        location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
                        locations.append(Range(**location))
                    elif location_type == SBOL.Cut:
                        location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
                        locations.append(Cut(**location))
                    else:
                        locations.append(GenericLocation(**location))
                sa_obj = SequenceAnnotation(locations=locations, **sa)
                sequence_annotations.append(sa_obj)
            self._components[def_uri].sequence_annotations = sequence_annotations
            # Get sequence constraints
            if (identity, SBOL.sequenceConstraint, None) in graph:
                find_constraint_using = (identity, SBOL.sequenceConstraint, None)
            else:
                find_constraint_using = (identity, SBOL.SequenceConstraint, None)
            sequence_constraints = []
            for seq_constraint in graph.triples(find_constraint_using):
                seq_identity = seq_constraint[2]
                sc = self._get_rdf_identified(graph, seq_identity)
                sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
                subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
                sc['subject'] = component_index[subject_id]
                object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
                # Object is a reserved word so call it obj to prevent clashes
                sc['obj'] = component_index[object_id]
                sc_obj = SequenceConstraint(**sc)
                sequence_constraints.append(sc_obj)
            self._components[def_uri].sequence_constraints = sequence_constraints
def _read_models(self, graph):
"""
Read graph and add models to document
"""
for e in self._get_elements(graph, SBOL.Model):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
obj = Model(**m)
self._models[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _read_module_definitions(self, graph):
"""
Read graph and add module defintions to document
"""
for e in self._get_elements(graph, SBOL.ModuleDefinition):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
functional_components = {}
for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
func_identity = func_comp[2]
fc = self._get_rdf_identified(graph, func_identity)
definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
fc['definition'] = self._components[definition]
fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
self._functional_component_store[func_identity.toPython()] = \
functional_components[func_identity.toPython()]
interactions = []
for inter in graph.triples((identity, SBOL.interaction, None)):
inter_identity = inter[2]
it = self._get_rdf_identified(graph, inter_identity)
it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
participations = []
for p in graph.triples((inter_identity, SBOL.participation, None)):
pc = self._get_rdf_identified(graph, p[2])
roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
# Need to use one of the functional component created above
participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
participant = functional_components[participant_id]
participations.append(Participation(roles=roles, participant=participant, **pc))
interactions.append(Interaction(participations=participations, **it))
obj = ModuleDefinition(functional_components=functional_components.values(),
interactions=interactions,
**m)
self._modules[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_module_definitions(self, graph):
"""
Using collected module definitions extend linkages
"""
for mod_id in self._modules:
mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
modules = []
for mod in graph.triples((mod_identity, SBOL.module, None)):
md = self._get_rdf_identified(graph, mod[2])
definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
md['definition'] = self._modules[definition_id]
maps_to = []
for m in graph.triples((mod[2], SBOL.mapsTo, None)):
mt = self._get_rdf_identified(graph, m[2])
mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
local_id = self._get_triplet_value(graph, m[2], SBOL.local)
remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
mt['local'] = self._functional_component_store[local_id]
mt['remote'] = self._functional_component_store[remote_id]
maps_to.append(MapsTo(**mt))
modules.append(Module(maps_to=maps_to, **md))
self._modules[mod_id].modules = modules
    def _read_annotations(self, graph):
        """
        Find any non-SBOL elements at TopLevel and keep them as
        GenericTopLevel objects so they survive a read/write round trip.
        """
        # Map namespace URI -> prefix for qualified-name reconstruction.
        flipped_namespaces = {v: k for k, v in self._namespaces.items()}
        for triple in graph.triples((None, RDF.type, None)):
            namespace, obj = split_uri(triple[2])
            prefix = flipped_namespaces[namespace]
            as_string = '{}:{}'.format(prefix, obj)
            # Anything whose prefixed type is not a known SBOL entity is
            # preserved verbatim as a generic top-level object.
            if as_string not in VALID_ENTITIES:
                identity = triple[0]
                gt = self._get_rdf_identified(graph, identity)
                q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
                gt['rdf_type'] = q_name
                gt_obj = GenericTopLevel(**gt)
                self._annotations[identity.toPython()] = gt_obj
                self._collection_store[identity.toPython()] = gt_obj
def read(self, f):
"""
Read in an SBOL file, replacing current document contents
"""
self.clear_document()
g = Graph()
g.parse(f, format='xml')
for n in g.namespaces():
ns = n[1].toPython()
if not ns.endswith(('#', '/', ':')):
ns = ns + '/'
self._namespaces[n[0]] = ns
# Extend the existing namespaces available
XML_NS[n[0]] = ns
self._read_sequences(g)
self._read_component_definitions(g)
self._extend_component_definitions(g)
self._read_models(g)
self._read_module_definitions(g)
self._extend_module_definitions(g)
self._read_annotations(g)
# Last as this needs all other top level objects created
self._read_collections(g)
def append(self, f):
"""
Read an SBOL file and append contents to current document
"""
pass
def _add_to_root(self, root_node, elements):
for item in elements:
elem = item._as_rdf_xml(self.ns)
root_node.append(elem)
def write(self, f):
"""
Write an SBOL file from current document contents
"""
rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
# TODO: TopLevel Annotations
sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity)
self._add_to_root(rdf, sequence_values)
component_values = sorted(self._components.values(), key=lambda x: x.identity)
self._add_to_root(rdf, component_values)
model_values = sorted(self._models.values(), key=lambda x: x.identity)
self._add_to_root(rdf, model_values)
module_values = sorted(self._modules.values(), key=lambda x: x.identity)
self._add_to_root(rdf, module_values)
collection_values = sorted(self._collections.values(), key=lambda x: x.identity)
self._add_to_root(rdf, collection_values)
annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity)
self._add_to_root(rdf, annotation_values)
f.write(ET.tostring(rdf,
pretty_print=True,
xml_declaration=True,
encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document.read | python | def read(self, f):
self.clear_document()
g = Graph()
g.parse(f, format='xml')
for n in g.namespaces():
ns = n[1].toPython()
if not ns.endswith(('#', '/', ':')):
ns = ns + '/'
self._namespaces[n[0]] = ns
# Extend the existing namespaces available
XML_NS[n[0]] = ns
self._read_sequences(g)
self._read_component_definitions(g)
self._extend_component_definitions(g)
self._read_models(g)
self._read_module_definitions(g)
self._extend_module_definitions(g)
self._read_annotations(g)
# Last as this needs all other top level objects created
self._read_collections(g) | Read in an SBOL file, replacing current document contents | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L515-L540 | [
"def clear_document(self):\n \"\"\"\n Clears ALL items from document, reseting it to clean\n \"\"\"\n self._components.clear()\n self._sequences.clear()\n self._namespaces.clear()\n self._models.clear()\n self._modules.clear()\n self._collections.clear()\n self._annotations.clear()\n self._functional_component_store.clear()\n self._collection_store.clear()\n",
"def _read_sequences(self, graph):\n \"\"\"\n Read graph and add sequences to document\n \"\"\"\n for e in self._get_elements(graph, SBOL.Sequence):\n identity = e[0]\n c = self._get_rdf_identified(graph, identity)\n c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)\n c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)\n seq = Sequence(**c)\n self._sequences[identity.toPython()] = seq\n self._collection_store[identity.toPython()] = seq\n",
"def _read_component_definitions(self, graph):\n \"\"\"\n Read graph and add component defintions to document\n \"\"\"\n for e in self._get_elements(graph, SBOL.ComponentDefinition):\n identity = e[0]\n # Store component values in dict\n c = self._get_rdf_identified(graph, identity)\n c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)\n c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)\n obj = ComponentDefinition(**c)\n self._components[identity.toPython()] = obj\n self._collection_store[identity.toPython()] = obj\n",
"def _extend_component_definitions(self, graph):\n \"\"\"\n Read graph and update component definitions with related elements\n \"\"\"\n for def_uri, comp_def in self._components.items():\n # Store created components indexed for later lookup\n component_index = {}\n identity = URIRef(def_uri)\n\n # Get components\n for comp in graph.triples((identity, SBOL.component, None)):\n comp_identity = comp[2]\n ci = self._get_rdf_identified(graph, comp_identity)\n ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)\n ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)\n\n component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)\n ci['definition'] = self._components[component_comp_def]\n\n c = Component(**ci)\n component_index[ci['identity']] = c\n self._components[def_uri].components = list(component_index.values())\n\n # Get sequence annotations\n if (identity, SBOL.sequenceAnnotation, None) in graph:\n find_annotation_using = (identity, SBOL.sequenceAnnotation, None)\n else:\n find_annotation_using = (identity, SBOL.SequenceAnnotation, None)\n sequence_annotations = []\n for seq_annot in graph.triples(find_annotation_using):\n seq_identity = seq_annot[2]\n sa = self._get_rdf_identified(graph, seq_identity)\n component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)\n sa['component'] = component_index[component_to_use]\n sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)\n locations = []\n for loc in graph.triples((seq_identity, SBOL.location, None)):\n loc_identity = loc[2]\n location = self._get_rdf_identified(graph, loc_identity)\n location['orientation'] = self._get_triplet_value(graph, loc_identity,\n SBOL.orientation)\n location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))\n if location_type == SBOL.Range:\n location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)\n location['end'] = 
self._get_triplet_value(graph, loc_identity, SBOL.end)\n locations.append(Range(**location))\n elif location_type == SBOL.Cut:\n location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)\n locations.append(Cut(**location))\n else:\n locations.append(GenericLocation(**location))\n sa_obj = SequenceAnnotation(locations=locations, **sa)\n sequence_annotations.append(sa_obj)\n self._components[def_uri].sequence_annotations = sequence_annotations\n\n # Get sequence constraints\n if (identity, SBOL.sequenceConstraint, None) in graph:\n find_constraint_using = (identity, SBOL.sequenceConstraint, None)\n else:\n find_constraint_using = (identity, SBOL.SequenceConstraint, None)\n sequence_constraints = []\n for seq_constraint in graph.triples(find_constraint_using):\n seq_identity = seq_constraint[2]\n sc = self._get_rdf_identified(graph, seq_identity)\n sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)\n subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)\n sc['subject'] = component_index[subject_id]\n object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)\n # Object is a reserved word so call it obj to prevent clashes\n sc['obj'] = component_index[object_id]\n sc_obj = SequenceConstraint(**sc)\n sequence_constraints.append(sc_obj)\n self._components[def_uri].sequence_constraints = sequence_constraints\n",
"def _read_models(self, graph):\n \"\"\"\n Read graph and add models to document\n \"\"\"\n for e in self._get_elements(graph, SBOL.Model):\n identity = e[0]\n m = self._get_rdf_identified(graph, identity)\n m['source'] = self._get_triplet_value(graph, identity, SBOL.source)\n m['language'] = self._get_triplet_value(graph, identity, SBOL.language)\n m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)\n obj = Model(**m)\n self._models[identity.toPython()] = obj\n self._collection_store[identity.toPython()] = obj\n",
"def _read_module_definitions(self, graph):\n \"\"\"\n Read graph and add module defintions to document\n \"\"\"\n for e in self._get_elements(graph, SBOL.ModuleDefinition):\n identity = e[0]\n m = self._get_rdf_identified(graph, identity)\n m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)\n functional_components = {}\n for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):\n func_identity = func_comp[2]\n fc = self._get_rdf_identified(graph, func_identity)\n definition = self._get_triplet_value(graph, func_identity, SBOL.definition)\n fc['definition'] = self._components[definition]\n fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)\n fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)\n functional_components[func_identity.toPython()] = FunctionalComponent(**fc)\n self._functional_component_store[func_identity.toPython()] = \\\n functional_components[func_identity.toPython()]\n interactions = []\n for inter in graph.triples((identity, SBOL.interaction, None)):\n inter_identity = inter[2]\n it = self._get_rdf_identified(graph, inter_identity)\n it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)\n participations = []\n for p in graph.triples((inter_identity, SBOL.participation, None)):\n pc = self._get_rdf_identified(graph, p[2])\n roles = self._get_triplet_value_list(graph, p[2], SBOL.role)\n # Need to use one of the functional component created above\n participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)\n participant = functional_components[participant_id]\n participations.append(Participation(roles=roles, participant=participant, **pc))\n interactions.append(Interaction(participations=participations, **it))\n obj = ModuleDefinition(functional_components=functional_components.values(),\n interactions=interactions,\n **m)\n self._modules[identity.toPython()] = obj\n self._collection_store[identity.toPython()] = obj\n",
"def _extend_module_definitions(self, graph):\n \"\"\"\n Using collected module definitions extend linkages\n \"\"\"\n for mod_id in self._modules:\n mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)\n modules = []\n for mod in graph.triples((mod_identity, SBOL.module, None)):\n md = self._get_rdf_identified(graph, mod[2])\n definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)\n md['definition'] = self._modules[definition_id]\n maps_to = []\n for m in graph.triples((mod[2], SBOL.mapsTo, None)):\n mt = self._get_rdf_identified(graph, m[2])\n mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)\n local_id = self._get_triplet_value(graph, m[2], SBOL.local)\n remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)\n mt['local'] = self._functional_component_store[local_id]\n mt['remote'] = self._functional_component_store[remote_id]\n maps_to.append(MapsTo(**mt))\n modules.append(Module(maps_to=maps_to, **md))\n self._modules[mod_id].modules = modules\n",
"def _read_annotations(self, graph):\n \"\"\"\n Find any non-defined elements at TopLevel and create annotations\n \"\"\"\n flipped_namespaces = {v: k for k, v in self._namespaces.items()}\n for triple in graph.triples((None, RDF.type, None)):\n namespace, obj = split_uri(triple[2])\n prefix = flipped_namespaces[namespace]\n as_string = '{}:{}'.format(prefix, obj)\n if as_string not in VALID_ENTITIES:\n identity = triple[0]\n gt = self._get_rdf_identified(graph, identity)\n q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)\n gt['rdf_type'] = q_name\n gt_obj = GenericTopLevel(**gt)\n self._annotations[identity.toPython()] = gt_obj\n self._collection_store[identity.toPython()] = gt_obj\n",
"def _read_collections(self, graph):\n \"\"\"\n Read graph and add collections to document\n \"\"\"\n for e in self._get_elements(graph, SBOL.Collection):\n identity = e[0]\n c = self._get_rdf_identified(graph, identity)\n members = []\n # Need to handle other non-standard TopLevel objects first\n for m in graph.triples((identity, SBOL.member, None)):\n members.append(self._collection_store[m[2].toPython()])\n obj = Collection(members=members, **c)\n self._collections[identity.toPython()] = obj\n"
] | class Document(object):
"""
Provides a base for creating SBOL documents
"""
def __init__(self,
             namespace,
             validate=True):
    """
    Create an SBOL document rooted at *namespace*.

    :param namespace: base URI for the document; must be a valid URL
    :param validate: validation flag; stored on the instance
        (enforcement happens elsewhere — TODO confirm where)
    :raises Exception: if *namespace* is not a valid URL
    """
    # Don't access directly: use function getter/setters
    self._components = {}    # identity -> ComponentDefinition
    self._sequences = {}     # identity -> Sequence
    self._namespaces = {}    # prefix -> namespace URI
    self._models = {}        # identity -> Model
    self._modules = {}       # identity -> ModuleDefinition
    self._collections = {}   # identity -> Collection
    self._annotations = {}   # identity -> GenericTopLevel
    # Used for looking up functional components when reading
    # in data from a file
    self._functional_component_store = {}
    self._collection_store = {}
    if validators.url(namespace):
        self.document_namespace = namespace
    else:
        raise Exception('Invalid namespace URI')
    self.validate = validate
    # Create a document namespace for use in RDF serialization
    self.ns = Namespace(self.document_namespace)
def __str__(self):
    """Render as ``SBOL Document {<namespace>}``."""
    template = 'SBOL Document {{{}}}'
    return template.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
    """
    Register *namespace* under *prefix* for later serialization.

    An existing entry for the same prefix is overwritten.
    """
    self._namespaces.update({prefix: namespace})
def get_namespaces(self):
    """
    Return all namespaces known to the document: the default XML
    namespaces merged with any added via add_namespace().

    Document-specific prefixes override defaults on collision.

    Bug fix: the original returned ``XML_NS + self._namespaces``,
    which raises TypeError because dicts do not support ``+``.
    """
    merged = dict(XML_NS)
    merged.update(self._namespaces)
    return merged
def _to_uri_from_namespace(self, value):
    """Resolve *value* against the document namespace via urljoin."""
    base = self.document_namespace
    return urljoin(base, value)
def _append_to_uri(self, uri, value):
    """Resolve *value* against an existing *uri* via urljoin."""
    return urljoin(uri, value)
def add_component_definition(self, definition):
    """
    Add a ComponentDefinition to the document.

    :raises ValueError: if a definition with the same identity is
        already present
    """
    key = definition.identity
    if key in self._components:
        raise ValueError("{} has already been defined".format(key))
    self._components[key] = definition
def remove_component_definition(self, identity):
    """
    Remove a ComponentDefinition; unknown identities are silently
    ignored.
    """
    self._components.pop(identity, None)
def get_component_definition(self, uri):
    """
    Return the ComponentDefinition stored under *uri*, or None if it
    is not in the document.
    """
    return self._components.get(uri)
def list_components(self):
    """
    Return all ComponentDefinitions in the document.

    Note: this is a live dict view, not a list, despite the name.
    """
    return self._components.values()
def assemble_component(self, into_component, using_components):
    """
    Assemble a list of already defined components into a structural
    hierarchy.

    Each ComponentDefinition in *using_components* (all must already be
    in the document) is wrapped in a public Component attached to
    *into_component*; their sequences, when present, are concatenated
    into one combined Sequence with Range-based SequenceAnnotations.

    :param into_component: ComponentDefinition receiving the hierarchy
    :param using_components: non-empty list of ComponentDefinitions
    :raises Exception: if the argument is not a non-empty list, or an
        entry has not been added to the document
    """
    if not isinstance(using_components, list) or len(using_components) == 0:
        raise Exception('Must supply list of ComponentDefinitions')
    components = []
    sequence_annotations = []
    seq_elements = ''
    for k, c in enumerate(using_components):  # NOTE(review): k is unused
        try:
            self._components[c.identity]
        except KeyError:
            raise Exception('Must already have defined ComponentDefinition in document')
        else:
            identity = into_component.identity + '/' + c.identity
            # All components are initially public, this can be changed later
            component = Component(identity,
                                  c,
                                  'public',
                                  display_id=c.identity)
            components.append(component)
            # If there is a sequence on the ComponentDefinition use the first element
            if len(c.sequences) > 0:
                # Add the sequence to the document
                self._add_sequence(c.sequences[0])
                # Get start/end points of sequence
                start = len(seq_elements) + 1  # The sequence is usually 1 indexed
                end = start + len(c.sequences[0].elements)
                # Add to the component sequence element
                seq_elements += c.sequences[0].elements
                # Create a Range object to hold seq range
                range_identity = identity + '_sequence_annotation/range'
                seq_range = Range(range_identity, start, end, display_id='range')
                # Create a SequenceAnnotation object to hold the range
                annot_identity = identity + '_sequence_annotation'
                seq_annot = SequenceAnnotation(annot_identity,
                                               component=component,
                                               locations=[seq_range],
                                               display_id=c.identity + '_sequence_annotation')
                sequence_annotations.append(seq_annot)
    if seq_elements != '':
        # Encoding is taken from the first component; assumes all inputs
        # share one encoding — TODO confirm.
        seq_encoding = using_components[0].sequences[0].encoding
        seq_identity = '{}_sequence'.format(into_component.identity)
        seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
        self._add_sequence(seq)
        into_component.sequences.append(seq)
    into_component.components = components
    into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
    """
    Add *sequence* to the internal store, rejecting duplicates.

    :raises ValueError: if the identity is already present
    """
    key = sequence.identity
    if key in self._sequences:
        raise ValueError("{} has already been defined".format(key))
    self._sequences[key] = sequence
def add_model(self, model):
    """
    Add a Model to the document.

    :raises ValueError: if a model with the same identity is present
    """
    key = model.identity
    if key in self._models:
        raise ValueError("{} has already been defined".format(key))
    self._models[key] = model
def remove_model(self, identity):
    """
    Remove a Model; unknown identities are silently ignored.
    """
    self._models.pop(identity, None)
def get_model(self, uri):
    """
    Return the Model stored under *uri*, or None if absent.

    The original body was an unimplemented stub (always None); this
    implements the lookup consistently with get_component_definition().
    Returning None for a missing key is backward compatible.
    """
    return self._models.get(uri)
def add_module_definition(self, module_definition):
    """
    Add a ModuleDefinition to the document.

    Bug fix: the original wrote to ``self._module_definitions``, an
    attribute never created in __init__ (the store is ``self._modules``,
    which the RDF readers also use), so every call raised
    AttributeError.

    :raises ValueError: if the identity is already present
    """
    key = module_definition.identity
    if key in self._modules:
        raise ValueError("{} has already been defined".format(key))
    self._modules[key] = module_definition
def remove_module_definition(self, identity):
    """
    Remove a ModuleDefinition; unknown identities are silently ignored.

    Bug fix: the original popped from ``self._module_definitions``,
    which is never created in __init__ (the store is ``self._modules``),
    so every call raised AttributeError.
    """
    self._modules.pop(identity, None)
def get_module_definition(self, uri):
    """
    Return the ModuleDefinition stored under *uri*, or None if absent.

    The original body was an unimplemented stub (always None); this
    implements the lookup against ``self._modules`` consistently with
    get_component_definition(). Backward compatible.
    """
    return self._modules.get(uri)
def find(self, uri):
    """
    Recursively search document for URI.

    TODO: not yet implemented; currently always returns None.
    """
    pass
def get_components(self, uri):
    """
    Return the Components of the ComponentDefinition at *uri*, ordered
    by the first location of their sequence annotations.

    Returns False when *uri* is not a known definition (kept for
    backward compatibility with existing callers).
    """
    definition = self._components.get(uri)
    if definition is None:
        return False
    ordered = sorted(definition.sequence_annotations,
                     key=attrgetter('first_location'))
    return [annotation.component for annotation in ordered]
def clear_document(self):
    """
    Reset the document to an empty state by clearing every store.
    """
    stores = (self._components, self._sequences, self._namespaces,
              self._models, self._modules, self._collections,
              self._annotations, self._functional_component_store,
              self._collection_store)
    for store in stores:
        store.clear()
def _get_elements(self, graph, element_type):
    """Return an iterator of (s, p, o) triples whose RDF type is *element_type*."""
    return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
    """
    Return the Python value of the first matching triple's object,
    or None when no triple matches.
    """
    node = graph.value(subject=identity, predicate=rdf_type)
    if node is None:
        return None
    return node.toPython()
def _get_triplet_value_list(self, graph, identity, rdf_type):
    """
    Return the Python values of every object matching (identity,
    rdf_type, *) in *graph*; empty list when nothing matches.
    """
    return [node.toPython() for node in graph.objects(identity, rdf_type)]
def _get_rdf_identified(self, graph, identity):
    """
    Collect the common 'Identified' fields for *identity* from *graph*.

    Returns a dict with identity/display_id/was_derived_from/version/
    description/name plus any non-standard annotations, suitable for
    splatting into an object constructor.
    """
    c = {}
    c['identity'] = identity.toPython() if type(identity) is not str else identity
    c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
    c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
    c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
    c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
    c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
    # Invert prefix->namespace so a namespace maps back to its prefix.
    flipped_namespaces = {v: k for k, v in self._namespaces.items()}
    # Get annotations (non top level): any predicate whose prefixed name
    # is not a known entity becomes an Annotation.
    c['annotations'] = []
    for triple in graph.triples((identity, None, None)):
        namespace, obj = split_uri(triple[1])
        # NOTE(review): a predicate in a namespace not registered on the
        # document raises KeyError here — confirm intended.
        prefix = flipped_namespaces[namespace]
        as_string = '{}:{}'.format(prefix, obj)
        if as_string not in VALID_ENTITIES:
            q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
            if isinstance(triple[2], URIRef):
                value = AnnotationValue(uri=triple[2].toPython())
            elif isinstance(triple[2], Literal):
                value = AnnotationValue(literal=triple[2].toPython())
            else:
                value = None
            c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
    return c
def _read_sequences(self, graph):
    """
    Read *graph* and add Sequence objects to the document.
    """
    for e in self._get_elements(graph, SBOL.Sequence):
        identity = e[0]
        c = self._get_rdf_identified(graph, identity)
        c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
        c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
        seq = Sequence(**c)
        # Index under both the sequence store and the generic collection
        # store (the latter is used when resolving Collection members).
        self._sequences[identity.toPython()] = seq
        self._collection_store[identity.toPython()] = seq
def _read_component_definitions(self, graph):
    """
    Read *graph* and add ComponentDefinition objects to the document.

    First pass only: sub-components, annotations and constraints are
    attached later by _extend_component_definitions().
    """
    for e in self._get_elements(graph, SBOL.ComponentDefinition):
        identity = e[0]
        # Store component values in dict
        c = self._get_rdf_identified(graph, identity)
        c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
        c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
        obj = ComponentDefinition(**c)
        self._components[identity.toPython()] = obj
        self._collection_store[identity.toPython()] = obj
def _extend_component_definitions(self, graph):
    """
    Second pass after _read_component_definitions(): attach Components,
    SequenceAnnotations and SequenceConstraints, which may reference
    definitions that had to be read first.
    """
    for def_uri, comp_def in self._components.items():
        # Store created components indexed for later lookup
        component_index = {}
        identity = URIRef(def_uri)
        # Get components
        for comp in graph.triples((identity, SBOL.component, None)):
            comp_identity = comp[2]
            ci = self._get_rdf_identified(graph, comp_identity)
            # NOTE(review): predicate is SBOL.mapTo here, while the module
            # reader uses SBOL.mapsTo — confirm which is correct.
            ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
            ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
            component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
            ci['definition'] = self._components[component_comp_def]
            c = Component(**ci)
            component_index[ci['identity']] = c
        self._components[def_uri].components = list(component_index.values())
        # Get sequence annotations
        # Fall back to the capitalised predicate form when the lowercase
        # one is absent from the graph.
        if (identity, SBOL.sequenceAnnotation, None) in graph:
            find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
        else:
            find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
        sequence_annotations = []
        for seq_annot in graph.triples(find_annotation_using):
            seq_identity = seq_annot[2]
            sa = self._get_rdf_identified(graph, seq_identity)
            component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
            sa['component'] = component_index[component_to_use]
            sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
            locations = []
            for loc in graph.triples((seq_identity, SBOL.location, None)):
                loc_identity = loc[2]
                location = self._get_rdf_identified(graph, loc_identity)
                location['orientation'] = self._get_triplet_value(graph, loc_identity,
                                                                  SBOL.orientation)
                location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
                # Dispatch on the concrete Location subclass.
                if location_type == SBOL.Range:
                    location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
                    location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
                    locations.append(Range(**location))
                elif location_type == SBOL.Cut:
                    location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
                    locations.append(Cut(**location))
                else:
                    locations.append(GenericLocation(**location))
            sa_obj = SequenceAnnotation(locations=locations, **sa)
            sequence_annotations.append(sa_obj)
        self._components[def_uri].sequence_annotations = sequence_annotations
        # Get sequence constraints (same lowercase/capitalised fallback)
        if (identity, SBOL.sequenceConstraint, None) in graph:
            find_constraint_using = (identity, SBOL.sequenceConstraint, None)
        else:
            find_constraint_using = (identity, SBOL.SequenceConstraint, None)
        sequence_constraints = []
        for seq_constraint in graph.triples(find_constraint_using):
            seq_identity = seq_constraint[2]
            sc = self._get_rdf_identified(graph, seq_identity)
            sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
            subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
            sc['subject'] = component_index[subject_id]
            object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
            # Object is a reserved word so call it obj to prevent clashes
            sc['obj'] = component_index[object_id]
            sc_obj = SequenceConstraint(**sc)
            sequence_constraints.append(sc_obj)
        self._components[def_uri].sequence_constraints = sequence_constraints
def _read_models(self, graph):
    """
    Read *graph* and add Model objects to the document.
    """
    for e in self._get_elements(graph, SBOL.Model):
        identity = e[0]
        m = self._get_rdf_identified(graph, identity)
        m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
        m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
        m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
        obj = Model(**m)
        # Indexed in the generic collection store as well so Collections
        # can reference models as members.
        self._models[identity.toPython()] = obj
        self._collection_store[identity.toPython()] = obj
def _read_module_definitions(self, graph):
    """
    Read *graph* and add ModuleDefinition objects (with their
    FunctionalComponents and Interactions) to the document.
    """
    for e in self._get_elements(graph, SBOL.ModuleDefinition):
        identity = e[0]
        m = self._get_rdf_identified(graph, identity)
        m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
        functional_components = {}
        for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
            func_identity = func_comp[2]
            fc = self._get_rdf_identified(graph, func_identity)
            definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
            fc['definition'] = self._components[definition]
            fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
            fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
            functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
            # Also keep a document-wide index so MapsTo objects read later
            # can resolve their local/remote references.
            self._functional_component_store[func_identity.toPython()] = \
                functional_components[func_identity.toPython()]
        interactions = []
        for inter in graph.triples((identity, SBOL.interaction, None)):
            inter_identity = inter[2]
            it = self._get_rdf_identified(graph, inter_identity)
            # NOTE(review): predicate SBOL.types — the component reader
            # uses the singular SBOL.type; confirm this matches the
            # serializer's output.
            it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
            participations = []
            for p in graph.triples((inter_identity, SBOL.participation, None)):
                pc = self._get_rdf_identified(graph, p[2])
                roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
                # Need to use one of the functional component created above
                participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
                participant = functional_components[participant_id]
                participations.append(Participation(roles=roles, participant=participant, **pc))
            interactions.append(Interaction(participations=participations, **it))
        obj = ModuleDefinition(functional_components=functional_components.values(),
                               interactions=interactions,
                               **m)
        self._modules[identity.toPython()] = obj
        self._collection_store[identity.toPython()] = obj
def _extend_module_definitions(self, graph):
    """
    Second pass: link sub-Modules (and their MapsTo objects) into the
    already-read ModuleDefinitions.
    """
    for mod_id in self._modules:
        # NOTE(review): this fetches a single SBOL.module value and then
        # iterates triples with that value as the *subject*; verify this
        # is the intended traversal (it looks like it may miss modules
        # attached directly to the definition).
        mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
        modules = []
        for mod in graph.triples((mod_identity, SBOL.module, None)):
            md = self._get_rdf_identified(graph, mod[2])
            definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
            md['definition'] = self._modules[definition_id]
            maps_to = []
            for m in graph.triples((mod[2], SBOL.mapsTo, None)):
                mt = self._get_rdf_identified(graph, m[2])
                mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
                local_id = self._get_triplet_value(graph, m[2], SBOL.local)
                remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
                # Resolve against FunctionalComponents indexed during
                # _read_module_definitions().
                mt['local'] = self._functional_component_store[local_id]
                mt['remote'] = self._functional_component_store[remote_id]
                maps_to.append(MapsTo(**mt))
            modules.append(Module(maps_to=maps_to, **md))
        self._modules[mod_id].modules = modules
def _read_annotations(self, graph):
    """
    Find any non-defined (non-standard) elements at TopLevel and store
    them as GenericTopLevel annotations.
    """
    flipped_namespaces = {v: k for k, v in self._namespaces.items()}
    for triple in graph.triples((None, RDF.type, None)):
        namespace, obj = split_uri(triple[2])
        # NOTE(review): raises KeyError for types in namespaces not
        # registered on the document — confirm intended.
        prefix = flipped_namespaces[namespace]
        as_string = '{}:{}'.format(prefix, obj)
        if as_string not in VALID_ENTITIES:
            identity = triple[0]
            gt = self._get_rdf_identified(graph, identity)
            q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
            gt['rdf_type'] = q_name
            gt_obj = GenericTopLevel(**gt)
            self._annotations[identity.toPython()] = gt_obj
            self._collection_store[identity.toPython()] = gt_obj
def _read_collections(self, graph):
    """
    Read *graph* and add Collection objects to the document.

    Must run after the other readers so members can be resolved from
    the document-wide collection store.
    """
    for e in self._get_elements(graph, SBOL.Collection):
        identity = e[0]
        c = self._get_rdf_identified(graph, identity)
        members = []
        # Need to handle other non-standard TopLevel objects first
        for m in graph.triples((identity, SBOL.member, None)):
            members.append(self._collection_store[m[2].toPython()])
        obj = Collection(members=members, **c)
        self._collections[identity.toPython()] = obj
def append(self, f):
    """
    Read an SBOL file and append contents to current document.

    TODO: not yet implemented; currently a no-op.
    """
    pass
def _add_to_root(self, root_node, elements):
    """Serialize each element as RDF/XML and attach it to *root_node*."""
    for element in elements:
        root_node.append(element._as_rdf_xml(self.ns))
def write(self, f):
    """
    Serialize the document as RDF/XML and write it to file object *f*.

    Each top-level store is emitted sorted by identity so the output is
    deterministic. Collapses the original six hand-unrolled
    sort-and-append stanzas into one loop, preserving the original
    emission order.
    """
    rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
    # TODO: TopLevel Annotations
    stores = (self._sequences, self._components, self._models,
              self._modules, self._collections, self._annotations)
    for store in stores:
        self._add_to_root(rdf, sorted(store.values(), key=lambda x: x.identity))
    f.write(ET.tostring(rdf,
                        pretty_print=True,
                        xml_declaration=True,
                        encoding='utf-8'))
|
tjomasc/snekbol | snekbol/document.py | Document.write | python | def write(self, f):
rdf = ET.Element(NS('rdf', 'RDF'), nsmap=XML_NS)
# TODO: TopLevel Annotations
sequence_values = sorted(self._sequences.values(), key=lambda x: x.identity)
self._add_to_root(rdf, sequence_values)
component_values = sorted(self._components.values(), key=lambda x: x.identity)
self._add_to_root(rdf, component_values)
model_values = sorted(self._models.values(), key=lambda x: x.identity)
self._add_to_root(rdf, model_values)
module_values = sorted(self._modules.values(), key=lambda x: x.identity)
self._add_to_root(rdf, module_values)
collection_values = sorted(self._collections.values(), key=lambda x: x.identity)
self._add_to_root(rdf, collection_values)
annotation_values = sorted(self._annotations.values(), key=lambda x: x.identity)
self._add_to_root(rdf, annotation_values)
f.write(ET.tostring(rdf,
pretty_print=True,
xml_declaration=True,
encoding='utf-8')) | Write an SBOL file from current document contents | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L553-L576 | [
"def NS(namespace, tag):\n \"\"\"\n Generate a namespaced tag for use in creation of an XML file\n \"\"\"\n return '{' + XML_NS[namespace] + '}' + tag\n",
"def _add_to_root(self, root_node, elements):\n for item in elements:\n elem = item._as_rdf_xml(self.ns)\n root_node.append(elem)\n"
] | class Document(object):
"""
Provides a base for creating SBOL documents
"""
def __init__(self,
namespace,
validate=True):
# Don't access directly: use function getter/setters
self._components = {}
self._sequences = {}
self._namespaces = {}
self._models = {}
self._modules = {}
self._collections = {}
self._annotations = {}
# Used for looking up functional components when reading
# in data from a file
self._functional_component_store = {}
self._collection_store = {}
if validators.url(namespace):
self.document_namespace = namespace
else:
raise Exception('Invalid namespace URI')
self.validate = validate
# Create a document namesspace for use in RDF serialization
self.ns = Namespace(self.document_namespace)
def __str__(self):
return 'SBOL Document {{{}}}'.format(self.document_namespace)
def add_namespace(self, namespace, prefix):
"""
Add a namespace to the document
"""
self._namespaces[prefix] = namespace
def get_namespaces(self):
    """
    Return all namespaces known to the document: the default XML
    namespaces merged with any added via add_namespace().

    Document-specific prefixes override defaults on collision.

    Bug fix: the original returned ``XML_NS + self._namespaces``,
    which raises TypeError because dicts do not support ``+``.
    """
    merged = dict(XML_NS)
    merged.update(self._namespaces)
    return merged
def _to_uri_from_namespace(self, value):
"""
Take a value and make a URL using the document namespace
"""
return urljoin(self.document_namespace, value)
def _append_to_uri(self, uri, value):
"""
Take an existing URI and append more data to it
"""
return urljoin(uri, value)
def add_component_definition(self, definition):
"""
Add a ComponentDefinition to the document
"""
# definition.identity = self._to_uri_from_namespace(definition.identity)
if definition.identity not in self._components.keys():
self._components[definition.identity] = definition
else:
raise ValueError("{} has already been defined".format(definition.identity))
def remove_component_definition(self, identity):
"""
Remove a ComponentDefinition from the document
"""
try:
self._components.pop(identity)
except KeyError:
pass
def get_component_definition(self, uri):
"""
Get a ComponentDefintion from the document
"""
try:
definition = self._components[uri]
except KeyError:
return None
return definition
def list_components(self):
"""
List of all ComponentDefinitions in the document
"""
return self._components.values()
def assemble_component(self, into_component, using_components):
"""
Assemble a list of already defined components into a structual hirearchy
"""
if not isinstance(using_components, list) or len(using_components) == 0:
raise Exception('Must supply list of ComponentDefinitions')
components = []
sequence_annotations = []
seq_elements = ''
for k, c in enumerate(using_components):
try:
self._components[c.identity]
except KeyError:
raise Exception('Must already have defined ComponentDefinition in document')
else:
identity = into_component.identity + '/' + c.identity
# All components are initially public, this can be changed later
component = Component(identity,
c,
'public',
display_id=c.identity)
components.append(component)
# If there is a sequence on the ComponentDefinition use the first element
if len(c.sequences) > 0:
# Add the sequence to the document
self._add_sequence(c.sequences[0])
# Get start/end points of sequence
start = len(seq_elements) + 1 # The sequence is usually 1 indexed
end = start + len(c.sequences[0].elements)
# Add to the component sequence element
seq_elements += c.sequences[0].elements
# Create a Range object to hold seq range
range_identity = identity + '_sequence_annotation/range'
seq_range = Range(range_identity, start, end, display_id='range')
# Create a SequenceAnnotation object to hold the range
annot_identity = identity + '_sequence_annotation'
seq_annot = SequenceAnnotation(annot_identity,
component=component,
locations=[seq_range],
display_id=c.identity + '_sequence_annotation')
sequence_annotations.append(seq_annot)
if seq_elements != '':
seq_encoding = using_components[0].sequences[0].encoding
seq_identity = '{}_sequence'.format(into_component.identity)
seq = Sequence(seq_identity, seq_elements, encoding=seq_encoding)
self._add_sequence(seq)
into_component.sequences.append(seq)
into_component.components = components
into_component.sequence_annotations = sequence_annotations
def _add_sequence(self, sequence):
"""
Add a Sequence to the document
"""
if sequence.identity not in self._sequences.keys():
self._sequences[sequence.identity] = sequence
else:
raise ValueError("{} has already been defined".format(sequence.identity))
def add_model(self, model):
"""
Add a model to the document
"""
if model.identity not in self._models.keys():
self._models[model.identity] = model
else:
raise ValueError("{} has already been defined".format(model.identity))
def remove_model(self, identity):
"""
Remove a Model from the document
"""
try:
self._models.pop(identity)
except KeyError:
pass
def get_model(self, uri):
    """
    Return the Model stored under *uri*, or None if absent.

    The original body was an unimplemented stub (always None); this
    implements the lookup consistently with get_component_definition().
    Returning None for a missing key is backward compatible.
    """
    return self._models.get(uri)
def add_module_definition(self, module_definition):
    """
    Add a ModuleDefinition to the document.

    Bug fix: the original wrote to ``self._module_definitions``, an
    attribute never created in __init__ (the store is ``self._modules``,
    which the RDF readers also use), so every call raised
    AttributeError.

    :raises ValueError: if the identity is already present
    """
    key = module_definition.identity
    if key in self._modules:
        raise ValueError("{} has already been defined".format(key))
    self._modules[key] = module_definition
def remove_module_definition(self, identity):
    """
    Remove a ModuleDefinition; unknown identities are silently ignored.

    Bug fix: the original popped from ``self._module_definitions``,
    which is never created in __init__ (the store is ``self._modules``),
    so every call raised AttributeError.
    """
    self._modules.pop(identity, None)
def get_module_definition(self, uri):
    """
    Return the ModuleDefinition stored under *uri*, or None if absent.

    The original body was an unimplemented stub (always None); this
    implements the lookup against ``self._modules`` consistently with
    get_component_definition(). Backward compatible.
    """
    return self._modules.get(uri)
def find(self, uri):
"""
Recursivly search document for URI
"""
pass
def get_components(self, uri):
"""
Get components from a component definition in order
"""
try:
component_definition = self._components[uri]
except KeyError:
return False
sorted_sequences = sorted(component_definition.sequence_annotations,
key=attrgetter('first_location'))
return [c.component for c in sorted_sequences]
def clear_document(self):
"""
Clears ALL items from document, reseting it to clean
"""
self._components.clear()
self._sequences.clear()
self._namespaces.clear()
self._models.clear()
self._modules.clear()
self._collections.clear()
self._annotations.clear()
self._functional_component_store.clear()
self._collection_store.clear()
def _get_elements(self, graph, element_type):
return graph.triples((None, RDF.type, element_type))
def _get_triplet_value(self, graph, identity, rdf_type):
"""
Get a value from an RDF triple
"""
value = graph.value(subject=identity, predicate=rdf_type)
return value.toPython() if value is not None else value
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values
def _get_rdf_identified(self, graph, identity):
c = {}
c['identity'] = identity.toPython() if type(identity) is not str else identity
c['display_id'] = self._get_triplet_value(graph, identity, SBOL.displayId)
c['was_derived_from'] = self._get_triplet_value(graph, identity, PROV.wasDerivedFrom)
c['version'] = self._get_triplet_value(graph, identity, SBOL.version)
c['description'] = self._get_triplet_value(graph, identity, DCTERMS.description)
c['name'] = self._get_triplet_value(graph, identity, DCTERMS.title)
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
# Get annotations (non top level)
c['annotations'] = []
for triple in graph.triples((identity, None, None)):
namespace, obj = split_uri(triple[1])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
if isinstance(triple[2], URIRef):
value = AnnotationValue(uri=triple[2].toPython())
elif isinstance(triple[2], Literal):
value = AnnotationValue(literal=triple[2].toPython())
else:
value = None
c['annotations'].append(Annotation(q_name=q_name, annotation_value=value))
return c
def _read_sequences(self, graph):
"""
Read graph and add sequences to document
"""
for e in self._get_elements(graph, SBOL.Sequence):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
c['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
c['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
seq = Sequence(**c)
self._sequences[identity.toPython()] = seq
self._collection_store[identity.toPython()] = seq
def _read_component_definitions(self, graph):
"""
Read graph and add component defintions to document
"""
for e in self._get_elements(graph, SBOL.ComponentDefinition):
identity = e[0]
# Store component values in dict
c = self._get_rdf_identified(graph, identity)
c['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
c['types'] = self._get_triplet_value_list(graph, identity, SBOL.type)
obj = ComponentDefinition(**c)
self._components[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_component_definitions(self, graph):
"""
Read graph and update component definitions with related elements
"""
for def_uri, comp_def in self._components.items():
# Store created components indexed for later lookup
component_index = {}
identity = URIRef(def_uri)
# Get components
for comp in graph.triples((identity, SBOL.component, None)):
comp_identity = comp[2]
ci = self._get_rdf_identified(graph, comp_identity)
ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
ci['definition'] = self._components[component_comp_def]
c = Component(**ci)
component_index[ci['identity']] = c
self._components[def_uri].components = list(component_index.values())
# Get sequence annotations
if (identity, SBOL.sequenceAnnotation, None) in graph:
find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
else:
find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
sequence_annotations = []
for seq_annot in graph.triples(find_annotation_using):
seq_identity = seq_annot[2]
sa = self._get_rdf_identified(graph, seq_identity)
component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
sa['component'] = component_index[component_to_use]
sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
locations = []
for loc in graph.triples((seq_identity, SBOL.location, None)):
loc_identity = loc[2]
location = self._get_rdf_identified(graph, loc_identity)
location['orientation'] = self._get_triplet_value(graph, loc_identity,
SBOL.orientation)
location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
if location_type == SBOL.Range:
location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
locations.append(Range(**location))
elif location_type == SBOL.Cut:
location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
locations.append(Cut(**location))
else:
locations.append(GenericLocation(**location))
sa_obj = SequenceAnnotation(locations=locations, **sa)
sequence_annotations.append(sa_obj)
self._components[def_uri].sequence_annotations = sequence_annotations
# Get sequence constraints
if (identity, SBOL.sequenceConstraint, None) in graph:
find_constraint_using = (identity, SBOL.sequenceConstraint, None)
else:
find_constraint_using = (identity, SBOL.SequenceConstraint, None)
sequence_constraints = []
for seq_constraint in graph.triples(find_constraint_using):
seq_identity = seq_constraint[2]
sc = self._get_rdf_identified(graph, seq_identity)
sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
sc['subject'] = component_index[subject_id]
object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
# Object is a reserved word so call it obj to prevent clashes
sc['obj'] = component_index[object_id]
sc_obj = SequenceConstraint(**sc)
sequence_constraints.append(sc_obj)
self._components[def_uri].sequence_constraints = sequence_constraints
def _read_models(self, graph):
"""
Read graph and add models to document
"""
for e in self._get_elements(graph, SBOL.Model):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['source'] = self._get_triplet_value(graph, identity, SBOL.source)
m['language'] = self._get_triplet_value(graph, identity, SBOL.language)
m['framework'] = self._get_triplet_value(graph, identity, SBOL.framework)
obj = Model(**m)
self._models[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _read_module_definitions(self, graph):
"""
Read graph and add module defintions to document
"""
for e in self._get_elements(graph, SBOL.ModuleDefinition):
identity = e[0]
m = self._get_rdf_identified(graph, identity)
m['roles'] = self._get_triplet_value_list(graph, identity, SBOL.role)
functional_components = {}
for func_comp in graph.triples((identity, SBOL.functionalComponent, None)):
func_identity = func_comp[2]
fc = self._get_rdf_identified(graph, func_identity)
definition = self._get_triplet_value(graph, func_identity, SBOL.definition)
fc['definition'] = self._components[definition]
fc['access'] = self._get_triplet_value(graph, func_identity, SBOL.access)
fc['direction'] = self._get_triplet_value(graph, func_identity, SBOL.direction)
functional_components[func_identity.toPython()] = FunctionalComponent(**fc)
self._functional_component_store[func_identity.toPython()] = \
functional_components[func_identity.toPython()]
interactions = []
for inter in graph.triples((identity, SBOL.interaction, None)):
inter_identity = inter[2]
it = self._get_rdf_identified(graph, inter_identity)
it['types'] = self._get_triplet_value_list(graph, inter_identity, SBOL.types)
participations = []
for p in graph.triples((inter_identity, SBOL.participation, None)):
pc = self._get_rdf_identified(graph, p[2])
roles = self._get_triplet_value_list(graph, p[2], SBOL.role)
# Need to use one of the functional component created above
participant_id = self._get_triplet_value(graph, p[2], SBOL.participant)
participant = functional_components[participant_id]
participations.append(Participation(roles=roles, participant=participant, **pc))
interactions.append(Interaction(participations=participations, **it))
obj = ModuleDefinition(functional_components=functional_components.values(),
interactions=interactions,
**m)
self._modules[identity.toPython()] = obj
self._collection_store[identity.toPython()] = obj
def _extend_module_definitions(self, graph):
"""
Using collected module definitions extend linkages
"""
for mod_id in self._modules:
mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
modules = []
for mod in graph.triples((mod_identity, SBOL.module, None)):
md = self._get_rdf_identified(graph, mod[2])
definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
md['definition'] = self._modules[definition_id]
maps_to = []
for m in graph.triples((mod[2], SBOL.mapsTo, None)):
mt = self._get_rdf_identified(graph, m[2])
mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
local_id = self._get_triplet_value(graph, m[2], SBOL.local)
remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
mt['local'] = self._functional_component_store[local_id]
mt['remote'] = self._functional_component_store[remote_id]
maps_to.append(MapsTo(**mt))
modules.append(Module(maps_to=maps_to, **md))
self._modules[mod_id].modules = modules
def _read_annotations(self, graph):
"""
Find any non-defined elements at TopLevel and create annotations
"""
flipped_namespaces = {v: k for k, v in self._namespaces.items()}
for triple in graph.triples((None, RDF.type, None)):
namespace, obj = split_uri(triple[2])
prefix = flipped_namespaces[namespace]
as_string = '{}:{}'.format(prefix, obj)
if as_string not in VALID_ENTITIES:
identity = triple[0]
gt = self._get_rdf_identified(graph, identity)
q_name = QName(namespace=namespace, local_name=obj, prefix=prefix)
gt['rdf_type'] = q_name
gt_obj = GenericTopLevel(**gt)
self._annotations[identity.toPython()] = gt_obj
self._collection_store[identity.toPython()] = gt_obj
def _read_collections(self, graph):
"""
Read graph and add collections to document
"""
for e in self._get_elements(graph, SBOL.Collection):
identity = e[0]
c = self._get_rdf_identified(graph, identity)
members = []
# Need to handle other non-standard TopLevel objects first
for m in graph.triples((identity, SBOL.member, None)):
members.append(self._collection_store[m[2].toPython()])
obj = Collection(members=members, **c)
self._collections[identity.toPython()] = obj
def read(self, f):
"""
Read in an SBOL file, replacing current document contents
"""
self.clear_document()
g = Graph()
g.parse(f, format='xml')
for n in g.namespaces():
ns = n[1].toPython()
if not ns.endswith(('#', '/', ':')):
ns = ns + '/'
self._namespaces[n[0]] = ns
# Extend the existing namespaces available
XML_NS[n[0]] = ns
self._read_sequences(g)
self._read_component_definitions(g)
self._extend_component_definitions(g)
self._read_models(g)
self._read_module_definitions(g)
self._extend_module_definitions(g)
self._read_annotations(g)
# Last as this needs all other top level objects created
self._read_collections(g)
def append(self, f):
"""
Read an SBOL file and append contents to current document
"""
pass
def _add_to_root(self, root_node, elements):
for item in elements:
elem = item._as_rdf_xml(self.ns)
root_node.append(elem)
|
tjomasc/snekbol | snekbol/identified.py | Identified._as_rdf_xml | python | def _as_rdf_xml(self, ns):
self.rdf_identity = self._get_identity(ns)
elements = []
elements.append(ET.Element(NS('sbol', 'persistentIdentity'),
attrib={NS('rdf', 'resource'):
self._get_persistent_identitity(ns)}))
if self.name is not None:
name = ET.Element(NS('dcterms', 'title'))
name.text = self.name
elements.append(name)
if self.display_id is not None:
display_id = ET.Element(NS('sbol', 'displayId'))
display_id.text = self.display_id
elements.append(display_id)
if self.version is not None:
version = ET.Element(NS('sbol', 'version'))
version.text = self.version
elements.append(version)
if self.was_derived_from is not None:
elements.append(ET.Element(NS('prov', 'wasDerivedFrom'),
attrib={NS('rdf', 'resource'): self.was_derived_from}))
if self.description is not None:
description = ET.Element(NS('dcterms', 'description'))
description.text = self.description
elements.append(description)
for a in self.annotations:
elements.append(a._as_rdf_xml(ns))
return elements | Return identity details for the element as XML nodes | train | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/identified.py#L68-L98 | null | class Identified(object):
"""
Mixin to provide identity support to SBOL objects
"""
def __init__(self,
identity,
name = None,
was_derived_from = None,
version = None,
description = None,
display_id = None,
annotations=[]):
self.identity = identity
self.name = name
self.was_derived_from = was_derived_from
self.version = version
self.description = description
self.display_id = display_id
self.annotations = annotations
if display_id is None:
self.display_id = re.sub('[\W_]+', '_', identity)
@property
def persistent_identitity(self):
return '{}/{}'.format(self.display_id, self.version)
def _get_identity(self, namespace=None, postfix=None):
identity = self.identity
if postfix is not None:
identity = '{}{}'.format(self.identity, postfix)
if not validators.url(identity):
return namespace[identity]
else:
return identity
def _get_persistent_identitity(self, namespace):
if self.version is not None:
return '{}/{}/{}'.format(self._get_identity(namespace), self.display_id, self.version)
return '{}'.format(self._get_identity(namespace))
def _get_rdf_identity(self, namespace=None, postfix=None):
identity = self.identity
if postfix is not None:
identity = '{}{}'.format(self.identity, postfix)
if not validators.url(identity):
return URIRef(namespace[identity])
else:
return URIRef(identity)
def _get_rdf_persistent_identitity(self, namespace):
if self.version is not None:
return URIRef('{}/{}'.format(self._get_rdf_identity(namespace), self.version))
return self._get_rdf_identity(namespace)
|
sveetch/py-css-styleguide | py_css_styleguide/parser.py | TinycssSourceParser.digest_prelude | python | def digest_prelude(self, rule):
name = []
for token in rule.prelude:
if token.type == 'ident':
name.append(token.value)
return "__".join(name) | Walk on rule prelude (aka CSS selector) tokens to return a string of
the value name (from css selector).
Actually only simple selector and selector with descendant combinator
are supported. Using any other selector kind may leads to unexpected
issues.
Arguments:
rule (tinycss2.ast.QualifiedRule): Qualified rule object as
returned by tinycss2.
Returns:
string: Selector name. If it's a descendant combinator, items are
joined with ``__``. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/parser.py#L29-L52 | null | class TinycssSourceParser(object):
"""
CSS parser using tinycss2
Since tinycss2 only return tokens, this parser is in charge to turn them
to usable datas: a dict of properties for each selector.
"""
def digest_content(self, rule):
"""
Walk on rule content tokens to return a dict of properties.
This is pretty naive and will choke/fail on everything that is more
evolved than simple ``ident(string):value(string)``
Arguments:
rule (tinycss2.ast.QualifiedRule): Qualified rule object as
returned by tinycss2.
Returns:
dict: Dictionnary of retrieved variables and properties.
"""
data = OrderedDict()
current_key = None
for token in rule.content:
# Assume first identity token is the property name
if token.type == 'ident':
# Ignore starting '-' from css variables
name = token.value
if name.startswith('-'):
name = name[1:]
current_key = name
data[current_key] = None
# Assume first following string token is the property value.
if token.type == 'string':
data[current_key] = token.value
return data
def consume(self, source):
"""
Parse source and consume tokens from tinycss2.
Arguments:
source (string): Source content to parse.
Returns:
dict: Retrieved rules.
"""
manifest = OrderedDict()
rules = parse_stylesheet(
source,
skip_comments=True,
skip_whitespace=True,
)
for rule in rules:
# Gather rule selector+properties
name = self.digest_prelude(rule)
# Ignore everything out of styleguide namespace
if not name.startswith(RULE_BASE_PREFIX):
continue
properties = self.digest_content(rule)
manifest[name] = properties
return manifest
def parse(self, source):
"""
Read and parse CSS source and return dict of rules.
Arguments:
source (string): Source content to parse.
Returns:
dict: Selectors with their properties.
"""
return self.consume(source)
|
sveetch/py-css-styleguide | py_css_styleguide/parser.py | TinycssSourceParser.digest_content | python | def digest_content(self, rule):
data = OrderedDict()
current_key = None
for token in rule.content:
# Assume first identity token is the property name
if token.type == 'ident':
# Ignore starting '-' from css variables
name = token.value
if name.startswith('-'):
name = name[1:]
current_key = name
data[current_key] = None
# Assume first following string token is the property value.
if token.type == 'string':
data[current_key] = token.value
return data | Walk on rule content tokens to return a dict of properties.
This is pretty naive and will choke/fail on everything that is more
evolved than simple ``ident(string):value(string)``
Arguments:
rule (tinycss2.ast.QualifiedRule): Qualified rule object as
returned by tinycss2.
Returns:
dict: Dictionnary of retrieved variables and properties. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/parser.py#L54-L87 | null | class TinycssSourceParser(object):
"""
CSS parser using tinycss2
Since tinycss2 only return tokens, this parser is in charge to turn them
to usable datas: a dict of properties for each selector.
"""
def digest_prelude(self, rule):
"""
Walk on rule prelude (aka CSS selector) tokens to return a string of
the value name (from css selector).
Actually only simple selector and selector with descendant combinator
are supported. Using any other selector kind may leads to unexpected
issues.
Arguments:
rule (tinycss2.ast.QualifiedRule): Qualified rule object as
returned by tinycss2.
Returns:
string: Selector name. If it's a descendant combinator, items are
joined with ``__``.
"""
name = []
for token in rule.prelude:
if token.type == 'ident':
name.append(token.value)
return "__".join(name)
def consume(self, source):
"""
Parse source and consume tokens from tinycss2.
Arguments:
source (string): Source content to parse.
Returns:
dict: Retrieved rules.
"""
manifest = OrderedDict()
rules = parse_stylesheet(
source,
skip_comments=True,
skip_whitespace=True,
)
for rule in rules:
# Gather rule selector+properties
name = self.digest_prelude(rule)
# Ignore everything out of styleguide namespace
if not name.startswith(RULE_BASE_PREFIX):
continue
properties = self.digest_content(rule)
manifest[name] = properties
return manifest
def parse(self, source):
"""
Read and parse CSS source and return dict of rules.
Arguments:
source (string): Source content to parse.
Returns:
dict: Selectors with their properties.
"""
return self.consume(source)
|
sveetch/py-css-styleguide | py_css_styleguide/parser.py | TinycssSourceParser.consume | python | def consume(self, source):
manifest = OrderedDict()
rules = parse_stylesheet(
source,
skip_comments=True,
skip_whitespace=True,
)
for rule in rules:
# Gather rule selector+properties
name = self.digest_prelude(rule)
# Ignore everything out of styleguide namespace
if not name.startswith(RULE_BASE_PREFIX):
continue
properties = self.digest_content(rule)
manifest[name] = properties
return manifest | Parse source and consume tokens from tinycss2.
Arguments:
source (string): Source content to parse.
Returns:
dict: Retrieved rules. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/parser.py#L89-L118 | [
"def digest_prelude(self, rule):\n \"\"\"\n Walk on rule prelude (aka CSS selector) tokens to return a string of\n the value name (from css selector).\n\n Actually only simple selector and selector with descendant combinator\n are supported. Using any other selector kind may leads to unexpected\n issues.\n\n Arguments:\n rule (tinycss2.ast.QualifiedRule): Qualified rule object as\n returned by tinycss2.\n\n Returns:\n string: Selector name. If it's a descendant combinator, items are\n joined with ``__``.\n \"\"\"\n name = []\n\n for token in rule.prelude:\n if token.type == 'ident':\n name.append(token.value)\n\n return \"__\".join(name)\n",
"def digest_content(self, rule):\n \"\"\"\n Walk on rule content tokens to return a dict of properties.\n\n This is pretty naive and will choke/fail on everything that is more\n evolved than simple ``ident(string):value(string)``\n\n Arguments:\n rule (tinycss2.ast.QualifiedRule): Qualified rule object as\n returned by tinycss2.\n\n Returns:\n dict: Dictionnary of retrieved variables and properties.\n \"\"\"\n data = OrderedDict()\n\n current_key = None\n\n for token in rule.content:\n # Assume first identity token is the property name\n if token.type == 'ident':\n # Ignore starting '-' from css variables\n name = token.value\n if name.startswith('-'):\n name = name[1:]\n\n current_key = name\n data[current_key] = None\n\n # Assume first following string token is the property value.\n if token.type == 'string':\n data[current_key] = token.value\n\n return data\n"
] | class TinycssSourceParser(object):
"""
CSS parser using tinycss2
Since tinycss2 only return tokens, this parser is in charge to turn them
to usable datas: a dict of properties for each selector.
"""
def digest_prelude(self, rule):
"""
Walk on rule prelude (aka CSS selector) tokens to return a string of
the value name (from css selector).
Actually only simple selector and selector with descendant combinator
are supported. Using any other selector kind may leads to unexpected
issues.
Arguments:
rule (tinycss2.ast.QualifiedRule): Qualified rule object as
returned by tinycss2.
Returns:
string: Selector name. If it's a descendant combinator, items are
joined with ``__``.
"""
name = []
for token in rule.prelude:
if token.type == 'ident':
name.append(token.value)
return "__".join(name)
def digest_content(self, rule):
"""
Walk on rule content tokens to return a dict of properties.
This is pretty naive and will choke/fail on everything that is more
evolved than simple ``ident(string):value(string)``
Arguments:
rule (tinycss2.ast.QualifiedRule): Qualified rule object as
returned by tinycss2.
Returns:
dict: Dictionnary of retrieved variables and properties.
"""
data = OrderedDict()
current_key = None
for token in rule.content:
# Assume first identity token is the property name
if token.type == 'ident':
# Ignore starting '-' from css variables
name = token.value
if name.startswith('-'):
name = name[1:]
current_key = name
data[current_key] = None
# Assume first following string token is the property value.
if token.type == 'string':
data[current_key] = token.value
return data
def parse(self, source):
"""
Read and parse CSS source and return dict of rules.
Arguments:
source (string): Source content to parse.
Returns:
dict: Selectors with their properties.
"""
return self.consume(source)
|
sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.validate_rule_name | python | def validate_rule_name(self, name):
if not name:
raise SerializerError("Rule name is empty".format(name))
if name[0] not in RULE_ALLOWED_START:
msg = "Rule name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in RULE_ALLOWED_CHARS:
msg = ("Invalid rule name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True | Validate rule name.
Arguments:
name (string): Rule name.
Returns:
bool: ``True`` if rule name is valid. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L46-L69 | null | class ManifestSerializer(object):
"""
Serialize parsed CSS to data suitable to Manifest
Raises:
SerializerError: When there is an invalid syntax in datas.
Attributes:
_metas (collections.OrderedDict): Buffer to store serialized metas
from parsed source. Default is an empty dict which reseted and
filled from ``serialize`` method.
_DEFAULT_SPLITTER (string): Default value splitter used for some
structure kinds.
"""
_DEFAULT_SPLITTER = 'white-space'
def __init__(self):
self._metas = OrderedDict()
def validate_variable_name(self, name):
"""
Validate variable name.
Arguments:
name (string): Property name.
Returns:
bool: ``True`` if variable name is valid.
"""
if not name:
raise SerializerError("Variable name is empty".format(name))
if name[0] not in PROPERTY_ALLOWED_START:
msg = "Variable name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in PROPERTY_ALLOWED_CHARS:
msg = ("Invalid variable name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def value_splitter(self, reference, prop, value, mode):
"""
Split a string into a list items.
Default behavior is to split on white spaces.
Arguments:
reference (string): Reference name used when raising possible
error.
prop (string): Property name used when raising possible error.
value (string): Property value to split.
mode (string): Splitter mode. Default should come from
``ManifestSerializer._DEFAULT_SPLITTER``.
Available splitter are:
* ``white-space``: Simply split a string on white spaces;
* ``json-list``: Assume the string is a JSON list to parse;
Returns:
list:
"""
items = []
if mode == 'json-list':
try:
items = json.loads(value)
except json.JSONDecodeError as e:
print(value)
msg = ("Reference '{ref}' raised JSON decoder error when "
"splitting values from '{prop}': {err}'")
raise SerializerError(msg.format(ref=reference, prop=prop,
err=e))
else:
if len(value) > 0:
items = value.split(" ")
return items
def serialize_to_json(self, name, datas):
"""
Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending from JSON content.
"""
data_object = datas.get('object', None)
if data_object is None:
msg = ("JSON reference '{}' lacks of required 'object' variable")
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content
def serialize_to_nested(self, name, datas):
"""
Serialize given datas to a nested structure where each key create an
item and each other variable is stored as a subitem with corresponding
value (according to key index position).
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Nested dictionnary of serialized reference datas.
"""
keys = datas.get('keys', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Nested reference '{}' lacks of required 'keys' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
# Initialize context dict with reference keys
context = OrderedDict()
for k in keys:
context[k] = OrderedDict()
# Tidy each variable value to its respective item
for k, v in datas.items():
# Ignore reserved internal keywords
if k not in ('keys', 'structure', 'splitter'):
values = self.value_splitter(name, 'values', v, mode=splitter)
if len(values) != len(keys):
msg = ("Nested reference '{}' has different length for "
"values of '{}' and 'keys'")
raise SerializerError(msg.format(name, k))
# Put each value to its respective key using position index.
for i, item in enumerate(values):
ref = keys[i]
context[ref][k] = item
return context
def serialize_to_flat(self, name, datas):
"""
Serialize given datas to a flat structure ``KEY:VALUE`` where ``KEY``
comes from ``keys`` variable and ``VALUE`` comes from ``values``
variable.
This means both ``keys`` and ``values`` are required variable to be
correctly filled (each one is a string of item separated with an empty
space). Both resulting list must be the same length.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Flat dictionnay of serialized reference datas.
"""
keys = datas.get('keys', None)
values = datas.get('values', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Flat reference '{}' lacks of required 'keys' variable or "
"is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
if not values:
msg = ("Flat reference '{}' lacks of required 'values' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
values = self.value_splitter(name, 'values', values, mode=splitter)
if len(values) != len(keys):
msg = ("Flat reference have different length of 'keys' ands "
"'values' variable")
raise SerializerError(msg.format(name))
return OrderedDict(zip(keys, values))
def serialize_to_list(self, name, datas):
"""
Serialize given datas to a list structure.
List structure is very simple and only require a variable ``--items``
which is a string of values separated with an empty space. Every other
properties are ignored.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
list: List of serialized reference datas.
"""
items = datas.get('items', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if items is None:
msg = ("List reference '{}' lacks of required 'items' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
items = self.value_splitter(name, 'items', items, mode=splitter)
return items
def serialize_to_string(self, name, datas):
"""
Serialize given datas to a string.
Simply return the value from required variable``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value.
"""
value = datas.get('value', None)
if value is None:
msg = ("String reference '{}' lacks of required 'value' variable "
"or is empty")
raise SerializerError(msg.format(name))
return value
def get_meta_references(self, datas):
"""
Get manifest enabled references declaration
This required declaration is readed from
``styleguide-metas-references`` rule that require either a ``--names``
or ``--auto`` variable, each one define the mode to enable reference:
Manually
Using ``--names`` which define a list of names to enable, every
other non enabled rule will be ignored.
Section name (and so Reference name also) must not contains special
character nor ``-`` so they still be valid variable name for almost
any languages. For word separator inside name, use ``_``.
Automatic
Using ``--auto`` variable every reference rules will be enabled.
The value of this variable is not important since it is not empty.
If both of these variables are defined, the manual enable mode is used.
Arguments:
datas (dict): Data where to search for meta references declaration.
This is commonly the fully parsed manifest.
Returns:
list: A list of reference names.
"""
rule = datas.get(RULE_META_REFERENCES, {})
if not rule:
msg = "Manifest lacks of '.{}' or is empty"
raise SerializerError(msg.format(RULE_META_REFERENCES))
else:
if rule.get('names', None):
names = rule.get('names').split(" ")
elif rule.get('auto', None):
names = self.get_available_references(datas)
else:
msg = ("'.{}' either require '--names' or '--auto' variable "
"to be defined")
raise SerializerError(msg.format(RULE_META_REFERENCES))
for item in names:
self.validate_rule_name(item)
return names
def get_reference(self, datas, name):
"""
Get serialized reference datas
Because every reference is turned to a dict (that stands on ``keys``
variable that is a list of key names), every variables must have the
same exact length of word than the key name list.
A reference name starts with 'styleguide-reference-' followed by
name for reference.
A reference can contains variable ``--structure`` setted to ``"flat"``,
``"list"`` or ``"string"`` to define serialization structure.
Arguments:
datas (dict): Data where to search for reference declaration. This
is commonly the fully parsed manifest.
name (string): Reference name to get and serialize.
Returns:
collections.OrderedDict: Serialized reference datas.
"""
rule_name = '-'.join((RULE_REFERENCE, name))
structure_mode = 'nested'
if rule_name not in datas:
msg = "Unable to find enabled reference '{}'"
raise SerializerError(msg.format(name))
properties = datas.get(rule_name)
# Search for "structure" variable
if 'structure' in properties:
if properties['structure'] == 'flat':
structure_mode = 'flat'
elif properties['structure'] == 'list':
structure_mode = 'list'
elif properties['structure'] == 'string':
structure_mode = 'string'
elif properties['structure'] == 'json':
structure_mode = 'json'
elif properties['structure'] == 'nested':
pass
else:
msg = "Invalid structure mode name '{}' for reference '{}'"
raise SerializerError(msg.format(structure_mode, name))
del properties['structure']
# Validate variable names
for item in properties.keys():
self.validate_variable_name(item)
# Perform serialize according to structure mode
if structure_mode == 'flat':
context = self.serialize_to_flat(name, properties)
elif structure_mode == 'list':
context = self.serialize_to_list(name, properties)
elif structure_mode == 'string':
context = self.serialize_to_string(name, properties)
elif structure_mode == 'nested':
context = self.serialize_to_nested(name, properties)
elif structure_mode == 'json':
context = self.serialize_to_json(name, properties)
return context
def get_available_references(self, datas):
"""
Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed.
"""
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names
def get_enabled_references(self, datas, meta_references):
"""
Get enabled manifest references declarations.
Enabled references are defined through meta references declaration,
every other references are ignored.
Arguments:
datas (dict): Data where to search for reference declarations.
This is commonly the fully parsed manifest.
meta_references (list): List of enabled reference names.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
references = OrderedDict()
for section in meta_references:
references[section] = self.get_reference(datas, section)
return references
def serialize(self, datas):
"""
Serialize datas to manifest structure with metas and references.
Only references are returned, metas are assigned to attribute
``ManifestSerializer._metas``.
Arguments:
datas (dict): Data where to search for reference declarations. This
is commonly the fully parsed manifest.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
self._metas = OrderedDict({
'references': self.get_meta_references(datas),
})
return self.get_enabled_references(datas, self._metas['references'])
|
sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.validate_variable_name | python | def validate_variable_name(self, name):
if not name:
raise SerializerError("Variable name is empty".format(name))
if name[0] not in PROPERTY_ALLOWED_START:
msg = "Variable name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in PROPERTY_ALLOWED_CHARS:
msg = ("Invalid variable name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True | Validate variable name.
Arguments:
name (string): Property name.
Returns:
bool: ``True`` if variable name is valid. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L71-L94 | null | class ManifestSerializer(object):
"""
Serialize parsed CSS to data suitable to Manifest
Raises:
SerializerError: When there is an invalid syntax in datas.
Attributes:
_metas (collections.OrderedDict): Buffer to store serialized metas
from parsed source. Default is an empty dict which reseted and
filled from ``serialize`` method.
_DEFAULT_SPLITTER (string): Default value splitter used for some
structure kinds.
"""
_DEFAULT_SPLITTER = 'white-space'
def __init__(self):
self._metas = OrderedDict()
def validate_rule_name(self, name):
"""
Validate rule name.
Arguments:
name (string): Rule name.
Returns:
bool: ``True`` if rule name is valid.
"""
if not name:
raise SerializerError("Rule name is empty".format(name))
if name[0] not in RULE_ALLOWED_START:
msg = "Rule name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in RULE_ALLOWED_CHARS:
msg = ("Invalid rule name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def value_splitter(self, reference, prop, value, mode):
"""
Split a string into a list items.
Default behavior is to split on white spaces.
Arguments:
reference (string): Reference name used when raising possible
error.
prop (string): Property name used when raising possible error.
value (string): Property value to split.
mode (string): Splitter mode. Default should come from
``ManifestSerializer._DEFAULT_SPLITTER``.
Available splitter are:
* ``white-space``: Simply split a string on white spaces;
* ``json-list``: Assume the string is a JSON list to parse;
Returns:
list:
"""
items = []
if mode == 'json-list':
try:
items = json.loads(value)
except json.JSONDecodeError as e:
print(value)
msg = ("Reference '{ref}' raised JSON decoder error when "
"splitting values from '{prop}': {err}'")
raise SerializerError(msg.format(ref=reference, prop=prop,
err=e))
else:
if len(value) > 0:
items = value.split(" ")
return items
def serialize_to_json(self, name, datas):
"""
Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending from JSON content.
"""
data_object = datas.get('object', None)
if data_object is None:
msg = ("JSON reference '{}' lacks of required 'object' variable")
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content
def serialize_to_nested(self, name, datas):
    """
    Serialize reference datas to a nested structure.

    Each name from the required ``keys`` variable becomes an item, and
    every other variable is stored as a sub-item whose value is picked
    by key position.

    Arguments:
        name (string): Reference name, only used in error messages.
        datas (dict): Reference variables.

    Returns:
        dict: Nested dictionnary of serialized reference datas.

    Raises:
        SerializerError: If ``keys`` is missing/empty or a variable has
            a different item count than ``keys``.
    """
    splitter = datas.get('splitter', self._DEFAULT_SPLITTER)

    raw_keys = datas.get('keys', None)
    if not raw_keys:
        msg = ("Nested reference '{}' lacks of required 'keys' variable "
               "or is empty")
        raise SerializerError(msg.format(name))
    keys = self.value_splitter(name, 'keys', raw_keys, mode=splitter)

    # One sub-dict per key, in key order
    context = OrderedDict((key, OrderedDict()) for key in keys)

    reserved = ('keys', 'structure', 'splitter')
    for prop, raw_values in datas.items():
        if prop in reserved:
            continue
        values = self.value_splitter(name, 'values', raw_values,
                                     mode=splitter)
        if len(values) != len(keys):
            msg = ("Nested reference '{}' has different length for "
                   "values of '{}' and 'keys'")
            raise SerializerError(msg.format(name, prop))
        # Distribute each value to the key sharing its position
        for key, value in zip(keys, values):
            context[key][prop] = value

    return context
def serialize_to_flat(self, name, datas):
    """
    Serialize reference datas to a flat ``KEY: VALUE`` mapping.

    Requires both ``keys`` and ``values`` variables, each a string of
    items; once split they must have the same length, then keys are
    zipped with values.

    Arguments:
        name (string): Reference name, only used in error messages.
        datas (dict): Reference variables.

    Returns:
        collections.OrderedDict: Flat mapping of serialized datas.

    Raises:
        SerializerError: If ``keys`` or ``values`` is missing/empty or
            if their item counts differ.
    """
    splitter = datas.get('splitter', self._DEFAULT_SPLITTER)

    raw_keys = datas.get('keys', None)
    if not raw_keys:
        msg = ("Flat reference '{}' lacks of required 'keys' variable or "
               "is empty")
        raise SerializerError(msg.format(name))
    keys = self.value_splitter(name, 'keys', raw_keys, mode=splitter)

    raw_values = datas.get('values', None)
    if not raw_values:
        msg = ("Flat reference '{}' lacks of required 'values' variable "
               "or is empty")
        raise SerializerError(msg.format(name))
    values = self.value_splitter(name, 'values', raw_values, mode=splitter)

    if len(values) != len(keys):
        msg = ("Flat reference have different length of 'keys' ands "
               "'values' variable")
        raise SerializerError(msg.format(name))

    return OrderedDict(zip(keys, values))
def serialize_to_list(self, name, datas):
    """
    Serialize reference datas to a list.

    Only the required ``items`` variable is used (a string of values);
    every other variable is ignored.

    Arguments:
        name (string): Reference name, only used in error messages.
        datas (dict): Reference variables.

    Returns:
        list: List of serialized items.

    Raises:
        SerializerError: If ``items`` is missing.
    """
    raw_items = datas.get('items', None)
    if raw_items is None:
        msg = ("List reference '{}' lacks of required 'items' variable "
               "or is empty")
        raise SerializerError(msg.format(name))

    mode = datas.get('splitter', self._DEFAULT_SPLITTER)
    return self.value_splitter(name, 'items', raw_items, mode=mode)
def serialize_to_string(self, name, datas):
"""
Serialize given datas to a string.
Simply return the value from required variable``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value.
"""
value = datas.get('value', None)
if value is None:
msg = ("String reference '{}' lacks of required 'value' variable "
"or is empty")
raise SerializerError(msg.format(name))
return value
def get_meta_references(self, datas):
    """
    Return the list of enabled reference names from manifest metas.

    Reads the ``styleguide-metas-references`` rule which must define
    either a ``--names`` variable (manual mode: a white-space separated
    list of names to enable, every other rule is ignored) or a
    non-empty ``--auto`` variable (automatic mode: every available
    reference rule is enabled). Manual mode wins when both are present.

    Reference names must stay valid identifiers for almost any
    language: use ``_`` as word separator, not ``-``.

    Arguments:
        datas (dict): Data where to search for meta references
            declaration. This is commonly the fully parsed manifest.

    Returns:
        list: A list of reference names.

    Raises:
        SerializerError: If the rule is missing/empty, if neither mode
            variable is defined, or if a name fails validation.
    """
    rule = datas.get(RULE_META_REFERENCES, {})
    if not rule:
        msg = "Manifest lacks of '.{}' or is empty"
        raise SerializerError(msg.format(RULE_META_REFERENCES))

    if rule.get('names', None):
        names = rule.get('names').split(" ")
    elif rule.get('auto', None):
        names = self.get_available_references(datas)
    else:
        msg = ("'.{}' either require '--names' or '--auto' variable "
               "to be defined")
        raise SerializerError(msg.format(RULE_META_REFERENCES))

    for name in names:
        self.validate_rule_name(name)

    return names
def get_reference(self, datas, name):
    """
    Get serialized reference datas.

    A reference rule name is 'styleguide-reference-' followed by the
    reference name. A reference can contain a ``--structure`` variable
    set to ``"flat"``, ``"list"``, ``"string"``, ``"json"`` or
    ``"nested"`` (the default) to select the serialization structure.

    Arguments:
        datas (dict): Data where to search for reference declaration.
            This is commonly the fully parsed manifest.
        name (string): Reference name to get and serialize.

    Returns:
        collections.OrderedDict: Serialized reference datas.

    Raises:
        SerializerError: If the reference rule is missing, the
            structure mode is unknown, or a variable name is invalid.
    """
    rule_name = '-'.join((RULE_REFERENCE, name))
    structure_mode = 'nested'

    if rule_name not in datas:
        msg = "Unable to find enabled reference '{}'"
        raise SerializerError(msg.format(name))

    properties = datas.get(rule_name)

    # Resolve the serialization structure from the optional "structure"
    # variable, then drop it so it is not serialized as a regular value.
    if 'structure' in properties:
        if properties['structure'] in ('flat', 'list', 'string', 'json',
                                       'nested'):
            structure_mode = properties['structure']
        else:
            msg = "Invalid structure mode name '{}' for reference '{}'"
            # BUGFIX: previously this formatted ``structure_mode`` which
            # still held the default 'nested', so the message always
            # misreported the invalid value; report the value itself.
            raise SerializerError(msg.format(properties['structure'],
                                             name))
        # NOTE: this mutates the caller's ``datas`` entry in place.
        del properties['structure']

    # Validate variable names
    for item in properties.keys():
        self.validate_variable_name(item)

    # Dispatch serialization according to the resolved structure mode
    serializers = {
        'flat': self.serialize_to_flat,
        'list': self.serialize_to_list,
        'string': self.serialize_to_string,
        'nested': self.serialize_to_nested,
        'json': self.serialize_to_json,
    }
    return serializers[structure_mode](name, properties)
def get_available_references(self, datas):
    """
    Return every available reference name from the manifest.

    Any rule whose name starts with the ``nomenclature.RULE_REFERENCE``
    prefix is an available reference. No validation is performed here;
    returned names are unprefixed.

    Arguments:
        datas (dict): Data where to search for reference declarations.

    Returns:
        list: Every available reference name (real name, unprefixed).
    """
    prefix_length = len(RULE_REFERENCE) + 1
    return [
        rule_name[prefix_length:]
        for rule_name in datas
        if rule_name.startswith(RULE_REFERENCE)
    ]
def get_enabled_references(self, datas, meta_references):
    """
    Serialize every enabled manifest reference.

    References not listed in ``meta_references`` are ignored.

    Arguments:
        datas (dict): Data where to search for reference declarations.
            This is commonly the fully parsed manifest.
        meta_references (list): List of enabled reference names.

    Returns:
        collections.OrderedDict: Serialized datas keyed by reference
        name, in the order given by ``meta_references``.
    """
    return OrderedDict(
        (section, self.get_reference(datas, section))
        for section in meta_references
    )
def serialize(self, datas):
    """
    Serialize parsed manifest datas into metas and references.

    Metas are stored on the ``_metas`` attribute while the serialized
    enabled references are returned.

    Arguments:
        datas (dict): Data where to search for reference declarations.
            This is commonly the fully parsed manifest.

    Returns:
        collections.OrderedDict: Serialized enabled references datas.
    """
    enabled = self.get_meta_references(datas)
    self._metas = OrderedDict({'references': enabled})
    return self.get_enabled_references(datas, enabled)
|
sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.value_splitter | python | def value_splitter(self, reference, prop, value, mode):
items = []
if mode == 'json-list':
try:
items = json.loads(value)
except json.JSONDecodeError as e:
print(value)
msg = ("Reference '{ref}' raised JSON decoder error when "
"splitting values from '{prop}': {err}'")
raise SerializerError(msg.format(ref=reference, prop=prop,
err=e))
else:
if len(value) > 0:
items = value.split(" ")
return items | Split a string into a list items.
Default behavior is to split on white spaces.
Arguments:
reference (string): Reference name used when raising possible
error.
prop (string): Property name used when raising possible error.
value (string): Property value to split.
mode (string): Splitter mode. Default should come from
``ManifestSerializer._DEFAULT_SPLITTER``.
Available splitter are:
* ``white-space``: Simply split a string on white spaces;
* ``json-list``: Assume the string is a JSON list to parse;
Returns:
list: | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L96-L134 | null | class ManifestSerializer(object):
"""
Serialize parsed CSS to data suitable to Manifest
Raises:
SerializerError: When there is an invalid syntax in datas.
Attributes:
_metas (collections.OrderedDict): Buffer to store serialized metas
from parsed source. Default is an empty dict which reseted and
filled from ``serialize`` method.
_DEFAULT_SPLITTER (string): Default value splitter used for some
structure kinds.
"""
_DEFAULT_SPLITTER = 'white-space'
def __init__(self):
self._metas = OrderedDict()
def validate_rule_name(self, name):
    """
    Validate a rule name.

    Arguments:
        name (string): Rule name.

    Returns:
        bool: ``True`` if rule name is valid.

    Raises:
        SerializerError: If the name is empty, does not start with an
            allowed character or contains a forbidden character.
    """
    if not name:
        # BUGFIX: dropped a no-op ``.format(name)`` call on a message
        # string without any placeholder; the message is unchanged.
        raise SerializerError("Rule name is empty")

    if name[0] not in RULE_ALLOWED_START:
        msg = "Rule name '{}' must starts with a letter"
        raise SerializerError(msg.format(name))

    for item in name:
        if item not in RULE_ALLOWED_CHARS:
            msg = ("Invalid rule name '{}': it must only contains "
                   "letters, numbers and '_' character")
            raise SerializerError(msg.format(name))

    return True
def validate_variable_name(self, name):
    """
    Validate a variable name.

    Arguments:
        name (string): Property name.

    Returns:
        bool: ``True`` if variable name is valid.

    Raises:
        SerializerError: If the name is empty, does not start with an
            allowed character or contains a forbidden character.
    """
    if not name:
        # BUGFIX: dropped a no-op ``.format(name)`` call on a message
        # string without any placeholder; the message is unchanged.
        raise SerializerError("Variable name is empty")

    if name[0] not in PROPERTY_ALLOWED_START:
        msg = "Variable name '{}' must starts with a letter"
        raise SerializerError(msg.format(name))

    for item in name:
        if item not in PROPERTY_ALLOWED_CHARS:
            msg = ("Invalid variable name '{}': it must only contains "
                   "letters, numbers and '_' character")
            raise SerializerError(msg.format(name))

    return True
def serialize_to_json(self, name, datas):
"""
Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending from JSON content.
"""
data_object = datas.get('object', None)
if data_object is None:
msg = ("JSON reference '{}' lacks of required 'object' variable")
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content
def serialize_to_nested(self, name, datas):
"""
Serialize given datas to a nested structure where each key create an
item and each other variable is stored as a subitem with corresponding
value (according to key index position).
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Nested dictionnary of serialized reference datas.
"""
keys = datas.get('keys', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Nested reference '{}' lacks of required 'keys' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
# Initialize context dict with reference keys
context = OrderedDict()
for k in keys:
context[k] = OrderedDict()
# Tidy each variable value to its respective item
for k, v in datas.items():
# Ignore reserved internal keywords
if k not in ('keys', 'structure', 'splitter'):
values = self.value_splitter(name, 'values', v, mode=splitter)
if len(values) != len(keys):
msg = ("Nested reference '{}' has different length for "
"values of '{}' and 'keys'")
raise SerializerError(msg.format(name, k))
# Put each value to its respective key using position index.
for i, item in enumerate(values):
ref = keys[i]
context[ref][k] = item
return context
def serialize_to_flat(self, name, datas):
"""
Serialize given datas to a flat structure ``KEY:VALUE`` where ``KEY``
comes from ``keys`` variable and ``VALUE`` comes from ``values``
variable.
This means both ``keys`` and ``values`` are required variable to be
correctly filled (each one is a string of item separated with an empty
space). Both resulting list must be the same length.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Flat dictionnay of serialized reference datas.
"""
keys = datas.get('keys', None)
values = datas.get('values', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Flat reference '{}' lacks of required 'keys' variable or "
"is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
if not values:
msg = ("Flat reference '{}' lacks of required 'values' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
values = self.value_splitter(name, 'values', values, mode=splitter)
if len(values) != len(keys):
msg = ("Flat reference have different length of 'keys' ands "
"'values' variable")
raise SerializerError(msg.format(name))
return OrderedDict(zip(keys, values))
def serialize_to_list(self, name, datas):
"""
Serialize given datas to a list structure.
List structure is very simple and only require a variable ``--items``
which is a string of values separated with an empty space. Every other
properties are ignored.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
list: List of serialized reference datas.
"""
items = datas.get('items', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if items is None:
msg = ("List reference '{}' lacks of required 'items' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
items = self.value_splitter(name, 'items', items, mode=splitter)
return items
def serialize_to_string(self, name, datas):
"""
Serialize given datas to a string.
Simply return the value from required variable``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value.
"""
value = datas.get('value', None)
if value is None:
msg = ("String reference '{}' lacks of required 'value' variable "
"or is empty")
raise SerializerError(msg.format(name))
return value
def get_meta_references(self, datas):
"""
Get manifest enabled references declaration
This required declaration is readed from
``styleguide-metas-references`` rule that require either a ``--names``
or ``--auto`` variable, each one define the mode to enable reference:
Manually
Using ``--names`` which define a list of names to enable, every
other non enabled rule will be ignored.
Section name (and so Reference name also) must not contains special
character nor ``-`` so they still be valid variable name for almost
any languages. For word separator inside name, use ``_``.
Automatic
Using ``--auto`` variable every reference rules will be enabled.
The value of this variable is not important since it is not empty.
If both of these variables are defined, the manual enable mode is used.
Arguments:
datas (dict): Data where to search for meta references declaration.
This is commonly the fully parsed manifest.
Returns:
list: A list of reference names.
"""
rule = datas.get(RULE_META_REFERENCES, {})
if not rule:
msg = "Manifest lacks of '.{}' or is empty"
raise SerializerError(msg.format(RULE_META_REFERENCES))
else:
if rule.get('names', None):
names = rule.get('names').split(" ")
elif rule.get('auto', None):
names = self.get_available_references(datas)
else:
msg = ("'.{}' either require '--names' or '--auto' variable "
"to be defined")
raise SerializerError(msg.format(RULE_META_REFERENCES))
for item in names:
self.validate_rule_name(item)
return names
def get_reference(self, datas, name):
    """
    Get serialized reference datas.

    A reference rule name is 'styleguide-reference-' followed by the
    reference name. A reference can contain a ``--structure`` variable
    set to ``"flat"``, ``"list"``, ``"string"``, ``"json"`` or
    ``"nested"`` (the default) to select the serialization structure.

    Arguments:
        datas (dict): Data where to search for reference declaration.
            This is commonly the fully parsed manifest.
        name (string): Reference name to get and serialize.

    Returns:
        collections.OrderedDict: Serialized reference datas.

    Raises:
        SerializerError: If the reference rule is missing, the
            structure mode is unknown, or a variable name is invalid.
    """
    rule_name = '-'.join((RULE_REFERENCE, name))
    structure_mode = 'nested'

    if rule_name not in datas:
        msg = "Unable to find enabled reference '{}'"
        raise SerializerError(msg.format(name))

    properties = datas.get(rule_name)

    # Resolve the serialization structure from the optional "structure"
    # variable, then drop it so it is not serialized as a regular value.
    if 'structure' in properties:
        if properties['structure'] in ('flat', 'list', 'string', 'json',
                                       'nested'):
            structure_mode = properties['structure']
        else:
            msg = "Invalid structure mode name '{}' for reference '{}'"
            # BUGFIX: previously this formatted ``structure_mode`` which
            # still held the default 'nested', so the message always
            # misreported the invalid value; report the value itself.
            raise SerializerError(msg.format(properties['structure'],
                                             name))
        # NOTE: this mutates the caller's ``datas`` entry in place.
        del properties['structure']

    # Validate variable names
    for item in properties.keys():
        self.validate_variable_name(item)

    # Dispatch serialization according to the resolved structure mode
    serializers = {
        'flat': self.serialize_to_flat,
        'list': self.serialize_to_list,
        'string': self.serialize_to_string,
        'nested': self.serialize_to_nested,
        'json': self.serialize_to_json,
    }
    return serializers[structure_mode](name, properties)
def get_available_references(self, datas):
"""
Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed.
"""
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names
def get_enabled_references(self, datas, meta_references):
"""
Get enabled manifest references declarations.
Enabled references are defined through meta references declaration,
every other references are ignored.
Arguments:
datas (dict): Data where to search for reference declarations.
This is commonly the fully parsed manifest.
meta_references (list): List of enabled reference names.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
references = OrderedDict()
for section in meta_references:
references[section] = self.get_reference(datas, section)
return references
def serialize(self, datas):
"""
Serialize datas to manifest structure with metas and references.
Only references are returned, metas are assigned to attribute
``ManifestSerializer._metas``.
Arguments:
datas (dict): Data where to search for reference declarations. This
is commonly the fully parsed manifest.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
self._metas = OrderedDict({
'references': self.get_meta_references(datas),
})
return self.get_enabled_references(datas, self._metas['references'])
|
sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.serialize_to_json | python | def serialize_to_json(self, name, datas):
data_object = datas.get('object', None)
if data_object is None:
msg = ("JSON reference '{}' lacks of required 'object' variable")
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content | Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending from JSON content. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L136-L159 | null | class ManifestSerializer(object):
"""
Serialize parsed CSS to data suitable to Manifest
Raises:
SerializerError: When there is an invalid syntax in datas.
Attributes:
_metas (collections.OrderedDict): Buffer to store serialized metas
from parsed source. Default is an empty dict which reseted and
filled from ``serialize`` method.
_DEFAULT_SPLITTER (string): Default value splitter used for some
structure kinds.
"""
_DEFAULT_SPLITTER = 'white-space'
def __init__(self):
self._metas = OrderedDict()
def validate_rule_name(self, name):
    """
    Validate a rule name.

    Arguments:
        name (string): Rule name.

    Returns:
        bool: ``True`` if rule name is valid.

    Raises:
        SerializerError: If the name is empty, does not start with an
            allowed character or contains a forbidden character.
    """
    if not name:
        # BUGFIX: dropped a no-op ``.format(name)`` call on a message
        # string without any placeholder; the message is unchanged.
        raise SerializerError("Rule name is empty")

    if name[0] not in RULE_ALLOWED_START:
        msg = "Rule name '{}' must starts with a letter"
        raise SerializerError(msg.format(name))

    for item in name:
        if item not in RULE_ALLOWED_CHARS:
            msg = ("Invalid rule name '{}': it must only contains "
                   "letters, numbers and '_' character")
            raise SerializerError(msg.format(name))

    return True
def validate_variable_name(self, name):
    """
    Validate a variable name.

    Arguments:
        name (string): Property name.

    Returns:
        bool: ``True`` if variable name is valid.

    Raises:
        SerializerError: If the name is empty, does not start with an
            allowed character or contains a forbidden character.
    """
    if not name:
        # BUGFIX: dropped a no-op ``.format(name)`` call on a message
        # string without any placeholder; the message is unchanged.
        raise SerializerError("Variable name is empty")

    if name[0] not in PROPERTY_ALLOWED_START:
        msg = "Variable name '{}' must starts with a letter"
        raise SerializerError(msg.format(name))

    for item in name:
        if item not in PROPERTY_ALLOWED_CHARS:
            msg = ("Invalid variable name '{}': it must only contains "
                   "letters, numbers and '_' character")
            raise SerializerError(msg.format(name))

    return True
def value_splitter(self, reference, prop, value, mode):
    """
    Split a string into a list of items.

    Default behavior is to split on white spaces.

    Arguments:
        reference (string): Reference name used when raising possible
            error.
        prop (string): Property name used when raising possible error.
        value (string): Property value to split.
        mode (string): Splitter mode. Default should come from
            ``ManifestSerializer._DEFAULT_SPLITTER``.

            Available splitter are:

            * ``white-space``: Simply split a string on white spaces;
            * ``json-list``: Assume the string is a JSON list to parse;

    Returns:
        list: List of splitted items.

    Raises:
        SerializerError: In ``json-list`` mode, if the value is not
            valid JSON.
    """
    items = []

    if mode == 'json-list':
        try:
            items = json.loads(value)
        except json.JSONDecodeError as e:
            # BUGFIX: removed a leftover debug ``print(value)`` here;
            # libraries should not write to stdout and the faulty value
            # is already carried by the raised error below.
            msg = ("Reference '{ref}' raised JSON decoder error when "
                   "splitting values from '{prop}': {err}'")
            raise SerializerError(msg.format(ref=reference, prop=prop,
                                             err=e))
    else:
        if len(value) > 0:
            items = value.split(" ")

    return items
def serialize_to_nested(self, name, datas):
"""
Serialize given datas to a nested structure where each key create an
item and each other variable is stored as a subitem with corresponding
value (according to key index position).
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Nested dictionnary of serialized reference datas.
"""
keys = datas.get('keys', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Nested reference '{}' lacks of required 'keys' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
# Initialize context dict with reference keys
context = OrderedDict()
for k in keys:
context[k] = OrderedDict()
# Tidy each variable value to its respective item
for k, v in datas.items():
# Ignore reserved internal keywords
if k not in ('keys', 'structure', 'splitter'):
values = self.value_splitter(name, 'values', v, mode=splitter)
if len(values) != len(keys):
msg = ("Nested reference '{}' has different length for "
"values of '{}' and 'keys'")
raise SerializerError(msg.format(name, k))
# Put each value to its respective key using position index.
for i, item in enumerate(values):
ref = keys[i]
context[ref][k] = item
return context
def serialize_to_flat(self, name, datas):
"""
Serialize given datas to a flat structure ``KEY:VALUE`` where ``KEY``
comes from ``keys`` variable and ``VALUE`` comes from ``values``
variable.
This means both ``keys`` and ``values`` are required variable to be
correctly filled (each one is a string of item separated with an empty
space). Both resulting list must be the same length.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Flat dictionnay of serialized reference datas.
"""
keys = datas.get('keys', None)
values = datas.get('values', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Flat reference '{}' lacks of required 'keys' variable or "
"is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
if not values:
msg = ("Flat reference '{}' lacks of required 'values' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
values = self.value_splitter(name, 'values', values, mode=splitter)
if len(values) != len(keys):
msg = ("Flat reference have different length of 'keys' ands "
"'values' variable")
raise SerializerError(msg.format(name))
return OrderedDict(zip(keys, values))
def serialize_to_list(self, name, datas):
"""
Serialize given datas to a list structure.
List structure is very simple and only require a variable ``--items``
which is a string of values separated with an empty space. Every other
properties are ignored.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
list: List of serialized reference datas.
"""
items = datas.get('items', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if items is None:
msg = ("List reference '{}' lacks of required 'items' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
items = self.value_splitter(name, 'items', items, mode=splitter)
return items
def serialize_to_string(self, name, datas):
"""
Serialize given datas to a string.
Simply return the value from required variable``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value.
"""
value = datas.get('value', None)
if value is None:
msg = ("String reference '{}' lacks of required 'value' variable "
"or is empty")
raise SerializerError(msg.format(name))
return value
def get_meta_references(self, datas):
"""
Get manifest enabled references declaration
This required declaration is readed from
``styleguide-metas-references`` rule that require either a ``--names``
or ``--auto`` variable, each one define the mode to enable reference:
Manually
Using ``--names`` which define a list of names to enable, every
other non enabled rule will be ignored.
Section name (and so Reference name also) must not contains special
character nor ``-`` so they still be valid variable name for almost
any languages. For word separator inside name, use ``_``.
Automatic
Using ``--auto`` variable every reference rules will be enabled.
The value of this variable is not important since it is not empty.
If both of these variables are defined, the manual enable mode is used.
Arguments:
datas (dict): Data where to search for meta references declaration.
This is commonly the fully parsed manifest.
Returns:
list: A list of reference names.
"""
rule = datas.get(RULE_META_REFERENCES, {})
if not rule:
msg = "Manifest lacks of '.{}' or is empty"
raise SerializerError(msg.format(RULE_META_REFERENCES))
else:
if rule.get('names', None):
names = rule.get('names').split(" ")
elif rule.get('auto', None):
names = self.get_available_references(datas)
else:
msg = ("'.{}' either require '--names' or '--auto' variable "
"to be defined")
raise SerializerError(msg.format(RULE_META_REFERENCES))
for item in names:
self.validate_rule_name(item)
return names
def get_reference(self, datas, name):
    """
    Get serialized reference datas.

    A reference rule name is 'styleguide-reference-' followed by the
    reference name. A reference can contain a ``--structure`` variable
    set to ``"flat"``, ``"list"``, ``"string"``, ``"json"`` or
    ``"nested"`` (the default) to select the serialization structure.

    Arguments:
        datas (dict): Data where to search for reference declaration.
            This is commonly the fully parsed manifest.
        name (string): Reference name to get and serialize.

    Returns:
        collections.OrderedDict: Serialized reference datas.

    Raises:
        SerializerError: If the reference rule is missing, the
            structure mode is unknown, or a variable name is invalid.
    """
    rule_name = '-'.join((RULE_REFERENCE, name))
    structure_mode = 'nested'

    if rule_name not in datas:
        msg = "Unable to find enabled reference '{}'"
        raise SerializerError(msg.format(name))

    properties = datas.get(rule_name)

    # Resolve the serialization structure from the optional "structure"
    # variable, then drop it so it is not serialized as a regular value.
    if 'structure' in properties:
        if properties['structure'] in ('flat', 'list', 'string', 'json',
                                       'nested'):
            structure_mode = properties['structure']
        else:
            msg = "Invalid structure mode name '{}' for reference '{}'"
            # BUGFIX: previously this formatted ``structure_mode`` which
            # still held the default 'nested', so the message always
            # misreported the invalid value; report the value itself.
            raise SerializerError(msg.format(properties['structure'],
                                             name))
        # NOTE: this mutates the caller's ``datas`` entry in place.
        del properties['structure']

    # Validate variable names
    for item in properties.keys():
        self.validate_variable_name(item)

    # Dispatch serialization according to the resolved structure mode
    serializers = {
        'flat': self.serialize_to_flat,
        'list': self.serialize_to_list,
        'string': self.serialize_to_string,
        'nested': self.serialize_to_nested,
        'json': self.serialize_to_json,
    }
    return serializers[structure_mode](name, properties)
def get_available_references(self, datas):
"""
Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed.
"""
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names
def get_enabled_references(self, datas, meta_references):
"""
Get enabled manifest references declarations.
Enabled references are defined through meta references declaration,
every other references are ignored.
Arguments:
datas (dict): Data where to search for reference declarations.
This is commonly the fully parsed manifest.
meta_references (list): List of enabled reference names.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
references = OrderedDict()
for section in meta_references:
references[section] = self.get_reference(datas, section)
return references
def serialize(self, datas):
"""
Serialize datas to manifest structure with metas and references.
Only references are returned, metas are assigned to attribute
``ManifestSerializer._metas``.
Arguments:
datas (dict): Data where to search for reference declarations. This
is commonly the fully parsed manifest.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
self._metas = OrderedDict({
'references': self.get_meta_references(datas),
})
return self.get_enabled_references(datas, self._metas['references'])
|
sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.serialize_to_nested | python | def serialize_to_nested(self, name, datas):
keys = datas.get('keys', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Nested reference '{}' lacks of required 'keys' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
# Initialize context dict with reference keys
context = OrderedDict()
for k in keys:
context[k] = OrderedDict()
# Tidy each variable value to its respective item
for k, v in datas.items():
# Ignore reserved internal keywords
if k not in ('keys', 'structure', 'splitter'):
values = self.value_splitter(name, 'values', v, mode=splitter)
if len(values) != len(keys):
msg = ("Nested reference '{}' has different length for "
"values of '{}' and 'keys'")
raise SerializerError(msg.format(name, k))
# Put each value to its respective key using position index.
for i, item in enumerate(values):
ref = keys[i]
context[ref][k] = item
return context | Serialize given datas to a nested structure where each key create an
item and each other variable is stored as a subitem with corresponding
value (according to key index position).
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Nested dictionnary of serialized reference datas. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L161-L205 | [
"def value_splitter(self, reference, prop, value, mode):\n \"\"\"\n Split a string into a list items.\n\n Default behavior is to split on white spaces.\n\n\n Arguments:\n reference (string): Reference name used when raising possible\n error.\n prop (string): Property name used when raising possible error.\n value (string): Property value to split.\n mode (string): Splitter mode. Default should come from\n ``ManifestSerializer._DEFAULT_SPLITTER``.\n\n Available splitter are:\n\n * ``white-space``: Simply split a string on white spaces;\n * ``json-list``: Assume the string is a JSON list to parse;\n\n Returns:\n list:\n \"\"\"\n items = []\n\n if mode == 'json-list':\n try:\n items = json.loads(value)\n except json.JSONDecodeError as e:\n print(value)\n msg = (\"Reference '{ref}' raised JSON decoder error when \"\n \"splitting values from '{prop}': {err}'\")\n raise SerializerError(msg.format(ref=reference, prop=prop,\n err=e))\n else:\n if len(value) > 0:\n items = value.split(\" \")\n\n return items\n"
] | class ManifestSerializer(object):
"""
Serialize parsed CSS to data suitable to Manifest
Raises:
SerializerError: When there is an invalid syntax in datas.
Attributes:
_metas (collections.OrderedDict): Buffer to store serialized metas
from parsed source. Default is an empty dict which reseted and
filled from ``serialize`` method.
_DEFAULT_SPLITTER (string): Default value splitter used for some
structure kinds.
"""
_DEFAULT_SPLITTER = 'white-space'
def __init__(self):
self._metas = OrderedDict()
def validate_rule_name(self, name):
"""
Validate rule name.
Arguments:
name (string): Rule name.
Returns:
bool: ``True`` if rule name is valid.
"""
if not name:
raise SerializerError("Rule name is empty".format(name))
if name[0] not in RULE_ALLOWED_START:
msg = "Rule name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in RULE_ALLOWED_CHARS:
msg = ("Invalid rule name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def validate_variable_name(self, name):
"""
Validate variable name.
Arguments:
name (string): Property name.
Returns:
bool: ``True`` if variable name is valid.
"""
if not name:
raise SerializerError("Variable name is empty".format(name))
if name[0] not in PROPERTY_ALLOWED_START:
msg = "Variable name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in PROPERTY_ALLOWED_CHARS:
msg = ("Invalid variable name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def value_splitter(self, reference, prop, value, mode):
"""
Split a string into a list items.
Default behavior is to split on white spaces.
Arguments:
reference (string): Reference name used when raising possible
error.
prop (string): Property name used when raising possible error.
value (string): Property value to split.
mode (string): Splitter mode. Default should come from
``ManifestSerializer._DEFAULT_SPLITTER``.
Available splitter are:
* ``white-space``: Simply split a string on white spaces;
* ``json-list``: Assume the string is a JSON list to parse;
Returns:
list:
"""
items = []
if mode == 'json-list':
try:
items = json.loads(value)
except json.JSONDecodeError as e:
print(value)
msg = ("Reference '{ref}' raised JSON decoder error when "
"splitting values from '{prop}': {err}'")
raise SerializerError(msg.format(ref=reference, prop=prop,
err=e))
else:
if len(value) > 0:
items = value.split(" ")
return items
def serialize_to_json(self, name, datas):
"""
Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending from JSON content.
"""
data_object = datas.get('object', None)
if data_object is None:
msg = ("JSON reference '{}' lacks of required 'object' variable")
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content
def serialize_to_flat(self, name, datas):
"""
Serialize given datas to a flat structure ``KEY:VALUE`` where ``KEY``
comes from ``keys`` variable and ``VALUE`` comes from ``values``
variable.
This means both ``keys`` and ``values`` are required variable to be
correctly filled (each one is a string of item separated with an empty
space). Both resulting list must be the same length.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Flat dictionnay of serialized reference datas.
"""
keys = datas.get('keys', None)
values = datas.get('values', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Flat reference '{}' lacks of required 'keys' variable or "
"is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
if not values:
msg = ("Flat reference '{}' lacks of required 'values' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
values = self.value_splitter(name, 'values', values, mode=splitter)
if len(values) != len(keys):
msg = ("Flat reference have different length of 'keys' ands "
"'values' variable")
raise SerializerError(msg.format(name))
return OrderedDict(zip(keys, values))
def serialize_to_list(self, name, datas):
"""
Serialize given datas to a list structure.
List structure is very simple and only require a variable ``--items``
which is a string of values separated with an empty space. Every other
properties are ignored.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
list: List of serialized reference datas.
"""
items = datas.get('items', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if items is None:
msg = ("List reference '{}' lacks of required 'items' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
items = self.value_splitter(name, 'items', items, mode=splitter)
return items
def serialize_to_string(self, name, datas):
"""
Serialize given datas to a string.
Simply return the value from required variable``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value.
"""
value = datas.get('value', None)
if value is None:
msg = ("String reference '{}' lacks of required 'value' variable "
"or is empty")
raise SerializerError(msg.format(name))
return value
def get_meta_references(self, datas):
"""
Get manifest enabled references declaration
This required declaration is readed from
``styleguide-metas-references`` rule that require either a ``--names``
or ``--auto`` variable, each one define the mode to enable reference:
Manually
Using ``--names`` which define a list of names to enable, every
other non enabled rule will be ignored.
Section name (and so Reference name also) must not contains special
character nor ``-`` so they still be valid variable name for almost
any languages. For word separator inside name, use ``_``.
Automatic
Using ``--auto`` variable every reference rules will be enabled.
The value of this variable is not important since it is not empty.
If both of these variables are defined, the manual enable mode is used.
Arguments:
datas (dict): Data where to search for meta references declaration.
This is commonly the fully parsed manifest.
Returns:
list: A list of reference names.
"""
rule = datas.get(RULE_META_REFERENCES, {})
if not rule:
msg = "Manifest lacks of '.{}' or is empty"
raise SerializerError(msg.format(RULE_META_REFERENCES))
else:
if rule.get('names', None):
names = rule.get('names').split(" ")
elif rule.get('auto', None):
names = self.get_available_references(datas)
else:
msg = ("'.{}' either require '--names' or '--auto' variable "
"to be defined")
raise SerializerError(msg.format(RULE_META_REFERENCES))
for item in names:
self.validate_rule_name(item)
return names
def get_reference(self, datas, name):
"""
Get serialized reference datas
Because every reference is turned to a dict (that stands on ``keys``
variable that is a list of key names), every variables must have the
same exact length of word than the key name list.
A reference name starts with 'styleguide-reference-' followed by
name for reference.
A reference can contains variable ``--structure`` setted to ``"flat"``,
``"list"`` or ``"string"`` to define serialization structure.
Arguments:
datas (dict): Data where to search for reference declaration. This
is commonly the fully parsed manifest.
name (string): Reference name to get and serialize.
Returns:
collections.OrderedDict: Serialized reference datas.
"""
rule_name = '-'.join((RULE_REFERENCE, name))
structure_mode = 'nested'
if rule_name not in datas:
msg = "Unable to find enabled reference '{}'"
raise SerializerError(msg.format(name))
properties = datas.get(rule_name)
# Search for "structure" variable
if 'structure' in properties:
if properties['structure'] == 'flat':
structure_mode = 'flat'
elif properties['structure'] == 'list':
structure_mode = 'list'
elif properties['structure'] == 'string':
structure_mode = 'string'
elif properties['structure'] == 'json':
structure_mode = 'json'
elif properties['structure'] == 'nested':
pass
else:
msg = "Invalid structure mode name '{}' for reference '{}'"
raise SerializerError(msg.format(structure_mode, name))
del properties['structure']
# Validate variable names
for item in properties.keys():
self.validate_variable_name(item)
# Perform serialize according to structure mode
if structure_mode == 'flat':
context = self.serialize_to_flat(name, properties)
elif structure_mode == 'list':
context = self.serialize_to_list(name, properties)
elif structure_mode == 'string':
context = self.serialize_to_string(name, properties)
elif structure_mode == 'nested':
context = self.serialize_to_nested(name, properties)
elif structure_mode == 'json':
context = self.serialize_to_json(name, properties)
return context
def get_available_references(self, datas):
"""
Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed.
"""
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names
def get_enabled_references(self, datas, meta_references):
"""
Get enabled manifest references declarations.
Enabled references are defined through meta references declaration,
every other references are ignored.
Arguments:
datas (dict): Data where to search for reference declarations.
This is commonly the fully parsed manifest.
meta_references (list): List of enabled reference names.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
references = OrderedDict()
for section in meta_references:
references[section] = self.get_reference(datas, section)
return references
def serialize(self, datas):
"""
Serialize datas to manifest structure with metas and references.
Only references are returned, metas are assigned to attribute
``ManifestSerializer._metas``.
Arguments:
datas (dict): Data where to search for reference declarations. This
is commonly the fully parsed manifest.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
self._metas = OrderedDict({
'references': self.get_meta_references(datas),
})
return self.get_enabled_references(datas, self._metas['references'])
|
sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.serialize_to_flat | python | def serialize_to_flat(self, name, datas):
keys = datas.get('keys', None)
values = datas.get('values', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Flat reference '{}' lacks of required 'keys' variable or "
"is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
if not values:
msg = ("Flat reference '{}' lacks of required 'values' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
values = self.value_splitter(name, 'values', values, mode=splitter)
if len(values) != len(keys):
msg = ("Flat reference have different length of 'keys' ands "
"'values' variable")
raise SerializerError(msg.format(name))
return OrderedDict(zip(keys, values)) | Serialize given datas to a flat structure ``KEY:VALUE`` where ``KEY``
comes from ``keys`` variable and ``VALUE`` comes from ``values``
variable.
This means both ``keys`` and ``values`` are required variable to be
correctly filled (each one is a string of item separated with an empty
space). Both resulting list must be the same length.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Flat dictionnay of serialized reference datas. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L207-L247 | [
"def value_splitter(self, reference, prop, value, mode):\n \"\"\"\n Split a string into a list items.\n\n Default behavior is to split on white spaces.\n\n\n Arguments:\n reference (string): Reference name used when raising possible\n error.\n prop (string): Property name used when raising possible error.\n value (string): Property value to split.\n mode (string): Splitter mode. Default should come from\n ``ManifestSerializer._DEFAULT_SPLITTER``.\n\n Available splitter are:\n\n * ``white-space``: Simply split a string on white spaces;\n * ``json-list``: Assume the string is a JSON list to parse;\n\n Returns:\n list:\n \"\"\"\n items = []\n\n if mode == 'json-list':\n try:\n items = json.loads(value)\n except json.JSONDecodeError as e:\n print(value)\n msg = (\"Reference '{ref}' raised JSON decoder error when \"\n \"splitting values from '{prop}': {err}'\")\n raise SerializerError(msg.format(ref=reference, prop=prop,\n err=e))\n else:\n if len(value) > 0:\n items = value.split(\" \")\n\n return items\n"
] | class ManifestSerializer(object):
"""
Serialize parsed CSS to data suitable to Manifest
Raises:
SerializerError: When there is an invalid syntax in datas.
Attributes:
_metas (collections.OrderedDict): Buffer to store serialized metas
from parsed source. Default is an empty dict which reseted and
filled from ``serialize`` method.
_DEFAULT_SPLITTER (string): Default value splitter used for some
structure kinds.
"""
_DEFAULT_SPLITTER = 'white-space'
def __init__(self):
self._metas = OrderedDict()
def validate_rule_name(self, name):
"""
Validate rule name.
Arguments:
name (string): Rule name.
Returns:
bool: ``True`` if rule name is valid.
"""
if not name:
raise SerializerError("Rule name is empty".format(name))
if name[0] not in RULE_ALLOWED_START:
msg = "Rule name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in RULE_ALLOWED_CHARS:
msg = ("Invalid rule name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def validate_variable_name(self, name):
"""
Validate variable name.
Arguments:
name (string): Property name.
Returns:
bool: ``True`` if variable name is valid.
"""
if not name:
raise SerializerError("Variable name is empty".format(name))
if name[0] not in PROPERTY_ALLOWED_START:
msg = "Variable name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in PROPERTY_ALLOWED_CHARS:
msg = ("Invalid variable name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def value_splitter(self, reference, prop, value, mode):
"""
Split a string into a list items.
Default behavior is to split on white spaces.
Arguments:
reference (string): Reference name used when raising possible
error.
prop (string): Property name used when raising possible error.
value (string): Property value to split.
mode (string): Splitter mode. Default should come from
``ManifestSerializer._DEFAULT_SPLITTER``.
Available splitter are:
* ``white-space``: Simply split a string on white spaces;
* ``json-list``: Assume the string is a JSON list to parse;
Returns:
list:
"""
items = []
if mode == 'json-list':
try:
items = json.loads(value)
except json.JSONDecodeError as e:
print(value)
msg = ("Reference '{ref}' raised JSON decoder error when "
"splitting values from '{prop}': {err}'")
raise SerializerError(msg.format(ref=reference, prop=prop,
err=e))
else:
if len(value) > 0:
items = value.split(" ")
return items
def serialize_to_json(self, name, datas):
"""
Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending from JSON content.
"""
data_object = datas.get('object', None)
if data_object is None:
msg = ("JSON reference '{}' lacks of required 'object' variable")
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content
def serialize_to_nested(self, name, datas):
"""
Serialize given datas to a nested structure where each key create an
item and each other variable is stored as a subitem with corresponding
value (according to key index position).
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Nested dictionnary of serialized reference datas.
"""
keys = datas.get('keys', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Nested reference '{}' lacks of required 'keys' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
# Initialize context dict with reference keys
context = OrderedDict()
for k in keys:
context[k] = OrderedDict()
# Tidy each variable value to its respective item
for k, v in datas.items():
# Ignore reserved internal keywords
if k not in ('keys', 'structure', 'splitter'):
values = self.value_splitter(name, 'values', v, mode=splitter)
if len(values) != len(keys):
msg = ("Nested reference '{}' has different length for "
"values of '{}' and 'keys'")
raise SerializerError(msg.format(name, k))
# Put each value to its respective key using position index.
for i, item in enumerate(values):
ref = keys[i]
context[ref][k] = item
return context
def serialize_to_list(self, name, datas):
"""
Serialize given datas to a list structure.
List structure is very simple and only require a variable ``--items``
which is a string of values separated with an empty space. Every other
properties are ignored.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
list: List of serialized reference datas.
"""
items = datas.get('items', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if items is None:
msg = ("List reference '{}' lacks of required 'items' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
items = self.value_splitter(name, 'items', items, mode=splitter)
return items
def serialize_to_string(self, name, datas):
"""
Serialize given datas to a string.
Simply return the value from required variable``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value.
"""
value = datas.get('value', None)
if value is None:
msg = ("String reference '{}' lacks of required 'value' variable "
"or is empty")
raise SerializerError(msg.format(name))
return value
def get_meta_references(self, datas):
"""
Get manifest enabled references declaration
This required declaration is readed from
``styleguide-metas-references`` rule that require either a ``--names``
or ``--auto`` variable, each one define the mode to enable reference:
Manually
Using ``--names`` which define a list of names to enable, every
other non enabled rule will be ignored.
Section name (and so Reference name also) must not contains special
character nor ``-`` so they still be valid variable name for almost
any languages. For word separator inside name, use ``_``.
Automatic
Using ``--auto`` variable every reference rules will be enabled.
The value of this variable is not important since it is not empty.
If both of these variables are defined, the manual enable mode is used.
Arguments:
datas (dict): Data where to search for meta references declaration.
This is commonly the fully parsed manifest.
Returns:
list: A list of reference names.
"""
rule = datas.get(RULE_META_REFERENCES, {})
if not rule:
msg = "Manifest lacks of '.{}' or is empty"
raise SerializerError(msg.format(RULE_META_REFERENCES))
else:
if rule.get('names', None):
names = rule.get('names').split(" ")
elif rule.get('auto', None):
names = self.get_available_references(datas)
else:
msg = ("'.{}' either require '--names' or '--auto' variable "
"to be defined")
raise SerializerError(msg.format(RULE_META_REFERENCES))
for item in names:
self.validate_rule_name(item)
return names
def get_reference(self, datas, name):
"""
Get serialized reference datas
Because every reference is turned to a dict (that stands on ``keys``
variable that is a list of key names), every variables must have the
same exact length of word than the key name list.
A reference name starts with 'styleguide-reference-' followed by
name for reference.
A reference can contains variable ``--structure`` setted to ``"flat"``,
``"list"`` or ``"string"`` to define serialization structure.
Arguments:
datas (dict): Data where to search for reference declaration. This
is commonly the fully parsed manifest.
name (string): Reference name to get and serialize.
Returns:
collections.OrderedDict: Serialized reference datas.
"""
rule_name = '-'.join((RULE_REFERENCE, name))
structure_mode = 'nested'
if rule_name not in datas:
msg = "Unable to find enabled reference '{}'"
raise SerializerError(msg.format(name))
properties = datas.get(rule_name)
# Search for "structure" variable
if 'structure' in properties:
if properties['structure'] == 'flat':
structure_mode = 'flat'
elif properties['structure'] == 'list':
structure_mode = 'list'
elif properties['structure'] == 'string':
structure_mode = 'string'
elif properties['structure'] == 'json':
structure_mode = 'json'
elif properties['structure'] == 'nested':
pass
else:
msg = "Invalid structure mode name '{}' for reference '{}'"
raise SerializerError(msg.format(structure_mode, name))
del properties['structure']
# Validate variable names
for item in properties.keys():
self.validate_variable_name(item)
# Perform serialize according to structure mode
if structure_mode == 'flat':
context = self.serialize_to_flat(name, properties)
elif structure_mode == 'list':
context = self.serialize_to_list(name, properties)
elif structure_mode == 'string':
context = self.serialize_to_string(name, properties)
elif structure_mode == 'nested':
context = self.serialize_to_nested(name, properties)
elif structure_mode == 'json':
context = self.serialize_to_json(name, properties)
return context
def get_available_references(self, datas):
"""
Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed.
"""
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names
def get_enabled_references(self, datas, meta_references):
"""
Get enabled manifest references declarations.
Enabled references are defined through meta references declaration,
every other references are ignored.
Arguments:
datas (dict): Data where to search for reference declarations.
This is commonly the fully parsed manifest.
meta_references (list): List of enabled reference names.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
references = OrderedDict()
for section in meta_references:
references[section] = self.get_reference(datas, section)
return references
def serialize(self, datas):
"""
Serialize datas to manifest structure with metas and references.
Only references are returned, metas are assigned to attribute
``ManifestSerializer._metas``.
Arguments:
datas (dict): Data where to search for reference declarations. This
is commonly the fully parsed manifest.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
self._metas = OrderedDict({
'references': self.get_meta_references(datas),
})
return self.get_enabled_references(datas, self._metas['references'])
|
sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.serialize_to_list | python | def serialize_to_list(self, name, datas):
items = datas.get('items', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if items is None:
msg = ("List reference '{}' lacks of required 'items' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
items = self.value_splitter(name, 'items', items, mode=splitter)
return items | Serialize given datas to a list structure.
List structure is very simple and only require a variable ``--items``
which is a string of values separated with an empty space. Every other
properties are ignored.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
list: List of serialized reference datas. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L249-L274 | [
"def value_splitter(self, reference, prop, value, mode):\n \"\"\"\n Split a string into a list items.\n\n Default behavior is to split on white spaces.\n\n\n Arguments:\n reference (string): Reference name used when raising possible\n error.\n prop (string): Property name used when raising possible error.\n value (string): Property value to split.\n mode (string): Splitter mode. Default should come from\n ``ManifestSerializer._DEFAULT_SPLITTER``.\n\n Available splitter are:\n\n * ``white-space``: Simply split a string on white spaces;\n * ``json-list``: Assume the string is a JSON list to parse;\n\n Returns:\n list:\n \"\"\"\n items = []\n\n if mode == 'json-list':\n try:\n items = json.loads(value)\n except json.JSONDecodeError as e:\n print(value)\n msg = (\"Reference '{ref}' raised JSON decoder error when \"\n \"splitting values from '{prop}': {err}'\")\n raise SerializerError(msg.format(ref=reference, prop=prop,\n err=e))\n else:\n if len(value) > 0:\n items = value.split(\" \")\n\n return items\n"
] | class ManifestSerializer(object):
"""
Serialize parsed CSS to data suitable to Manifest
Raises:
SerializerError: When there is an invalid syntax in datas.
Attributes:
_metas (collections.OrderedDict): Buffer to store serialized metas
from parsed source. Default is an empty dict which reseted and
filled from ``serialize`` method.
_DEFAULT_SPLITTER (string): Default value splitter used for some
structure kinds.
"""
_DEFAULT_SPLITTER = 'white-space'
def __init__(self):
self._metas = OrderedDict()
def validate_rule_name(self, name):
"""
Validate rule name.
Arguments:
name (string): Rule name.
Returns:
bool: ``True`` if rule name is valid.
"""
if not name:
raise SerializerError("Rule name is empty".format(name))
if name[0] not in RULE_ALLOWED_START:
msg = "Rule name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in RULE_ALLOWED_CHARS:
msg = ("Invalid rule name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def validate_variable_name(self, name):
"""
Validate variable name.
Arguments:
name (string): Property name.
Returns:
bool: ``True`` if variable name is valid.
"""
if not name:
raise SerializerError("Variable name is empty".format(name))
if name[0] not in PROPERTY_ALLOWED_START:
msg = "Variable name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in PROPERTY_ALLOWED_CHARS:
msg = ("Invalid variable name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def value_splitter(self, reference, prop, value, mode):
"""
Split a string into a list items.
Default behavior is to split on white spaces.
Arguments:
reference (string): Reference name used when raising possible
error.
prop (string): Property name used when raising possible error.
value (string): Property value to split.
mode (string): Splitter mode. Default should come from
``ManifestSerializer._DEFAULT_SPLITTER``.
Available splitter are:
* ``white-space``: Simply split a string on white spaces;
* ``json-list``: Assume the string is a JSON list to parse;
Returns:
list:
"""
items = []
if mode == 'json-list':
try:
items = json.loads(value)
except json.JSONDecodeError as e:
print(value)
msg = ("Reference '{ref}' raised JSON decoder error when "
"splitting values from '{prop}': {err}'")
raise SerializerError(msg.format(ref=reference, prop=prop,
err=e))
else:
if len(value) > 0:
items = value.split(" ")
return items
def serialize_to_json(self, name, datas):
"""
Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending from JSON content.
"""
data_object = datas.get('object', None)
if data_object is None:
msg = ("JSON reference '{}' lacks of required 'object' variable")
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content
def serialize_to_nested(self, name, datas):
"""
Serialize given datas to a nested structure where each key create an
item and each other variable is stored as a subitem with corresponding
value (according to key index position).
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Nested dictionnary of serialized reference datas.
"""
keys = datas.get('keys', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Nested reference '{}' lacks of required 'keys' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
# Initialize context dict with reference keys
context = OrderedDict()
for k in keys:
context[k] = OrderedDict()
# Tidy each variable value to its respective item
for k, v in datas.items():
# Ignore reserved internal keywords
if k not in ('keys', 'structure', 'splitter'):
values = self.value_splitter(name, 'values', v, mode=splitter)
if len(values) != len(keys):
msg = ("Nested reference '{}' has different length for "
"values of '{}' and 'keys'")
raise SerializerError(msg.format(name, k))
# Put each value to its respective key using position index.
for i, item in enumerate(values):
ref = keys[i]
context[ref][k] = item
return context
def serialize_to_flat(self, name, datas):
"""
Serialize given datas to a flat structure ``KEY:VALUE`` where ``KEY``
comes from ``keys`` variable and ``VALUE`` comes from ``values``
variable.
This means both ``keys`` and ``values`` are required variable to be
correctly filled (each one is a string of item separated with an empty
space). Both resulting list must be the same length.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Flat dictionnay of serialized reference datas.
"""
keys = datas.get('keys', None)
values = datas.get('values', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Flat reference '{}' lacks of required 'keys' variable or "
"is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
if not values:
msg = ("Flat reference '{}' lacks of required 'values' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
values = self.value_splitter(name, 'values', values, mode=splitter)
if len(values) != len(keys):
msg = ("Flat reference have different length of 'keys' ands "
"'values' variable")
raise SerializerError(msg.format(name))
return OrderedDict(zip(keys, values))
def serialize_to_string(self, name, datas):
"""
Serialize given datas to a string.
Simply return the value from required variable``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value.
"""
value = datas.get('value', None)
if value is None:
msg = ("String reference '{}' lacks of required 'value' variable "
"or is empty")
raise SerializerError(msg.format(name))
return value
def get_meta_references(self, datas):
"""
Get manifest enabled references declaration
This required declaration is readed from
``styleguide-metas-references`` rule that require either a ``--names``
or ``--auto`` variable, each one define the mode to enable reference:
Manually
Using ``--names`` which define a list of names to enable, every
other non enabled rule will be ignored.
Section name (and so Reference name also) must not contains special
character nor ``-`` so they still be valid variable name for almost
any languages. For word separator inside name, use ``_``.
Automatic
Using ``--auto`` variable every reference rules will be enabled.
The value of this variable is not important since it is not empty.
If both of these variables are defined, the manual enable mode is used.
Arguments:
datas (dict): Data where to search for meta references declaration.
This is commonly the fully parsed manifest.
Returns:
list: A list of reference names.
"""
rule = datas.get(RULE_META_REFERENCES, {})
if not rule:
msg = "Manifest lacks of '.{}' or is empty"
raise SerializerError(msg.format(RULE_META_REFERENCES))
else:
if rule.get('names', None):
names = rule.get('names').split(" ")
elif rule.get('auto', None):
names = self.get_available_references(datas)
else:
msg = ("'.{}' either require '--names' or '--auto' variable "
"to be defined")
raise SerializerError(msg.format(RULE_META_REFERENCES))
for item in names:
self.validate_rule_name(item)
return names
def get_reference(self, datas, name):
"""
Get serialized reference datas
Because every reference is turned to a dict (that stands on ``keys``
variable that is a list of key names), every variables must have the
same exact length of word than the key name list.
A reference name starts with 'styleguide-reference-' followed by
name for reference.
A reference can contains variable ``--structure`` setted to ``"flat"``,
``"list"`` or ``"string"`` to define serialization structure.
Arguments:
datas (dict): Data where to search for reference declaration. This
is commonly the fully parsed manifest.
name (string): Reference name to get and serialize.
Returns:
collections.OrderedDict: Serialized reference datas.
"""
rule_name = '-'.join((RULE_REFERENCE, name))
structure_mode = 'nested'
if rule_name not in datas:
msg = "Unable to find enabled reference '{}'"
raise SerializerError(msg.format(name))
properties = datas.get(rule_name)
# Search for "structure" variable
if 'structure' in properties:
if properties['structure'] == 'flat':
structure_mode = 'flat'
elif properties['structure'] == 'list':
structure_mode = 'list'
elif properties['structure'] == 'string':
structure_mode = 'string'
elif properties['structure'] == 'json':
structure_mode = 'json'
elif properties['structure'] == 'nested':
pass
else:
msg = "Invalid structure mode name '{}' for reference '{}'"
raise SerializerError(msg.format(structure_mode, name))
del properties['structure']
# Validate variable names
for item in properties.keys():
self.validate_variable_name(item)
# Perform serialize according to structure mode
if structure_mode == 'flat':
context = self.serialize_to_flat(name, properties)
elif structure_mode == 'list':
context = self.serialize_to_list(name, properties)
elif structure_mode == 'string':
context = self.serialize_to_string(name, properties)
elif structure_mode == 'nested':
context = self.serialize_to_nested(name, properties)
elif structure_mode == 'json':
context = self.serialize_to_json(name, properties)
return context
def get_available_references(self, datas):
"""
Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed.
"""
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names
def get_enabled_references(self, datas, meta_references):
"""
Get enabled manifest references declarations.
Enabled references are defined through meta references declaration,
every other references are ignored.
Arguments:
datas (dict): Data where to search for reference declarations.
This is commonly the fully parsed manifest.
meta_references (list): List of enabled reference names.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
references = OrderedDict()
for section in meta_references:
references[section] = self.get_reference(datas, section)
return references
def serialize(self, datas):
"""
Serialize datas to manifest structure with metas and references.
Only references are returned, metas are assigned to attribute
``ManifestSerializer._metas``.
Arguments:
datas (dict): Data where to search for reference declarations. This
is commonly the fully parsed manifest.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
self._metas = OrderedDict({
'references': self.get_meta_references(datas),
})
return self.get_enabled_references(datas, self._metas['references'])
|
sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.serialize_to_string | python | def serialize_to_string(self, name, datas):
value = datas.get('value', None)
if value is None:
msg = ("String reference '{}' lacks of required 'value' variable "
"or is empty")
raise SerializerError(msg.format(name))
return value | Serialize given datas to a string.
Simply return the value from required variable``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L276-L296 | null | class ManifestSerializer(object):
"""
Serialize parsed CSS to data suitable to Manifest
Raises:
SerializerError: When there is an invalid syntax in datas.
Attributes:
_metas (collections.OrderedDict): Buffer to store serialized metas
from parsed source. Default is an empty dict which reseted and
filled from ``serialize`` method.
_DEFAULT_SPLITTER (string): Default value splitter used for some
structure kinds.
"""
_DEFAULT_SPLITTER = 'white-space'
def __init__(self):
self._metas = OrderedDict()
def validate_rule_name(self, name):
"""
Validate rule name.
Arguments:
name (string): Rule name.
Returns:
bool: ``True`` if rule name is valid.
"""
if not name:
raise SerializerError("Rule name is empty".format(name))
if name[0] not in RULE_ALLOWED_START:
msg = "Rule name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in RULE_ALLOWED_CHARS:
msg = ("Invalid rule name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def validate_variable_name(self, name):
"""
Validate variable name.
Arguments:
name (string): Property name.
Returns:
bool: ``True`` if variable name is valid.
"""
if not name:
raise SerializerError("Variable name is empty".format(name))
if name[0] not in PROPERTY_ALLOWED_START:
msg = "Variable name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in PROPERTY_ALLOWED_CHARS:
msg = ("Invalid variable name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def value_splitter(self, reference, prop, value, mode):
"""
Split a string into a list items.
Default behavior is to split on white spaces.
Arguments:
reference (string): Reference name used when raising possible
error.
prop (string): Property name used when raising possible error.
value (string): Property value to split.
mode (string): Splitter mode. Default should come from
``ManifestSerializer._DEFAULT_SPLITTER``.
Available splitter are:
* ``white-space``: Simply split a string on white spaces;
* ``json-list``: Assume the string is a JSON list to parse;
Returns:
list:
"""
items = []
if mode == 'json-list':
try:
items = json.loads(value)
except json.JSONDecodeError as e:
print(value)
msg = ("Reference '{ref}' raised JSON decoder error when "
"splitting values from '{prop}': {err}'")
raise SerializerError(msg.format(ref=reference, prop=prop,
err=e))
else:
if len(value) > 0:
items = value.split(" ")
return items
def serialize_to_json(self, name, datas):
"""
Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending from JSON content.
"""
data_object = datas.get('object', None)
if data_object is None:
msg = ("JSON reference '{}' lacks of required 'object' variable")
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content
def serialize_to_nested(self, name, datas):
"""
Serialize given datas to a nested structure where each key create an
item and each other variable is stored as a subitem with corresponding
value (according to key index position).
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Nested dictionnary of serialized reference datas.
"""
keys = datas.get('keys', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Nested reference '{}' lacks of required 'keys' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
# Initialize context dict with reference keys
context = OrderedDict()
for k in keys:
context[k] = OrderedDict()
# Tidy each variable value to its respective item
for k, v in datas.items():
# Ignore reserved internal keywords
if k not in ('keys', 'structure', 'splitter'):
values = self.value_splitter(name, 'values', v, mode=splitter)
if len(values) != len(keys):
msg = ("Nested reference '{}' has different length for "
"values of '{}' and 'keys'")
raise SerializerError(msg.format(name, k))
# Put each value to its respective key using position index.
for i, item in enumerate(values):
ref = keys[i]
context[ref][k] = item
return context
def serialize_to_flat(self, name, datas):
"""
Serialize given datas to a flat structure ``KEY:VALUE`` where ``KEY``
comes from ``keys`` variable and ``VALUE`` comes from ``values``
variable.
This means both ``keys`` and ``values`` are required variable to be
correctly filled (each one is a string of item separated with an empty
space). Both resulting list must be the same length.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Flat dictionnay of serialized reference datas.
"""
keys = datas.get('keys', None)
values = datas.get('values', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Flat reference '{}' lacks of required 'keys' variable or "
"is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
if not values:
msg = ("Flat reference '{}' lacks of required 'values' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
values = self.value_splitter(name, 'values', values, mode=splitter)
if len(values) != len(keys):
msg = ("Flat reference have different length of 'keys' ands "
"'values' variable")
raise SerializerError(msg.format(name))
return OrderedDict(zip(keys, values))
def serialize_to_list(self, name, datas):
"""
Serialize given datas to a list structure.
List structure is very simple and only require a variable ``--items``
which is a string of values separated with an empty space. Every other
properties are ignored.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
list: List of serialized reference datas.
"""
items = datas.get('items', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if items is None:
msg = ("List reference '{}' lacks of required 'items' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
items = self.value_splitter(name, 'items', items, mode=splitter)
return items
def get_meta_references(self, datas):
"""
Get manifest enabled references declaration
This required declaration is readed from
``styleguide-metas-references`` rule that require either a ``--names``
or ``--auto`` variable, each one define the mode to enable reference:
Manually
Using ``--names`` which define a list of names to enable, every
other non enabled rule will be ignored.
Section name (and so Reference name also) must not contains special
character nor ``-`` so they still be valid variable name for almost
any languages. For word separator inside name, use ``_``.
Automatic
Using ``--auto`` variable every reference rules will be enabled.
The value of this variable is not important since it is not empty.
If both of these variables are defined, the manual enable mode is used.
Arguments:
datas (dict): Data where to search for meta references declaration.
This is commonly the fully parsed manifest.
Returns:
list: A list of reference names.
"""
rule = datas.get(RULE_META_REFERENCES, {})
if not rule:
msg = "Manifest lacks of '.{}' or is empty"
raise SerializerError(msg.format(RULE_META_REFERENCES))
else:
if rule.get('names', None):
names = rule.get('names').split(" ")
elif rule.get('auto', None):
names = self.get_available_references(datas)
else:
msg = ("'.{}' either require '--names' or '--auto' variable "
"to be defined")
raise SerializerError(msg.format(RULE_META_REFERENCES))
for item in names:
self.validate_rule_name(item)
return names
def get_reference(self, datas, name):
"""
Get serialized reference datas
Because every reference is turned to a dict (that stands on ``keys``
variable that is a list of key names), every variables must have the
same exact length of word than the key name list.
A reference name starts with 'styleguide-reference-' followed by
name for reference.
A reference can contains variable ``--structure`` setted to ``"flat"``,
``"list"`` or ``"string"`` to define serialization structure.
Arguments:
datas (dict): Data where to search for reference declaration. This
is commonly the fully parsed manifest.
name (string): Reference name to get and serialize.
Returns:
collections.OrderedDict: Serialized reference datas.
"""
rule_name = '-'.join((RULE_REFERENCE, name))
structure_mode = 'nested'
if rule_name not in datas:
msg = "Unable to find enabled reference '{}'"
raise SerializerError(msg.format(name))
properties = datas.get(rule_name)
# Search for "structure" variable
if 'structure' in properties:
if properties['structure'] == 'flat':
structure_mode = 'flat'
elif properties['structure'] == 'list':
structure_mode = 'list'
elif properties['structure'] == 'string':
structure_mode = 'string'
elif properties['structure'] == 'json':
structure_mode = 'json'
elif properties['structure'] == 'nested':
pass
else:
msg = "Invalid structure mode name '{}' for reference '{}'"
raise SerializerError(msg.format(structure_mode, name))
del properties['structure']
# Validate variable names
for item in properties.keys():
self.validate_variable_name(item)
# Perform serialize according to structure mode
if structure_mode == 'flat':
context = self.serialize_to_flat(name, properties)
elif structure_mode == 'list':
context = self.serialize_to_list(name, properties)
elif structure_mode == 'string':
context = self.serialize_to_string(name, properties)
elif structure_mode == 'nested':
context = self.serialize_to_nested(name, properties)
elif structure_mode == 'json':
context = self.serialize_to_json(name, properties)
return context
def get_available_references(self, datas):
"""
Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed.
"""
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names
def get_enabled_references(self, datas, meta_references):
"""
Get enabled manifest references declarations.
Enabled references are defined through meta references declaration,
every other references are ignored.
Arguments:
datas (dict): Data where to search for reference declarations.
This is commonly the fully parsed manifest.
meta_references (list): List of enabled reference names.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
references = OrderedDict()
for section in meta_references:
references[section] = self.get_reference(datas, section)
return references
def serialize(self, datas):
"""
Serialize datas to manifest structure with metas and references.
Only references are returned, metas are assigned to attribute
``ManifestSerializer._metas``.
Arguments:
datas (dict): Data where to search for reference declarations. This
is commonly the fully parsed manifest.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
self._metas = OrderedDict({
'references': self.get_meta_references(datas),
})
return self.get_enabled_references(datas, self._metas['references'])
|
sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.get_meta_references | python | def get_meta_references(self, datas):
rule = datas.get(RULE_META_REFERENCES, {})
if not rule:
msg = "Manifest lacks of '.{}' or is empty"
raise SerializerError(msg.format(RULE_META_REFERENCES))
else:
if rule.get('names', None):
names = rule.get('names').split(" ")
elif rule.get('auto', None):
names = self.get_available_references(datas)
else:
msg = ("'.{}' either require '--names' or '--auto' variable "
"to be defined")
raise SerializerError(msg.format(RULE_META_REFERENCES))
for item in names:
self.validate_rule_name(item)
return names | Get manifest enabled references declaration
This required declaration is readed from
``styleguide-metas-references`` rule that require either a ``--names``
or ``--auto`` variable, each one define the mode to enable reference:
Manually
Using ``--names`` which define a list of names to enable, every
other non enabled rule will be ignored.
Section name (and so Reference name also) must not contains special
character nor ``-`` so they still be valid variable name for almost
any languages. For word separator inside name, use ``_``.
Automatic
Using ``--auto`` variable every reference rules will be enabled.
The value of this variable is not important since it is not empty.
If both of these variables are defined, the manual enable mode is used.
Arguments:
datas (dict): Data where to search for meta references declaration.
This is commonly the fully parsed manifest.
Returns:
list: A list of reference names. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L298-L344 | [
"def validate_rule_name(self, name):\n \"\"\"\n Validate rule name.\n\n Arguments:\n name (string): Rule name.\n\n Returns:\n bool: ``True`` if rule name is valid.\n \"\"\"\n if not name:\n raise SerializerError(\"Rule name is empty\".format(name))\n\n if name[0] not in RULE_ALLOWED_START:\n msg = \"Rule name '{}' must starts with a letter\"\n raise SerializerError(msg.format(name))\n\n for item in name:\n if item not in RULE_ALLOWED_CHARS:\n msg = (\"Invalid rule name '{}': it must only contains \"\n \"letters, numbers and '_' character\")\n raise SerializerError(msg.format(name))\n\n return True\n"
] | class ManifestSerializer(object):
"""
Serialize parsed CSS to data suitable to Manifest
Raises:
SerializerError: When there is an invalid syntax in datas.
Attributes:
_metas (collections.OrderedDict): Buffer to store serialized metas
from parsed source. Default is an empty dict which reseted and
filled from ``serialize`` method.
_DEFAULT_SPLITTER (string): Default value splitter used for some
structure kinds.
"""
_DEFAULT_SPLITTER = 'white-space'
def __init__(self):
self._metas = OrderedDict()
def validate_rule_name(self, name):
"""
Validate rule name.
Arguments:
name (string): Rule name.
Returns:
bool: ``True`` if rule name is valid.
"""
if not name:
raise SerializerError("Rule name is empty".format(name))
if name[0] not in RULE_ALLOWED_START:
msg = "Rule name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in RULE_ALLOWED_CHARS:
msg = ("Invalid rule name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def validate_variable_name(self, name):
"""
Validate variable name.
Arguments:
name (string): Property name.
Returns:
bool: ``True`` if variable name is valid.
"""
if not name:
raise SerializerError("Variable name is empty".format(name))
if name[0] not in PROPERTY_ALLOWED_START:
msg = "Variable name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in PROPERTY_ALLOWED_CHARS:
msg = ("Invalid variable name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def value_splitter(self, reference, prop, value, mode):
"""
Split a string into a list items.
Default behavior is to split on white spaces.
Arguments:
reference (string): Reference name used when raising possible
error.
prop (string): Property name used when raising possible error.
value (string): Property value to split.
mode (string): Splitter mode. Default should come from
``ManifestSerializer._DEFAULT_SPLITTER``.
Available splitter are:
* ``white-space``: Simply split a string on white spaces;
* ``json-list``: Assume the string is a JSON list to parse;
Returns:
list:
"""
items = []
if mode == 'json-list':
try:
items = json.loads(value)
except json.JSONDecodeError as e:
print(value)
msg = ("Reference '{ref}' raised JSON decoder error when "
"splitting values from '{prop}': {err}'")
raise SerializerError(msg.format(ref=reference, prop=prop,
err=e))
else:
if len(value) > 0:
items = value.split(" ")
return items
def serialize_to_json(self, name, datas):
"""
Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending from JSON content.
"""
data_object = datas.get('object', None)
if data_object is None:
msg = ("JSON reference '{}' lacks of required 'object' variable")
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content
def serialize_to_nested(self, name, datas):
"""
Serialize given datas to a nested structure where each key create an
item and each other variable is stored as a subitem with corresponding
value (according to key index position).
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Nested dictionnary of serialized reference datas.
"""
keys = datas.get('keys', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Nested reference '{}' lacks of required 'keys' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
# Initialize context dict with reference keys
context = OrderedDict()
for k in keys:
context[k] = OrderedDict()
# Tidy each variable value to its respective item
for k, v in datas.items():
# Ignore reserved internal keywords
if k not in ('keys', 'structure', 'splitter'):
values = self.value_splitter(name, 'values', v, mode=splitter)
if len(values) != len(keys):
msg = ("Nested reference '{}' has different length for "
"values of '{}' and 'keys'")
raise SerializerError(msg.format(name, k))
# Put each value to its respective key using position index.
for i, item in enumerate(values):
ref = keys[i]
context[ref][k] = item
return context
def serialize_to_flat(self, name, datas):
"""
Serialize given datas to a flat structure ``KEY:VALUE`` where ``KEY``
comes from ``keys`` variable and ``VALUE`` comes from ``values``
variable.
This means both ``keys`` and ``values`` are required variable to be
correctly filled (each one is a string of item separated with an empty
space). Both resulting list must be the same length.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Flat dictionnay of serialized reference datas.
"""
keys = datas.get('keys', None)
values = datas.get('values', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Flat reference '{}' lacks of required 'keys' variable or "
"is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
if not values:
msg = ("Flat reference '{}' lacks of required 'values' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
values = self.value_splitter(name, 'values', values, mode=splitter)
if len(values) != len(keys):
msg = ("Flat reference have different length of 'keys' ands "
"'values' variable")
raise SerializerError(msg.format(name))
return OrderedDict(zip(keys, values))
def serialize_to_list(self, name, datas):
"""
Serialize given datas to a list structure.
List structure is very simple and only require a variable ``--items``
which is a string of values separated with an empty space. Every other
properties are ignored.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
list: List of serialized reference datas.
"""
items = datas.get('items', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if items is None:
msg = ("List reference '{}' lacks of required 'items' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
items = self.value_splitter(name, 'items', items, mode=splitter)
return items
def serialize_to_string(self, name, datas):
"""
Serialize given datas to a string.
Simply return the value from required variable``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value.
"""
value = datas.get('value', None)
if value is None:
msg = ("String reference '{}' lacks of required 'value' variable "
"or is empty")
raise SerializerError(msg.format(name))
return value
def get_reference(self, datas, name):
"""
Get serialized reference datas
Because every reference is turned to a dict (that stands on ``keys``
variable that is a list of key names), every variables must have the
same exact length of word than the key name list.
A reference name starts with 'styleguide-reference-' followed by
name for reference.
A reference can contains variable ``--structure`` setted to ``"flat"``,
``"list"`` or ``"string"`` to define serialization structure.
Arguments:
datas (dict): Data where to search for reference declaration. This
is commonly the fully parsed manifest.
name (string): Reference name to get and serialize.
Returns:
collections.OrderedDict: Serialized reference datas.
"""
rule_name = '-'.join((RULE_REFERENCE, name))
structure_mode = 'nested'
if rule_name not in datas:
msg = "Unable to find enabled reference '{}'"
raise SerializerError(msg.format(name))
properties = datas.get(rule_name)
# Search for "structure" variable
if 'structure' in properties:
if properties['structure'] == 'flat':
structure_mode = 'flat'
elif properties['structure'] == 'list':
structure_mode = 'list'
elif properties['structure'] == 'string':
structure_mode = 'string'
elif properties['structure'] == 'json':
structure_mode = 'json'
elif properties['structure'] == 'nested':
pass
else:
msg = "Invalid structure mode name '{}' for reference '{}'"
raise SerializerError(msg.format(structure_mode, name))
del properties['structure']
# Validate variable names
for item in properties.keys():
self.validate_variable_name(item)
# Perform serialize according to structure mode
if structure_mode == 'flat':
context = self.serialize_to_flat(name, properties)
elif structure_mode == 'list':
context = self.serialize_to_list(name, properties)
elif structure_mode == 'string':
context = self.serialize_to_string(name, properties)
elif structure_mode == 'nested':
context = self.serialize_to_nested(name, properties)
elif structure_mode == 'json':
context = self.serialize_to_json(name, properties)
return context
def get_available_references(self, datas):
"""
Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed.
"""
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names
def get_enabled_references(self, datas, meta_references):
"""
Get enabled manifest references declarations.
Enabled references are defined through meta references declaration,
every other references are ignored.
Arguments:
datas (dict): Data where to search for reference declarations.
This is commonly the fully parsed manifest.
meta_references (list): List of enabled reference names.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
references = OrderedDict()
for section in meta_references:
references[section] = self.get_reference(datas, section)
return references
def serialize(self, datas):
"""
Serialize datas to manifest structure with metas and references.
Only references are returned, metas are assigned to attribute
``ManifestSerializer._metas``.
Arguments:
datas (dict): Data where to search for reference declarations. This
is commonly the fully parsed manifest.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
self._metas = OrderedDict({
'references': self.get_meta_references(datas),
})
return self.get_enabled_references(datas, self._metas['references'])
|
sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.get_reference | python | def get_reference(self, datas, name):
rule_name = '-'.join((RULE_REFERENCE, name))
structure_mode = 'nested'
if rule_name not in datas:
msg = "Unable to find enabled reference '{}'"
raise SerializerError(msg.format(name))
properties = datas.get(rule_name)
# Search for "structure" variable
if 'structure' in properties:
if properties['structure'] == 'flat':
structure_mode = 'flat'
elif properties['structure'] == 'list':
structure_mode = 'list'
elif properties['structure'] == 'string':
structure_mode = 'string'
elif properties['structure'] == 'json':
structure_mode = 'json'
elif properties['structure'] == 'nested':
pass
else:
msg = "Invalid structure mode name '{}' for reference '{}'"
raise SerializerError(msg.format(structure_mode, name))
del properties['structure']
# Validate variable names
for item in properties.keys():
self.validate_variable_name(item)
# Perform serialize according to structure mode
if structure_mode == 'flat':
context = self.serialize_to_flat(name, properties)
elif structure_mode == 'list':
context = self.serialize_to_list(name, properties)
elif structure_mode == 'string':
context = self.serialize_to_string(name, properties)
elif structure_mode == 'nested':
context = self.serialize_to_nested(name, properties)
elif structure_mode == 'json':
context = self.serialize_to_json(name, properties)
return context | Get serialized reference datas
Because every reference is turned to a dict (that stands on ``keys``
variable that is a list of key names), every variables must have the
same exact length of word than the key name list.
A reference name starts with 'styleguide-reference-' followed by
name for reference.
A reference can contains variable ``--structure`` setted to ``"flat"``,
``"list"`` or ``"string"`` to define serialization structure.
Arguments:
datas (dict): Data where to search for reference declaration. This
is commonly the fully parsed manifest.
name (string): Reference name to get and serialize.
Returns:
collections.OrderedDict: Serialized reference datas. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L346-L410 | [
"def validate_variable_name(self, name):\n \"\"\"\n Validate variable name.\n\n Arguments:\n name (string): Property name.\n\n Returns:\n bool: ``True`` if variable name is valid.\n \"\"\"\n if not name:\n raise SerializerError(\"Variable name is empty\".format(name))\n\n if name[0] not in PROPERTY_ALLOWED_START:\n msg = \"Variable name '{}' must starts with a letter\"\n raise SerializerError(msg.format(name))\n\n for item in name:\n if item not in PROPERTY_ALLOWED_CHARS:\n msg = (\"Invalid variable name '{}': it must only contains \"\n \"letters, numbers and '_' character\")\n raise SerializerError(msg.format(name))\n\n return True\n",
"def serialize_to_json(self, name, datas):\n \"\"\"\n Serialize given datas to any object from assumed JSON string.\n\n Arguments:\n name (string): Name only used inside possible exception message.\n datas (dict): Datas to serialize.\n\n Returns:\n object: Object depending from JSON content.\n \"\"\"\n data_object = datas.get('object', None)\n\n if data_object is None:\n msg = (\"JSON reference '{}' lacks of required 'object' variable\")\n raise SerializerError(msg.format(name))\n\n try:\n content = json.loads(data_object, object_pairs_hook=OrderedDict)\n except json.JSONDecodeError as e:\n msg = \"JSON reference '{}' raised error from JSON decoder: {}\"\n raise SerializerError(msg.format(name, e))\n else:\n return content\n",
"def serialize_to_nested(self, name, datas):\n \"\"\"\n Serialize given datas to a nested structure where each key create an\n item and each other variable is stored as a subitem with corresponding\n value (according to key index position).\n\n Arguments:\n name (string): Name only used inside possible exception message.\n datas (dict): Datas to serialize.\n\n Returns:\n dict: Nested dictionnary of serialized reference datas.\n \"\"\"\n keys = datas.get('keys', None)\n splitter = datas.get('splitter', self._DEFAULT_SPLITTER)\n\n if not keys:\n msg = (\"Nested reference '{}' lacks of required 'keys' variable \"\n \"or is empty\")\n raise SerializerError(msg.format(name))\n else:\n keys = self.value_splitter(name, 'keys', keys, mode=splitter)\n\n # Initialize context dict with reference keys\n context = OrderedDict()\n for k in keys:\n context[k] = OrderedDict()\n\n # Tidy each variable value to its respective item\n for k, v in datas.items():\n # Ignore reserved internal keywords\n if k not in ('keys', 'structure', 'splitter'):\n values = self.value_splitter(name, 'values', v, mode=splitter)\n\n if len(values) != len(keys):\n msg = (\"Nested reference '{}' has different length for \"\n \"values of '{}' and 'keys'\")\n raise SerializerError(msg.format(name, k))\n\n # Put each value to its respective key using position index.\n for i, item in enumerate(values):\n ref = keys[i]\n context[ref][k] = item\n\n return context\n",
"def serialize_to_flat(self, name, datas):\n \"\"\"\n Serialize given datas to a flat structure ``KEY:VALUE`` where ``KEY``\n comes from ``keys`` variable and ``VALUE`` comes from ``values``\n variable.\n\n This means both ``keys`` and ``values`` are required variable to be\n correctly filled (each one is a string of item separated with an empty\n space). Both resulting list must be the same length.\n\n Arguments:\n name (string): Name only used inside possible exception message.\n datas (dict): Datas to serialize.\n\n Returns:\n dict: Flat dictionnay of serialized reference datas.\n \"\"\"\n keys = datas.get('keys', None)\n values = datas.get('values', None)\n splitter = datas.get('splitter', self._DEFAULT_SPLITTER)\n\n if not keys:\n msg = (\"Flat reference '{}' lacks of required 'keys' variable or \"\n \"is empty\")\n raise SerializerError(msg.format(name))\n else:\n keys = self.value_splitter(name, 'keys', keys, mode=splitter)\n\n if not values:\n msg = (\"Flat reference '{}' lacks of required 'values' variable \"\n \"or is empty\")\n raise SerializerError(msg.format(name))\n else:\n values = self.value_splitter(name, 'values', values, mode=splitter)\n\n if len(values) != len(keys):\n msg = (\"Flat reference have different length of 'keys' ands \"\n \"'values' variable\")\n raise SerializerError(msg.format(name))\n\n return OrderedDict(zip(keys, values))\n",
"def serialize_to_list(self, name, datas):\n \"\"\"\n Serialize given datas to a list structure.\n\n List structure is very simple and only require a variable ``--items``\n which is a string of values separated with an empty space. Every other\n properties are ignored.\n\n Arguments:\n name (string): Name only used inside possible exception message.\n datas (dict): Datas to serialize.\n\n Returns:\n list: List of serialized reference datas.\n \"\"\"\n items = datas.get('items', None)\n splitter = datas.get('splitter', self._DEFAULT_SPLITTER)\n\n if items is None:\n msg = (\"List reference '{}' lacks of required 'items' variable \"\n \"or is empty\")\n raise SerializerError(msg.format(name))\n else:\n items = self.value_splitter(name, 'items', items, mode=splitter)\n\n return items\n",
"def serialize_to_string(self, name, datas):\n \"\"\"\n Serialize given datas to a string.\n\n Simply return the value from required variable``value``.\n\n Arguments:\n name (string): Name only used inside possible exception message.\n datas (dict): Datas to serialize.\n\n Returns:\n string: Value.\n \"\"\"\n value = datas.get('value', None)\n\n if value is None:\n msg = (\"String reference '{}' lacks of required 'value' variable \"\n \"or is empty\")\n raise SerializerError(msg.format(name))\n\n return value\n"
] | class ManifestSerializer(object):
"""
Serialize parsed CSS to data suitable to Manifest
Raises:
SerializerError: When there is an invalid syntax in datas.
Attributes:
_metas (collections.OrderedDict): Buffer to store serialized metas
from parsed source. Default is an empty dict which reseted and
filled from ``serialize`` method.
_DEFAULT_SPLITTER (string): Default value splitter used for some
structure kinds.
"""
_DEFAULT_SPLITTER = 'white-space'
def __init__(self):
self._metas = OrderedDict()
def validate_rule_name(self, name):
"""
Validate rule name.
Arguments:
name (string): Rule name.
Returns:
bool: ``True`` if rule name is valid.
"""
if not name:
raise SerializerError("Rule name is empty".format(name))
if name[0] not in RULE_ALLOWED_START:
msg = "Rule name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in RULE_ALLOWED_CHARS:
msg = ("Invalid rule name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def validate_variable_name(self, name):
"""
Validate variable name.
Arguments:
name (string): Property name.
Returns:
bool: ``True`` if variable name is valid.
"""
if not name:
raise SerializerError("Variable name is empty".format(name))
if name[0] not in PROPERTY_ALLOWED_START:
msg = "Variable name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in PROPERTY_ALLOWED_CHARS:
msg = ("Invalid variable name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def value_splitter(self, reference, prop, value, mode):
"""
Split a string into a list items.
Default behavior is to split on white spaces.
Arguments:
reference (string): Reference name used when raising possible
error.
prop (string): Property name used when raising possible error.
value (string): Property value to split.
mode (string): Splitter mode. Default should come from
``ManifestSerializer._DEFAULT_SPLITTER``.
Available splitter are:
* ``white-space``: Simply split a string on white spaces;
* ``json-list``: Assume the string is a JSON list to parse;
Returns:
list:
"""
items = []
if mode == 'json-list':
try:
items = json.loads(value)
except json.JSONDecodeError as e:
print(value)
msg = ("Reference '{ref}' raised JSON decoder error when "
"splitting values from '{prop}': {err}'")
raise SerializerError(msg.format(ref=reference, prop=prop,
err=e))
else:
if len(value) > 0:
items = value.split(" ")
return items
def serialize_to_json(self, name, datas):
"""
Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending from JSON content.
"""
data_object = datas.get('object', None)
if data_object is None:
msg = ("JSON reference '{}' lacks of required 'object' variable")
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content
def serialize_to_nested(self, name, datas):
"""
Serialize given datas to a nested structure where each key create an
item and each other variable is stored as a subitem with corresponding
value (according to key index position).
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Nested dictionnary of serialized reference datas.
"""
keys = datas.get('keys', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Nested reference '{}' lacks of required 'keys' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
# Initialize context dict with reference keys
context = OrderedDict()
for k in keys:
context[k] = OrderedDict()
# Tidy each variable value to its respective item
for k, v in datas.items():
# Ignore reserved internal keywords
if k not in ('keys', 'structure', 'splitter'):
values = self.value_splitter(name, 'values', v, mode=splitter)
if len(values) != len(keys):
msg = ("Nested reference '{}' has different length for "
"values of '{}' and 'keys'")
raise SerializerError(msg.format(name, k))
# Put each value to its respective key using position index.
for i, item in enumerate(values):
ref = keys[i]
context[ref][k] = item
return context
def serialize_to_flat(self, name, datas):
"""
Serialize given datas to a flat structure ``KEY:VALUE`` where ``KEY``
comes from ``keys`` variable and ``VALUE`` comes from ``values``
variable.
This means both ``keys`` and ``values`` are required variable to be
correctly filled (each one is a string of item separated with an empty
space). Both resulting list must be the same length.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Flat dictionnay of serialized reference datas.
"""
keys = datas.get('keys', None)
values = datas.get('values', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Flat reference '{}' lacks of required 'keys' variable or "
"is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
if not values:
msg = ("Flat reference '{}' lacks of required 'values' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
values = self.value_splitter(name, 'values', values, mode=splitter)
if len(values) != len(keys):
msg = ("Flat reference have different length of 'keys' ands "
"'values' variable")
raise SerializerError(msg.format(name))
return OrderedDict(zip(keys, values))
def serialize_to_list(self, name, datas):
"""
Serialize given datas to a list structure.
List structure is very simple and only require a variable ``--items``
which is a string of values separated with an empty space. Every other
properties are ignored.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
list: List of serialized reference datas.
"""
items = datas.get('items', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if items is None:
msg = ("List reference '{}' lacks of required 'items' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
items = self.value_splitter(name, 'items', items, mode=splitter)
return items
def serialize_to_string(self, name, datas):
"""
Serialize given datas to a string.
Simply return the value from required variable``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value.
"""
value = datas.get('value', None)
if value is None:
msg = ("String reference '{}' lacks of required 'value' variable "
"or is empty")
raise SerializerError(msg.format(name))
return value
def get_meta_references(self, datas):
"""
Get manifest enabled references declaration
This required declaration is readed from
``styleguide-metas-references`` rule that require either a ``--names``
or ``--auto`` variable, each one define the mode to enable reference:
Manually
Using ``--names`` which define a list of names to enable, every
other non enabled rule will be ignored.
Section name (and so Reference name also) must not contains special
character nor ``-`` so they still be valid variable name for almost
any languages. For word separator inside name, use ``_``.
Automatic
Using ``--auto`` variable every reference rules will be enabled.
The value of this variable is not important since it is not empty.
If both of these variables are defined, the manual enable mode is used.
Arguments:
datas (dict): Data where to search for meta references declaration.
This is commonly the fully parsed manifest.
Returns:
list: A list of reference names.
"""
rule = datas.get(RULE_META_REFERENCES, {})
if not rule:
msg = "Manifest lacks of '.{}' or is empty"
raise SerializerError(msg.format(RULE_META_REFERENCES))
else:
if rule.get('names', None):
names = rule.get('names').split(" ")
elif rule.get('auto', None):
names = self.get_available_references(datas)
else:
msg = ("'.{}' either require '--names' or '--auto' variable "
"to be defined")
raise SerializerError(msg.format(RULE_META_REFERENCES))
for item in names:
self.validate_rule_name(item)
return names
def get_available_references(self, datas):
"""
Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed.
"""
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names
def get_enabled_references(self, datas, meta_references):
"""
Get enabled manifest references declarations.
Enabled references are defined through meta references declaration,
every other references are ignored.
Arguments:
datas (dict): Data where to search for reference declarations.
This is commonly the fully parsed manifest.
meta_references (list): List of enabled reference names.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
references = OrderedDict()
for section in meta_references:
references[section] = self.get_reference(datas, section)
return references
def serialize(self, datas):
"""
Serialize datas to manifest structure with metas and references.
Only references are returned, metas are assigned to attribute
``ManifestSerializer._metas``.
Arguments:
datas (dict): Data where to search for reference declarations. This
is commonly the fully parsed manifest.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
self._metas = OrderedDict({
'references': self.get_meta_references(datas),
})
return self.get_enabled_references(datas, self._metas['references'])
|
sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.get_available_references | python | def get_available_references(self, datas):
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names | Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L412-L434 | null | class ManifestSerializer(object):
"""
Serialize parsed CSS to data suitable to Manifest
Raises:
SerializerError: When there is an invalid syntax in datas.
Attributes:
_metas (collections.OrderedDict): Buffer to store serialized metas
from parsed source. Default is an empty dict which reseted and
filled from ``serialize`` method.
_DEFAULT_SPLITTER (string): Default value splitter used for some
structure kinds.
"""
_DEFAULT_SPLITTER = 'white-space'
def __init__(self):
self._metas = OrderedDict()
def validate_rule_name(self, name):
"""
Validate rule name.
Arguments:
name (string): Rule name.
Returns:
bool: ``True`` if rule name is valid.
"""
if not name:
raise SerializerError("Rule name is empty".format(name))
if name[0] not in RULE_ALLOWED_START:
msg = "Rule name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in RULE_ALLOWED_CHARS:
msg = ("Invalid rule name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def validate_variable_name(self, name):
"""
Validate variable name.
Arguments:
name (string): Property name.
Returns:
bool: ``True`` if variable name is valid.
"""
if not name:
raise SerializerError("Variable name is empty".format(name))
if name[0] not in PROPERTY_ALLOWED_START:
msg = "Variable name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in PROPERTY_ALLOWED_CHARS:
msg = ("Invalid variable name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def value_splitter(self, reference, prop, value, mode):
"""
Split a string into a list items.
Default behavior is to split on white spaces.
Arguments:
reference (string): Reference name used when raising possible
error.
prop (string): Property name used when raising possible error.
value (string): Property value to split.
mode (string): Splitter mode. Default should come from
``ManifestSerializer._DEFAULT_SPLITTER``.
Available splitter are:
* ``white-space``: Simply split a string on white spaces;
* ``json-list``: Assume the string is a JSON list to parse;
Returns:
list:
"""
items = []
if mode == 'json-list':
try:
items = json.loads(value)
except json.JSONDecodeError as e:
print(value)
msg = ("Reference '{ref}' raised JSON decoder error when "
"splitting values from '{prop}': {err}'")
raise SerializerError(msg.format(ref=reference, prop=prop,
err=e))
else:
if len(value) > 0:
items = value.split(" ")
return items
def serialize_to_json(self, name, datas):
"""
Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending from JSON content.
"""
data_object = datas.get('object', None)
if data_object is None:
msg = ("JSON reference '{}' lacks of required 'object' variable")
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content
def serialize_to_nested(self, name, datas):
"""
Serialize given datas to a nested structure where each key create an
item and each other variable is stored as a subitem with corresponding
value (according to key index position).
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Nested dictionnary of serialized reference datas.
"""
keys = datas.get('keys', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Nested reference '{}' lacks of required 'keys' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
# Initialize context dict with reference keys
context = OrderedDict()
for k in keys:
context[k] = OrderedDict()
# Tidy each variable value to its respective item
for k, v in datas.items():
# Ignore reserved internal keywords
if k not in ('keys', 'structure', 'splitter'):
values = self.value_splitter(name, 'values', v, mode=splitter)
if len(values) != len(keys):
msg = ("Nested reference '{}' has different length for "
"values of '{}' and 'keys'")
raise SerializerError(msg.format(name, k))
# Put each value to its respective key using position index.
for i, item in enumerate(values):
ref = keys[i]
context[ref][k] = item
return context
def serialize_to_flat(self, name, datas):
"""
Serialize given datas to a flat structure ``KEY:VALUE`` where ``KEY``
comes from ``keys`` variable and ``VALUE`` comes from ``values``
variable.
This means both ``keys`` and ``values`` are required variable to be
correctly filled (each one is a string of item separated with an empty
space). Both resulting list must be the same length.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Flat dictionnay of serialized reference datas.
"""
keys = datas.get('keys', None)
values = datas.get('values', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Flat reference '{}' lacks of required 'keys' variable or "
"is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
if not values:
msg = ("Flat reference '{}' lacks of required 'values' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
values = self.value_splitter(name, 'values', values, mode=splitter)
if len(values) != len(keys):
msg = ("Flat reference have different length of 'keys' ands "
"'values' variable")
raise SerializerError(msg.format(name))
return OrderedDict(zip(keys, values))
def serialize_to_list(self, name, datas):
"""
Serialize given datas to a list structure.
List structure is very simple and only require a variable ``--items``
which is a string of values separated with an empty space. Every other
properties are ignored.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
list: List of serialized reference datas.
"""
items = datas.get('items', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if items is None:
msg = ("List reference '{}' lacks of required 'items' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
items = self.value_splitter(name, 'items', items, mode=splitter)
return items
def serialize_to_string(self, name, datas):
"""
Serialize given datas to a string.
Simply return the value from required variable``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value.
"""
value = datas.get('value', None)
if value is None:
msg = ("String reference '{}' lacks of required 'value' variable "
"or is empty")
raise SerializerError(msg.format(name))
return value
def get_meta_references(self, datas):
"""
Get manifest enabled references declaration
This required declaration is readed from
``styleguide-metas-references`` rule that require either a ``--names``
or ``--auto`` variable, each one define the mode to enable reference:
Manually
Using ``--names`` which define a list of names to enable, every
other non enabled rule will be ignored.
Section name (and so Reference name also) must not contains special
character nor ``-`` so they still be valid variable name for almost
any languages. For word separator inside name, use ``_``.
Automatic
Using ``--auto`` variable every reference rules will be enabled.
The value of this variable is not important since it is not empty.
If both of these variables are defined, the manual enable mode is used.
Arguments:
datas (dict): Data where to search for meta references declaration.
This is commonly the fully parsed manifest.
Returns:
list: A list of reference names.
"""
rule = datas.get(RULE_META_REFERENCES, {})
if not rule:
msg = "Manifest lacks of '.{}' or is empty"
raise SerializerError(msg.format(RULE_META_REFERENCES))
else:
if rule.get('names', None):
names = rule.get('names').split(" ")
elif rule.get('auto', None):
names = self.get_available_references(datas)
else:
msg = ("'.{}' either require '--names' or '--auto' variable "
"to be defined")
raise SerializerError(msg.format(RULE_META_REFERENCES))
for item in names:
self.validate_rule_name(item)
return names
def get_reference(self, datas, name):
"""
Get serialized reference datas
Because every reference is turned to a dict (that stands on ``keys``
variable that is a list of key names), every variables must have the
same exact length of word than the key name list.
A reference name starts with 'styleguide-reference-' followed by
name for reference.
A reference can contains variable ``--structure`` setted to ``"flat"``,
``"list"`` or ``"string"`` to define serialization structure.
Arguments:
datas (dict): Data where to search for reference declaration. This
is commonly the fully parsed manifest.
name (string): Reference name to get and serialize.
Returns:
collections.OrderedDict: Serialized reference datas.
"""
rule_name = '-'.join((RULE_REFERENCE, name))
structure_mode = 'nested'
if rule_name not in datas:
msg = "Unable to find enabled reference '{}'"
raise SerializerError(msg.format(name))
properties = datas.get(rule_name)
# Search for "structure" variable
if 'structure' in properties:
if properties['structure'] == 'flat':
structure_mode = 'flat'
elif properties['structure'] == 'list':
structure_mode = 'list'
elif properties['structure'] == 'string':
structure_mode = 'string'
elif properties['structure'] == 'json':
structure_mode = 'json'
elif properties['structure'] == 'nested':
pass
else:
msg = "Invalid structure mode name '{}' for reference '{}'"
raise SerializerError(msg.format(structure_mode, name))
del properties['structure']
# Validate variable names
for item in properties.keys():
self.validate_variable_name(item)
# Perform serialize according to structure mode
if structure_mode == 'flat':
context = self.serialize_to_flat(name, properties)
elif structure_mode == 'list':
context = self.serialize_to_list(name, properties)
elif structure_mode == 'string':
context = self.serialize_to_string(name, properties)
elif structure_mode == 'nested':
context = self.serialize_to_nested(name, properties)
elif structure_mode == 'json':
context = self.serialize_to_json(name, properties)
return context
def get_enabled_references(self, datas, meta_references):
"""
Get enabled manifest references declarations.
Enabled references are defined through meta references declaration,
every other references are ignored.
Arguments:
datas (dict): Data where to search for reference declarations.
This is commonly the fully parsed manifest.
meta_references (list): List of enabled reference names.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
references = OrderedDict()
for section in meta_references:
references[section] = self.get_reference(datas, section)
return references
def serialize(self, datas):
"""
Serialize datas to manifest structure with metas and references.
Only references are returned, metas are assigned to attribute
``ManifestSerializer._metas``.
Arguments:
datas (dict): Data where to search for reference declarations. This
is commonly the fully parsed manifest.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
self._metas = OrderedDict({
'references': self.get_meta_references(datas),
})
return self.get_enabled_references(datas, self._metas['references'])
|
sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.get_enabled_references | python | def get_enabled_references(self, datas, meta_references):
references = OrderedDict()
for section in meta_references:
references[section] = self.get_reference(datas, section)
return references | Get enabled manifest references declarations.
Enabled references are defined through meta references declaration,
every other references are ignored.
Arguments:
datas (dict): Data where to search for reference declarations.
This is commonly the fully parsed manifest.
meta_references (list): List of enabled reference names.
Returns:
collections.OrderedDict: Serialized enabled references datas. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L436-L456 | [
"def get_reference(self, datas, name):\n \"\"\"\n Get serialized reference datas\n\n Because every reference is turned to a dict (that stands on ``keys``\n variable that is a list of key names), every variables must have the\n same exact length of word than the key name list.\n\n A reference name starts with 'styleguide-reference-' followed by\n name for reference.\n\n A reference can contains variable ``--structure`` setted to ``\"flat\"``,\n ``\"list\"`` or ``\"string\"`` to define serialization structure.\n\n Arguments:\n datas (dict): Data where to search for reference declaration. This\n is commonly the fully parsed manifest.\n name (string): Reference name to get and serialize.\n\n Returns:\n collections.OrderedDict: Serialized reference datas.\n \"\"\"\n rule_name = '-'.join((RULE_REFERENCE, name))\n structure_mode = 'nested'\n\n if rule_name not in datas:\n msg = \"Unable to find enabled reference '{}'\"\n raise SerializerError(msg.format(name))\n\n properties = datas.get(rule_name)\n\n # Search for \"structure\" variable\n if 'structure' in properties:\n if properties['structure'] == 'flat':\n structure_mode = 'flat'\n elif properties['structure'] == 'list':\n structure_mode = 'list'\n elif properties['structure'] == 'string':\n structure_mode = 'string'\n elif properties['structure'] == 'json':\n structure_mode = 'json'\n elif properties['structure'] == 'nested':\n pass\n else:\n msg = \"Invalid structure mode name '{}' for reference '{}'\"\n raise SerializerError(msg.format(structure_mode, name))\n del properties['structure']\n\n # Validate variable names\n for item in properties.keys():\n self.validate_variable_name(item)\n\n # Perform serialize according to structure mode\n if structure_mode == 'flat':\n context = self.serialize_to_flat(name, properties)\n elif structure_mode == 'list':\n context = self.serialize_to_list(name, properties)\n elif structure_mode == 'string':\n context = self.serialize_to_string(name, properties)\n elif structure_mode == 
'nested':\n context = self.serialize_to_nested(name, properties)\n elif structure_mode == 'json':\n context = self.serialize_to_json(name, properties)\n\n return context\n"
] | class ManifestSerializer(object):
"""
Serialize parsed CSS to data suitable to Manifest
Raises:
SerializerError: When there is an invalid syntax in datas.
Attributes:
_metas (collections.OrderedDict): Buffer to store serialized metas
from parsed source. Default is an empty dict which reseted and
filled from ``serialize`` method.
_DEFAULT_SPLITTER (string): Default value splitter used for some
structure kinds.
"""
_DEFAULT_SPLITTER = 'white-space'
def __init__(self):
self._metas = OrderedDict()
def validate_rule_name(self, name):
"""
Validate rule name.
Arguments:
name (string): Rule name.
Returns:
bool: ``True`` if rule name is valid.
"""
if not name:
raise SerializerError("Rule name is empty".format(name))
if name[0] not in RULE_ALLOWED_START:
msg = "Rule name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in RULE_ALLOWED_CHARS:
msg = ("Invalid rule name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def validate_variable_name(self, name):
"""
Validate variable name.
Arguments:
name (string): Property name.
Returns:
bool: ``True`` if variable name is valid.
"""
if not name:
raise SerializerError("Variable name is empty".format(name))
if name[0] not in PROPERTY_ALLOWED_START:
msg = "Variable name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in PROPERTY_ALLOWED_CHARS:
msg = ("Invalid variable name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def value_splitter(self, reference, prop, value, mode):
"""
Split a string into a list items.
Default behavior is to split on white spaces.
Arguments:
reference (string): Reference name used when raising possible
error.
prop (string): Property name used when raising possible error.
value (string): Property value to split.
mode (string): Splitter mode. Default should come from
``ManifestSerializer._DEFAULT_SPLITTER``.
Available splitter are:
* ``white-space``: Simply split a string on white spaces;
* ``json-list``: Assume the string is a JSON list to parse;
Returns:
list:
"""
items = []
if mode == 'json-list':
try:
items = json.loads(value)
except json.JSONDecodeError as e:
print(value)
msg = ("Reference '{ref}' raised JSON decoder error when "
"splitting values from '{prop}': {err}'")
raise SerializerError(msg.format(ref=reference, prop=prop,
err=e))
else:
if len(value) > 0:
items = value.split(" ")
return items
def serialize_to_json(self, name, datas):
"""
Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending from JSON content.
"""
data_object = datas.get('object', None)
if data_object is None:
msg = ("JSON reference '{}' lacks of required 'object' variable")
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content
def serialize_to_nested(self, name, datas):
"""
Serialize given datas to a nested structure where each key create an
item and each other variable is stored as a subitem with corresponding
value (according to key index position).
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Nested dictionnary of serialized reference datas.
"""
keys = datas.get('keys', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Nested reference '{}' lacks of required 'keys' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
# Initialize context dict with reference keys
context = OrderedDict()
for k in keys:
context[k] = OrderedDict()
# Tidy each variable value to its respective item
for k, v in datas.items():
# Ignore reserved internal keywords
if k not in ('keys', 'structure', 'splitter'):
values = self.value_splitter(name, 'values', v, mode=splitter)
if len(values) != len(keys):
msg = ("Nested reference '{}' has different length for "
"values of '{}' and 'keys'")
raise SerializerError(msg.format(name, k))
# Put each value to its respective key using position index.
for i, item in enumerate(values):
ref = keys[i]
context[ref][k] = item
return context
def serialize_to_flat(self, name, datas):
"""
Serialize given datas to a flat structure ``KEY:VALUE`` where ``KEY``
comes from ``keys`` variable and ``VALUE`` comes from ``values``
variable.
This means both ``keys`` and ``values`` are required variable to be
correctly filled (each one is a string of item separated with an empty
space). Both resulting list must be the same length.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Flat dictionnay of serialized reference datas.
"""
keys = datas.get('keys', None)
values = datas.get('values', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Flat reference '{}' lacks of required 'keys' variable or "
"is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
if not values:
msg = ("Flat reference '{}' lacks of required 'values' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
values = self.value_splitter(name, 'values', values, mode=splitter)
if len(values) != len(keys):
msg = ("Flat reference have different length of 'keys' ands "
"'values' variable")
raise SerializerError(msg.format(name))
return OrderedDict(zip(keys, values))
def serialize_to_list(self, name, datas):
"""
Serialize given datas to a list structure.
List structure is very simple and only require a variable ``--items``
which is a string of values separated with an empty space. Every other
properties are ignored.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
list: List of serialized reference datas.
"""
items = datas.get('items', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if items is None:
msg = ("List reference '{}' lacks of required 'items' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
items = self.value_splitter(name, 'items', items, mode=splitter)
return items
def serialize_to_string(self, name, datas):
"""
Serialize given datas to a string.
Simply return the value from required variable``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value.
"""
value = datas.get('value', None)
if value is None:
msg = ("String reference '{}' lacks of required 'value' variable "
"or is empty")
raise SerializerError(msg.format(name))
return value
def get_meta_references(self, datas):
"""
Get manifest enabled references declaration
This required declaration is readed from
``styleguide-metas-references`` rule that require either a ``--names``
or ``--auto`` variable, each one define the mode to enable reference:
Manually
Using ``--names`` which define a list of names to enable, every
other non enabled rule will be ignored.
Section name (and so Reference name also) must not contains special
character nor ``-`` so they still be valid variable name for almost
any languages. For word separator inside name, use ``_``.
Automatic
Using ``--auto`` variable every reference rules will be enabled.
The value of this variable is not important since it is not empty.
If both of these variables are defined, the manual enable mode is used.
Arguments:
datas (dict): Data where to search for meta references declaration.
This is commonly the fully parsed manifest.
Returns:
list: A list of reference names.
"""
rule = datas.get(RULE_META_REFERENCES, {})
if not rule:
msg = "Manifest lacks of '.{}' or is empty"
raise SerializerError(msg.format(RULE_META_REFERENCES))
else:
if rule.get('names', None):
names = rule.get('names').split(" ")
elif rule.get('auto', None):
names = self.get_available_references(datas)
else:
msg = ("'.{}' either require '--names' or '--auto' variable "
"to be defined")
raise SerializerError(msg.format(RULE_META_REFERENCES))
for item in names:
self.validate_rule_name(item)
return names
def get_reference(self, datas, name):
"""
Get serialized reference datas
Because every reference is turned to a dict (that stands on ``keys``
variable that is a list of key names), every variables must have the
same exact length of word than the key name list.
A reference name starts with 'styleguide-reference-' followed by
name for reference.
A reference can contains variable ``--structure`` setted to ``"flat"``,
``"list"`` or ``"string"`` to define serialization structure.
Arguments:
datas (dict): Data where to search for reference declaration. This
is commonly the fully parsed manifest.
name (string): Reference name to get and serialize.
Returns:
collections.OrderedDict: Serialized reference datas.
"""
rule_name = '-'.join((RULE_REFERENCE, name))
structure_mode = 'nested'
if rule_name not in datas:
msg = "Unable to find enabled reference '{}'"
raise SerializerError(msg.format(name))
properties = datas.get(rule_name)
# Search for "structure" variable
if 'structure' in properties:
if properties['structure'] == 'flat':
structure_mode = 'flat'
elif properties['structure'] == 'list':
structure_mode = 'list'
elif properties['structure'] == 'string':
structure_mode = 'string'
elif properties['structure'] == 'json':
structure_mode = 'json'
elif properties['structure'] == 'nested':
pass
else:
msg = "Invalid structure mode name '{}' for reference '{}'"
raise SerializerError(msg.format(structure_mode, name))
del properties['structure']
# Validate variable names
for item in properties.keys():
self.validate_variable_name(item)
# Perform serialize according to structure mode
if structure_mode == 'flat':
context = self.serialize_to_flat(name, properties)
elif structure_mode == 'list':
context = self.serialize_to_list(name, properties)
elif structure_mode == 'string':
context = self.serialize_to_string(name, properties)
elif structure_mode == 'nested':
context = self.serialize_to_nested(name, properties)
elif structure_mode == 'json':
context = self.serialize_to_json(name, properties)
return context
def get_available_references(self, datas):
"""
Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed.
"""
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names
def serialize(self, datas):
"""
Serialize datas to manifest structure with metas and references.
Only references are returned, metas are assigned to attribute
``ManifestSerializer._metas``.
Arguments:
datas (dict): Data where to search for reference declarations. This
is commonly the fully parsed manifest.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
self._metas = OrderedDict({
'references': self.get_meta_references(datas),
})
return self.get_enabled_references(datas, self._metas['references'])
|
sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.serialize | python | def serialize(self, datas):
self._metas = OrderedDict({
'references': self.get_meta_references(datas),
})
return self.get_enabled_references(datas, self._metas['references']) | Serialize datas to manifest structure with metas and references.
Only references are returned, metas are assigned to attribute
``ManifestSerializer._metas``.
Arguments:
datas (dict): Data where to search for reference declarations. This
is commonly the fully parsed manifest.
Returns:
collections.OrderedDict: Serialized enabled references datas. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L458-L476 | [
"def get_meta_references(self, datas):\n \"\"\"\n Get manifest enabled references declaration\n\n This required declaration is readed from\n ``styleguide-metas-references`` rule that require either a ``--names``\n or ``--auto`` variable, each one define the mode to enable reference:\n\n Manually\n Using ``--names`` which define a list of names to enable, every\n other non enabled rule will be ignored.\n\n Section name (and so Reference name also) must not contains special\n character nor ``-`` so they still be valid variable name for almost\n any languages. For word separator inside name, use ``_``.\n Automatic\n Using ``--auto`` variable every reference rules will be enabled.\n The value of this variable is not important since it is not empty.\n\n If both of these variables are defined, the manual enable mode is used.\n\n Arguments:\n datas (dict): Data where to search for meta references declaration.\n This is commonly the fully parsed manifest.\n\n Returns:\n list: A list of reference names.\n \"\"\"\n rule = datas.get(RULE_META_REFERENCES, {})\n\n if not rule:\n msg = \"Manifest lacks of '.{}' or is empty\"\n raise SerializerError(msg.format(RULE_META_REFERENCES))\n else:\n if rule.get('names', None):\n names = rule.get('names').split(\" \")\n elif rule.get('auto', None):\n names = self.get_available_references(datas)\n else:\n msg = (\"'.{}' either require '--names' or '--auto' variable \"\n \"to be defined\")\n raise SerializerError(msg.format(RULE_META_REFERENCES))\n\n for item in names:\n self.validate_rule_name(item)\n\n return names\n",
"def get_enabled_references(self, datas, meta_references):\n \"\"\"\n Get enabled manifest references declarations.\n\n Enabled references are defined through meta references declaration,\n every other references are ignored.\n\n Arguments:\n datas (dict): Data where to search for reference declarations.\n This is commonly the fully parsed manifest.\n meta_references (list): List of enabled reference names.\n\n Returns:\n collections.OrderedDict: Serialized enabled references datas.\n \"\"\"\n references = OrderedDict()\n\n for section in meta_references:\n references[section] = self.get_reference(datas, section)\n\n return references\n"
] | class ManifestSerializer(object):
"""
Serialize parsed CSS to data suitable to Manifest
Raises:
SerializerError: When there is an invalid syntax in datas.
Attributes:
_metas (collections.OrderedDict): Buffer to store serialized metas
from parsed source. Default is an empty dict which reseted and
filled from ``serialize`` method.
_DEFAULT_SPLITTER (string): Default value splitter used for some
structure kinds.
"""
_DEFAULT_SPLITTER = 'white-space'
def __init__(self):
self._metas = OrderedDict()
def validate_rule_name(self, name):
"""
Validate rule name.
Arguments:
name (string): Rule name.
Returns:
bool: ``True`` if rule name is valid.
"""
if not name:
raise SerializerError("Rule name is empty".format(name))
if name[0] not in RULE_ALLOWED_START:
msg = "Rule name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in RULE_ALLOWED_CHARS:
msg = ("Invalid rule name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def validate_variable_name(self, name):
"""
Validate variable name.
Arguments:
name (string): Property name.
Returns:
bool: ``True`` if variable name is valid.
"""
if not name:
raise SerializerError("Variable name is empty".format(name))
if name[0] not in PROPERTY_ALLOWED_START:
msg = "Variable name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in PROPERTY_ALLOWED_CHARS:
msg = ("Invalid variable name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True
def value_splitter(self, reference, prop, value, mode):
"""
Split a string into a list items.
Default behavior is to split on white spaces.
Arguments:
reference (string): Reference name used when raising possible
error.
prop (string): Property name used when raising possible error.
value (string): Property value to split.
mode (string): Splitter mode. Default should come from
``ManifestSerializer._DEFAULT_SPLITTER``.
Available splitter are:
* ``white-space``: Simply split a string on white spaces;
* ``json-list``: Assume the string is a JSON list to parse;
Returns:
list:
"""
items = []
if mode == 'json-list':
try:
items = json.loads(value)
except json.JSONDecodeError as e:
print(value)
msg = ("Reference '{ref}' raised JSON decoder error when "
"splitting values from '{prop}': {err}'")
raise SerializerError(msg.format(ref=reference, prop=prop,
err=e))
else:
if len(value) > 0:
items = value.split(" ")
return items
def serialize_to_json(self, name, datas):
"""
Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending from JSON content.
"""
data_object = datas.get('object', None)
if data_object is None:
msg = ("JSON reference '{}' lacks of required 'object' variable")
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content
def serialize_to_nested(self, name, datas):
"""
Serialize given datas to a nested structure where each key create an
item and each other variable is stored as a subitem with corresponding
value (according to key index position).
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Nested dictionnary of serialized reference datas.
"""
keys = datas.get('keys', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Nested reference '{}' lacks of required 'keys' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
# Initialize context dict with reference keys
context = OrderedDict()
for k in keys:
context[k] = OrderedDict()
# Tidy each variable value to its respective item
for k, v in datas.items():
# Ignore reserved internal keywords
if k not in ('keys', 'structure', 'splitter'):
values = self.value_splitter(name, 'values', v, mode=splitter)
if len(values) != len(keys):
msg = ("Nested reference '{}' has different length for "
"values of '{}' and 'keys'")
raise SerializerError(msg.format(name, k))
# Put each value to its respective key using position index.
for i, item in enumerate(values):
ref = keys[i]
context[ref][k] = item
return context
def serialize_to_flat(self, name, datas):
"""
Serialize given datas to a flat structure ``KEY:VALUE`` where ``KEY``
comes from ``keys`` variable and ``VALUE`` comes from ``values``
variable.
This means both ``keys`` and ``values`` are required variable to be
correctly filled (each one is a string of item separated with an empty
space). Both resulting list must be the same length.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
dict: Flat dictionnay of serialized reference datas.
"""
keys = datas.get('keys', None)
values = datas.get('values', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Flat reference '{}' lacks of required 'keys' variable or "
"is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
if not values:
msg = ("Flat reference '{}' lacks of required 'values' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
values = self.value_splitter(name, 'values', values, mode=splitter)
if len(values) != len(keys):
msg = ("Flat reference have different length of 'keys' ands "
"'values' variable")
raise SerializerError(msg.format(name))
return OrderedDict(zip(keys, values))
def serialize_to_list(self, name, datas):
"""
Serialize given datas to a list structure.
List structure is very simple and only require a variable ``--items``
which is a string of values separated with an empty space. Every other
properties are ignored.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
list: List of serialized reference datas.
"""
items = datas.get('items', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if items is None:
msg = ("List reference '{}' lacks of required 'items' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
items = self.value_splitter(name, 'items', items, mode=splitter)
return items
def serialize_to_string(self, name, datas):
"""
Serialize given datas to a string.
Simply return the value from required variable``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value.
"""
value = datas.get('value', None)
if value is None:
msg = ("String reference '{}' lacks of required 'value' variable "
"or is empty")
raise SerializerError(msg.format(name))
return value
def get_meta_references(self, datas):
"""
Get manifest enabled references declaration
This required declaration is readed from
``styleguide-metas-references`` rule that require either a ``--names``
or ``--auto`` variable, each one define the mode to enable reference:
Manually
Using ``--names`` which define a list of names to enable, every
other non enabled rule will be ignored.
Section name (and so Reference name also) must not contains special
character nor ``-`` so they still be valid variable name for almost
any languages. For word separator inside name, use ``_``.
Automatic
Using ``--auto`` variable every reference rules will be enabled.
The value of this variable is not important since it is not empty.
If both of these variables are defined, the manual enable mode is used.
Arguments:
datas (dict): Data where to search for meta references declaration.
This is commonly the fully parsed manifest.
Returns:
list: A list of reference names.
"""
rule = datas.get(RULE_META_REFERENCES, {})
if not rule:
msg = "Manifest lacks of '.{}' or is empty"
raise SerializerError(msg.format(RULE_META_REFERENCES))
else:
if rule.get('names', None):
names = rule.get('names').split(" ")
elif rule.get('auto', None):
names = self.get_available_references(datas)
else:
msg = ("'.{}' either require '--names' or '--auto' variable "
"to be defined")
raise SerializerError(msg.format(RULE_META_REFERENCES))
for item in names:
self.validate_rule_name(item)
return names
def get_reference(self, datas, name):
"""
Get serialized reference datas
Because every reference is turned to a dict (that stands on ``keys``
variable that is a list of key names), every variables must have the
same exact length of word than the key name list.
A reference name starts with 'styleguide-reference-' followed by
name for reference.
A reference can contains variable ``--structure`` setted to ``"flat"``,
``"list"`` or ``"string"`` to define serialization structure.
Arguments:
datas (dict): Data where to search for reference declaration. This
is commonly the fully parsed manifest.
name (string): Reference name to get and serialize.
Returns:
collections.OrderedDict: Serialized reference datas.
"""
rule_name = '-'.join((RULE_REFERENCE, name))
structure_mode = 'nested'
if rule_name not in datas:
msg = "Unable to find enabled reference '{}'"
raise SerializerError(msg.format(name))
properties = datas.get(rule_name)
# Search for "structure" variable
if 'structure' in properties:
if properties['structure'] == 'flat':
structure_mode = 'flat'
elif properties['structure'] == 'list':
structure_mode = 'list'
elif properties['structure'] == 'string':
structure_mode = 'string'
elif properties['structure'] == 'json':
structure_mode = 'json'
elif properties['structure'] == 'nested':
pass
else:
msg = "Invalid structure mode name '{}' for reference '{}'"
raise SerializerError(msg.format(structure_mode, name))
del properties['structure']
# Validate variable names
for item in properties.keys():
self.validate_variable_name(item)
# Perform serialize according to structure mode
if structure_mode == 'flat':
context = self.serialize_to_flat(name, properties)
elif structure_mode == 'list':
context = self.serialize_to_list(name, properties)
elif structure_mode == 'string':
context = self.serialize_to_string(name, properties)
elif structure_mode == 'nested':
context = self.serialize_to_nested(name, properties)
elif structure_mode == 'json':
context = self.serialize_to_json(name, properties)
return context
def get_available_references(self, datas):
"""
Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed.
"""
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names
def get_enabled_references(self, datas, meta_references):
"""
Get enabled manifest references declarations.
Enabled references are defined through meta references declaration,
every other references are ignored.
Arguments:
datas (dict): Data where to search for reference declarations.
This is commonly the fully parsed manifest.
meta_references (list): List of enabled reference names.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
references = OrderedDict()
for section in meta_references:
references[section] = self.get_reference(datas, section)
return references
|
sveetch/py-css-styleguide | py_css_styleguide/model.py | Manifest.load | python | def load(self, source, filepath=None):
# Set _path if source is a file-like object
try:
self._path = source.name
except AttributeError:
self._path = filepath
# Get source content either it's a string or a file-like object
try:
source_content = source.read()
except AttributeError:
source_content = source
# Parse and serialize given source
parser = TinycssSourceParser()
self._datas = parser.parse(source_content)
serializer = ManifestSerializer()
references = serializer.serialize(self._datas)
# Copy serialized metas
self.metas = serializer._metas
# Set every enabled rule as object attribute
for k, v in references.items():
self.set_rule(k, v)
return self._datas | Load source as manifest attributes
Arguments:
source (string or file-object): CSS source to parse and serialize
to find metas and rules. It can be either a string or a
file-like object (aka with a ``read()`` method which return
string).
Keyword Arguments:
filepath (string): Optional filepath to memorize if source comes
from a file. Default is ``None`` as if source comes from a
string. If ``source`` argument is a file-like object, you
should not need to bother of this argument since filepath will
be filled from source ``name`` attribute.
Returns:
dict: Dictionnary of serialized rules. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/model.py#L41-L87 | [
"def set_rule(self, name, properties):\n \"\"\"\n Set a rules as object attribute.\n\n Arguments:\n name (string): Rule name to set as attribute name.\n properties (dict): Dictionnary of properties.\n \"\"\"\n self._rule_attrs.append(name)\n setattr(self, name, properties)\n",
"def parse(self, source):\n \"\"\"\n Read and parse CSS source and return dict of rules.\n\n Arguments:\n source (string): Source content to parse.\n\n Returns:\n dict: Selectors with their properties.\n \"\"\"\n return self.consume(source)\n",
"def serialize(self, datas):\n \"\"\"\n Serialize datas to manifest structure with metas and references.\n\n Only references are returned, metas are assigned to attribute\n ``ManifestSerializer._metas``.\n\n Arguments:\n datas (dict): Data where to search for reference declarations. This\n is commonly the fully parsed manifest.\n\n Returns:\n collections.OrderedDict: Serialized enabled references datas.\n \"\"\"\n self._metas = OrderedDict({\n 'references': self.get_meta_references(datas),\n })\n\n return self.get_enabled_references(datas, self._metas['references'])\n"
] | class Manifest(object):
"""
Manifest model
During load process, every rule is stored as object attribute so you can
reach them directly.
Attributes:
_path (string): Possible filepath for source if it has been given or
finded from source file-object.
_datas (dict): Dictionnary of every rules returned by parser. This
is not something you would need to reach commonly.
_rule_attrs (list): List of registered reference rules. You may use
it in iteration to find available reference attribute names.
metas (dict): Dictionnary of every metas returned by serializer.
"""
def __init__(self):
self._path = None
self._datas = None
self._rule_attrs = []
self.metas = {}
def set_rule(self, name, properties):
"""
Set a rules as object attribute.
Arguments:
name (string): Rule name to set as attribute name.
properties (dict): Dictionnary of properties.
"""
self._rule_attrs.append(name)
setattr(self, name, properties)
def remove_rule(self, name):
"""
Remove a rule from attributes.
Arguments:
name (string): Rule name to remove.
"""
self._rule_attrs.remove(name)
delattr(self, name)
def to_json(self, indent=4):
"""
Serialize metas and reference attributes to a JSON string.
Keyword Arguments:
indent (int): Space indentation, default to ``4``.
Returns:
string: JSON datas.
"""
agregate = {
'metas': self.metas,
}
agregate.update({k: getattr(self, k) for k in self._rule_attrs})
return json.dumps(agregate, indent=indent)
|
sveetch/py-css-styleguide | py_css_styleguide/model.py | Manifest.set_rule | python | def set_rule(self, name, properties):
self._rule_attrs.append(name)
setattr(self, name, properties) | Set a rules as object attribute.
Arguments:
name (string): Rule name to set as attribute name.
properties (dict): Dictionnary of properties. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/model.py#L89-L98 | null | class Manifest(object):
"""
Manifest model
During load process, every rule is stored as object attribute so you can
reach them directly.
Attributes:
_path (string): Possible filepath for source if it has been given or
finded from source file-object.
_datas (dict): Dictionnary of every rules returned by parser. This
is not something you would need to reach commonly.
_rule_attrs (list): List of registered reference rules. You may use
it in iteration to find available reference attribute names.
metas (dict): Dictionnary of every metas returned by serializer.
"""
def __init__(self):
self._path = None
self._datas = None
self._rule_attrs = []
self.metas = {}
def load(self, source, filepath=None):
"""
Load source as manifest attributes
Arguments:
source (string or file-object): CSS source to parse and serialize
to find metas and rules. It can be either a string or a
file-like object (aka with a ``read()`` method which return
string).
Keyword Arguments:
filepath (string): Optional filepath to memorize if source comes
from a file. Default is ``None`` as if source comes from a
string. If ``source`` argument is a file-like object, you
should not need to bother of this argument since filepath will
be filled from source ``name`` attribute.
Returns:
dict: Dictionnary of serialized rules.
"""
# Set _path if source is a file-like object
try:
self._path = source.name
except AttributeError:
self._path = filepath
# Get source content either it's a string or a file-like object
try:
source_content = source.read()
except AttributeError:
source_content = source
# Parse and serialize given source
parser = TinycssSourceParser()
self._datas = parser.parse(source_content)
serializer = ManifestSerializer()
references = serializer.serialize(self._datas)
# Copy serialized metas
self.metas = serializer._metas
# Set every enabled rule as object attribute
for k, v in references.items():
self.set_rule(k, v)
return self._datas
def remove_rule(self, name):
"""
Remove a rule from attributes.
Arguments:
name (string): Rule name to remove.
"""
self._rule_attrs.remove(name)
delattr(self, name)
def to_json(self, indent=4):
"""
Serialize metas and reference attributes to a JSON string.
Keyword Arguments:
indent (int): Space indentation, default to ``4``.
Returns:
string: JSON datas.
"""
agregate = {
'metas': self.metas,
}
agregate.update({k: getattr(self, k) for k in self._rule_attrs})
return json.dumps(agregate, indent=indent)
|
sveetch/py-css-styleguide | py_css_styleguide/model.py | Manifest.remove_rule | python | def remove_rule(self, name):
self._rule_attrs.remove(name)
delattr(self, name) | Remove a rule from attributes.
Arguments:
name (string): Rule name to remove. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/model.py#L100-L108 | null | class Manifest(object):
"""
Manifest model
During load process, every rule is stored as object attribute so you can
reach them directly.
Attributes:
_path (string): Possible filepath for source if it has been given or
finded from source file-object.
_datas (dict): Dictionnary of every rules returned by parser. This
is not something you would need to reach commonly.
_rule_attrs (list): List of registered reference rules. You may use
it in iteration to find available reference attribute names.
metas (dict): Dictionnary of every metas returned by serializer.
"""
def __init__(self):
self._path = None
self._datas = None
self._rule_attrs = []
self.metas = {}
def load(self, source, filepath=None):
"""
Load source as manifest attributes
Arguments:
source (string or file-object): CSS source to parse and serialize
to find metas and rules. It can be either a string or a
file-like object (aka with a ``read()`` method which return
string).
Keyword Arguments:
filepath (string): Optional filepath to memorize if source comes
from a file. Default is ``None`` as if source comes from a
string. If ``source`` argument is a file-like object, you
should not need to bother of this argument since filepath will
be filled from source ``name`` attribute.
Returns:
dict: Dictionnary of serialized rules.
"""
# Set _path if source is a file-like object
try:
self._path = source.name
except AttributeError:
self._path = filepath
# Get source content either it's a string or a file-like object
try:
source_content = source.read()
except AttributeError:
source_content = source
# Parse and serialize given source
parser = TinycssSourceParser()
self._datas = parser.parse(source_content)
serializer = ManifestSerializer()
references = serializer.serialize(self._datas)
# Copy serialized metas
self.metas = serializer._metas
# Set every enabled rule as object attribute
for k, v in references.items():
self.set_rule(k, v)
return self._datas
def set_rule(self, name, properties):
"""
Set a rules as object attribute.
Arguments:
name (string): Rule name to set as attribute name.
properties (dict): Dictionnary of properties.
"""
self._rule_attrs.append(name)
setattr(self, name, properties)
def to_json(self, indent=4):
"""
Serialize metas and reference attributes to a JSON string.
Keyword Arguments:
indent (int): Space indentation, default to ``4``.
Returns:
string: JSON datas.
"""
agregate = {
'metas': self.metas,
}
agregate.update({k: getattr(self, k) for k in self._rule_attrs})
return json.dumps(agregate, indent=indent)
|
sveetch/py-css-styleguide | py_css_styleguide/model.py | Manifest.to_json | python | def to_json(self, indent=4):
agregate = {
'metas': self.metas,
}
agregate.update({k: getattr(self, k) for k in self._rule_attrs})
return json.dumps(agregate, indent=indent) | Serialize metas and reference attributes to a JSON string.
Keyword Arguments:
indent (int): Space indentation, default to ``4``.
Returns:
string: JSON datas. | train | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/model.py#L110-L126 | null | class Manifest(object):
"""
Manifest model
During load process, every rule is stored as object attribute so you can
reach them directly.
Attributes:
_path (string): Possible filepath for source if it has been given or
finded from source file-object.
_datas (dict): Dictionnary of every rules returned by parser. This
is not something you would need to reach commonly.
_rule_attrs (list): List of registered reference rules. You may use
it in iteration to find available reference attribute names.
metas (dict): Dictionnary of every metas returned by serializer.
"""
def __init__(self):
self._path = None
self._datas = None
self._rule_attrs = []
self.metas = {}
def load(self, source, filepath=None):
"""
Load source as manifest attributes
Arguments:
source (string or file-object): CSS source to parse and serialize
to find metas and rules. It can be either a string or a
file-like object (aka with a ``read()`` method which return
string).
Keyword Arguments:
filepath (string): Optional filepath to memorize if source comes
from a file. Default is ``None`` as if source comes from a
string. If ``source`` argument is a file-like object, you
should not need to bother of this argument since filepath will
be filled from source ``name`` attribute.
Returns:
dict: Dictionnary of serialized rules.
"""
# Set _path if source is a file-like object
try:
self._path = source.name
except AttributeError:
self._path = filepath
# Get source content either it's a string or a file-like object
try:
source_content = source.read()
except AttributeError:
source_content = source
# Parse and serialize given source
parser = TinycssSourceParser()
self._datas = parser.parse(source_content)
serializer = ManifestSerializer()
references = serializer.serialize(self._datas)
# Copy serialized metas
self.metas = serializer._metas
# Set every enabled rule as object attribute
for k, v in references.items():
self.set_rule(k, v)
return self._datas
def set_rule(self, name, properties):
"""
Set a rules as object attribute.
Arguments:
name (string): Rule name to set as attribute name.
properties (dict): Dictionnary of properties.
"""
self._rule_attrs.append(name)
setattr(self, name, properties)
def remove_rule(self, name):
"""
Remove a rule from attributes.
Arguments:
name (string): Rule name to remove.
"""
self._rule_attrs.remove(name)
delattr(self, name)
|
rbarrois/confutils | confutils/configfile.py | ConfigLineList.find_lines | python | def find_lines(self, line):
for other_line in self.lines:
if other_line.match(line):
yield other_line | Find all lines matching a given line. | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L110-L114 | null | class ConfigLineList(object):
"""A list of ConfigLine."""
def __init__(self, *lines):
self.lines = list(lines)
def append(self, line):
self.lines.append(line)
def remove(self, line):
old_len = len(self.lines)
self.lines = [l for l in self.lines if not l.match(line)]
return old_len - len(self.lines)
def update(self, old_line, new_line, once=False):
"""Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance.
"""
nb = 0
for i, line in enumerate(self.lines):
if line.match(old_line):
self.lines[i] = new_line
nb += 1
if once:
return nb
return nb
def __contains__(self, line):
return any(self.find_lines(line))
def __bool__(self):
return bool(self.lines)
__nonzero__ = __bool__
def __len__(self):
return len(self.lines)
def __iter__(self):
return iter(self.lines)
def __eq__(self, other):
if not isinstance(other, ConfigLineList):
return NotImplemented
return self.lines == other.lines
def __hash__(self):
return hash((self.__class__, tuple(self.lines)))
def __repr__(self):
return 'ConfigLineList(%r)' % self.lines
|
rbarrois/confutils | confutils/configfile.py | ConfigLineList.update | python | def update(self, old_line, new_line, once=False):
nb = 0
for i, line in enumerate(self.lines):
if line.match(old_line):
self.lines[i] = new_line
nb += 1
if once:
return nb
return nb | Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance. | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L121-L133 | null | class ConfigLineList(object):
"""A list of ConfigLine."""
def __init__(self, *lines):
self.lines = list(lines)
def append(self, line):
self.lines.append(line)
def find_lines(self, line):
"""Find all lines matching a given line."""
for other_line in self.lines:
if other_line.match(line):
yield other_line
def remove(self, line):
old_len = len(self.lines)
self.lines = [l for l in self.lines if not l.match(line)]
return old_len - len(self.lines)
def __contains__(self, line):
return any(self.find_lines(line))
def __bool__(self):
return bool(self.lines)
__nonzero__ = __bool__
def __len__(self):
return len(self.lines)
def __iter__(self):
return iter(self.lines)
def __eq__(self, other):
if not isinstance(other, ConfigLineList):
return NotImplemented
return self.lines == other.lines
def __hash__(self):
return hash((self.__class__, tuple(self.lines)))
def __repr__(self):
return 'ConfigLineList(%r)' % self.lines
|
rbarrois/confutils | confutils/configfile.py | Section.update | python | def update(self, old_line, new_line, once=False):
nb = 0
for block in self.blocks:
nb += block.update(old_line, new_line, once=once)
if nb and once:
return nb
return nb | Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance. | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L215-L225 | null | class Section(object):
"""A section.
A section has a ``name`` and lines spread around the file.
"""
def __init__(self, name):
self.name = name
self.blocks = []
self.extra_block = None
def new_block(self, **kwargs):
block = SectionBlock(self.name, **kwargs)
self.blocks.append(block)
return block
def find_block(self, line):
"""Find the first block containing a line."""
for block in self.blocks:
if line in block:
return block
def find_lines(self, line):
for block in self.blocks:
for block_line in block:
if block_line.match(line):
yield block_line
def insert(self, line):
block = self.find_block(line)
if not block:
if self.blocks:
block = self.blocks[-1]
else:
block = self.extra_block = self.new_block()
block.append(line)
return block
def remove(self, line):
"""Delete all lines matching the given line."""
nb = 0
for block in self.blocks:
nb += block.remove(line)
return nb
def __iter__(self):
return iter(self.blocks)
def __repr__(self):
return '<Section: %s>' % self.name
|
rbarrois/confutils | confutils/configfile.py | Section.remove | python | def remove(self, line):
nb = 0
for block in self.blocks:
nb += block.remove(line)
return nb | Delete all lines matching the given line. | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L227-L233 | null | class Section(object):
"""A section.
A section has a ``name`` and lines spread around the file.
"""
def __init__(self, name):
self.name = name
self.blocks = []
self.extra_block = None
def new_block(self, **kwargs):
block = SectionBlock(self.name, **kwargs)
self.blocks.append(block)
return block
def find_block(self, line):
"""Find the first block containing a line."""
for block in self.blocks:
if line in block:
return block
def find_lines(self, line):
for block in self.blocks:
for block_line in block:
if block_line.match(line):
yield block_line
def insert(self, line):
block = self.find_block(line)
if not block:
if self.blocks:
block = self.blocks[-1]
else:
block = self.extra_block = self.new_block()
block.append(line)
return block
def update(self, old_line, new_line, once=False):
"""Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance.
"""
nb = 0
for block in self.blocks:
nb += block.update(old_line, new_line, once=once)
if nb and once:
return nb
return nb
def __iter__(self):
return iter(self.blocks)
def __repr__(self):
return '<Section: %s>' % self.name
|
rbarrois/confutils | confutils/configfile.py | MultiValuedSectionView.add | python | def add(self, key, value):
self.configfile.add(self.name, key, value) | Add a new value for a key.
This differs from __setitem__ in adding a new value instead of updating
the list of values, thus avoiding the need to fetch the previous list of
values. | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L298-L305 | null | class MultiValuedSectionView(BaseSectionView):
"""A SectionView where each key may have multiple values.
Always provide the list of expected values when setting.
"""
def __getitem__(self, key):
entries = list(self.configfile.get(self.name, key))
if not entries:
raise KeyError("No value defined for key %r in %r" % (key, self))
return entries
def __setitem__(self, key, values):
old_values = frozenset(self.get(key, []))
new_values = frozenset(values)
for removed in old_values - new_values:
self.configfile.remove(self.name, key, removed)
for added in new_values - old_values:
self.configfile.add(self.name, key, added)
def __delitem__(self, key):
removed = self.configfile.remove(self.name, key)
if not removed:
raise KeyError("No value defined for key %r in %r" % (key, self))
def iteritems(self):
values = dict()
order = []
for k, v in self.configfile.items(self.name):
values.setdefault(k, []).append(v)
if k not in order:
order.append(k)
for k in order:
yield k, values[k]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.