hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acfc04bf7b06f7a16b471b91a283e601bd02d7ce
| 11,120
|
py
|
Python
|
webdriverwrapper2/wrapper.py
|
smiles1998/webdriverwrapper2
|
e3f9edb0776bf710ef2cbff0a8d0844d7bf80153
|
[
"MIT"
] | null | null | null |
webdriverwrapper2/wrapper.py
|
smiles1998/webdriverwrapper2
|
e3f9edb0776bf710ef2cbff0a8d0844d7bf80153
|
[
"MIT"
] | null | null | null |
webdriverwrapper2/wrapper.py
|
smiles1998/webdriverwrapper2
|
e3f9edb0776bf710ef2cbff0a8d0844d7bf80153
|
[
"MIT"
] | null | null | null |
import logging
from urllib.parse import urlparse, urlunparse, urlencode
from selenium.webdriver import *
import selenium.common.exceptions as selenium_exc
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
# Configure root logging once at import time so wrapper messages are visible.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s : %(message)s')
class _WebdriverBaseWrapper:
    """Common helper methods shared by wrapped drivers and wrapped elements."""

    #: Default timeout in seconds for wait* methods (such as wait_for_element).
    default_wait_timeout = 10

    def click(self, *args, **kwargs):
        """
        When you do not pass any argument, it clicks on the current element.
        If you pass some arguments, it works as the following snippet. For
        more info about what you can pass, check out method
        :py:meth:`~._WebdriverBaseWrapper.get_elm`.

        .. code-block:: python

            driver.get_elm(id='someid').click()
        """
        if args or kwargs:
            elm = self.get_elm(*args, **kwargs)
            elm.click()
        else:
            super().click()

    def get_elm(self,
                id_=None, class_name=None, name=None, tag_name=None,
                parent_id=None, parent_class_name=None, parent_name=None, parent_tag_name=None,
                xpath=None, css_selector=None):
        """
        Return the first found element. This method uses
        :py:meth:`~._WebdriverBaseWrapper.get_elms`.

        :raises selenium.common.exceptions.NoSuchElementException:
            If nothing matches the given locator.
        """
        elms = self.get_elms(
            id_, class_name, name, tag_name,
            parent_id, parent_class_name, parent_name, parent_tag_name,
            xpath, css_selector,
        )
        if not elms:
            raise selenium_exc.NoSuchElementException()
        return elms[0]

    def get_elms(self,
                 id_=None, class_name=None, name=None, tag_name=None,
                 parent_id=None, parent_class_name=None, parent_name=None, parent_tag_name=None,
                 xpath=None, css_selector=None):
        """
        Shortcut for :py:meth:`find_element* <selenium.webdriver.remote.webelement.WebElement.find_element>`
        methods. It's shorter, and you can quickly find an element in an element.

        .. code-block:: python

            elm = driver.find_element_by_id('someid')
            elm.find_elements_by_class_name('someclass')
            # vs.
            elm = driver.get_elms(parent_id='someid', class_name='someclass')
        """
        # Resolve the parent to search in; default to self (driver or element).
        if parent_id or parent_class_name or parent_name or parent_tag_name:
            parent = self.get_elm(parent_id, parent_class_name, parent_name, parent_tag_name)
        else:
            parent = self

        # Exactly one locator kind may be used per call.
        if len([x for x in (id_, class_name, name, tag_name, xpath, css_selector) if x is not None]) > 1:
            raise Exception('You can find element only by one param.')

        if id_ is not None:
            return parent.find_elements(by=By.ID, value=id_)
        if class_name is not None:
            return parent.find_elements(by=By.CLASS_NAME, value=class_name)
        if name is not None:
            return parent.find_elements(by=By.NAME, value=name)
        if tag_name is not None:
            return parent.find_elements(by=By.TAG_NAME, value=tag_name)
        if xpath is not None:
            return parent.find_elements(by=By.XPATH, value=xpath)
        if css_selector is not None:
            return parent.find_elements(by=By.CSS_SELECTOR, value=css_selector)

        # Message fixed: this method finds elements, it does not click
        # (original text was copy-pasted from the click helper).
        raise Exception('You must specify id or name of element which you want to find.')

    def wait_for_element(self, timeout=None, message='', *args, **kwargs):
        """
        Shortcut for waiting for an element. If it does not end with an
        exception, it returns that element. Default timeout is
        `~.default_wait_timeout`.

        Same as following:

        .. code-block:: python

            selenium.webdriver.support.wait.WebDriverWait(driver, timeout).until(lambda driver: driver.get_elm(...))
        """
        if not timeout:
            timeout = self.default_wait_timeout
        self.wait(timeout).until(lambda driver: driver.get_elm(*args, **kwargs), message=message)
        # The wait above guarantees the element exists; fetch and return it.
        elm = self.get_elm(*args, **kwargs)
        return elm

    def wait_for_element_show(self, timeout=None, message='', *args, **kwargs):
        """
        Shortcut for waiting for a visible element. If it does not end with
        an exception, it returns that element. Default timeout is
        `~.default_wait_timeout`.

        Same as following:

        .. code-block:: python

            selenium.webdriver.support.wait.WebDriverWait(driver, timeout).until(lambda driver: driver.get_elm(...))
        """
        if not timeout:
            timeout = self.default_wait_timeout

        def callback(driver):
            elms = self.get_elms(*args, **kwargs)
            if not elms:
                return False
            try:
                # All matching elements hidden -> keep waiting.
                if all(not elm.is_displayed() for elm in elms):
                    return False
            except selenium_exc.StaleElementReferenceException:
                # Element went stale between lookup and check; retry.
                return False
            return True

        self.wait(timeout).until(callback, message=message)
        elm = self.get_elm(*args, **kwargs)
        return elm

    def wait_for_element_hide(self, timeout=None, message='', *args, **kwargs):
        """
        Shortcut for waiting for hiding of an element. Default timeout is
        `~.default_wait_timeout`.

        Same as following:

        .. code-block:: python

            selenium.webdriver.support.wait.WebDriverWait(driver, timeout).until(lambda driver: not driver.get_elm(...))
        """
        if not timeout:
            timeout = self.default_wait_timeout

        def callback(driver):
            elms = self.get_elms(*args, **kwargs)
            if not elms:
                return True
            try:
                # Element still present but no longer displayed counts as hidden.
                if all(not elm.is_displayed() for elm in elms):
                    return True
            except selenium_exc.StaleElementReferenceException:
                # Element went stale between lookup and check; retry.
                return False
            return False

        self.wait(timeout).until(callback, message=message)

    def wait(self, timeout=None):
        """
        Call the following snippet, so you don't have to remember what to
        import. See :py:obj:`WebDriverWait <selenium.webdriver.support.wait.WebDriverWait>`
        for more information. Default timeout is `~.default_wait_timeout`.

        .. code-block:: python

            selenium.webdriver.support.wait.WebDriverWait(driver, timeout)

        Example:

        .. code-block:: python

            driver.wait().until(lambda driver: len(driver.find_element_by_id('elm')) > 10)
        """
        if not timeout:
            timeout = self.default_wait_timeout
        return WebDriverWait(self, timeout)
class _WebdriverWrapper(_WebdriverBaseWrapper):
    """Wrapper of the WebDriver itself (as opposed to a single element)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @property
    def _driver(self):
        """
        Always returns the driver, not an element. Use it when you need the
        driver and the variable can be either a driver or an element.
        """
        return self

    @property
    def html(self):
        """
        Returns ``innerHTML`` of the whole page. The page has to contain the
        tag ``body``; otherwise ``None`` is returned.
        """
        try:
            body = self.get_elm(tag_name='body')
        except selenium_exc.NoSuchElementException:
            return None
        else:
            return body.get_attribute('innerHTML')

    def break_point(self):
        """
        Stops testing and waits for pressing enter to continue. Useful when
        you need to check the Chrome console for some info, for example.
        """
        logging.info('Break point. Type enter to continue.')
        input()

    def get_url(self, path=None, query=None):
        """
        Builds a URL from *path* and *query*. If *path* is already an
        absolute URL (has a netloc), it is returned unchanged; otherwise the
        scheme, netloc and (if *path* is empty) path of the current URL are
        reused.
        """
        if urlparse(path).netloc:
            return path
        if isinstance(query, dict):
            query = urlencode(query)
        url_parts = urlparse(self.current_url)
        new_url_parts = (
            url_parts.scheme,
            url_parts.netloc,
            path or url_parts.path,
            None,  # params
            query,
            None,  # fragment
        )
        url = urlunparse(new_url_parts)
        return url

    def switch_to_window(self, window_name=None, title=None, url=None):
        """
        WebDriver implements switching to other windows only by name. With
        the wrapper there is also the option to switch by window title or
        URL. The URL can also be a relative path.
        """
        if window_name:
            self.switch_to.window(window_name)
            return
        if url:
            url = self.get_url(path=url)
        # Probe each window by switching to it and checking title/URL.
        for window_handle in self.window_handles:
            self.switch_to.window(window_handle)
            if title and self.title == title:
                return
            if url and self.current_url == url:
                return
        raise selenium_exc.NoSuchWindowException('Window (title=%s, url=%s) not found.' % (title, url))

    def close_window(self, window_name=None, title=None, url=None):
        """
        WebDriver implements only closing the current window. If you want to
        close some window without having to switch to it, use this method.
        """
        main_window_handle = self.current_window_handle
        self.switch_to_window(window_name, title, url)
        self.close()
        self.switch_to_window(main_window_handle)

    def close_other_windows(self):
        """
        Closes all non-current windows. Useful for tests - after each test
        you can automatically close all other windows.
        """
        main_window_handle = self.current_window_handle
        for window_handle in self.window_handles:
            if window_handle == main_window_handle:
                continue
            self.switch_to_window(window_handle)
            self.close()
        self.switch_to_window(main_window_handle)

    def close_alert(self, ignore_exception=False):
        """
        JS alerts are blocking. This method closes them. If there is no
        alert, the method raises an exception. In tests it is good to call
        this method with ``ignore_exception`` set to ``True``, which will
        ignore any exception.
        """
        try:
            alert = self.get_alert()
            alert.accept()
        # Narrowed from a bare ``except:`` so that SystemExit and
        # KeyboardInterrupt are never swallowed.
        except Exception:
            if not ignore_exception:
                raise

    def get_alert(self):
        """
        Returns instance of :py:obj:`~selenium.webdriver.common.alert.Alert`.
        """
        return Alert(self)

    def wait_for_alert(self, timeout=None):
        """
        Shortcut for waiting for an alert. If it does not end with an
        exception, it returns that alert. Default timeout is
        `~.default_wait_timeout`.
        """
        if not timeout:
            timeout = self.default_wait_timeout
        alert = Alert(self)

        def alert_shown(driver):
            try:
                # Accessing .text raises if no alert is present.
                alert.text
                return True
            except selenium_exc.NoAlertPresentException:
                return False

        self.wait(timeout).until(alert_shown)
        return alert
class Chrome(_WebdriverWrapper, Chrome):
    """Selenium's Chrome driver extended with the wrapper helper methods."""
    pass
| 35.414013
| 120
| 0.609083
|
acfc0743b462adef76788d1b0526d07db806b6d7
| 1,953
|
py
|
Python
|
qa/rpc-tests/timestampindex.py
|
MCLXI/BKS
|
6653d0b106151045ac6e3ceb24aab55354ac2e83
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/timestampindex.py
|
MCLXI/BKS
|
6653d0b106151045ac6e3ceb24aab55354ac2e83
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/timestampindex.py
|
MCLXI/BKS
|
6653d0b106151045ac6e3ceb24aab55354ac2e83
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test timestampindex generation and fetching
#
import time
from test_framework.test_framework import BKSTestFramework
from test_framework.util import *
class TimestampIndexTest(BKSTestFramework):
    """Checks -timestampindex block-hash lookups over a timestamp range."""

    def setup_chain(self):
        # Fresh 4-node chain directory.
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)

    def setup_network(self):
        # Nodes 0/1 are "wallet" nodes; nodes 2/3 are used for testing.
        # Only the odd-numbered nodes maintain the timestamp index.
        node_args = (
            ["-debug"],
            ["-debug", "-timestampindex"],
            ["-debug"],
            ["-debug", "-timestampindex"],
        )
        self.nodes = [start_node(idx, self.options.tmpdir, args)
                      for idx, args in enumerate(node_args)]
        for peer in (1, 2, 3):
            connect_nodes(self.nodes[0], peer)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        blockhashes = []
        for batch in range(3):
            print("Mining 25 blocks...")
            blockhashes.extend(self.nodes[0].generate(25))
            if batch < 2:
                # Pause between batches so block timestamps spread out.
                time.sleep(3)
        self.sync_all()

        low = self.nodes[1].getblock(blockhashes[0])["time"]
        high = low + 76

        print("Checking timestamp index...")
        hashes = self.nodes[1].getblockhashes(high, low)
        assert_equal(len(hashes), len(blockhashes))
        assert_equal(hashes, blockhashes)
        print("Passed\n")
# Script entry point: run the test directly.
if __name__ == '__main__':
    TimestampIndexTest().main()
| 31.5
| 92
| 0.655402
|
acfc0786ca1578a5f64829bfa89f79d338e64ede
| 986
|
py
|
Python
|
src/repositories/property_repository.py
|
stivenramireza/real-estate-api
|
b6de176bed5612ce2ad874f622d4675fc4b6d5a5
|
[
"MIT"
] | null | null | null |
src/repositories/property_repository.py
|
stivenramireza/real-estate-api
|
b6de176bed5612ce2ad874f622d4675fc4b6d5a5
|
[
"MIT"
] | 2
|
2022-03-22T17:27:19.000Z
|
2022-03-22T18:29:14.000Z
|
src/repositories/property_repository.py
|
stivenramireza/real-estate-api
|
b6de176bed5612ce2ad874f622d4675fc4b6d5a5
|
[
"MIT"
] | null | null | null |
from src.repositories.database_repository import db_connection, DatabaseRepository
from src.utils.queries import (
PROPERTIES,
STATUS_FILTER,
YEAR_FILTER,
CITY_FILTER,
)
class PropertyRepository:
    """Data-access layer for querying properties with optional filters."""

    # Shared across instances; instantiated once at class-definition time.
    database_repository = DatabaseRepository()

    # Fixed annotation: the original ``list[dict[str, any]]`` referenced the
    # builtin ``any`` function, not ``typing.Any``.
    def get_properties(self, status: str, year: int, city: str) -> list[dict]:
        """Return properties matching the given filters.

        Each of *status*, *year* and *city* may be falsy, in which case that
        filter is skipped. With no filters the base ``PROPERTIES`` query is
        executed as-is.
        """
        # NOTE(review): the filters are interpolated with str.format(), which
        # is vulnerable to SQL injection if these values come from user
        # input — prefer parameterized queries in DatabaseRepository.
        clauses = [PROPERTIES]
        if status:
            clauses.append(STATUS_FILTER.format(status))
        if year:
            clauses.append(YEAR_FILTER.format(year))
        if city:
            clauses.append(CITY_FILTER.format(city))
        # Joining the single-element list yields PROPERTIES unchanged, so the
        # original separate no-filter branch was redundant.
        properties_to_search = " AND ".join(clauses)
        properties = self.database_repository.read_from_db(
            properties_to_search, db_connection
        )
        return properties
| 30.8125
| 88
| 0.662272
|
acfc088378fc51d0cd3bc52deaf9e7243e13e999
| 292
|
py
|
Python
|
whosaidwhat/candidates/apps.py
|
shun-liang/whosaidwhat
|
66a593abf74f414d05481514887d4cd84cf99d78
|
[
"MIT"
] | null | null | null |
whosaidwhat/candidates/apps.py
|
shun-liang/whosaidwhat
|
66a593abf74f414d05481514887d4cd84cf99d78
|
[
"MIT"
] | null | null | null |
whosaidwhat/candidates/apps.py
|
shun-liang/whosaidwhat
|
66a593abf74f414d05481514887d4cd84cf99d78
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class CandidatesConfig(AppConfig):
    """Django application configuration for the candidates app."""

    name = 'whosaidwhat.candidates'
    verbose_name = "Candidates"

    def ready(self):
        """Override this to put in:
            Users system checks
            Users signal registration
        """
        # Intentionally empty: no startup hooks are registered yet.
        pass
| 20.857143
| 37
| 0.619863
|
acfc0a4bc593935dc1d8fe26a77075b7fae5699a
| 2,448
|
py
|
Python
|
pydal/adapters/__init__.py
|
pav0n/pydal
|
311790cf334645ab77b15d161f72282da856b8bb
|
[
"BSD-3-Clause"
] | null | null | null |
pydal/adapters/__init__.py
|
pav0n/pydal
|
311790cf334645ab77b15d161f72282da856b8bb
|
[
"BSD-3-Clause"
] | null | null | null |
pydal/adapters/__init__.py
|
pav0n/pydal
|
311790cf334645ab77b15d161f72282da856b8bb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from .sqlite import SQLiteAdapter, SpatiaLiteAdapter, JDBCSQLiteAdapter
from .mysql import MySQLAdapter
from .postgres import PostgreSQLAdapter, NewPostgreSQLAdapter, JDBCPostgreSQLAdapter
from .oracle import OracleAdapter
from .mssql import MSSQLAdapter, MSSQL2Adapter, MSSQL3Adapter, MSSQL4Adapter, \
VerticaAdapter, SybaseAdapter
from .firebird import FireBirdAdapter
from .informix import InformixAdapter, InformixSEAdapter
from .db2 import DB2Adapter
from .teradata import TeradataAdapter
from .ingres import IngresAdapter, IngresUnicodeAdapter
from .sapdb import SAPDBAdapter
from .cubrid import CubridAdapter
from .couchdb import CouchDBAdapter
from .mongo import MongoDBAdapter
from .imap import IMAPAdapter
from .cassandra import CassandraAdapter
# Maps a DAL connection-URI scheme (e.g. 'postgres:psycopg2') to the adapter
# class that implements it. Some backends expose several driver variants.
ADAPTERS = {
    'sqlite': SQLiteAdapter,
    'spatialite': SpatiaLiteAdapter,
    'sqlite:memory': SQLiteAdapter,
    'spatialite:memory': SpatiaLiteAdapter,
    'mysql': MySQLAdapter,
    'postgres': PostgreSQLAdapter,
    'postgres:psycopg2': PostgreSQLAdapter,
    'postgres:pg8000': PostgreSQLAdapter,
    'postgres2:psycopg2': NewPostgreSQLAdapter,
    'postgres2:pg8000': NewPostgreSQLAdapter,
    'oracle': OracleAdapter,
    'mssql': MSSQLAdapter,
    'mssql2': MSSQL2Adapter,
    'mssql3': MSSQL3Adapter,
    'mssql4' : MSSQL4Adapter,
    'vertica': VerticaAdapter,
    'sybase': SybaseAdapter,
    'db2:ibm_db_dbi': DB2Adapter,
    'db2:pyodbc': DB2Adapter,
    'teradata': TeradataAdapter,
    'informix': InformixAdapter,
    'informix-se': InformixSEAdapter,
    'firebird': FireBirdAdapter,
    'firebird_embedded': FireBirdAdapter,
    'ingres': IngresAdapter,
    'ingresu': IngresUnicodeAdapter,
    'sapdb': SAPDBAdapter,
    'cubrid': CubridAdapter,
    'jdbc:sqlite': JDBCSQLiteAdapter,
    'jdbc:sqlite:memory': JDBCSQLiteAdapter,
    'jdbc:postgres': JDBCPostgreSQLAdapter,
    'couchdb': CouchDBAdapter,
    'mongodb': MongoDBAdapter,
    'imap': IMAPAdapter,
    'cassandra':CassandraAdapter
}
# Register Google App Engine adapters when the google backend is importable.
try:
    from .google import GoogleDatastoreAdapter, GoogleSQLAdapter
    # discouraged, for backward compatibility
    ADAPTERS['gae'] = GoogleDatastoreAdapter
    # add gae adapters
    ADAPTERS['google:datastore'] = GoogleDatastoreAdapter
    ADAPTERS['google:datastore+ndb'] = GoogleDatastoreAdapter
    ADAPTERS['google:sql'] = GoogleSQLAdapter
# Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt are not
# swallowed; kept broader than ImportError because importing .google may
# fail in other ways off App Engine.
except Exception:
    # don't bother, we're not on Google AppEngine
    GoogleDatastoreAdapter = None
| 34.971429
| 84
| 0.752859
|
acfc0b941019439c0b1dd504e11636177ed09944
| 1,823
|
py
|
Python
|
agileLibrary/agileLibrary/main/models/resources.py
|
DimOps/library-app
|
ed5e832c877957548d19b2ce38fa3ab5058d6797
|
[
"MIT"
] | null | null | null |
agileLibrary/agileLibrary/main/models/resources.py
|
DimOps/library-app
|
ed5e832c877957548d19b2ce38fa3ab5058d6797
|
[
"MIT"
] | null | null | null |
agileLibrary/agileLibrary/main/models/resources.py
|
DimOps/library-app
|
ed5e832c877957548d19b2ce38fa3ab5058d6797
|
[
"MIT"
] | null | null | null |
from django.db import models
class Book(models.Model):
    """Django model for a book record."""

    # Max-length constraints for the character fields below.
    ISBN_MAX_LENGTH = 13
    TITLE_MAX_LENGTH = 300
    AUTHORS_MAX_LENGTH = 300
    PUBLISHER_MAX_LENGTH = 300

    title = models.CharField(
        max_length=TITLE_MAX_LENGTH,
    )

    authors = models.CharField(
        max_length=AUTHORS_MAX_LENGTH,
    )

    isbn = models.CharField(
        max_length=ISBN_MAX_LENGTH,
    )

    # Optional: may be left blank / stored as NULL.
    publisher = models.CharField(
        max_length=PUBLISHER_MAX_LENGTH,
        blank=True,
        null=True,
    )

    # Optional free-form description.
    description = models.TextField(
        blank=True,
        null=True,
    )

    # Defaults to a single copy.
    quantity = models.IntegerField(
        default=1,
    )

    is_taken = models.BooleanField(
        default=False,
    )

    is_reserved = models.BooleanField(
        default=False,
    )
class Paper(models.Model):
    """Django model for an academic paper record."""

    # Max-length constraints for the character fields below.
    TITLE_MAX_LENGTH = 300
    AUTHORS_NAMES_MAX_LENGTH = 300

    title = models.CharField(
        max_length=TITLE_MAX_LENGTH,
    )

    authors = models.CharField(
        max_length=AUTHORS_NAMES_MAX_LENGTH,
    )

    # Optional: stored as a URL (presumably a DOI link — verify with callers).
    doi = models.URLField(
        blank=True,
        null=True,
    )

    published = models.SmallIntegerField()

    # Optional free-form description.
    description = models.TextField(
        blank=True,
        null=True,
    )

    is_taken = models.BooleanField(
        default=False,
    )

    is_reserved = models.BooleanField(
        default=False,
    )
class Laptop(models.Model):
    """Django model for a laptop record."""

    # Max-length constraints for the character fields below.
    ID_MAX_LENGTH = 10
    MANUFACTURER_MAX_LENGTH = 30
    MODEL_MAX_LENGTH = 30
    TYPE_MAX_LENGTH = 20

    laptop_id = models.CharField(
        max_length=ID_MAX_LENGTH,
    )

    manufacturer = models.CharField(
        max_length=MANUFACTURER_MAX_LENGTH,
    )

    model = models.CharField(
        max_length=MODEL_MAX_LENGTH,
    )

    # NOTE(review): 'type' shadows the builtin name; Django allows it, but
    # consider renaming in a future migration.
    type = models.CharField(
        max_length=TYPE_MAX_LENGTH,
    )
| 18.049505
| 44
| 0.62644
|
acfc0babfc4693681b11350b4a6d3091812ef84c
| 12,272
|
py
|
Python
|
bop_toolkit_lib/pose_error.py
|
federicotomat/bop_toolkit
|
30a93d312ebf3a7b12cb8e93ab721800316e0887
|
[
"MIT"
] | null | null | null |
bop_toolkit_lib/pose_error.py
|
federicotomat/bop_toolkit
|
30a93d312ebf3a7b12cb8e93ab721800316e0887
|
[
"MIT"
] | null | null | null |
bop_toolkit_lib/pose_error.py
|
federicotomat/bop_toolkit
|
30a93d312ebf3a7b12cb8e93ab721800316e0887
|
[
"MIT"
] | null | null | null |
# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz)
# Center for Machine Perception, Czech Technical University in Prague
"""Implementation of the pose error functions described in:
Hodan, Michel et al., "BOP: Benchmark for 6D Object Pose Estimation", ECCV'18
Hodan et al., "On Evaluation of 6D Object Pose Estimation", ECCVW'16
"""
import math
import numpy as np
from scipy import spatial
from auto_pose.bop_toolkit.bop_toolkit_lib import misc
from auto_pose.bop_toolkit.bop_toolkit_lib import visibility
def vsd(R_est, t_est, R_gt, t_gt, depth_test, K, delta, taus,
        normalized_by_diameter, diameter, renderer, obj_id, cost_type='step'):
    """Visible Surface Discrepancy -- by Hodan, Michel et al. (ECCV 2018).

    :param R_est: 3x3 ndarray with the estimated rotation matrix.
    :param t_est: 3x1 ndarray with the estimated translation vector.
    :param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
    :param t_gt: 3x1 ndarray with the ground-truth translation vector.
    :param depth_test: hxw ndarray with the test depth image.
    :param K: 3x3 ndarray with an intrinsic camera matrix.
    :param delta: Tolerance used for estimation of the visibility masks.
    :param taus: A list of misalignment tolerance values.
    :param normalized_by_diameter: Whether to normalize the pixel-wise distances
        by the object diameter.
    :param diameter: Object diameter.
    :param renderer: Instance of the Renderer class (see renderer.py).
    :param obj_id: Object identifier.
    :param cost_type: Type of the pixel-wise matching cost:
        'tlinear' - Used in the original definition of VSD in:
            Hodan et al., On Evaluation of 6D Object Pose Estimation, ECCVW'16
        'step' - Used for SIXD Challenge 2017 onwards.
    :return: List of calculated errors (one for each misalignment tolerance).
    """
    # Render depth images of the model in the estimated and the ground-truth pose.
    fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]
    depth_est = renderer.render_object(
        obj_id, R_est, t_est, fx, fy, cx, cy)['depth']
    depth_gt = renderer.render_object(
        obj_id, R_gt, t_gt, fx, fy, cx, cy)['depth']

    # Convert depth images to distance images.
    dist_test = misc.depth_im_to_dist_im_fast(depth_test, K)
    dist_gt = misc.depth_im_to_dist_im_fast(depth_gt, K)
    dist_est = misc.depth_im_to_dist_im_fast(depth_est, K)

    # Visibility mask of the model in the ground-truth pose.
    visib_gt = visibility.estimate_visib_mask_gt(
        dist_test, dist_gt, delta, visib_mode='bop19')

    # Visibility mask of the model in the estimated pose.
    visib_est = visibility.estimate_visib_mask_est(
        dist_test, dist_est, visib_gt, delta, visib_mode='bop19')

    # Intersection and union of the visibility masks.
    visib_inter = np.logical_and(visib_gt, visib_est)
    visib_union = np.logical_or(visib_gt, visib_est)
    visib_union_count = visib_union.sum()
    # Pixels visible in only one of the two masks (mismatched surface).
    visib_comp_count = visib_union_count - visib_inter.sum()

    # Pixel-wise distances.
    dists = np.abs(dist_gt[visib_inter] - dist_est[visib_inter])

    # Normalization of pixel-wise distances by object diameter.
    if normalized_by_diameter:
        dists /= diameter

    # Calculate VSD for each provided value of the misalignment tolerance.
    if visib_union_count == 0:
        # No visible surface at all -> maximal error for every tau.
        errors = [1.0] * len(taus)
    else:
        errors = []
        for tau in taus:
            # Pixel-wise matching cost.
            if cost_type == 'step':
                costs = dists >= tau
            elif cost_type == 'tlinear':  # Truncated linear function.
                costs = dists / tau
                costs[costs > 1.0] = 1.0
            else:
                raise ValueError('Unknown pixel matching cost.')

            e = (np.sum(costs) + visib_comp_count) / float(visib_union_count)
            errors.append(e)

    return errors
def mssd(R_est, t_est, R_gt, t_gt, pts, syms):
    """Maximum Symmetry-Aware Surface Distance (MSSD).

    See: http://bop.felk.cvut.cz/challenges/bop-challenge-2019/

    :param R_est: 3x3 ndarray with the estimated rotation matrix.
    :param t_est: 3x1 ndarray with the estimated translation vector.
    :param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
    :param t_gt: 3x1 ndarray with the ground-truth translation vector.
    :param pts: nx3 ndarray with 3D model points.
    :param syms: Set of symmetry transformations, each given by a dictionary with:
      - 'R': 3x3 ndarray with the rotation matrix.
      - 't': 3x1 ndarray with the translation vector.
    :return: The calculated error.
    """
    pts_est = misc.transform_pts_Rt(pts, R_est, t_est)

    def _max_surface_dist(sym):
        # Compose the GT pose with one symmetry transformation.
        R_sym = R_gt.dot(sym['R'])
        t_sym = R_gt.dot(sym['t']) + t_gt
        pts_sym = misc.transform_pts_Rt(pts, R_sym, t_sym)
        return np.linalg.norm(pts_est - pts_sym, axis=1).max()

    # The error is the best (smallest) maximum distance over all symmetries.
    return min(_max_surface_dist(sym) for sym in syms)
def mspd(R_est, t_est, R_gt, t_gt, K, pts, syms):
    """Maximum Symmetry-Aware Projection Distance (MSPD).

    See: http://bop.felk.cvut.cz/challenges/bop-challenge-2019/

    :param R_est: 3x3 ndarray with the estimated rotation matrix.
    :param t_est: 3x1 ndarray with the estimated translation vector.
    :param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
    :param t_gt: 3x1 ndarray with the ground-truth translation vector.
    :param K: 3x3 ndarray with the intrinsic camera matrix.
    :param pts: nx3 ndarray with 3D model points.
    :param syms: Set of symmetry transformations, each given by a dictionary with:
      - 'R': 3x3 ndarray with the rotation matrix.
      - 't': 3x1 ndarray with the translation vector.
    :return: The calculated error.
    """
    proj_est = misc.project_pts(pts, K, R_est, t_est)

    def _max_proj_dist(sym):
        # Compose the GT pose with one symmetry transformation.
        R_sym = R_gt.dot(sym['R'])
        t_sym = R_gt.dot(sym['t']) + t_gt
        proj_sym = misc.project_pts(pts, K, R_sym, t_sym)
        return np.linalg.norm(proj_est - proj_sym, axis=1).max()

    # The error is the best (smallest) maximum distance over all symmetries.
    return min(_max_proj_dist(sym) for sym in syms)
def add(R_est, t_est, R_gt, t_gt, pts):
    """Average Distance of Model Points for objects with no indistinguishable
    views - by Hinterstoisser et al. (ACCV'12).

    :param R_est: 3x3 ndarray with the estimated rotation matrix.
    :param t_est: 3x1 ndarray with the estimated translation vector.
    :param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
    :param t_gt: 3x1 ndarray with the ground-truth translation vector.
    :param pts: nx3 ndarray with 3D model points.
    :return: The calculated error.
    """
    # Transform the model points by both poses and average the point-wise
    # Euclidean distances.
    diffs = (misc.transform_pts_Rt(pts, R_est, t_est)
             - misc.transform_pts_Rt(pts, R_gt, t_gt))
    return np.linalg.norm(diffs, axis=1).mean()
def adi(R_est, t_est, R_gt, t_gt, pts):
    """Average Distance of Model Points for objects with indistinguishable views
    - by Hinterstoisser et al. (ACCV'12).

    :param R_est: 3x3 ndarray with the estimated rotation matrix.
    :param t_est: 3x1 ndarray with the estimated translation vector.
    :param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
    :param t_gt: 3x1 ndarray with the ground-truth translation vector.
    :param pts: nx3 ndarray with 3D model points.
    :return: The calculated error.
    """
    pts_est = misc.transform_pts_Rt(pts, R_est, t_est)
    pts_gt = misc.transform_pts_Rt(pts, R_gt, t_gt)

    # Distance from each GT-pose vertex to its nearest neighbour among the
    # estimated-pose vertices, averaged over all vertices.
    nn_dists, _ = spatial.cKDTree(pts_est).query(pts_gt, k=1)
    return nn_dists.mean()
def re(R_est, R_gt):
    """Rotational Error.

    :param R_est: 3x3 ndarray with the estimated rotation matrix.
    :param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
    :return: The calculated error in degrees.
    """
    assert (R_est.shape == R_gt.shape == (3, 3))
    # For a rotation matrix the inverse equals the transpose, so use the
    # cheaper and numerically safer transpose instead of np.linalg.inv.
    error_cos = float(0.5 * (np.trace(R_est.dot(R_gt.T)) - 1.0))

    # Avoid invalid values due to numerical errors.
    error_cos = min(1.0, max(-1.0, error_cos))

    # Convert [rad] to [deg].
    return math.degrees(math.acos(error_cos))
def te(t_est, t_gt):
    """Translational Error.

    :param t_est: 3x1 ndarray with the estimated translation vector.
    :param t_gt: 3x1 ndarray with the ground-truth translation vector.
    :return: The calculated error.
    """
    assert (t_est.size == t_gt.size == 3)
    # Euclidean distance between the two translation vectors.
    diff = t_gt - t_est
    return np.linalg.norm(diff)
def proj(R_est, t_est, R_gt, t_gt, K, pts):
    """Average distance of projections of object model vertices [px]
    - by Brachmann et al. (CVPR'16).

    :param R_est: 3x3 ndarray with the estimated rotation matrix.
    :param t_est: 3x1 ndarray with the estimated translation vector.
    :param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
    :param t_gt: 3x1 ndarray with the ground-truth translation vector.
    :param K: 3x3 ndarray with an intrinsic camera matrix.
    :param pts: nx3 ndarray with 3D model points.
    :return: The calculated error.
    """
    # Project the model under both poses and average the 2D distances.
    delta = (misc.project_pts(pts, K, R_est, t_est)
             - misc.project_pts(pts, K, R_gt, t_gt))
    return np.linalg.norm(delta, axis=1).mean()
def cou_mask(mask_est, mask_gt):
    """Complement over Union of 2D binary masks.

    :param mask_est: hxw ndarray with the estimated mask.
    :param mask_gt: hxw ndarray with the ground-truth mask.
    :return: The calculated error.
    """
    # np.bool was deprecated and removed in NumPy >= 1.24; the builtin bool
    # is the correct dtype here.
    mask_est_bool = mask_est.astype(bool)
    mask_gt_bool = mask_gt.astype(bool)

    inter = np.logical_and(mask_gt_bool, mask_est_bool)
    union = np.logical_or(mask_gt_bool, mask_est_bool)

    union_count = float(union.sum())
    if union_count > 0:
        e = 1.0 - inter.sum() / union_count
    else:
        # Both masks empty -> maximal error by convention.
        e = 1.0
    return e
def cus(R_est, t_est, R_gt, t_gt, K, renderer, obj_id):
    """Complement over Union of projected 2D masks.

    :param R_est: 3x3 ndarray with the estimated rotation matrix.
    :param t_est: 3x1 ndarray with the estimated translation vector.
    :param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
    :param t_gt: 3x1 ndarray with the ground-truth translation vector.
    :param K: 3x3 ndarray with an intrinsic camera matrix.
    :param renderer: Instance of the Renderer class (see renderer.py).
    :param obj_id: Object identifier.
    :return: The calculated error.
    """
    fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]

    # Render the model under both poses and threshold the depth maps to
    # obtain binary silhouette masks.
    mask_est = renderer.render_object(
        obj_id, R_est, t_est, fx, fy, cx, cy)['depth'] > 0
    mask_gt = renderer.render_object(
        obj_id, R_gt, t_gt, fx, fy, cx, cy)['depth'] > 0

    union_count = float(np.logical_or(mask_gt, mask_est).sum())
    if union_count == 0:
        # Both silhouettes empty -> maximal error by convention.
        return 1.0
    inter_count = np.logical_and(mask_gt, mask_est).sum()
    return 1.0 - inter_count / union_count
def cou_bb(bb_est, bb_gt):
    """Complement over Union of 2D bounding boxes.

    :param bb_est: The estimated bounding box (x1, y1, w1, h1).
    :param bb_gt: The ground-truth bounding box (x2, y2, w2, h2).
    :return: The calculated error.
    """
    # CoU is simply one minus the intersection-over-union.
    return 1.0 - misc.iou(bb_est, bb_gt)
def cou_bb_proj(R_est, t_est, R_gt, t_gt, K, renderer, obj_id):
    """Complement over Union of projected 2D bounding boxes.

    :param R_est: 3x3 ndarray with the estimated rotation matrix.
    :param t_est: 3x1 ndarray with the estimated translation vector.
    :param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
    :param t_gt: 3x1 ndarray with the ground-truth translation vector.
    :param K: 3x3 ndarray with an intrinsic camera matrix.
    :param renderer: Instance of the Renderer class (see renderer.py).
    :param obj_id: Object identifier.
    :return: The calculated error.
    """
    fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]

    # Render the model under both poses.
    depth_est = renderer.render_object(
        obj_id, R_est, t_est, fx, fy, cx, cy)['depth']
    depth_gt = renderer.render_object(
        obj_id, R_gt, t_gt, fx, fy, cx, cy)['depth']

    def _bbox(depth):
        # Tight 2D bounding box around the rendered silhouette.
        ys, xs = (depth > 0).nonzero()
        return misc.calc_2d_bbox(xs, ys, im_size=None, clip=False)

    return 1.0 - misc.iou(_bbox(depth_est), _bbox(depth_gt))
| 37.075529
| 80
| 0.714716
|
acfc0d2073ea2748926e5100d18f23e959b982c6
| 1,439
|
py
|
Python
|
examples/undocumented/python/regression_randomforest.py
|
cloner1984/shogun
|
901c04b2c6550918acf0594ef8afeb5dcd840a7d
|
[
"BSD-3-Clause"
] | 2
|
2015-01-13T15:18:27.000Z
|
2015-05-01T13:28:48.000Z
|
examples/undocumented/python/regression_randomforest.py
|
cloner1984/shogun
|
901c04b2c6550918acf0594ef8afeb5dcd840a7d
|
[
"BSD-3-Clause"
] | null | null | null |
examples/undocumented/python/regression_randomforest.py
|
cloner1984/shogun
|
901c04b2c6550918acf0594ef8afeb5dcd840a7d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from numpy import array, random

traindat = '../data/fm_train_real.dat'
testdat = '../data/fm_test_real.dat'
label_traindat = '../data/label_train_multiclass.dat'

# The single input attribute is continuous (i.e. not nominal).
feattypes = array([False])

parameter_list = [[500, 50, 15, 0.2, feattypes]]


def regression_randomforest(num_train=500, num_test=50, x_range=15,
                            noise_var=0.2, ft=feattypes):
    """Train a Random Forest regressor on noisy y=x data and regress a grid.

    :param num_train: number of training samples.
    :param num_test: number of evenly spaced test points.
    :param x_range: upper bound of the input range.
    :param noise_var: scale of the Gaussian noise added to the targets.
    :param ft: per-attribute feature-type flags (False = continuous).
    :return: (trained forest, predicted labels), or None if Shogun is
        unavailable.
    """
    try:
        from shogun import RealFeatures, RegressionLabels, CSVFile, RandomForest, MeanRule, PT_REGRESSION
    except ImportError:
        print("Could not import Shogun modules")
        return

    random.seed(1)

    # Training set: y = x corrupted with Gaussian noise.
    train_x = random.rand(1, num_train) * x_range
    train_y = train_x + random.randn(num_train) * noise_var

    # Test set: an evenly spaced grid over [0, x_range).
    test_x = array([[float(i) / num_test * x_range for i in range(num_test)]])

    # Wrap the raw arrays in Shogun feature/label containers.
    feats_train = RealFeatures(train_x)
    feats_test = RealFeatures(test_x)
    labels_train = RegressionLabels(train_y[0])

    # Build and train a forest of 20 trees.
    forest = RandomForest(feats_train, labels_train, 20, 1)
    forest.set_feature_types(ft)
    forest.set_machine_problem_type(PT_REGRESSION)
    forest.set_combination_rule(MeanRule())
    forest.train()

    # Predict on the test grid.
    predictions = forest.apply_regression(feats_test).get_labels()
    return forest, predictions


if __name__ == '__main__':
    print('RandomForest')
    regression_randomforest(*parameter_list[0])
| 29.367347
| 99
| 0.788047
|
acfc0d75f3867d09606ceb69786bf02589a80da4
| 102,176
|
py
|
Python
|
magnum/tests/unit/drivers/test_template_definition.py
|
QumulusTechnology/magnum
|
53f5e804d1ee5bbe934f25b71a8df00f5d1a9ab9
|
[
"Apache-2.0"
] | null | null | null |
magnum/tests/unit/drivers/test_template_definition.py
|
QumulusTechnology/magnum
|
53f5e804d1ee5bbe934f25b71a8df00f5d1a9ab9
|
[
"Apache-2.0"
] | null | null | null |
magnum/tests/unit/drivers/test_template_definition.py
|
QumulusTechnology/magnum
|
53f5e804d1ee5bbe934f25b71a8df00f5d1a9ab9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Rackspace Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutronclient.common import exceptions as n_exception
from unittest import mock
import six
from magnum.common import exception
import magnum.conf
from magnum.drivers.common import driver
from magnum.drivers.heat import template_def as cmn_tdef
from magnum.drivers.k8s_coreos_v1 import driver as k8s_coreos_dr
from magnum.drivers.k8s_coreos_v1 import template_def as k8s_coreos_tdef
from magnum.drivers.k8s_fedora_atomic_v1 import driver as k8sa_dr
from magnum.drivers.k8s_fedora_atomic_v1 import template_def as k8sa_tdef
from magnum.drivers.k8s_fedora_ironic_v1 import driver as k8s_i_dr
from magnum.drivers.k8s_fedora_ironic_v1 import template_def as k8si_tdef
from magnum.drivers.mesos_ubuntu_v1 import driver as mesos_dr
from magnum.drivers.mesos_ubuntu_v1 import template_def as mesos_tdef
from magnum.drivers.swarm_fedora_atomic_v1 import driver as swarm_dr
from magnum.drivers.swarm_fedora_atomic_v1 import template_def as swarm_tdef
from magnum.drivers.swarm_fedora_atomic_v2 import driver as swarm_v2_dr
from magnum.drivers.swarm_fedora_atomic_v2 import template_def as swarm_v2_tdef
from magnum.tests import base
from requests import exceptions as req_exceptions
CONF = magnum.conf.CONF
class TemplateDefinitionTestCase(base.TestCase):
    """Tests for driver lookup and the generic template-definition helpers.

    Covers Driver entry-point loading, (server_type, os, coe) -> driver
    resolution, parameter/output mappings, and the floating-IP environment
    file selection logic in ``template_def``.
    """

    @mock.patch.object(driver, 'iter_entry_points')
    def test_load_entry_points(self, mock_iter_entry_points):
        # load_entry_points must yield (entry_point, loaded_cls) pairs and
        # load every entry point without dependency checks (require=False).
        mock_entry_point = mock.MagicMock()
        mock_entry_points = [mock_entry_point]
        mock_iter_entry_points.return_value = mock_entry_points.__iter__()

        entry_points = driver.Driver.load_entry_points()

        for (expected_entry_point,
             (actual_entry_point, loaded_cls)) in zip(mock_entry_points,
                                                      entry_points):
            self.assertEqual(expected_entry_point, actual_entry_point)
            expected_entry_point.load.assert_called_once_with(require=False)

    @mock.patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_get_vm_atomic_kubernetes_definition(self, mock_driver):
        # vm + fedora-atomic + kubernetes -> Atomic K8s template definition.
        mock_driver.return_value = k8sa_dr.Driver()
        cluster_driver = driver.Driver.get_driver('vm',
                                                  'fedora-atomic',
                                                  'kubernetes')
        definition = cluster_driver.get_template_definition()

        self.assertIsInstance(definition,
                              k8sa_tdef.AtomicK8sTemplateDefinition)

    @mock.patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_get_bm_fedora_kubernetes_ironic_definition(self, mock_driver):
        # bm (ironic) + fedora + kubernetes -> Fedora Ironic K8s definition.
        mock_driver.return_value = k8s_i_dr.Driver()
        cluster_driver = driver.Driver.get_driver('bm',
                                                  'fedora',
                                                  'kubernetes')
        definition = cluster_driver.get_template_definition()

        self.assertIsInstance(definition,
                              k8si_tdef.FedoraK8sIronicTemplateDefinition)

    @mock.patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_get_vm_coreos_kubernetes_definition(self, mock_driver):
        # vm + coreos + kubernetes -> CoreOS K8s definition.
        mock_driver.return_value = k8s_coreos_dr.Driver()
        cluster_driver = driver.Driver.get_driver('vm', 'coreos', 'kubernetes')
        definition = cluster_driver.get_template_definition()

        self.assertIsInstance(definition,
                              k8s_coreos_tdef.CoreOSK8sTemplateDefinition)

    @mock.patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_get_vm_atomic_swarm_definition(self, mock_driver):
        # vm + fedora-atomic + swarm -> Atomic Swarm (v1) definition.
        mock_driver.return_value = swarm_dr.Driver()
        cluster_driver = driver.Driver.get_driver('vm',
                                                  'fedora-atomic',
                                                  'swarm')
        definition = cluster_driver.get_template_definition()

        self.assertIsInstance(definition,
                              swarm_tdef.AtomicSwarmTemplateDefinition)

    @mock.patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_get_vm_atomic_swarm_v2_definition(self, mock_driver):
        # vm + fedora-atomic + swarm-mode -> Atomic Swarm (v2) definition.
        mock_driver.return_value = swarm_v2_dr.Driver()
        cluster_driver = driver.Driver.get_driver('vm',
                                                  'fedora-atomic',
                                                  'swarm-mode')
        definition = cluster_driver.get_template_definition()

        self.assertIsInstance(definition,
                              swarm_v2_tdef.AtomicSwarmTemplateDefinition)

    @mock.patch('magnum.drivers.common.driver.Driver.get_driver')
    def test_get_vm_ubuntu_mesos_definition(self, mock_driver):
        # vm + ubuntu + mesos -> Ubuntu Mesos definition.
        mock_driver.return_value = mesos_dr.Driver()
        cluster_driver = driver.Driver.get_driver('vm',
                                                  'ubuntu',
                                                  'mesos')
        definition = cluster_driver.get_template_definition()

        self.assertIsInstance(definition,
                              mesos_tdef.UbuntuMesosTemplateDefinition)

    def test_get_driver_not_supported(self):
        # An unknown OS must raise ClusterTypeNotSupported.
        self.assertRaises(exception.ClusterTypeNotSupported,
                          driver.Driver.get_driver,
                          'vm', 'not_supported', 'kubernetes')

    def test_required_param_not_set(self):
        # A required ParameterMapping whose source attribute is None must
        # raise RequiredParameterNotProvided when set_param is invoked.
        param = cmn_tdef.ParameterMapping('test', cluster_template_attr='test',
                                          required=True)
        mock_cluster_template = mock.MagicMock()
        mock_cluster_template.test = None

        self.assertRaises(exception.RequiredParameterNotProvided,
                          param.set_param, {}, mock_cluster_template, None)

    def test_output_mapping(self):
        # OutputMapping must return the matching Heat output value (scalar
        # or list), None for a missing key, and None when the stack dict
        # has no 'outputs' entry at all.
        heat_outputs = [
            {
                "output_value": "value1",
                "description": "No description given",
                "output_key": "key1"
            },
            {
                "output_value": ["value2", "value3"],
                "description": "No description given",
                "output_key": "key2"
            }
        ]

        mock_stack = mock.MagicMock()
        mock_cluster = mock.MagicMock()
        mock_stack.to_dict.return_value = {'outputs': heat_outputs}

        output = cmn_tdef.OutputMapping('key1')
        value = output.get_output_value(mock_stack, mock_cluster)
        self.assertEqual('value1', value)

        output = cmn_tdef.OutputMapping('key2')
        value = output.get_output_value(mock_stack, mock_cluster)
        self.assertEqual(["value2", "value3"], value)

        output = cmn_tdef.OutputMapping('key3')
        value = output.get_output_value(mock_stack, mock_cluster)
        self.assertIsNone(value)

        # verify stack with no 'outputs' attribute
        mock_stack.to_dict.return_value = {}
        output = cmn_tdef.OutputMapping('key1')
        value = output.get_output_value(mock_stack, mock_cluster)
        self.assertIsNone(value)

    def test_add_output_with_mapping_type(self):
        # add_output must instantiate the given mapping type with the
        # passed args/kwargs and register the instance in output_mappings.
        definition = k8sa_dr.Driver().get_template_definition()

        mock_args = [1, 3, 4]
        mock_kwargs = {'cluster_attr': 'test'}
        mock_mapping_type = mock.MagicMock()
        mock_mapping_type.return_value = mock.MagicMock()
        definition.add_output(mapping_type=mock_mapping_type, *mock_args,
                              **mock_kwargs)

        mock_mapping_type.assert_called_once_with(*mock_args, **mock_kwargs)
        self.assertIn(mock_mapping_type.return_value,
                      definition.output_mappings)

    def test_add_fip_env_lb_disabled_with_fp(self):
        # Cluster FIPs on (MagicMock attr is truthy), master LB off
        # -> node FIPs enabled, LB FIPs disabled.
        mock_cluster = mock.MagicMock(master_lb_enabled=False, labels={})
        env_files = []
        cmn_tdef.add_fip_env_file(env_files, mock_cluster)

        self.assertEqual(
            [
                cmn_tdef.COMMON_ENV_PATH + 'enable_floating_ip.yaml',
                cmn_tdef.COMMON_ENV_PATH + 'disable_lb_floating_ip.yaml'
            ],
            env_files
        )

    def test_add_fip_env_lb_enabled_with_fp(self):
        # Cluster FIPs on, master LB on -> both env files enabled.
        mock_cluster = mock.MagicMock(floating_ip_enabled=True,
                                      master_lb_enabled=True,
                                      labels={})
        env_files = []
        cmn_tdef.add_fip_env_file(env_files, mock_cluster)

        self.assertEqual(
            [
                cmn_tdef.COMMON_ENV_PATH + 'enable_floating_ip.yaml',
                cmn_tdef.COMMON_ENV_PATH + 'enable_lb_floating_ip.yaml'
            ],
            env_files
        )

    def test_add_fip_env_lb_disabled_without_fp(self):
        # Cluster FIPs off -> both env files disabled.
        mock_cluster = mock.MagicMock(labels={}, floating_ip_enabled=False)
        env_files = []
        cmn_tdef.add_fip_env_file(env_files, mock_cluster)

        self.assertEqual(
            [
                cmn_tdef.COMMON_ENV_PATH + 'disable_floating_ip.yaml',
                cmn_tdef.COMMON_ENV_PATH + 'disable_lb_floating_ip.yaml'
            ],
            env_files
        )

    def test_add_fip_env_lb_enabled_without_fp(self):
        # Cluster FIPs off and no label override -> both disabled.
        mock_cluster = mock.MagicMock(labels={}, floating_ip_enabled=False,)
        env_files = []
        cmn_tdef.add_fip_env_file(env_files, mock_cluster)

        self.assertEqual(
            [
                cmn_tdef.COMMON_ENV_PATH + 'disable_floating_ip.yaml',
                cmn_tdef.COMMON_ENV_PATH + 'disable_lb_floating_ip.yaml'
            ],
            env_files
        )

    def test_add_fip_env_lb_fip_enabled_without_fp(self):
        # Label master_lb_floating_ip_enabled=true overrides the disabled
        # cluster FIP setting for the LB only.
        mock_cluster = mock.MagicMock(
            labels={"master_lb_floating_ip_enabled": "true"},
            floating_ip_enabled=False,)
        env_files = []
        cmn_tdef.add_fip_env_file(env_files, mock_cluster)

        self.assertEqual(
            [
                cmn_tdef.COMMON_ENV_PATH + 'disable_floating_ip.yaml',
                cmn_tdef.COMMON_ENV_PATH + 'enable_lb_floating_ip.yaml'
            ],
            env_files
        )

    def test_add_fip_env_lb_enable_lbfip_disable(self):
        # Label explicitly false keeps the LB FIP disabled.
        mock_cluster = mock.MagicMock(
            labels={"master_lb_floating_ip_enabled": "false"},
            floating_ip_enabled=False,)
        env_files = []
        cmn_tdef.add_fip_env_file(env_files, mock_cluster)

        self.assertEqual(
            [
                cmn_tdef.COMMON_ENV_PATH + 'disable_floating_ip.yaml',
                cmn_tdef.COMMON_ENV_PATH + 'disable_lb_floating_ip.yaml'
            ],
            env_files
        )

    def test_add_fip_env_lb_enable_lbfip_template_disable_cluster_enable(self):
        # Cluster-level FIP enable wins without any label override.
        mock_cluster = mock.MagicMock(
            floating_ip_enabled=True,
            labels={})
        env_files = []
        cmn_tdef.add_fip_env_file(env_files, mock_cluster)

        self.assertEqual(
            [
                cmn_tdef.COMMON_ENV_PATH + 'enable_floating_ip.yaml',
                cmn_tdef.COMMON_ENV_PATH + 'enable_lb_floating_ip.yaml'
            ],
            env_files
        )

    def test_add_fip_master_lb_fip_disabled_cluster_fip_enabled(self):
        # Cluster FIPs enabled forces LB FIPs on even when the label says
        # false (the cluster-wide setting takes precedence).
        mock_cluster = mock.MagicMock(
            labels={"master_lb_floating_ip_enabled": "false"},
            floating_ip_enabled=True,)
        env_files = []
        cmn_tdef.add_fip_env_file(env_files, mock_cluster)

        self.assertEqual(
            [
                cmn_tdef.COMMON_ENV_PATH + 'enable_floating_ip.yaml',
                cmn_tdef.COMMON_ENV_PATH + 'enable_lb_floating_ip.yaml'
            ],
            env_files
        )
@six.add_metaclass(abc.ABCMeta)
class BaseK8sTemplateDefinitionTestCase(base.TestCase):
    """Shared fixtures and helpers for Kubernetes template-definition tests."""

    def setUp(self):
        super(BaseK8sTemplateDefinitionTestCase, self).setUp()
        # One default master and one default worker nodegroup per cluster.
        self.master_ng = mock.MagicMock(uuid='master_ng', role='master')
        self.worker_ng = mock.MagicMock(uuid='worker_ng', role='worker')
        self.nodegroups = [self.master_ng, self.worker_ng]
        self.mock_cluster = mock.MagicMock(nodegroups=self.nodegroups,
                                           default_ng_worker=self.worker_ng,
                                           default_ng_master=self.master_ng)

    @abc.abstractmethod
    def get_definition(self):
        """Returns the template definition."""
        pass

    def _test_update_outputs_server_address(
            self,
            floating_ip_enabled=True,
            public_ip_output_key='kube_masters',
            private_ip_output_key='kube_masters_private',
            cluster_attr=None,
            nodegroup_attr=None,
            is_master=False
    ):
        """Drive update_outputs and check the propagated server address.

        Verifies that the public address is used when floating IPs are
        enabled and the private one otherwise, on either a cluster
        attribute or a default nodegroup attribute.
        """
        tdef = self.get_definition()

        public_addresses = ['public']
        private_addresses = ['private']
        # With floating IPs disabled only the private address is exposed.
        expected = public_addresses if floating_ip_enabled \
            else private_addresses

        stack_outputs = [
            {"output_value": public_addresses,
             "description": "No description given",
             "output_key": public_ip_output_key},
            {"output_value": private_addresses,
             "description": "No description given",
             "output_key": private_ip_output_key},
        ]
        fake_stack = mock.MagicMock()
        fake_stack.to_dict.return_value = {'outputs': stack_outputs}
        fake_template = mock.MagicMock()
        fake_template.floating_ip_enabled = floating_ip_enabled
        self.mock_cluster.floating_ip_enabled = floating_ip_enabled

        tdef.update_outputs(fake_stack, fake_template, self.mock_cluster)

        if cluster_attr:
            observed = getattr(self.mock_cluster, cluster_attr)
        elif is_master:
            observed = getattr(
                self.mock_cluster.default_ng_master, nodegroup_attr)
        else:
            observed = getattr(
                self.mock_cluster.default_ng_worker, nodegroup_attr)
        self.assertEqual(expected, observed)
class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
def get_definition(self):
return k8sa_dr.Driver().get_template_definition()
@mock.patch('magnum.common.clients.OpenStackClients')
@mock.patch('magnum.drivers.heat.template_def.TemplateDefinition'
'.get_output')
def test_k8s_get_scale_params(self, mock_get_output,
mock_osc_class):
mock_context = mock.MagicMock()
mock_cluster = mock.MagicMock()
removal_nodes = ['node1', 'node2']
node_count = 5
mock_scale_manager = mock.MagicMock()
mock_scale_manager.get_removal_nodes.return_value = removal_nodes
definition = k8sa_tdef.AtomicK8sTemplateDefinition()
scale_params = definition.get_scale_params(mock_context, mock_cluster,
node_count,
mock_scale_manager)
expected_scale_params = {
'minions_to_remove': ['node1', 'node2'],
'number_of_minions': 5
}
self.assertEqual(scale_params, expected_scale_params)
    @mock.patch('magnum.common.neutron.get_subnet')
    @mock.patch('magnum.drivers.heat.k8s_template_def.K8sTemplateDefinition'
                '._set_master_lb_allowed_cidrs')
    @mock.patch('magnum.common.neutron.get_fixed_network_name')
    @mock.patch('magnum.common.keystone.is_octavia_enabled')
    @mock.patch('magnum.common.clients.OpenStackClients')
    @mock.patch('magnum.drivers.k8s_fedora_atomic_v1.template_def'
                '.AtomicK8sTemplateDefinition.get_discovery_url')
    @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition'
                '.get_params')
    @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition'
                '.get_output')
    @mock.patch('magnum.conductor.handlers.common.cert_manager'
                '.sign_node_certificate')
    @mock.patch('magnum.common.x509.operations.generate_csr_and_key')
    def test_k8s_get_params(self, mock_generate_csr_and_key,
                            mock_sign_node_certificate,
                            mock_get_output, mock_get_params,
                            mock_get_discovery_url, mock_osc_class,
                            mock_enable_octavia,
                            mock_get_fixed_network_name,
                            mock_set_master_lb_allowed_cidrs,
                            mock_get_subnet):
        """Verify AtomicK8sTemplateDefinition.get_params builds extra_params.

        Checks that every cluster label, cert material, network lookup and
        OpenStack client value is forwarded verbatim to the base
        get_params, and that 'cinder' volume driver with the cloud
        provider disabled raises InvalidParameterValue.
        """
        # Stub out cert generation/signing and service lookups.
        mock_generate_csr_and_key.return_value = {'csr': 'csr',
                                                  'private_key': 'private_key',
                                                  'public_key': 'public_key'}
        mock_sign_node_certificate.return_value = 'signed_cert'
        mock_enable_octavia.return_value = False
        mock_context = mock.MagicMock()
        mock_context.auth_token = 'AUTH_TOKEN'
        mock_cluster_template = mock.MagicMock()
        mock_cluster_template.tls_disabled = False
        mock_cluster_template.registry_enabled = False
        mock_cluster_template.network_driver = 'flannel'
        external_network_id = '17e4e301-b7f3-4996-b3dd-97b3a700174b'
        mock_cluster_template.external_network_id = external_network_id
        mock_cluster_template.no_proxy = ""
        mock_cluster = mock.MagicMock()
        fixed_network_name = 'fixed_network'
        mock_get_fixed_network_name.return_value = fixed_network_name
        fixed_network = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
        mock_cluster.fixed_network = fixed_network
        mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
        fixed_subnet = 'f2a6c8b0-a3c2-42a3-b3f4-1f639a523a53'
        mock_cluster.fixed_subnet = fixed_subnet
        # No stack yet: the definition must not try to read stack outputs.
        del mock_cluster.stack_id
        mock_osc = mock.MagicMock()
        mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1'
        mock_osc.cinder_region_name.return_value = 'RegionOne'
        mock_osc_class.return_value = mock_osc
        mock_get_discovery_url.return_value = 'fake_discovery_url'
        mock_context.auth_url = 'http://192.168.10.10:5000/v3'
        mock_context.user_name = 'fake_user'
        mock_get_subnet.return_value = '20.200.0.0/16'

        # Snapshot every label the definition is expected to forward.
        # labels is a MagicMock here, so each .get() yields a distinct
        # sentinel that must reappear unchanged in extra_params.
        flannel_cidr = mock_cluster.labels.get('flannel_network_cidr')
        flannel_subnet = mock_cluster.labels.get(
            'flannel_network_subnetlen')
        flannel_backend = mock_cluster.labels.get('flannel_backend')
        heapster_enabled = mock_cluster.labels.get(
            'heapster_enabled')
        metrics_server_enabled = mock_cluster.labels.get(
            'metrics_server_enabled')
        metrics_server_chart_tag = mock_cluster.labels.get(
            'metrics_server_chart_tag')
        system_pods_initial_delay = mock_cluster.labels.get(
            'system_pods_initial_delay')
        system_pods_timeout = mock_cluster.labels.get(
            'system_pods_timeout')
        admission_control_list = mock_cluster.labels.get(
            'admission_control_list')
        prometheus_monitoring = mock_cluster.labels.get(
            'prometheus_monitoring')
        grafana_admin_passwd = mock_cluster.labels.get(
            'grafana_admin_passwd')
        kube_dashboard_enabled = mock_cluster.labels.get(
            'kube_dashboard_enabled')
        influx_grafana_dashboard_enabled = mock_cluster.labels.get(
            'influx_grafana_dashboard_enabled')
        docker_volume_type = mock_cluster.labels.get(
            'docker_volume_type')
        boot_volume_size = mock_cluster.labels.get(
            'boot_volume_size')
        etcd_volume_size = mock_cluster.labels.get(
            'etcd_volume_size')
        hyperkube_prefix = mock_cluster.labels.get('hyperkube_prefix')
        kube_tag = mock_cluster.labels.get('kube_tag')
        etcd_tag = mock_cluster.labels.get('etcd_tag')
        coredns_tag = mock_cluster.labels.get('coredns_tag')
        flannel_tag = mock_cluster.labels.get('flannel_tag')
        flannel_cni_tag = mock_cluster.labels.get('flannel_cni_tag')
        container_infra_prefix = mock_cluster.labels.get(
            'container_infra_prefix')
        availability_zone = mock_cluster.labels.get(
            'availability_zone')
        cert_manager_api = mock_cluster.labels.get('cert_manager_api')
        calico_tag = mock_cluster.labels.get(
            'calico_tag')
        calico_kube_controllers_tag = mock_cluster.labels.get(
            'calico_kube_controllers_tag')
        calico_ipv4pool = mock_cluster.labels.get(
            'calico_ipv4pool')
        calico_ipv4pool_ipip = mock_cluster.labels.get(
            'calico_ipv4pool_ipip')
        # Pods CIDR depends on the configured network driver.
        if mock_cluster_template.network_driver == 'flannel':
            pods_network_cidr = flannel_cidr
        elif mock_cluster_template.network_driver == 'calico':
            pods_network_cidr = calico_ipv4pool
        cgroup_driver = mock_cluster.labels.get(
            'cgroup_driver')
        ingress_controller = mock_cluster.labels.get(
            'ingress_controller').lower()
        ingress_controller_role = mock_cluster.labels.get(
            'ingress_controller_role')
        octavia_ingress_controller_tag = mock_cluster.labels.get(
            'octavia_ingress_controller_tag')
        nginx_ingress_controller_tag = mock_cluster.labels.get(
            'nginx_ingress_controller_tag')
        nginx_ingress_controller_chart_tag = mock_cluster.labels.get(
            'nginx_ingress_controller_chart_tag')
        kubelet_options = mock_cluster.labels.get(
            'kubelet_options')
        kubeapi_options = mock_cluster.labels.get(
            'kubeapi_options')
        kubecontroller_options = mock_cluster.labels.get(
            'kubecontroller_options')
        kubescheduler_options = mock_cluster.labels.get(
            'kubescheduler_options')
        kubeproxy_options = mock_cluster.labels.get(
            'kubeproxy_options')
        cloud_provider_enabled = mock_cluster.labels.get(
            'cloud_provider_enabled')
        cloud_provider_tag = mock_cluster.labels.get(
            'cloud_provider_tag')
        service_cluster_ip_range = mock_cluster.labels.get(
            'service_cluster_ip_range')
        prometheus_tag = mock_cluster.labels.get(
            'prometheus_tag')
        grafana_tag = mock_cluster.labels.get(
            'grafana_tag')
        heat_container_agent_tag = mock_cluster.labels.get(
            'heat_container_agent_tag')
        keystone_auth_enabled = mock_cluster.labels.get(
            'keystone_auth_enabled')
        k8s_keystone_auth_tag = mock_cluster.labels.get(
            'k8s_keystone_auth_tag')
        monitoring_enabled = mock_cluster.labels.get(
            'monitoring_enabled')
        monitoring_retention_days = mock_cluster.labels.get(
            'monitoring_retention_days')
        monitoring_retention_size = mock_cluster.labels.get(
            'monitoring_retention_size')
        monitoring_interval_seconds = mock_cluster.labels.get(
            'monitoring_interval_seconds')
        monitoring_storage_class_name = mock_cluster.labels.get(
            'monitoring_storage_class_name')
        monitoring_ingress_enabled = mock_cluster.labels.get(
            'monitoring_ingress_enabled')
        cluster_basic_auth_secret = mock_cluster.labels.get(
            'cluster_basic_auth_secret')
        cluster_root_domain_name = mock_cluster.labels.get(
            'cluster_root_domain_name')
        prometheus_operator_chart_tag = mock_cluster.labels.get(
            'prometheus_operator_chart_tag')
        prometheus_adapter_enabled = mock_cluster.labels.get(
            'prometheus_adapter_enabled')
        prometheus_adapter_chart_tag = mock_cluster.labels.get(
            'prometheus_adapter_chart_tag')
        prometheus_adapter_configmap = mock_cluster.labels.get(
            'prometheus_adapter_configmap')
        project_id = mock_cluster.project_id
        tiller_enabled = mock_cluster.labels.get(
            'tiller_enabled')
        tiller_tag = mock_cluster.labels.get(
            'tiller_tag')
        tiller_namespace = mock_cluster.labels.get(
            'tiller_namespace')
        helm_client_url = mock_cluster.labels.get(
            'helm_client_url')
        helm_client_sha256 = mock_cluster.labels.get(
            'helm_client_sha256')
        helm_client_tag = mock_cluster.labels.get(
            'helm_client_tag')
        npd_tag = mock_cluster.labels.get('node_problem_detector_tag')
        traefik_ingress_controller_tag = mock_cluster.labels.get(
            'traefik_ingress_controller_tag')
        auto_healing_enabled = mock_cluster.labels.get(
            'auto_healing_enabled')
        auto_healing_controller = mock_cluster.labels.get(
            'auto_healing_controller')
        magnum_auto_healer_tag = mock_cluster.labels.get(
            'magnum_auto_healer_tag')
        auto_scaling_enabled = mock_cluster.labels.get(
            'auto_scaling_enabled')
        cinder_csi_enabled = mock_cluster.labels.get(
            'cinder_csi_enabled')
        cinder_csi_plugin_tag = mock_cluster.labels.get(
            'cinder_csi_plugin_tag')
        csi_attacher_tag = mock_cluster.labels.get(
            'csi_attacher_tag')
        csi_provisioner_tag = mock_cluster.labels.get(
            'csi_provisioner_tag')
        csi_snapshotter_tag = mock_cluster.labels.get(
            'csi_snapshotter_tag')
        csi_resizer_tag = mock_cluster.labels.get(
            'csi_resizer_tag')
        csi_node_driver_registrar_tag = mock_cluster.labels.get(
            'csi_node_driver_registrar_tag')
        draino_tag = mock_cluster.labels.get('draino_tag')
        autoscaler_tag = mock_cluster.labels.get('autoscaler_tag')
        min_node_count = mock_cluster.labels.get('min_node_count')
        max_node_count = mock_cluster.labels.get('max_node_count')
        npd_enabled = mock_cluster.labels.get('npd_enabled')
        boot_volume_size = mock_cluster.labels.get('boot_volume_size')
        boot_volume_type = mock_cluster.labels.get('boot_volume_type')
        etcd_volume_type = mock_cluster.labels.get('etcd_volume_type')
        ostree_remote = mock_cluster.labels.get('ostree_remote')
        ostree_commit = mock_cluster.labels.get('ostree_commit')
        use_podman = mock_cluster.labels.get('use_podman')
        selinux_mode = mock_cluster.labels.get('selinux_mode')
        container_runtime = mock_cluster.labels.get('container_runtime')
        containerd_version = mock_cluster.labels.get('containerd_version')
        containerd_tarball_url = mock_cluster.labels.get(
            'containerd_tarball_url')
        containerd_tarball_sha256 = mock_cluster.labels.get(
            'containerd_tarball_sha256')
        kube_image_digest = mock_cluster.labels.get('kube_image_digest')
        metrics_scraper_tag = mock_cluster.labels.get('metrics_scraper_tag')
        master_lb_allowed_cidrs = mock_cluster.labels.get(
            'master_lb_allowed_cidrs')
        octavia_provider = mock_cluster.labels.get('octavia_provider')
        octavia_lb_algorithm = mock_cluster.labels.get('octavia_lb_algorithm')
        octavia_lb_healthcheck = mock_cluster.labels.get(
            'octavia_lb_healthcheck')

        # Exercise get_params and assert the exact extra_params payload
        # that is forwarded to the base implementation.
        k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()

        k8s_def.get_params(mock_context, mock_cluster_template, mock_cluster)

        expected_kwargs = {'extra_params': {
            'discovery_url': 'fake_discovery_url',
            'flannel_network_cidr': flannel_cidr,
            'flannel_network_subnetlen': flannel_subnet,
            'flannel_backend': flannel_backend,
            'heapster_enabled': heapster_enabled,
            'metrics_server_enabled': metrics_server_enabled,
            'metrics_server_chart_tag': metrics_server_chart_tag,
            'system_pods_initial_delay': system_pods_initial_delay,
            'system_pods_timeout': system_pods_timeout,
            'admission_control_list': admission_control_list,
            'prometheus_monitoring': prometheus_monitoring,
            'grafana_admin_passwd': grafana_admin_passwd,
            'kube_dashboard_enabled': kube_dashboard_enabled,
            'influx_grafana_dashboard_enabled':
                influx_grafana_dashboard_enabled,
            'docker_volume_type': docker_volume_type,
            'boot_volume_size': boot_volume_size,
            'etcd_volume_size': etcd_volume_size,
            'kubelet_options': kubelet_options,
            'kubeapi_options': kubeapi_options,
            'kubecontroller_options': kubecontroller_options,
            'kubescheduler_options': kubescheduler_options,
            'kubeproxy_options': kubeproxy_options,
            'cloud_provider_enabled': cloud_provider_enabled,
            'cloud_provider_tag': cloud_provider_tag,
            'username': 'fake_user',
            'magnum_url': mock_osc.magnum_url.return_value,
            'region_name': mock_osc.cinder_region_name.return_value,
            'hyperkube_prefix': hyperkube_prefix,
            'kube_tag': kube_tag,
            'etcd_tag': etcd_tag,
            'coredns_tag': coredns_tag,
            'fixed_network_name': fixed_network_name,
            'fixed_subnet': fixed_subnet,
            'flannel_tag': flannel_tag,
            'flannel_cni_tag': flannel_cni_tag,
            'container_infra_prefix': container_infra_prefix,
            'nodes_affinity_policy': 'soft-anti-affinity',
            'availability_zone': availability_zone,
            'cert_manager_api': cert_manager_api,
            'calico_tag': calico_tag,
            'calico_kube_controllers_tag': calico_kube_controllers_tag,
            'calico_ipv4pool': calico_ipv4pool,
            'calico_ipv4pool_ipip': calico_ipv4pool_ipip,
            'cgroup_driver': cgroup_driver,
            'pods_network_cidr': pods_network_cidr,
            'ingress_controller': ingress_controller,
            'ingress_controller_role': ingress_controller_role,
            'octavia_ingress_controller_tag': octavia_ingress_controller_tag,
            'nginx_ingress_controller_tag': nginx_ingress_controller_tag,
            'nginx_ingress_controller_chart_tag':
                nginx_ingress_controller_chart_tag,
            'octavia_enabled': False,
            'kube_service_account_key': 'public_key',
            'kube_service_account_private_key': 'private_key',
            'portal_network_cidr': service_cluster_ip_range,
            'prometheus_tag': prometheus_tag,
            'grafana_tag': grafana_tag,
            'heat_container_agent_tag': heat_container_agent_tag,
            'keystone_auth_enabled': keystone_auth_enabled,
            'k8s_keystone_auth_tag': k8s_keystone_auth_tag,
            'monitoring_enabled': monitoring_enabled,
            'monitoring_retention_days': monitoring_retention_days,
            'monitoring_retention_size': monitoring_retention_size,
            'monitoring_interval_seconds': monitoring_interval_seconds,
            'monitoring_storage_class_name': monitoring_storage_class_name,
            'monitoring_ingress_enabled': monitoring_ingress_enabled,
            'cluster_basic_auth_secret': cluster_basic_auth_secret,
            'cluster_root_domain_name': cluster_root_domain_name,
            'prometheus_operator_chart_tag': prometheus_operator_chart_tag,
            'prometheus_adapter_enabled': prometheus_adapter_enabled,
            'prometheus_adapter_chart_tag': prometheus_adapter_chart_tag,
            'prometheus_adapter_configmap': prometheus_adapter_configmap,
            'project_id': project_id,
            'external_network': external_network_id,
            'tiller_enabled': tiller_enabled,
            'tiller_tag': tiller_tag,
            'tiller_namespace': tiller_namespace,
            'helm_client_url': helm_client_url,
            'helm_client_sha256': helm_client_sha256,
            'helm_client_tag': helm_client_tag,
            'node_problem_detector_tag': npd_tag,
            'auto_healing_enabled': auto_healing_enabled,
            'auto_healing_controller': auto_healing_controller,
            'magnum_auto_healer_tag': magnum_auto_healer_tag,
            'auto_scaling_enabled': auto_scaling_enabled,
            'cinder_csi_enabled': cinder_csi_enabled,
            'cinder_csi_plugin_tag': cinder_csi_plugin_tag,
            'csi_attacher_tag': csi_attacher_tag,
            'csi_provisioner_tag': csi_provisioner_tag,
            'csi_snapshotter_tag': csi_snapshotter_tag,
            'csi_resizer_tag': csi_resizer_tag,
            'csi_node_driver_registrar_tag': csi_node_driver_registrar_tag,
            'draino_tag': draino_tag,
            'autoscaler_tag': autoscaler_tag,
            'min_node_count': min_node_count,
            'max_node_count': max_node_count,
            'traefik_ingress_controller_tag': traefik_ingress_controller_tag,
            'npd_enabled': npd_enabled,
            'kube_version': kube_tag,
            'master_kube_tag': kube_tag,
            'minion_kube_tag': kube_tag,
            'boot_volume_size': boot_volume_size,
            'boot_volume_type': boot_volume_type,
            'etcd_volume_type': etcd_volume_type,
            'ostree_remote': ostree_remote,
            'ostree_commit': ostree_commit,
            'use_podman': use_podman,
            'selinux_mode': selinux_mode,
            'kube_image_digest': kube_image_digest,
            'container_runtime': container_runtime,
            'containerd_version': containerd_version,
            'containerd_tarball_url': containerd_tarball_url,
            'containerd_tarball_sha256': containerd_tarball_sha256,
            'post_install_manifest_url': '',
            'metrics_scraper_tag': metrics_scraper_tag,
            'master_lb_allowed_cidrs': master_lb_allowed_cidrs,
            'fixed_subnet_cidr': '20.200.0.0/16',
            'octavia_provider': octavia_provider,
            'octavia_lb_algorithm': octavia_lb_algorithm,
            'octavia_lb_healthcheck': octavia_lb_healthcheck,
        }}
        mock_get_params.assert_called_once_with(mock_context,
                                                mock_cluster_template,
                                                mock_cluster,
                                                **expected_kwargs)

        # 'cinder' volume driver requires the cloud provider to be enabled.
        mock_cluster_template.volume_driver = 'cinder'
        mock_cluster.labels = {'cloud_provider_enabled': 'false'}
        k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
        self.assertRaises(
            exception.InvalidParameterValue,
            k8s_def.get_params,
            mock_context,
            mock_cluster_template,
            mock_cluster,
        )

        # The fixed network must be resolved by name exactly once.
        actual_params = mock_get_params.call_args[1]["extra_params"]
        self.assertEqual(
            fixed_network_name,
            actual_params.get("fixed_network_name")
        )
        mock_get_fixed_network_name.assert_called_once_with(
            mock_context,
            mock_cluster.fixed_network
        )
@mock.patch('magnum.common.neutron.get_subnet')
@mock.patch('magnum.common.neutron.get_external_network_id')
@mock.patch('magnum.common.keystone.is_octavia_enabled')
@mock.patch('magnum.common.clients.OpenStackClients')
@mock.patch('magnum.drivers.k8s_fedora_atomic_v1.template_def'
'.AtomicK8sTemplateDefinition.get_discovery_url')
@mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition'
'.get_params')
@mock.patch('magnum.drivers.heat.template_def.TemplateDefinition'
'.get_output')
@mock.patch('magnum.common.x509.operations.generate_csr_and_key')
def test_k8s_get_params_external_network_id(self,
mock_generate_csr_and_key,
mock_get_output,
mock_get_params,
mock_get_discovery_url,
mock_osc_class,
mock_enable_octavia,
mock_get_external_network_id,
mock_get_subnet):
mock_generate_csr_and_key.return_value = {'csr': 'csr',
'private_key': 'private_key',
'public_key': 'public_key'}
mock_enable_octavia.return_value = False
mock_get_discovery_url.return_value = 'fake_discovery_url'
external_network_id = 'e2a6c8b0-a3c2-42a3-b3f4-01400a30896e'
mock_get_external_network_id.return_value = external_network_id
mock_context = mock.MagicMock()
mock_context.auth_token = 'AUTH_TOKEN'
mock_context.auth_url = 'http://192.168.10.10:5000/v3'
mock_context.user_name = 'fake_user'
mock_cluster_template = mock.MagicMock()
mock_cluster_template.tls_disabled = False
mock_cluster_template.registry_enabled = False
mock_cluster_template.network_driver = 'calico'
mock_cluster_template.external_network_id = 'public'
mock_cluster = mock.MagicMock()
mock_cluster.labels = {}
mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
mock_cluster.project_id = 'e2a6c8b0-a3c2-42a3-b3f4-1f639a523a52'
mock_cluster.fixed_subnet = 'f2a6c8b0-a3c2-42a3-b3f4-1f639a523a53'
mock_osc = mock.MagicMock()
mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1'
mock_osc.cinder_region_name.return_value = 'RegionOne'
mock_osc_class.return_value = mock_osc
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
k8s_def.get_params(mock_context, mock_cluster_template, mock_cluster)
actual_params = mock_get_params.call_args[1]["extra_params"]
self.assertEqual(
external_network_id,
actual_params.get("external_network")
)
mock_get_external_network_id.assert_called_once_with(
mock_context,
mock_cluster_template.external_network_id
)
    @mock.patch('magnum.common.neutron.get_subnet')
    @mock.patch('magnum.common.keystone.is_octavia_enabled')
    @mock.patch('magnum.common.clients.OpenStackClients')
    @mock.patch('magnum.drivers.k8s_fedora_atomic_v1.template_def'
                '.AtomicK8sTemplateDefinition.get_discovery_url')
    @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition'
                '.get_params')
    @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition'
                '.get_output')
    @mock.patch('magnum.common.x509.operations.generate_csr_and_key')
    def test_k8s_get_params_octavia_disabled(self,
                                             mock_generate_csr_and_key,
                                             mock_get_output,
                                             mock_get_params,
                                             mock_get_discovery_url,
                                             mock_osc_class,
                                             mock_enable_octavia,
                                             mock_get_subnet):
        """Asking for the octavia ingress controller while Octavia is
        disabled must make get_params raise InvalidParameterValue.

        NOTE(review): the mock arguments are injected bottom-up from the
        decorator stack above; keep the two lists in sync when editing.
        """
        mock_generate_csr_and_key.return_value = {'csr': 'csr',
                                                  'private_key': 'private_key',
                                                  'public_key': 'public_key'}
        # Keystone reports Octavia as unavailable...
        mock_enable_octavia.return_value = False
        mock_get_discovery_url.return_value = 'fake_discovery_url'
        mock_context = mock.MagicMock()
        mock_context.auth_token = 'AUTH_TOKEN'
        mock_context.auth_url = 'http://192.168.10.10:5000/v3'
        mock_context.user_name = 'fake_user'
        mock_cluster_template = mock.MagicMock()
        mock_cluster_template.tls_disabled = False
        mock_cluster_template.registry_enabled = False
        mock_cluster_template.network_driver = 'calico'
        external_network_id = 'e2a6c8b0-a3c2-42a3-b3f4-01400a30896e'
        mock_cluster_template.external_network_id = external_network_id
        mock_cluster = mock.MagicMock()
        # ...but the cluster explicitly requests the octavia ingress
        # controller through its labels.
        mock_cluster.labels = {"ingress_controller": "octavia"}
        mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
        mock_cluster.project_id = 'e2a6c8b0-a3c2-42a3-b3f4-1f639a523a52'
        mock_cluster.fixed_subnet = 'f2a6c8b0-a3c2-42a3-b3f4-1f639a523a53'
        mock_osc = mock.MagicMock()
        mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1'
        mock_osc.cinder_region_name.return_value = 'RegionOne'
        mock_osc_class.return_value = mock_osc
        k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
        self.assertRaises(
            exception.InvalidParameterValue,
            k8s_def.get_params,
            mock_context,
            mock_cluster_template,
            mock_cluster,
        )
    @mock.patch('magnum.common.neutron.get_subnet')
    @mock.patch('magnum.common.keystone.is_octavia_enabled')
    @mock.patch('magnum.common.clients.OpenStackClients')
    @mock.patch('magnum.drivers.k8s_fedora_atomic_v1.template_def'
                '.AtomicK8sTemplateDefinition.get_discovery_url')
    @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition'
                '.get_params')
    @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition'
                '.get_output')
    @mock.patch('magnum.common.x509.operations.generate_csr_and_key')
    def test_k8s_get_params_octavia_enabled(self,
                                            mock_generate_csr_and_key,
                                            mock_get_output,
                                            mock_get_params,
                                            mock_get_discovery_url,
                                            mock_osc_class,
                                            mock_enable_octavia,
                                            mock_get_subnet):
        """With Octavia enabled, the octavia ingress controller label is
        accepted and forwarded to the base get_params as an extra param.

        NOTE(review): the mock arguments are injected bottom-up from the
        decorator stack above; keep the two lists in sync when editing.
        """
        mock_generate_csr_and_key.return_value = {'csr': 'csr',
                                                  'private_key': 'private_key',
                                                  'public_key': 'public_key'}
        # This is the only difference from the *_octavia_disabled test.
        mock_enable_octavia.return_value = True
        mock_get_discovery_url.return_value = 'fake_discovery_url'
        mock_context = mock.MagicMock()
        mock_context.auth_token = 'AUTH_TOKEN'
        mock_context.auth_url = 'http://192.168.10.10:5000/v3'
        mock_context.user_name = 'fake_user'
        mock_cluster_template = mock.MagicMock()
        mock_cluster_template.tls_disabled = False
        mock_cluster_template.registry_enabled = False
        mock_cluster_template.network_driver = 'calico'
        external_network_id = 'e2a6c8b0-a3c2-42a3-b3f4-01400a30896e'
        mock_cluster_template.external_network_id = external_network_id
        mock_cluster = mock.MagicMock()
        mock_cluster.labels = {"ingress_controller": "octavia"}
        mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
        mock_cluster.project_id = 'e2a6c8b0-a3c2-42a3-b3f4-1f639a523a52'
        mock_cluster.fixed_subnet = 'f2a6c8b0-a3c2-42a3-b3f4-1f639a523a53'
        mock_osc = mock.MagicMock()
        mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1'
        mock_osc.cinder_region_name.return_value = 'RegionOne'
        mock_osc_class.return_value = mock_osc
        k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
        k8s_def.get_params(mock_context, mock_cluster_template, mock_cluster)
        # The label must survive into the extra_params passed down.
        actual_params = mock_get_params.call_args[1]["extra_params"]
        self.assertEqual(
            "octavia",
            actual_params.get("ingress_controller")
        )
    @mock.patch('magnum.common.neutron.get_subnet')
    @mock.patch('magnum.drivers.heat.k8s_template_def.K8sTemplateDefinition'
                '._set_master_lb_allowed_cidrs')
    @mock.patch('magnum.common.keystone.is_octavia_enabled')
    @mock.patch('magnum.common.clients.OpenStackClients')
    @mock.patch('magnum.drivers.heat.template_def'
                '.BaseTemplateDefinition.get_discovery_url')
    @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition'
                '.get_params')
    @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition'
                '.get_output')
    @mock.patch('magnum.conductor.handlers.common.cert_manager'
                '.sign_node_certificate')
    @mock.patch('magnum.common.x509.operations.generate_csr_and_key')
    def test_k8s_get_params_insecure(self, mock_generate_csr_and_key,
                                     mock_sign_node_certificate,
                                     mock_get_output, mock_get_params,
                                     mock_get_discovery_url, mock_osc_class,
                                     mock_enable_octavia,
                                     mock_set_master_lb_allowed_cidrs,
                                     mock_get_subnet):
        """get_params for a TLS-disabled cluster must pass the full set of
        extra_params (HTTP protocol, port 8080, and every label-derived
        value) down to the base implementation.

        The long block of ``labels.get(...)`` lookups below reproduces, in
        the test, the same lookups the production code performs; since
        ``mock_cluster.labels`` is a MagicMock, each ``get`` returns a mock
        object that must compare equal on both sides of the expectation.

        NOTE(review): mock arguments are injected bottom-up from the
        decorator stack above; keep the two lists in sync when editing.
        """
        mock_generate_csr_and_key.return_value = {'csr': 'csr',
                                                  'private_key': 'private_key',
                                                  'public_key': 'public_key'}
        mock_sign_node_certificate.return_value = 'signed_cert'
        mock_enable_octavia.return_value = False
        mock_context = mock.MagicMock()
        mock_context.auth_token = 'AUTH_TOKEN'
        mock_cluster_template = mock.MagicMock()
        # TLS disabled -> the "insecure" path under test.
        mock_cluster_template.tls_disabled = True
        mock_cluster_template.registry_enabled = False
        mock_cluster_template.network_driver = 'calico'
        external_network_id = '17e4e301-b7f3-4996-b3dd-97b3a700174b'
        mock_cluster_template.external_network_id = external_network_id
        mock_cluster_template.no_proxy = ""
        mock_cluster = mock.MagicMock()
        fixed_network_name = 'fixed_network'
        mock_cluster.fixed_network = fixed_network_name
        mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
        fixed_subnet = 'f2a6c8b0-a3c2-42a3-b3f4-1f639a523a53'
        mock_cluster.fixed_subnet = fixed_subnet
        # Remove the auto-created attribute so hasattr(cluster, 'stack_id')
        # is False, i.e. the cluster looks not-yet-created.
        del mock_cluster.stack_id
        mock_osc = mock.MagicMock()
        mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1'
        # NOTE(review): this bare attribute access is a no-op (no
        # assignment); it only materializes the mock child that the
        # 'region_name' expectation below reads back. Probably meant to
        # assign 'RegionOne' as the other tests do — confirm.
        mock_osc.cinder_region_name.return_value
        mock_osc_class.return_value = mock_osc
        mock_get_discovery_url.return_value = 'fake_discovery_url'
        mock_context.auth_url = 'http://192.168.10.10:5000/v3'
        mock_context.user_name = 'fake_user'
        mock_get_subnet.return_value = "20.200.0.0/16"
        # --- label-derived values, mirroring the production lookups ---
        flannel_cidr = mock_cluster.labels.get('flannel_network_cidr')
        flannel_subnet = mock_cluster.labels.get(
            'flannel_network_subnetlen')
        flannel_backend = mock_cluster.labels.get('flannel_backend')
        heapster_enabled = mock_cluster.labels.get(
            'heapster_enabled')
        metrics_server_enabled = mock_cluster.labels.get(
            'metrics_server_enabled')
        metrics_server_chart_tag = mock_cluster.labels.get(
            'metrics_server_chart_tag')
        system_pods_initial_delay = mock_cluster.labels.get(
            'system_pods_initial_delay')
        system_pods_timeout = mock_cluster.labels.get(
            'system_pods_timeout')
        admission_control_list = mock_cluster.labels.get(
            'admission_control_list')
        prometheus_monitoring = mock_cluster.labels.get(
            'prometheus_monitoring')
        grafana_admin_passwd = mock_cluster.labels.get(
            'grafana_admin_passwd')
        kube_dashboard_enabled = mock_cluster.labels.get(
            'kube_dashboard_enabled')
        influx_grafana_dashboard_enabled = mock_cluster.labels.get(
            'influx_grafana_dashboard_enabled')
        docker_volume_type = mock_cluster.labels.get(
            'docker_volume_type')
        boot_volume_size = mock_cluster.labels.get(
            'boot_volume_size')
        etcd_volume_size = mock_cluster.labels.get(
            'etcd_volume_size')
        hyperkube_prefix = mock_cluster.labels.get('hyperkube_prefix')
        kube_tag = mock_cluster.labels.get('kube_tag')
        etcd_tag = mock_cluster.labels.get('etcd_tag')
        coredns_tag = mock_cluster.labels.get('coredns_tag')
        flannel_tag = mock_cluster.labels.get('flannel_tag')
        flannel_cni_tag = mock_cluster.labels.get('flannel_cni_tag')
        container_infra_prefix = mock_cluster.labels.get(
            'container_infra_prefix')
        availability_zone = mock_cluster.labels.get(
            'availability_zone')
        cert_manager_api = mock_cluster.labels.get('cert_manager_api')
        calico_tag = mock_cluster.labels.get(
            'calico_tag')
        calico_kube_controllers_tag = mock_cluster.labels.get(
            'calico_kube_controllers_tag')
        calico_ipv4pool = mock_cluster.labels.get(
            'calico_ipv4pool')
        calico_ipv4pool_ipip = mock_cluster.labels.get(
            'calico_ipv4pool_ipip')
        # The template above sets network_driver = 'calico', so the
        # calico branch is the one taken here.
        if mock_cluster_template.network_driver == 'flannel':
            pods_network_cidr = flannel_cidr
        elif mock_cluster_template.network_driver == 'calico':
            pods_network_cidr = calico_ipv4pool
        cgroup_driver = mock_cluster.labels.get(
            'cgroup_driver')
        ingress_controller = mock_cluster.labels.get(
            'ingress_controller').lower()
        ingress_controller_role = mock_cluster.labels.get(
            'ingress_controller_role')
        octavia_ingress_controller_tag = mock_cluster.labels.get(
            'octavia_ingress_controller_tag')
        nginx_ingress_controller_tag = mock_cluster.labels.get(
            'nginx_ingress_controller_tag')
        nginx_ingress_controller_chart_tag = mock_cluster.labels.get(
            'nginx_ingress_controller_chart_tag')
        kubelet_options = mock_cluster.labels.get(
            'kubelet_options')
        kubeapi_options = mock_cluster.labels.get(
            'kubeapi_options')
        kubecontroller_options = mock_cluster.labels.get(
            'kubecontroller_options')
        kubescheduler_options = mock_cluster.labels.get(
            'kubescheduler_options')
        kubeproxy_options = mock_cluster.labels.get(
            'kubeproxy_options')
        cloud_provider_enabled = mock_cluster.labels.get(
            'cloud_provider_enabled')
        cloud_provider_tag = mock_cluster.labels.get(
            'cloud_provider_tag')
        service_cluster_ip_range = mock_cluster.labels.get(
            'service_cluster_ip_range')
        prometheus_tag = mock_cluster.labels.get(
            'prometheus_tag')
        grafana_tag = mock_cluster.labels.get(
            'grafana_tag')
        heat_container_agent_tag = mock_cluster.labels.get(
            'heat_container_agent_tag')
        keystone_auth_enabled = mock_cluster.labels.get(
            'keystone_auth_enabled')
        k8s_keystone_auth_tag = mock_cluster.labels.get(
            'k8s_keystone_auth_tag')
        monitoring_enabled = mock_cluster.labels.get(
            'monitoring_enabled')
        monitoring_retention_days = mock_cluster.labels.get(
            'monitoring_retention_days')
        monitoring_retention_size = mock_cluster.labels.get(
            'monitoring_retention_size')
        monitoring_interval_seconds = mock_cluster.labels.get(
            'monitoring_interval_seconds')
        monitoring_storage_class_name = mock_cluster.labels.get(
            'monitoring_storage_class_name')
        monitoring_ingress_enabled = mock_cluster.labels.get(
            'monitoring_ingress_enabled')
        cluster_basic_auth_secret = mock_cluster.labels.get(
            'cluster_basic_auth_secret')
        cluster_root_domain_name = mock_cluster.labels.get(
            'cluster_root_domain_name')
        prometheus_operator_chart_tag = mock_cluster.labels.get(
            'prometheus_operator_chart_tag')
        prometheus_adapter_enabled = mock_cluster.labels.get(
            'prometheus_adapter_enabled')
        prometheus_adapter_chart_tag = mock_cluster.labels.get(
            'prometheus_adapter_chart_tag')
        prometheus_adapter_configmap = mock_cluster.labels.get(
            'prometheus_adapter_configmap')
        project_id = mock_cluster.project_id
        tiller_enabled = mock_cluster.labels.get(
            'tiller_enabled')
        tiller_tag = mock_cluster.labels.get(
            'tiller_tag')
        tiller_namespace = mock_cluster.labels.get(
            'tiller_namespace')
        helm_client_url = mock_cluster.labels.get(
            'helm_client_url')
        helm_client_sha256 = mock_cluster.labels.get(
            'helm_client_sha256')
        helm_client_tag = mock_cluster.labels.get(
            'helm_client_tag')
        npd_tag = mock_cluster.labels.get('node_problem_detector_tag')
        traefik_ingress_controller_tag = mock_cluster.labels.get(
            'traefik_ingress_controller_tag')
        auto_healing_enabled = mock_cluster.labels.get(
            'auto_healing_enabled')
        auto_healing_controller = mock_cluster.labels.get(
            'auto_healing_controller')
        magnum_auto_healer_tag = mock_cluster.labels.get(
            'magnum_auto_healer_tag')
        auto_scaling_enabled = mock_cluster.labels.get(
            'auto_scaling_enabled')
        cinder_csi_enabled = mock_cluster.labels.get(
            'cinder_csi_enabled')
        cinder_csi_plugin_tag = mock_cluster.labels.get(
            'cinder_csi_plugin_tag')
        csi_attacher_tag = mock_cluster.labels.get(
            'csi_attacher_tag')
        csi_provisioner_tag = mock_cluster.labels.get(
            'csi_provisioner_tag')
        csi_snapshotter_tag = mock_cluster.labels.get(
            'csi_snapshotter_tag')
        csi_resizer_tag = mock_cluster.labels.get(
            'csi_resizer_tag')
        csi_node_driver_registrar_tag = mock_cluster.labels.get(
            'csi_node_driver_registrar_tag')
        draino_tag = mock_cluster.labels.get('draino_tag')
        autoscaler_tag = mock_cluster.labels.get('autoscaler_tag')
        min_node_count = mock_cluster.labels.get('min_node_count')
        max_node_count = mock_cluster.labels.get('max_node_count')
        npd_enabled = mock_cluster.labels.get('npd_enabled')
        # NOTE(review): duplicate of the boot_volume_size lookup above;
        # harmless (same value) but one of the two could be dropped.
        boot_volume_size = mock_cluster.labels.get('boot_volume_size')
        boot_volume_type = mock_cluster.labels.get('boot_volume_type')
        etcd_volume_type = mock_cluster.labels.get('etcd_volume_type')
        ostree_remote = mock_cluster.labels.get('ostree_remote')
        ostree_commit = mock_cluster.labels.get('ostree_commit')
        use_podman = mock_cluster.labels.get('use_podman')
        selinux_mode = mock_cluster.labels.get('selinux_mode')
        container_runtime = mock_cluster.labels.get('container_runtime')
        containerd_version = mock_cluster.labels.get('containerd_version')
        containerd_tarball_url = mock_cluster.labels.get(
            'containerd_tarball_url')
        containerd_tarball_sha256 = mock_cluster.labels.get(
            'containerd_tarball_sha256')
        kube_image_digest = mock_cluster.labels.get('kube_image_digest')
        metrics_scraper_tag = mock_cluster.labels.get('metrics_scraper_tag')
        master_lb_allowed_cidrs = mock_cluster.labels.get(
            'master_lb_allowed_cidrs')
        octavia_provider = mock_cluster.labels.get('octavia_provider')
        octavia_lb_algorithm = mock_cluster.labels.get('octavia_lb_algorithm')
        octavia_lb_healthcheck = mock_cluster.labels.get('octavia_lb_healthcheck')
        k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
        k8s_def.get_params(mock_context, mock_cluster_template, mock_cluster)
        expected_kwargs = {'extra_params': {
            'discovery_url': 'fake_discovery_url',
            'flannel_network_cidr': flannel_cidr,
            'flannel_network_subnetlen': flannel_subnet,
            'flannel_backend': flannel_backend,
            'heapster_enabled': heapster_enabled,
            'metrics_server_enabled': metrics_server_enabled,
            'metrics_server_chart_tag': metrics_server_chart_tag,
            'system_pods_initial_delay': system_pods_initial_delay,
            'system_pods_timeout': system_pods_timeout,
            'fixed_network_name': fixed_network_name,
            'fixed_subnet': fixed_subnet,
            'admission_control_list': admission_control_list,
            'prometheus_monitoring': prometheus_monitoring,
            'grafana_admin_passwd': grafana_admin_passwd,
            'kube_dashboard_enabled': kube_dashboard_enabled,
            'influx_grafana_dashboard_enabled':
                influx_grafana_dashboard_enabled,
            'docker_volume_type': docker_volume_type,
            # NOTE(review): 'boot_volume_size' appears twice in this dict
            # literal (again further down); the later entry wins. Both
            # reference the same variable, so the result is unaffected.
            'boot_volume_size': boot_volume_size,
            'etcd_volume_size': etcd_volume_size,
            'kubelet_options': kubelet_options,
            'kubeapi_options': kubeapi_options,
            'kubecontroller_options': kubecontroller_options,
            'kubescheduler_options': kubescheduler_options,
            'kubeproxy_options': kubeproxy_options,
            'cloud_provider_enabled': cloud_provider_enabled,
            'cloud_provider_tag': cloud_provider_tag,
            'username': 'fake_user',
            'magnum_url': mock_osc.magnum_url.return_value,
            'region_name': mock_osc.cinder_region_name.return_value,
            # TLS disabled -> plain HTTP on the standard insecure port.
            'loadbalancing_protocol': 'HTTP',
            'kubernetes_port': 8080,
            'hyperkube_prefix': hyperkube_prefix,
            'kube_tag': kube_tag,
            'etcd_tag': etcd_tag,
            'coredns_tag': coredns_tag,
            'flannel_tag': flannel_tag,
            'flannel_cni_tag': flannel_cni_tag,
            'container_infra_prefix': container_infra_prefix,
            'nodes_affinity_policy': 'soft-anti-affinity',
            'availability_zone': availability_zone,
            'cert_manager_api': cert_manager_api,
            'calico_tag': calico_tag,
            'calico_kube_controllers_tag': calico_kube_controllers_tag,
            'calico_ipv4pool': calico_ipv4pool,
            'calico_ipv4pool_ipip': calico_ipv4pool_ipip,
            'cgroup_driver': cgroup_driver,
            'pods_network_cidr': pods_network_cidr,
            'ingress_controller': ingress_controller,
            'ingress_controller_role': ingress_controller_role,
            'octavia_ingress_controller_tag': octavia_ingress_controller_tag,
            'nginx_ingress_controller_tag': nginx_ingress_controller_tag,
            'nginx_ingress_controller_chart_tag':
                nginx_ingress_controller_chart_tag,
            'octavia_enabled': False,
            'kube_service_account_key': 'public_key',
            'kube_service_account_private_key': 'private_key',
            'portal_network_cidr': service_cluster_ip_range,
            'prometheus_tag': prometheus_tag,
            'grafana_tag': grafana_tag,
            'heat_container_agent_tag': heat_container_agent_tag,
            'keystone_auth_enabled': keystone_auth_enabled,
            'k8s_keystone_auth_tag': k8s_keystone_auth_tag,
            'monitoring_enabled': monitoring_enabled,
            'monitoring_retention_days': monitoring_retention_days,
            'monitoring_retention_size': monitoring_retention_size,
            'monitoring_interval_seconds': monitoring_interval_seconds,
            'monitoring_storage_class_name': monitoring_storage_class_name,
            'monitoring_ingress_enabled': monitoring_ingress_enabled,
            'cluster_basic_auth_secret': cluster_basic_auth_secret,
            'cluster_root_domain_name': cluster_root_domain_name,
            'prometheus_operator_chart_tag': prometheus_operator_chart_tag,
            'prometheus_adapter_enabled': prometheus_adapter_enabled,
            'prometheus_adapter_chart_tag': prometheus_adapter_chart_tag,
            'prometheus_adapter_configmap': prometheus_adapter_configmap,
            'project_id': project_id,
            'external_network': external_network_id,
            'tiller_enabled': tiller_enabled,
            'tiller_tag': tiller_tag,
            'tiller_namespace': tiller_namespace,
            'helm_client_url': helm_client_url,
            'helm_client_sha256': helm_client_sha256,
            'helm_client_tag': helm_client_tag,
            'node_problem_detector_tag': npd_tag,
            'auto_healing_enabled': auto_healing_enabled,
            'auto_healing_controller': auto_healing_controller,
            'magnum_auto_healer_tag': magnum_auto_healer_tag,
            'auto_scaling_enabled': auto_scaling_enabled,
            'cinder_csi_enabled': cinder_csi_enabled,
            'cinder_csi_plugin_tag': cinder_csi_plugin_tag,
            'csi_attacher_tag': csi_attacher_tag,
            'csi_provisioner_tag': csi_provisioner_tag,
            'csi_snapshotter_tag': csi_snapshotter_tag,
            'csi_resizer_tag': csi_resizer_tag,
            'csi_node_driver_registrar_tag': csi_node_driver_registrar_tag,
            'draino_tag': draino_tag,
            'autoscaler_tag': autoscaler_tag,
            'min_node_count': min_node_count,
            'max_node_count': max_node_count,
            'traefik_ingress_controller_tag': traefik_ingress_controller_tag,
            'npd_enabled': npd_enabled,
            'kube_version': kube_tag,
            'master_kube_tag': kube_tag,
            'minion_kube_tag': kube_tag,
            'boot_volume_size': boot_volume_size,
            'boot_volume_type': boot_volume_type,
            'etcd_volume_type': etcd_volume_type,
            'ostree_remote': ostree_remote,
            'ostree_commit': ostree_commit,
            'use_podman': use_podman,
            'selinux_mode': selinux_mode,
            'kube_image_digest': kube_image_digest,
            'container_runtime': container_runtime,
            'containerd_version': containerd_version,
            'containerd_tarball_url': containerd_tarball_url,
            'containerd_tarball_sha256': containerd_tarball_sha256,
            'post_install_manifest_url': '',
            'metrics_scraper_tag': metrics_scraper_tag,
            'master_lb_allowed_cidrs': master_lb_allowed_cidrs,
            # Comes from mock_get_subnet.return_value above.
            'fixed_subnet_cidr': '20.200.0.0/16',
            'octavia_provider': octavia_provider,
            'octavia_lb_algorithm': octavia_lb_algorithm,
            'octavia_lb_healthcheck': octavia_lb_healthcheck,
        }}
        mock_get_params.assert_called_once_with(mock_context,
                                                mock_cluster_template,
                                                mock_cluster,
                                                **expected_kwargs)
@mock.patch('requests.get')
def test_k8s_validate_discovery_url(self, mock_get):
expected_result = str('{"action":"get","node":{"key":"test","value":'
'"1","modifiedIndex":10,"createdIndex":10}}')
mock_resp = mock.MagicMock()
mock_resp.text = expected_result
mock_get.return_value = mock_resp
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
k8s_def.validate_discovery_url('http://etcd/test', 1)
@mock.patch('requests.get')
def test_k8s_validate_discovery_url_fail(self, mock_get):
mock_get.side_effect = req_exceptions.RequestException()
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
self.assertRaises(exception.GetClusterSizeFailed,
k8s_def.validate_discovery_url,
'http://etcd/test', 1)
@mock.patch('requests.get')
def test_k8s_validate_discovery_url_invalid(self, mock_get):
mock_resp = mock.MagicMock()
mock_resp.text = str('{"action":"get"}')
mock_get.return_value = mock_resp
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
self.assertRaises(exception.InvalidClusterDiscoveryURL,
k8s_def.validate_discovery_url,
'http://etcd/test', 1)
@mock.patch('requests.get')
def test_k8s_validate_discovery_url_unexpect_size(self, mock_get):
expected_result = str('{"action":"get","node":{"key":"test","value":'
'"1","modifiedIndex":10,"createdIndex":10}}')
mock_resp = mock.MagicMock()
mock_resp.text = expected_result
mock_get.return_value = mock_resp
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
self.assertRaises(exception.InvalidClusterSize,
k8s_def.validate_discovery_url,
'http://etcd/test', 5)
@mock.patch('requests.get')
def test_k8s_get_discovery_url(self, mock_get):
CONF.set_override('etcd_discovery_service_endpoint_format',
'http://etcd/test?size=%(size)d',
group='cluster')
expected_discovery_url = 'http://etcd/token'
mock_resp = mock.MagicMock()
mock_resp.text = expected_discovery_url
mock_resp.status_code = 200
mock_get.return_value = mock_resp
mock_cluster = mock.MagicMock()
mock_cluster.master_count = 10
mock_cluster.discovery_url = None
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
discovery_url = k8s_def.get_discovery_url(mock_cluster)
mock_get.assert_called_once_with('http://etcd/test?size=10')
self.assertEqual(expected_discovery_url, mock_cluster.discovery_url)
self.assertEqual(expected_discovery_url, discovery_url)
@mock.patch('requests.get')
def test_k8s_get_discovery_url_fail(self, mock_get):
CONF.set_override('etcd_discovery_service_endpoint_format',
'http://etcd/test?size=%(size)d',
group='cluster')
mock_get.side_effect = req_exceptions.RequestException()
mock_cluster = mock.MagicMock()
mock_cluster.master_count = 10
mock_cluster.discovery_url = None
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
self.assertRaises(exception.GetDiscoveryUrlFailed,
k8s_def.get_discovery_url, mock_cluster)
def test_k8s_get_heat_param(self):
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
k8s_def.add_nodegroup_params(self.mock_cluster)
heat_param = k8s_def.get_heat_param(nodegroup_attr='node_count',
nodegroup_uuid='worker_ng')
self.assertEqual('number_of_minions', heat_param)
heat_param = k8s_def.get_heat_param(nodegroup_attr='node_count',
nodegroup_uuid='master_ng')
self.assertEqual('number_of_masters', heat_param)
@mock.patch('requests.get')
def test_k8s_get_discovery_url_not_found(self, mock_get):
mock_resp = mock.MagicMock()
mock_resp.text = ''
mock_resp.status_code = 200
mock_get.return_value = mock_resp
fake_cluster = mock.MagicMock()
fake_cluster.discovery_url = None
self.assertRaises(
exception.InvalidDiscoveryURL,
k8sa_tdef.AtomicK8sTemplateDefinition().get_discovery_url,
fake_cluster)
def _test_update_outputs_api_address(self, template_definition,
params, tls=True):
expected_api_address = '%(protocol)s://%(address)s:%(port)s' % params
outputs = [
{"output_value": params['address'],
"description": "No description given",
"output_key": 'api_address'},
]
mock_stack = mock.MagicMock()
mock_stack.to_dict.return_value = {'outputs': outputs}
mock_cluster = mock.MagicMock()
mock_cluster_template = mock.MagicMock()
mock_cluster_template.tls_disabled = tls
template_definition.update_outputs(mock_stack, mock_cluster_template,
mock_cluster)
self.assertEqual(expected_api_address, mock_cluster.api_address)
def test_update_k8s_outputs_api_address(self):
address = 'updated_address'
protocol = 'http'
port = '8080'
params = {
'protocol': protocol,
'address': address,
'port': port,
}
template_definition = k8sa_tdef.AtomicK8sTemplateDefinition()
self._test_update_outputs_api_address(template_definition, params)
def test_update_swarm_outputs_api_address(self):
address = 'updated_address'
protocol = 'tcp'
port = '2376'
params = {
'protocol': protocol,
'address': address,
'port': port,
}
template_definition = swarm_tdef.AtomicSwarmTemplateDefinition()
self._test_update_outputs_api_address(template_definition, params)
def test_update_k8s_outputs_if_cluster_template_is_secure(self):
address = 'updated_address'
protocol = 'https'
port = '6443'
params = {
'protocol': protocol,
'address': address,
'port': port,
}
template_definition = k8sa_tdef.AtomicK8sTemplateDefinition()
self._test_update_outputs_api_address(template_definition, params,
tls=False)
def test_update_swarm_outputs_if_cluster_template_is_secure(self):
address = 'updated_address'
protocol = 'tcp'
port = '2376'
params = {
'protocol': protocol,
'address': address,
'port': port,
}
template_definition = swarm_tdef.AtomicSwarmTemplateDefinition()
self._test_update_outputs_api_address(template_definition, params,
tls=False)
def _test_update_outputs_none_api_address(self, template_definition,
params, tls=True):
outputs = [
{"output_value": params['address'],
"description": "No description given",
"output_key": 'api_address'},
]
mock_stack = mock.MagicMock()
mock_stack.to_dict.return_value = {'outputs': outputs}
mock_cluster = mock.MagicMock()
mock_cluster.api_address = 'none_api_address'
mock_cluster_template = mock.MagicMock()
mock_cluster_template.tls_disabled = tls
template_definition.update_outputs(mock_stack, mock_cluster_template,
mock_cluster)
self.assertEqual('none_api_address', mock_cluster.api_address)
def test_update_k8s_outputs_none_api_address(self):
protocol = 'http'
port = '8080'
params = {
'protocol': protocol,
'address': None,
'port': port,
}
template_definition = k8sa_tdef.AtomicK8sTemplateDefinition()
self._test_update_outputs_none_api_address(template_definition, params)
def test_update_swarm_outputs_none_api_address(self):
protocol = 'tcp'
port = '2376'
params = {
'protocol': protocol,
'address': None,
'port': port,
}
template_definition = swarm_tdef.AtomicSwarmTemplateDefinition()
self._test_update_outputs_none_api_address(template_definition, params)
def test_update_outputs_master_address(self):
self._test_update_outputs_server_address(
public_ip_output_key='kube_masters',
private_ip_output_key='kube_masters_private',
nodegroup_attr='node_addresses',
is_master=True
)
def test_update_outputs_node_address(self):
self._test_update_outputs_server_address(
public_ip_output_key='kube_minions',
private_ip_output_key='kube_minions_private',
nodegroup_attr='node_addresses',
is_master=False
)
def test_update_outputs_master_address_fip_disabled(self):
self._test_update_outputs_server_address(
floating_ip_enabled=False,
public_ip_output_key='kube_masters',
private_ip_output_key='kube_masters_private',
nodegroup_attr='node_addresses',
is_master=True
)
def test_update_outputs_node_address_fip_disabled(self):
self._test_update_outputs_server_address(
floating_ip_enabled=False,
public_ip_output_key='kube_minions',
private_ip_output_key='kube_minions_private',
nodegroup_attr='node_addresses',
is_master=False
)
def test_set_master_lb_allowed_cidrs(self):
definition = self.get_definition()
extra_params = {"master_lb_allowed_cidrs": "192.168.0.0/16"}
mock_cluster = mock.MagicMock()
mock_context = mock.MagicMock()
mock_cluster.labels = {}
definition._set_master_lb_allowed_cidrs(mock_context,
mock_cluster, extra_params)
self.assertEqual(extra_params["master_lb_allowed_cidrs"],
"192.168.0.0/16,10.0.0.0/24")
def test_set_master_lb_allowed_cidrs_fixed_subnet_cidr(self):
definition = self.get_definition()
extra_params = {"master_lb_allowed_cidrs": "192.168.0.0/16"}
mock_cluster = mock.MagicMock()
mock_context = mock.MagicMock()
mock_cluster.labels = {"fixed_subnet_cidr": "100.0.0.0/24"}
definition._set_master_lb_allowed_cidrs(mock_context,
mock_cluster, extra_params)
self.assertEqual(extra_params["master_lb_allowed_cidrs"],
"192.168.0.0/16,100.0.0.0/24")
@mock.patch('magnum.common.neutron.get_subnet')
def test_set_master_lb_allowed_cidrs_find_subnet_cidr(self,
mock_get_subnet):
definition = self.get_definition()
extra_params = {"master_lb_allowed_cidrs": "192.168.0.0/16",
"fixed_subnet": "fake_subnet_id"}
mock_cluster = mock.MagicMock()
mock_context = mock.MagicMock()
mock_cluster.labels = {}
mock_get_subnet.return_value = "172.24.0.0/16"
definition._set_master_lb_allowed_cidrs(mock_context,
mock_cluster, extra_params)
self.assertEqual(extra_params["master_lb_allowed_cidrs"],
"192.168.0.0/16,172.24.0.0/16")
class FedoraK8sIronicTemplateDefinitionTestCase(base.TestCase):
    """Exercises the Ironic k8s driver's fixed-network/subnet lookup."""

    def get_definition(self):
        """Return a fresh template definition from the Ironic driver."""
        return k8s_i_dr.Driver().get_template_definition()

    def assert_neutron_find(self, mock_neutron_v20_find,
                            osc, cluster_template):
        """The lookup must resolve the template's fixed subnet exactly
        once via neutron's find-by-name-or-id helper."""
        mock_neutron_v20_find.assert_called_once_with(
            osc.neutron(), 'subnet', cluster_template.fixed_subnet)

    def assert_raises_from_get_fixed_network_id(
        self,
        mock_neutron_v20_find,
        exception_from_neutron_client,
        expected_exception_class
    ):
        """Check that get_fixed_network_id surfaces a neutronclient
        failure as *expected_exception_class*."""
        definition = self.get_definition()
        mock_neutron_v20_find.side_effect = exception_from_neutron_client
        self.assertRaises(
            expected_exception_class,
            definition.get_fixed_network_id,
            mock.MagicMock(),
            mock.MagicMock()
        )

    @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id')
    def test_get_fixed_network_id(self, mock_neutron_v20_find):
        """An IPv4 subnet resolves to its owning network id."""
        osc = mock.MagicMock()
        cluster_template = mock.MagicMock()
        mock_neutron_v20_find.return_value = {
            'ip_version': 4,
            'network_id': 'expected_network_id',
        }
        definition = self.get_definition()
        network_id = definition.get_fixed_network_id(osc, cluster_template)
        self.assertEqual('expected_network_id', network_id)
        self.assert_neutron_find(mock_neutron_v20_find, osc,
                                 cluster_template)

    @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id')
    def test_get_fixed_network_id_with_invalid_ip_ver(self,
                                                      mock_neutron_v20_find):
        """IPv6 subnets are rejected with InvalidSubnet."""
        mock_neutron_v20_find.return_value = {
            'ip_version': 6,
            'network_id': 'expected_network_id',
        }
        definition = self.get_definition()
        self.assertRaises(
            exception.InvalidSubnet,
            definition.get_fixed_network_id,
            mock.MagicMock(),
            mock.MagicMock()
        )

    @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id')
    def test_get_fixed_network_id_with_duplicated_name(self,
                                                       mock_neutron_v20_find):
        """An ambiguous subnet name is reported as InvalidSubnet."""
        err = n_exception.NeutronClientNoUniqueMatch(
            resource='subnet',
            name='duplicated-name'
        )
        self.assert_raises_from_get_fixed_network_id(
            mock_neutron_v20_find,
            err,
            exception.InvalidSubnet,
        )

    @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id')
    def test_get_fixed_network_id_with_client_error(self,
                                                    mock_neutron_v20_find):
        """A client-side neutron error is reported as InvalidSubnet."""
        self.assert_raises_from_get_fixed_network_id(
            mock_neutron_v20_find,
            n_exception.BadRequest(),
            exception.InvalidSubnet,
        )

    @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id')
    def test_get_fixed_network_id_with_server_error(self,
                                                    mock_neutron_v20_find):
        """A server-side neutron error propagates unchanged."""
        self.assert_raises_from_get_fixed_network_id(
            mock_neutron_v20_find,
            n_exception.ServiceUnavailable(),
            n_exception.ServiceUnavailable,
        )
class AtomicSwarmModeTemplateDefinitionTestCase(base.TestCase):
    """Tests for the swarm-mode (v2) Heat template definition."""

    def setUp(self):
        super(AtomicSwarmModeTemplateDefinitionTestCase, self).setUp()
        # One default master and one default worker nodegroup, as a freshly
        # created cluster would have.
        self.master_ng = mock.MagicMock(uuid='master_ng', role='master')
        self.worker_ng = mock.MagicMock(uuid='worker_ng', role='worker')
        self.nodegroups = [self.master_ng, self.worker_ng]
        self.mock_cluster = mock.MagicMock(nodegroups=self.nodegroups,
                                           default_ng_worker=self.worker_ng,
                                           default_ng_master=self.master_ng)

    def get_definition(self):
        """Return a fresh template definition from the swarm v2 driver."""
        return swarm_v2_dr.Driver().get_template_definition()

    def _test_update_outputs_server_address(
        self,
        floating_ip_enabled=True,
        public_ip_output_key='swarm_nodes',
        private_ip_output_key='swarm_nodes_private',
        cluster_attr=None,
        nodegroup_attr=None,
        is_master=False
    ):
        """Shared helper: update_outputs maps stack outputs onto addresses.

        Builds a fake stack exposing a public and a private address output,
        runs update_outputs, and asserts the public (or, with floating IPs
        disabled, the private) address ends up on the requested cluster
        attribute or nodegroup attribute.
        """
        definition = self.get_definition()
        expected_address = expected_public_address = ['public']
        expected_private_address = ['private']
        if not floating_ip_enabled:
            # Without floating IPs only private addresses are published.
            expected_address = expected_private_address
        outputs = [
            {"output_value": expected_public_address,
             "description": "No description given",
             "output_key": public_ip_output_key},
            {"output_value": expected_private_address,
             "description": "No description given",
             "output_key": private_ip_output_key},
        ]
        mock_stack = mock.MagicMock()
        mock_stack.to_dict.return_value = {'outputs': outputs}
        mock_cluster_template = mock.MagicMock()
        mock_cluster_template.floating_ip_enabled = floating_ip_enabled
        self.mock_cluster.floating_ip_enabled = floating_ip_enabled
        definition.update_outputs(mock_stack, mock_cluster_template,
                                  self.mock_cluster)
        actual = None
        if cluster_attr:
            actual = getattr(self.mock_cluster, cluster_attr)
        elif is_master:
            actual = getattr(self.master_ng, nodegroup_attr)
        else:
            actual = getattr(self.worker_ng, nodegroup_attr)
        self.assertEqual(expected_address, actual)

    @mock.patch('magnum.common.clients.OpenStackClients')
    @mock.patch('magnum.drivers.swarm_fedora_atomic_v2.template_def'
                '.AtomicSwarmTemplateDefinition.get_discovery_url')
    @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition'
                '.get_params')
    @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition'
                '.get_output')
    def test_swarm_get_params(self, mock_get_output, mock_get_params,
                              mock_get_discovery_url, mock_osc_class):
        """get_params forwards the expected extra_params to the base class."""
        mock_context = mock.MagicMock()
        mock_context.auth_token = 'AUTH_TOKEN'
        mock_cluster_template = mock.MagicMock()
        mock_cluster_template.tls_disabled = False
        mock_cluster_template.registry_enabled = False
        mock_cluster = mock.MagicMock()
        mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
        # Deleting stack_id makes hasattr() on the MagicMock return False.
        del mock_cluster.stack_id
        mock_osc = mock.MagicMock()
        mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1'
        mock_osc_class.return_value = mock_osc
        discovery_url = 'fake_discovery_url'
        mock_get_discovery_url.return_value = discovery_url
        mock_context.auth_url = 'http://192.168.10.10:5000/v3'
        mock_context.user_name = 'fake_user'
        mock_context.tenant = 'fake_tenant'
        docker_volume_type = mock_cluster.labels.get(
            'docker_volume_type')
        rexray_preempt = mock_cluster.labels.get('rexray_preempt')
        availability_zone = mock_cluster.labels.get(
            'availability_zone')
        number_of_secondary_masters = mock_cluster.master_count - 1
        swarm_def = swarm_v2_tdef.AtomicSwarmTemplateDefinition()
        swarm_def.get_params(mock_context, mock_cluster_template, mock_cluster)
        expected_kwargs = {'extra_params': {
            'magnum_url': mock_osc.magnum_url.return_value,
            'auth_url': 'http://192.168.10.10:5000/v3',
            'rexray_preempt': rexray_preempt,
            'docker_volume_type': docker_volume_type,
            'number_of_secondary_masters': number_of_secondary_masters,
            'availability_zone': availability_zone,
            'nodes_affinity_policy': 'soft-anti-affinity'}}
        mock_get_params.assert_called_once_with(mock_context,
                                                mock_cluster_template,
                                                mock_cluster,
                                                **expected_kwargs)

    def test_swarm_get_heat_param(self):
        """Nodegroup and cluster attributes map to the right Heat params."""
        swarm_def = swarm_v2_tdef.AtomicSwarmTemplateDefinition()
        swarm_def.add_nodegroup_params(self.mock_cluster)
        heat_param = swarm_def.get_heat_param(nodegroup_attr='node_count',
                                              nodegroup_uuid='worker_ng')
        self.assertEqual('number_of_nodes', heat_param)
        heat_param = swarm_def.get_heat_param(cluster_attr='uuid')
        self.assertEqual('cluster_uuid', heat_param)

    def test_swarm_get_scale_params(self):
        """Scaling yields only the new worker count."""
        mock_context = mock.MagicMock()
        swarm_def = swarm_v2_tdef.AtomicSwarmTemplateDefinition()
        self.assertEqual(
            swarm_def.get_scale_params(mock_context, self.mock_cluster, 7),
            {'number_of_nodes': 7})

    def test_update_outputs(self):
        """api_address gets the tcp:// scheme and workers get addresses."""
        swarm_def = swarm_v2_tdef.AtomicSwarmTemplateDefinition()
        expected_api_address = 'updated_address'
        expected_node_addresses = ['ex_minion', 'address']
        outputs = [
            {"output_value": expected_api_address,
             "description": "No description given",
             "output_key": "api_address"},
            {"output_value": ['any', 'output'],
             "description": "No description given",
             "output_key": "swarm_master_private"},
            {"output_value": ['any', 'output'],
             "description": "No description given",
             "output_key": "swarm_master"},
            {"output_value": ['any', 'output'],
             "description": "No description given",
             "output_key": "swarm_nodes_private"},
            {"output_value": expected_node_addresses,
             "description": "No description given",
             "output_key": "swarm_nodes"},
        ]
        mock_stack = mock.MagicMock()
        mock_stack.to_dict.return_value = {'outputs': outputs}
        mock_cluster_template = mock.MagicMock()
        swarm_def.update_outputs(mock_stack, mock_cluster_template,
                                 self.mock_cluster)
        # Swarm-mode API is published on port 2375 (v1 uses 2376).
        expected_api_address = "tcp://%s:2375" % expected_api_address
        self.assertEqual(expected_api_address, self.mock_cluster.api_address)
        self.assertEqual(expected_node_addresses,
                         self.mock_cluster.default_ng_worker.node_addresses)

    def test_update_outputs_master_address(self):
        """Master addresses come from the swarm_primary_master outputs."""
        self._test_update_outputs_server_address(
            public_ip_output_key='swarm_primary_master',
            private_ip_output_key='swarm_primary_master_private',
            nodegroup_attr='node_addresses',
            is_master=True
        )

    def test_update_outputs_node_address(self):
        """Worker addresses come from the swarm_nodes outputs."""
        self._test_update_outputs_server_address(
            public_ip_output_key='swarm_nodes',
            private_ip_output_key='swarm_nodes_private',
            nodegroup_attr='node_addresses',
            is_master=False
        )

    def test_update_outputs_master_address_fip_disabled(self):
        """Without floating IPs, masters report their private addresses."""
        self._test_update_outputs_server_address(
            floating_ip_enabled=False,
            public_ip_output_key='swarm_primary_master',
            private_ip_output_key='swarm_primary_master_private',
            nodegroup_attr='node_addresses',
            is_master=True
        )

    def test_update_outputs_node_address_fip_disabled(self):
        """Without floating IPs, workers report their private addresses."""
        self._test_update_outputs_server_address(
            floating_ip_enabled=False,
            public_ip_output_key='swarm_nodes',
            private_ip_output_key='swarm_nodes_private',
            nodegroup_attr='node_addresses',
            is_master=False
        )
class AtomicSwarmTemplateDefinitionTestCase(base.TestCase):
    """Tests for the fedora-atomic swarm (v1) Heat template definition."""

    def setUp(self):
        super(AtomicSwarmTemplateDefinitionTestCase, self).setUp()
        # One default master and one default worker nodegroup.
        self.master_ng = mock.MagicMock(uuid='master_ng', role='master')
        self.worker_ng = mock.MagicMock(uuid='worker_ng', role='worker')
        self.nodegroups = [self.master_ng, self.worker_ng]
        self.mock_cluster = mock.MagicMock(nodegroups=self.nodegroups,
                                           default_ng_worker=self.worker_ng,
                                           default_ng_master=self.master_ng)

    @mock.patch('magnum.common.clients.OpenStackClients')
    @mock.patch('magnum.drivers.swarm_fedora_atomic_v1.template_def'
                '.AtomicSwarmTemplateDefinition.get_discovery_url')
    @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition'
                '.get_params')
    @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition'
                '.get_output')
    def test_swarm_get_params(self, mock_get_output, mock_get_params,
                              mock_get_discovery_url, mock_osc_class):
        """get_params forwards the expected extra_params to the base class."""
        mock_context = mock.MagicMock()
        mock_context.auth_token = 'AUTH_TOKEN'
        mock_cluster_template = mock.MagicMock()
        mock_cluster_template.tls_disabled = False
        mock_cluster_template.registry_enabled = False
        mock_cluster = mock.MagicMock()
        mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
        # Deleting stack_id makes hasattr() on the MagicMock return False.
        del mock_cluster.stack_id
        mock_osc = mock.MagicMock()
        mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1'
        mock_osc_class.return_value = mock_osc
        mock_get_discovery_url.return_value = 'fake_discovery_url'
        mock_context.auth_url = 'http://192.168.10.10:5000/v3'
        mock_context.user_name = 'fake_user'
        mock_context.tenant = 'fake_tenant'
        docker_volume_type = mock_cluster.labels.get(
            'docker_volume_type')
        flannel_cidr = mock_cluster.labels.get('flannel_network_cidr')
        flannel_subnet = mock_cluster.labels.get(
            'flannel_network_subnetlen')
        flannel_backend = mock_cluster.labels.get('flannel_backend')
        rexray_preempt = mock_cluster.labels.get('rexray_preempt')
        swarm_strategy = mock_cluster.labels.get('swarm_strategy')
        swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition()
        swarm_def.get_params(mock_context, mock_cluster_template, mock_cluster)
        expected_kwargs = {'extra_params': {
            'discovery_url': 'fake_discovery_url',
            'magnum_url': mock_osc.magnum_url.return_value,
            'flannel_network_cidr': flannel_cidr,
            'flannel_backend': flannel_backend,
            'flannel_network_subnetlen': flannel_subnet,
            'auth_url': 'http://192.168.10.10:5000/v3',
            'rexray_preempt': rexray_preempt,
            'swarm_strategy': swarm_strategy,
            'docker_volume_type': docker_volume_type,
            'nodes_affinity_policy': 'soft-anti-affinity'}}
        mock_get_params.assert_called_once_with(mock_context,
                                                mock_cluster_template,
                                                mock_cluster,
                                                **expected_kwargs)

    # NOTE(review): the four test_swarm_validate_* tests below exercise the
    # *k8s* template definition (k8sa_tdef), not the swarm one, despite their
    # names -- looks like copy-paste; presumably the shared base class method
    # is what is really under test. Confirm before renaming or changing.
    @mock.patch('requests.get')
    def test_swarm_validate_discovery_url(self, mock_get):
        """A well-formed etcd discovery response of the right size passes."""
        expected_result = str('{"action":"get","node":{"key":"test","value":'
                              '"1","modifiedIndex":10,"createdIndex":10}}')
        mock_resp = mock.MagicMock()
        mock_resp.text = expected_result
        mock_get.return_value = mock_resp
        k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
        k8s_def.validate_discovery_url('http://etcd/test', 1)

    @mock.patch('requests.get')
    def test_swarm_validate_discovery_url_fail(self, mock_get):
        """A network failure surfaces as GetClusterSizeFailed."""
        mock_get.side_effect = req_exceptions.RequestException()
        k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
        self.assertRaises(exception.GetClusterSizeFailed,
                          k8s_def.validate_discovery_url,
                          'http://etcd/test', 1)

    @mock.patch('requests.get')
    def test_swarm_validate_discovery_url_invalid(self, mock_get):
        """A response without a node payload is rejected."""
        mock_resp = mock.MagicMock()
        mock_resp.text = str('{"action":"get"}')
        mock_get.return_value = mock_resp
        k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
        self.assertRaises(exception.InvalidClusterDiscoveryURL,
                          k8s_def.validate_discovery_url,
                          'http://etcd/test', 1)

    @mock.patch('requests.get')
    def test_swarm_validate_discovery_url_unexpect_size(self, mock_get):
        """A cluster size mismatch raises InvalidClusterSize."""
        expected_result = str('{"action":"get","node":{"key":"test","value":'
                              '"1","modifiedIndex":10,"createdIndex":10}}')
        mock_resp = mock.MagicMock()
        mock_resp.text = expected_result
        mock_get.return_value = mock_resp
        k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
        self.assertRaises(exception.InvalidClusterSize,
                          k8s_def.validate_discovery_url,
                          'http://etcd/test', 5)

    @mock.patch('requests.get')
    def test_swarm_get_discovery_url(self, mock_get):
        """A missing discovery_url is fetched from etcd and cached."""
        CONF.set_override('etcd_discovery_service_endpoint_format',
                          'http://etcd/test?size=%(size)d',
                          group='cluster')
        expected_discovery_url = 'http://etcd/token'
        mock_resp = mock.MagicMock()
        mock_resp.text = expected_discovery_url
        mock_resp.status_code = 200
        mock_get.return_value = mock_resp
        mock_cluster = mock.MagicMock()
        mock_cluster.discovery_url = None
        swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition()
        discovery_url = swarm_def.get_discovery_url(mock_cluster)
        mock_get.assert_called_once_with('http://etcd/test?size=1')
        self.assertEqual(mock_cluster.discovery_url, expected_discovery_url)
        self.assertEqual(discovery_url, expected_discovery_url)

    @mock.patch('requests.get')
    def test_swarm_get_discovery_url_not_found(self, mock_get):
        """An empty discovery response raises InvalidDiscoveryURL."""
        mock_resp = mock.MagicMock()
        mock_resp.text = ''
        mock_resp.status_code = 200
        mock_get.return_value = mock_resp
        fake_cluster = mock.MagicMock()
        fake_cluster.discovery_url = None
        self.assertRaises(
            exception.InvalidDiscoveryURL,
            k8sa_tdef.AtomicK8sTemplateDefinition().get_discovery_url,
            fake_cluster)

    def test_swarm_get_heat_param(self):
        """Worker node_count maps onto the number_of_nodes Heat param."""
        swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition()
        swarm_def.add_nodegroup_params(self.mock_cluster)
        heat_param = swarm_def.get_heat_param(nodegroup_attr='node_count',
                                              nodegroup_uuid='worker_ng')
        self.assertEqual('number_of_nodes', heat_param)

    def test_update_outputs(self):
        """api_address gets the tcp:// scheme and workers get addresses."""
        swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition()
        expected_api_address = 'updated_address'
        expected_node_addresses = ['ex_minion', 'address']
        outputs = [
            {"output_value": expected_api_address,
             "description": "No description given",
             "output_key": "api_address"},
            {"output_value": ['any', 'output'],
             "description": "No description given",
             "output_key": "swarm_master_private"},
            {"output_value": ['any', 'output'],
             "description": "No description given",
             "output_key": "swarm_master"},
            {"output_value": ['any', 'output'],
             "description": "No description given",
             "output_key": "swarm_nodes_private"},
            {"output_value": expected_node_addresses,
             "description": "No description given",
             "output_key": "swarm_nodes"},
        ]
        mock_stack = mock.MagicMock()
        mock_stack.to_dict.return_value = {'outputs': outputs}
        mock_cluster_template = mock.MagicMock()
        swarm_def.update_outputs(mock_stack, mock_cluster_template,
                                 self.mock_cluster)
        # v1 swarm API is published on TLS port 2376 (v2 uses 2375).
        expected_api_address = "tcp://%s:2376" % expected_api_address
        self.assertEqual(expected_api_address, self.mock_cluster.api_address)
        self.assertEqual(expected_node_addresses,
                         self.worker_ng.node_addresses)
class UbuntuMesosTemplateDefinitionTestCase(base.TestCase):
    """Tests for the Ubuntu Mesos Heat template definition."""

    def setUp(self):
        super(UbuntuMesosTemplateDefinitionTestCase, self).setUp()
        # One default master and one default worker nodegroup.
        self.master_ng = mock.MagicMock(uuid='master_ng', role='master')
        self.worker_ng = mock.MagicMock(uuid='worker_ng', role='worker')
        self.nodegroups = [self.master_ng, self.worker_ng]
        self.mock_cluster = mock.MagicMock(nodegroups=self.nodegroups,
                                           default_ng_worker=self.worker_ng,
                                           default_ng_master=self.master_ng)

    @mock.patch('magnum.common.clients.OpenStackClients')
    @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition'
                '.get_params')
    def test_mesos_get_params(self,
                              mock_get_params,
                              mock_osc_class):
        """get_params forwards the mesos-specific extra_params."""
        mock_context = mock.MagicMock()
        mock_context.auth_url = 'http://192.168.10.10:5000/v3'
        mock_context.user_name = 'mesos_user'
        mock_context.project_id = 'admin'
        mock_context.domain_name = 'domainname'
        mock_cluster_template = mock.MagicMock()
        mock_cluster_template.tls_disabled = False
        mock_cluster = mock.MagicMock()
        mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
        # Deleting stack_id makes hasattr() on the MagicMock return False.
        del mock_cluster.stack_id
        rexray_preempt = mock_cluster.labels.get('rexray_preempt')
        mesos_slave_isolation = mock_cluster.labels.get(
            'mesos_slave_isolation')
        mesos_slave_work_dir = mock_cluster.labels.get(
            'mesos_slave_work_dir')
        mesos_slave_image_providers = mock_cluster.labels.get(
            'image_providers')
        mesos_slave_executor_env_variables = mock_cluster.labels.get(
            'mesos_slave_executor_env_variables')
        mock_osc = mock.MagicMock()
        mock_osc.cinder_region_name.return_value = 'RegionOne'
        mock_osc_class.return_value = mock_osc
        mesos_def = mesos_tdef.UbuntuMesosTemplateDefinition()
        CONF.set_override('nodes_affinity_policy',
                          'anti-affinity',
                          group='cluster')
        mesos_def.get_params(mock_context, mock_cluster_template, mock_cluster)
        expected_kwargs = {'extra_params': {
            'region_name': mock_osc.cinder_region_name.return_value,
            'nodes_affinity_policy': 'anti-affinity',
            'auth_url': 'http://192.168.10.10:5000/v3',
            'username': 'mesos_user',
            'tenant_name': 'admin',
            'domain_name': 'domainname',
            'rexray_preempt': rexray_preempt,
            'mesos_slave_isolation': mesos_slave_isolation,
            'mesos_slave_work_dir': mesos_slave_work_dir,
            'mesos_slave_executor_env_variables':
                mesos_slave_executor_env_variables,
            'mesos_slave_image_providers': mesos_slave_image_providers}}
        mock_get_params.assert_called_once_with(mock_context,
                                                mock_cluster_template,
                                                mock_cluster,
                                                **expected_kwargs)

    @mock.patch('magnum.common.clients.OpenStackClients')
    @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition'
                '.get_output')
    def test_mesos_get_scale_params(self, mock_get_output,
                                    mock_osc_class):
        """Scaling reports both slaves_to_remove and the new slave count."""
        mock_context = mock.MagicMock()
        mock_cluster = mock.MagicMock()
        mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
        removal_nodes = ['node1', 'node2']
        node_count = 7
        mock_scale_manager = mock.MagicMock()
        mock_scale_manager.get_removal_nodes.return_value = removal_nodes
        mesos_def = mesos_tdef.UbuntuMesosTemplateDefinition()
        scale_params = mesos_def.get_scale_params(
            mock_context,
            mock_cluster,
            node_count,
            mock_scale_manager)
        expected_scale_params = {'slaves_to_remove': ['node1', 'node2'],
                                 'number_of_slaves': 7}
        self.assertEqual(scale_params, expected_scale_params)

    def test_mesos_get_heat_param(self):
        """node_count maps to number_of_slaves/number_of_masters per role."""
        mesos_def = mesos_tdef.UbuntuMesosTemplateDefinition()
        mesos_def.add_nodegroup_params(self.mock_cluster)
        heat_param = mesos_def.get_heat_param(nodegroup_attr='node_count',
                                              nodegroup_uuid='worker_ng')
        self.assertEqual('number_of_slaves', heat_param)
        heat_param = mesos_def.get_heat_param(nodegroup_attr='node_count',
                                              nodegroup_uuid='master_ng')
        self.assertEqual('number_of_masters', heat_param)

    def test_update_outputs(self):
        """Stack outputs populate api_address and both nodegroups."""
        mesos_def = mesos_tdef.UbuntuMesosTemplateDefinition()
        expected_api_address = 'updated_address'
        expected_node_addresses = ['ex_slave', 'address']
        expected_master_addresses = ['ex_master', 'address']
        outputs = [
            {"output_value": expected_api_address,
             "description": "No description given",
             "output_key": "api_address"},
            {"output_value": ['any', 'output'],
             "description": "No description given",
             "output_key": "mesos_master_private"},
            {"output_value": expected_master_addresses,
             "description": "No description given",
             "output_key": "mesos_master"},
            {"output_value": ['any', 'output'],
             "description": "No description given",
             "output_key": "mesos_slaves_private"},
            {"output_value": expected_node_addresses,
             "description": "No description given",
             "output_key": "mesos_slaves"},
        ]
        mock_stack = mock.MagicMock()
        mock_stack.to_dict.return_value = {'outputs': outputs}
        mock_cluster_template = mock.MagicMock()
        mesos_def.update_outputs(mock_stack, mock_cluster_template,
                                 self.mock_cluster)
        # Unlike swarm, mesos publishes api_address without a tcp:// scheme.
        self.assertEqual(expected_api_address, self.mock_cluster.api_address)
        self.assertEqual(expected_node_addresses,
                         self.mock_cluster.default_ng_worker.node_addresses)
        self.assertEqual(expected_master_addresses,
                         self.mock_cluster.default_ng_master.node_addresses)
| 45.310865
| 82
| 0.644594
|
acfc0e7bb3e604a983a738acfcd84ab832253eb9
| 201
|
py
|
Python
|
fabrics/__init__.py
|
focusonecc/common
|
d61631d5b1c068422dcf40be199972ed36fa26be
|
[
"MIT"
] | null | null | null |
fabrics/__init__.py
|
focusonecc/common
|
d61631d5b1c068422dcf40be199972ed36fa26be
|
[
"MIT"
] | 4
|
2017-12-25T12:32:42.000Z
|
2018-01-02T13:17:40.000Z
|
fabrics/__init__.py
|
focusonecc/common
|
d61631d5b1c068422dcf40be199972ed36fa26be
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author: theo-l
# @Date: 2017-06-28 20:38:55
# @Last Modified by: theo-l
# @Last Modified time: 2017-07-05 11:46:11
from .hosts_config import *
from .requirements import *
| 28.714286
| 42
| 0.656716
|
acfc0e9ae799ca753f65cdf6792b70013ad28052
| 741
|
py
|
Python
|
20200513_一个头两个大_hash头像/decode.py
|
ctfwiki/subject_misc_ctfshow
|
2a51f5bc12e9c136841f9cc3ef88ed53df952054
|
[
"MIT"
] | 16
|
2020-09-26T06:17:57.000Z
|
2022-03-03T15:41:07.000Z
|
20200513_一个头两个大_hash头像/decode.py
|
ctfwiki/subject_misc_ctfshow
|
2a51f5bc12e9c136841f9cc3ef88ed53df952054
|
[
"MIT"
] | null | null | null |
20200513_一个头两个大_hash头像/decode.py
|
ctfwiki/subject_misc_ctfshow
|
2a51f5bc12e9c136841f9cc3ef88ed53df952054
|
[
"MIT"
] | 6
|
2020-08-30T09:09:08.000Z
|
2021-11-28T02:09:39.000Z
|
import hashlib
from PIL import Image
charset = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ{}"
hash = []
for i in range(10):
bin = ""
img = Image.open("data/%s.png"%i)
for x in range(2,-1,-1):
for y in range(5):
r,g,b,a = img.getpixel(((x*2+2)*35,(y*2+2)*35))
bin += "1" if r == 240 else "0"
print(bin)
hash.append(bin)
dic = {}
for c1 in charset:
for c2 in charset:
s = c1 + c2
h = hashlib.md5(s.encode()).hexdigest()
bin = ""
for c in h[:15]:
bin += "1" if int(c,16) % 2 == 1 else "0"
if bin in hash:
dic[hash.index(bin)] = s
# print(s)
for i in range(10):
print(dic[i],end="")
| 22.454545
| 76
| 0.512821
|
acfc0f079c86181c21ba7b2fe05cfd0a2c50000c
| 2,646
|
py
|
Python
|
supervisor/resolution/checks/base.py
|
pnjongang/supervisor
|
2a006ae76de4b06e3e291b37aa2a4e14dc272445
|
[
"Apache-2.0"
] | 584
|
2020-01-31T18:53:10.000Z
|
2022-03-29T21:12:15.000Z
|
supervisor/resolution/checks/base.py
|
pnjongang/supervisor
|
2a006ae76de4b06e3e291b37aa2a4e14dc272445
|
[
"Apache-2.0"
] | 1,056
|
2020-01-30T09:59:44.000Z
|
2022-03-31T10:15:32.000Z
|
supervisor/resolution/checks/base.py
|
pnjongang/supervisor
|
2a006ae76de4b06e3e291b37aa2a4e14dc272445
|
[
"Apache-2.0"
] | 295
|
2020-02-03T11:30:42.000Z
|
2022-03-31T18:53:14.000Z
|
"""Baseclass for system checks."""
from abc import ABC, abstractmethod
import logging
from typing import Optional
from ...const import ATTR_ENABLED, CoreState
from ...coresys import CoreSys, CoreSysAttributes
from ..const import ContextType, IssueType
_LOGGER: logging.Logger = logging.getLogger(__name__)
class CheckBase(ABC, CoreSysAttributes):
    """Baseclass for check."""

    def __init__(self, coresys: CoreSys) -> None:
        """Initialize the checks class."""
        self.coresys = coresys

    async def __call__(self) -> None:
        """Execute the evaluation."""
        if self.sys_core.state not in self.states:
            # Check declares the core states it may run in; skip otherwise.
            return

        # Check if system is affected by the issue
        affected: bool = False
        for issue in self.sys_resolution.issues:
            if issue.type != self.issue or issue.context != self.context:
                continue
            affected = True

            # Check if issue still exists
            _LOGGER.debug(
                "Run approve check for %s/%s - %s",
                self.issue,
                self.context,
                issue.reference,
            )
            if await self.approve_check(reference=issue.reference):
                continue
            # Issue no longer reproduces; clear it.
            self.sys_resolution.dismiss_issue(issue)

        # If the issue is already tracked, don't re-run the check -- except
        # for ADDON/PLUGIN contexts, which presumably can produce multiple
        # issues with distinct references (TODO confirm).
        if affected and self.context not in (ContextType.ADDON, ContextType.PLUGIN):
            return

        _LOGGER.info("Run check for %s/%s", self.issue, self.context)
        await self.run_check()

    @property
    def slug(self) -> str:
        """Return the check slug."""
        # Derived from the defining module's file name, e.g. "free_space".
        return self.__class__.__module__.rsplit(".", maxsplit=1)[-1]

    @abstractmethod
    async def run_check(self) -> None:
        """Run check if not affected by issue."""

    @abstractmethod
    async def approve_check(self, reference: Optional[str] = None) -> bool:
        """Approve check if it is affected by issue."""

    @property
    @abstractmethod
    def issue(self) -> IssueType:
        """Return a IssueType enum."""

    @property
    @abstractmethod
    def context(self) -> ContextType:
        """Return a ContextType enum."""

    @property
    def states(self) -> list[CoreState]:
        """Return a list of valid states when this check can run."""
        # Empty default means the check never runs unless overridden.
        return []

    @property
    def enabled(self) -> bool:
        """Return True if the check is enabled."""
        return self.sys_resolution.check.data[self.slug][ATTR_ENABLED]

    @enabled.setter
    def enabled(self, value: bool) -> None:
        """Enable or disable check."""
        self.sys_resolution.check.data[self.slug][ATTR_ENABLED] = value
| 30.767442
| 84
| 0.613757
|
acfc0f1e6974b92da1506403bed9a6f6022c1cb7
| 3,052
|
py
|
Python
|
scripts/python-bindings/test_python_bindings.py
|
SKA-ScienceDataProcessor/FastImaging
|
8103fb4d20434ffdc45dee7471dafc6be66354bb
|
[
"Apache-2.0"
] | 7
|
2017-02-13T11:21:21.000Z
|
2020-07-20T16:07:39.000Z
|
scripts/python-bindings/test_python_bindings.py
|
SKA-ScienceDataProcessor/FastImaging
|
8103fb4d20434ffdc45dee7471dafc6be66354bb
|
[
"Apache-2.0"
] | 15
|
2016-09-11T11:14:35.000Z
|
2017-08-29T14:21:46.000Z
|
scripts/python-bindings/test_python_bindings.py
|
SKA-ScienceDataProcessor/FastImaging
|
8103fb4d20434ffdc45dee7471dafc6be66354bb
|
[
"Apache-2.0"
] | 4
|
2016-10-28T16:17:08.000Z
|
2021-12-22T12:11:12.000Z
|
import stp_python
import numpy as np

# End-to-end example for the STP python bindings: image the visibilities of a
# sample dataset, then run source finding on the resulting image.

# Input simdata file must be located in the current directory
vis_filepath = 'fivesrcdata_awproj.npz'

# This example is not computing residual visibilities. 'vis' component is directly used as input to the pipeline
with open(vis_filepath, 'rb') as f:
    npz_data_dict = np.load(f)
    uvw_lambda = npz_data_dict['uvw_lambda']
    vis = npz_data_dict['vis']
    snr_weights = npz_data_dict['snr_weights']
    lha = npz_data_dict['lha']

# Parameters of image_visibilities function
image_size = 2048
cell_size = 60
padding_factor = 1.0
kernel_func_name = stp_python.KernelFunction.Gaussian
kernel_support = 3
kernel_exact = False
kernel_oversampling = 8
generate_beam = False
grid_image_correction = True
analytic_gcf = False
fft_routine = stp_python.FFTRoutine.FFTW_ESTIMATE_FFT
fft_wisdom_filename = ""
num_wplanes = 128
wplanes_median = False
max_wpconv_support = 127
hankel_opt = True
hankel_proj_slice = True
undersampling_opt = 1
kernel_trunc_perc = 1.0
interp_type = stp_python.InterpType.CUBIC
aproj_numtimesteps = 0
obs_dec = 47.339
obs_ra = 194.24
aproj_opt=False
aproj_mask_perc=0.0
pbeam_coefs = np.array([0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0])

# Call image_visibilities
# NOTE(review): arguments are positional -- their order must match the
# pybind11 wrapper signature exactly; confirm against stp_python docs.
cpp_img, cpp_beam = stp_python.image_visibilities_wrapper(
    vis,
    snr_weights,
    uvw_lambda,
    image_size,
    cell_size,
    padding_factor,
    kernel_func_name,
    kernel_support,
    kernel_exact,
    kernel_oversampling,
    generate_beam,
    grid_image_correction,
    analytic_gcf,
    fft_routine,
    fft_wisdom_filename,
    num_wplanes,
    wplanes_median,
    max_wpconv_support,
    hankel_opt,
    hankel_proj_slice,
    undersampling_opt,
    kernel_trunc_perc,
    interp_type,
    aproj_numtimesteps,
    obs_dec,
    obs_ra,
    aproj_opt,
    aproj_mask_perc,
    lha,
    pbeam_coefs
)

# Parameters of source_find function
detection_n_sigma = 20.0
analysis_n_sigma = 20.0
rms_est = 0.0
find_negative = True
sigma_clip_iters = 5
median_method = stp_python.MedianMethod.BINAPPROX  # Other options: stp_python.MedianMethod.ZEROMEDIAN, stp_python.MedianMethod.BINMEDIAN, stp_python.MedianMethod.NTHELEMENT
gaussian_fitting = True
ccl_4connectivity = False
generate_labelmap = False
source_min_area = 5
ceres_diffmethod = stp_python.CeresDiffMethod.AnalyticDiff_SingleResBlk  # Other options: stp_python.CeresDiffMethod.AnalyticDiff, stp_python.CeresDiffMethod.AutoDiff_SingleResBlk, stp_python.CeresDiffMethod.AutoDiff
ceres_solvertype = stp_python.CeresSolverType.LinearSearch_LBFGS  # Other options: stp_python.CeresSolverType.LinearSearch_BFGS, stp_python.CeresSolverType.TrustRegion_DenseQR

# Call source_find
islands = stp_python.source_find_wrapper(
    cpp_img,
    detection_n_sigma,
    analysis_n_sigma,
    rms_est,
    find_negative,
    sigma_clip_iters,
    median_method,
    gaussian_fitting,
    ccl_4connectivity,
    generate_labelmap,
    source_min_area,
    ceres_diffmethod,
    ceres_solvertype
)

# Print result
for i in islands:
    print(i)
    print()
| 27.00885
| 215
| 0.770315
|
acfc0f6d2a154b5c3f231db53702f57a62458eb8
| 3,873
|
py
|
Python
|
tests/system/action/chat_group/test_update.py
|
MJJojo97/openslides-backend
|
af0d1edb0070e352d46f285a1ba0bbe3702d49ae
|
[
"MIT"
] | null | null | null |
tests/system/action/chat_group/test_update.py
|
MJJojo97/openslides-backend
|
af0d1edb0070e352d46f285a1ba0bbe3702d49ae
|
[
"MIT"
] | null | null | null |
tests/system/action/chat_group/test_update.py
|
MJJojo97/openslides-backend
|
af0d1edb0070e352d46f285a1ba0bbe3702d49ae
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict
from openslides_backend.permissions.permissions import Permissions
from tests.system.action.base import BaseActionTestCase
class ChatGroupUpdate(BaseActionTestCase):
    """Backend action tests for chat_group.update."""

    def setUp(self) -> None:
        super().setUp()
        # Base fixture: chat enabled, one chat group whose read group is
        # group/1 and write group is group/2; group/3 is initially unused.
        self.test_models: Dict[str, Dict[str, Any]] = {
            "organization/1": {"enable_chat": True},
            "meeting/1": {"is_active_in_organization_id": 1},
            "chat_group/1": {
                "meeting_id": 1,
                "name": "redekreis1",
                "read_group_ids": [1],
                "write_group_ids": [2],
            },
            "group/1": {"meeting_id": 1, "read_chat_group_ids": [1]},
            "group/2": {"meeting_id": 1, "write_chat_group_ids": [1]},
            "group/3": {"meeting_id": 1},
        }

    def test_update(self) -> None:
        """Updating name and groups also updates the reverse relations."""
        self.set_models(self.test_models)
        response = self.request(
            "chat_group.update",
            {"id": 1, "name": "test", "read_group_ids": [2], "write_group_ids": [2, 3]},
        )
        self.assert_status_code(response, 200)
        self.assert_model_exists(
            "chat_group/1",
            {"name": "test", "read_group_ids": [2], "write_group_ids": [2, 3]},
        )
        # group/1 lost its read relation; group/2 gained read, kept write;
        # group/3 gained write.
        self.assert_model_exists("group/1", {"read_chat_group_ids": []})
        self.assert_model_exists(
            "group/2", {"read_chat_group_ids": [1], "write_chat_group_ids": [1]}
        )
        self.assert_model_exists("group/3", {"write_chat_group_ids": [1]})

    def test_update_not_enabled(self) -> None:
        """Update is rejected while chat is disabled organization-wide."""
        self.test_models["organization/1"]["enable_chat"] = False
        self.set_models(self.test_models)
        response = self.request("chat_group.update", {"id": 1, "name": "test"})
        self.assert_status_code(response, 400)
        assert "Chat is not enabled." in response.json["message"]

    def test_update_group_from_different_meeting(self) -> None:
        """Groups from another meeting cannot be assigned."""
        self.set_models(
            {
                "organization/1": {"enable_chat": True},
                "meeting/1": {"is_active_in_organization_id": 1},
                "meeting/2": {"is_active_in_organization_id": 1},
                "chat_group/1": {
                    "meeting_id": 1,
                    "name": "redekreis1",
                },
                "group/1": {"meeting_id": 2},
            }
        )
        response = self.request(
            "chat_group.update",
            {"id": 1, "name": "test", "read_group_ids": [1], "write_group_ids": [1]},
        )
        self.assert_status_code(response, 400)
        assert (
            "The following models do not belong to meeting 1: ['group/1']"
            in response.json["message"]
        )

    def test_update_no_permissions(self) -> None:
        """Without the chat-manage permission the request is denied."""
        self.base_permission_test(
            self.test_models, "chat_group.update", {"id": 1, "name": "test"}
        )

    def test_update_permissions(self) -> None:
        """With Chat.CAN_MANAGE the request succeeds."""
        self.base_permission_test(
            self.test_models,
            "chat_group.update",
            {"id": 1, "name": "test"},
            Permissions.Chat.CAN_MANAGE,
        )

    def test_update_not_unique_name(self) -> None:
        """Renaming to an existing chat group name in the meeting fails."""
        self.set_models(
            {
                "organization/1": {"enable_chat": True},
                "meeting/1": {"is_active_in_organization_id": 1},
                "chat_group/1": {
                    "meeting_id": 1,
                    "name": "redekreis1",
                },
                "chat_group/2": {
                    "meeting_id": 1,
                    "name": "test",
                },
            }
        )
        response = self.request(
            "chat_group.update",
            {"id": 1, "name": "test"},
        )
        self.assert_status_code(response, 400)
        assert "The name of a chat group must be unique." == response.json["message"]
| 36.885714
| 88
| 0.525174
|
acfc0f931a981c1fa44f448223546383bc058c22
| 1,520
|
py
|
Python
|
python_grpc_mutual_tls_auth/server.py
|
ychen47/python-grpc-mutual-tls-auth
|
a347085162bbd23034d9cc8fb5b71cfb931e7d99
|
[
"MIT"
] | 9
|
2019-04-04T03:04:55.000Z
|
2021-10-19T18:37:09.000Z
|
python_grpc_mutual_tls_auth/server.py
|
ychen47/python-grpc-mutual-tls-auth
|
a347085162bbd23034d9cc8fb5b71cfb931e7d99
|
[
"MIT"
] | 1
|
2019-04-04T03:04:32.000Z
|
2020-04-08T06:22:07.000Z
|
python_grpc_mutual_tls_auth/server.py
|
ychen47/python-grpc-mutual-tls-auth
|
a347085162bbd23034d9cc8fb5b71cfb931e7d99
|
[
"MIT"
] | 3
|
2021-01-17T15:31:09.000Z
|
2021-05-19T00:21:10.000Z
|
import time
import grpc
from concurrent import futures
import mutual_tls_auth_pb2 as serializer
import mutual_tls_auth_pb2_grpc as grpc_lib
from utils import secrets, config
class GatewayServicer(grpc_lib.GatewayServicer):
    """Gateway service implementation returning canned loan-order data."""

    def loan_orders(self, request, context):
        """Return a fixed Orders message: three identical offers, one demand."""
        sample = serializer.LoanPoint(rate='0.010000',
                                      amount='5.250',
                                      range_min=2,
                                      range_max=2)
        return serializer.Orders(offers=[sample] * 3, demands=[sample])
def serve(cfg):
    """Start the mutual-TLS gRPC gateway server and block until Ctrl-C.

    ``cfg`` is a config mapping with key/cert paths under
    cfg['credentials']['server'] and cfg['credentials']['client'].
    """
    # Client will verify the server using server cert and the server
    # will verify the client using client cert.
    server_credentials = grpc.ssl_server_credentials(
        [(secrets.load(cfg['credentials']['server']['key']),
          secrets.load(cfg['credentials']['server']['cert']))],
        root_certificates=secrets.load(cfg['credentials']['client']['cert']),
        require_client_auth=True
    )
    # this statement needs to be tested:
    # it's okay to have multiple threads because poloniex api call uses thread lock
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    grpc_lib.add_GatewayServicer_to_server(GatewayServicer(), server)
    server.add_secure_port('[::]:50051', server_credentials)
    server.start()
    try:
        # server.start() does not block; keep the main thread alive.
        while True:
            time.sleep(60 * 60 * 24)
    except KeyboardInterrupt:
        server.stop(0)
if __name__ == '__main__':
    # Script entry point: load configuration and run the server.
    serve(config.invoke_config())
| 31.020408
| 83
| 0.661842
|
acfc0fba2694782affe1dd61d3dc41e1f762271f
| 5,568
|
py
|
Python
|
YouCompleteMe/cxx.ycm_extra_conf-Selene-Wind.py
|
pajamapants3000/vimfiles
|
912d30626db2078e155efc2e42348ddf371fd1ef
|
[
"MIT"
] | 1
|
2017-04-18T09:02:48.000Z
|
2017-04-18T09:02:48.000Z
|
YouCompleteMe/cxx.ycm_extra_conf-Selene-Wind.py
|
pajamapants3000/vimfiles
|
912d30626db2078e155efc2e42348ddf371fd1ef
|
[
"MIT"
] | null | null | null |
YouCompleteMe/cxx.ycm_extra_conf-Selene-Wind.py
|
pajamapants3000/vimfiles
|
912d30626db2078e155efc2e42348ddf371fd1ef
|
[
"MIT"
] | null | null | null |
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
'-std=gnu++11',
'-x',
'c++',
'-isystem',
'C:/mingw-w64/x86_64-5.2.0-posix-seh-rt_v4-rev0/mingw64/x86_64-w64-mingw32/include',
'-isystem',
'C:/mingw-w64/x86_64-5.2.0-posix-seh-rt_v4-rev0/mingw64/x86_64-w64-mingw32/include/c++',
'-isystem',
'C:/mingw-w64/x86_64-5.2.0-posix-seh-rt_v4-rev0/mingw64/x86_64-w64-mingw32/include/c++/x86_64-w64-mingw32',
'-isystem',
'C:/Users/otrip/_local/include',
'-LC:/mingw-w64/x86_64-5.2.0-posix-seh-rt_v4-rev0/mingw64/x86_64-w64-mingw32/lib',
'-LC:/Users/otrip/_local/lib'
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh', '.tcc' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| 35.692308
| 115
| 0.702586
|
acfc11b4150099509907da3680a929421e9093ea
| 1,524
|
py
|
Python
|
azure-mgmt-authorization/azure/mgmt/authorization/models/provider_operation.py
|
azuresdkci1x/azure-sdk-for-python-1722
|
e08fa6606543ce0f35b93133dbb78490f8e6bcc9
|
[
"MIT"
] | 1
|
2018-11-09T06:16:34.000Z
|
2018-11-09T06:16:34.000Z
|
azure-mgmt-authorization/azure/mgmt/authorization/models/provider_operation.py
|
azuresdkci1x/azure-sdk-for-python-1722
|
e08fa6606543ce0f35b93133dbb78490f8e6bcc9
|
[
"MIT"
] | null | null | null |
azure-mgmt-authorization/azure/mgmt/authorization/models/provider_operation.py
|
azuresdkci1x/azure-sdk-for-python-1722
|
e08fa6606543ce0f35b93133dbb78490f8e6bcc9
|
[
"MIT"
] | 1
|
2018-11-09T06:17:41.000Z
|
2018-11-09T06:17:41.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ProviderOperation(Model):
"""Operation.
:param name: The operation name.
:type name: str
:param display_name: The operation display name.
:type display_name: str
:param description: The operation description.
:type description: str
:param origin: The operation origin.
:type origin: str
:param properties: The operation properties.
:type properties: object
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'origin': {'key': 'origin', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'object'},
}
def __init__(self, name=None, display_name=None, description=None, origin=None, properties=None):
self.name = name
self.display_name = display_name
self.description = description
self.origin = origin
self.properties = properties
| 34.636364
| 101
| 0.59252
|
acfc123cf61bab6849320192fe42913aece4a4e9
| 107
|
py
|
Python
|
Codewars/7kyu/case-sensitive-1/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | 7
|
2017-09-20T16:40:39.000Z
|
2021-08-31T18:15:08.000Z
|
Codewars/7kyu/case-sensitive-1/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
Codewars/7kyu/case-sensitive-1/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
# Python - 3.6.0
case_sensitive = lambda s: (lambda uc = [*filter(str.isupper, s)]: [len(uc) == 0, uc])()
| 26.75
| 88
| 0.588785
|
acfc12533869440ef3cfc405fab75c04ba14587a
| 5,911
|
py
|
Python
|
kubernetes/client/models/flowcontrol_v1alpha1_subject.py
|
carloscastrojumo/python
|
f461dd42d48650a4ae1b41d630875cad9fcb68ad
|
[
"Apache-2.0"
] | 2
|
2021-03-09T12:42:05.000Z
|
2021-03-09T13:27:50.000Z
|
kubernetes/client/models/flowcontrol_v1alpha1_subject.py
|
carloscastrojumo/python
|
f461dd42d48650a4ae1b41d630875cad9fcb68ad
|
[
"Apache-2.0"
] | 7
|
2021-04-13T03:04:42.000Z
|
2022-03-02T03:10:18.000Z
|
kubernetes/client/models/flowcontrol_v1alpha1_subject.py
|
carloscastrojumo/python
|
f461dd42d48650a4ae1b41d630875cad9fcb68ad
|
[
"Apache-2.0"
] | 1
|
2021-06-13T09:21:37.000Z
|
2021-06-13T09:21:37.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.17
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class FlowcontrolV1alpha1Subject(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'group': 'V1alpha1GroupSubject',
'kind': 'str',
'service_account': 'V1alpha1ServiceAccountSubject',
'user': 'V1alpha1UserSubject'
}
attribute_map = {
'group': 'group',
'kind': 'kind',
'service_account': 'serviceAccount',
'user': 'user'
}
def __init__(self, group=None, kind=None, service_account=None, user=None, local_vars_configuration=None): # noqa: E501
"""FlowcontrolV1alpha1Subject - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._group = None
self._kind = None
self._service_account = None
self._user = None
self.discriminator = None
if group is not None:
self.group = group
self.kind = kind
if service_account is not None:
self.service_account = service_account
if user is not None:
self.user = user
@property
def group(self):
"""Gets the group of this FlowcontrolV1alpha1Subject. # noqa: E501
:return: The group of this FlowcontrolV1alpha1Subject. # noqa: E501
:rtype: V1alpha1GroupSubject
"""
return self._group
@group.setter
def group(self, group):
"""Sets the group of this FlowcontrolV1alpha1Subject.
:param group: The group of this FlowcontrolV1alpha1Subject. # noqa: E501
:type: V1alpha1GroupSubject
"""
self._group = group
@property
def kind(self):
"""Gets the kind of this FlowcontrolV1alpha1Subject. # noqa: E501
Required # noqa: E501
:return: The kind of this FlowcontrolV1alpha1Subject. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this FlowcontrolV1alpha1Subject.
Required # noqa: E501
:param kind: The kind of this FlowcontrolV1alpha1Subject. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
self._kind = kind
@property
def service_account(self):
"""Gets the service_account of this FlowcontrolV1alpha1Subject. # noqa: E501
:return: The service_account of this FlowcontrolV1alpha1Subject. # noqa: E501
:rtype: V1alpha1ServiceAccountSubject
"""
return self._service_account
@service_account.setter
def service_account(self, service_account):
"""Sets the service_account of this FlowcontrolV1alpha1Subject.
:param service_account: The service_account of this FlowcontrolV1alpha1Subject. # noqa: E501
:type: V1alpha1ServiceAccountSubject
"""
self._service_account = service_account
@property
def user(self):
"""Gets the user of this FlowcontrolV1alpha1Subject. # noqa: E501
:return: The user of this FlowcontrolV1alpha1Subject. # noqa: E501
:rtype: V1alpha1UserSubject
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this FlowcontrolV1alpha1Subject.
:param user: The user of this FlowcontrolV1alpha1Subject. # noqa: E501
:type: V1alpha1UserSubject
"""
self._user = user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FlowcontrolV1alpha1Subject):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, FlowcontrolV1alpha1Subject):
return True
return self.to_dict() != other.to_dict()
| 29.262376
| 124
| 0.607173
|
acfc12b3f721378f1da2de90a534667d1163e22a
| 5,580
|
py
|
Python
|
data/p3BR/R2/benchmark/startQiskit_QC74.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startQiskit_QC74.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startQiskit_QC74.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=13
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.x(input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=10
prog.cz(input_qubit[2],input_qubit[1]) # number=11
prog.h(input_qubit[1]) # number=12
prog.z(input_qubit[2]) # number=3
prog.y(input_qubit[2]) # number=5
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_QC74.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 29.21466
| 140
| 0.629211
|
acfc142c945990e9af6a67a9431c43ed08527639
| 2,658
|
py
|
Python
|
patronclient/v2/flavor_access.py
|
casbin/openstack-patron
|
b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25
|
[
"Apache-2.0"
] | null | null | null |
patronclient/v2/flavor_access.py
|
casbin/openstack-patron
|
b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25
|
[
"Apache-2.0"
] | null | null | null |
patronclient/v2/flavor_access.py
|
casbin/openstack-patron
|
b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Flavor access interface."""
from patronclient import base
from patronclient.i18n import _
class FlavorAccess(base.Resource):
def __repr__(self):
return ("<FlavorAccess flavor id: %s, tenant id: %s>" %
(self.flavor_id, self.tenant_id))
class FlavorAccessManager(base.ManagerWithFind):
"""
Manage :class:`FlavorAccess` resources.
"""
resource_class = FlavorAccess
def list(self, **kwargs):
if kwargs.get('flavor'):
return self._list_by_flavor(kwargs['flavor'])
elif kwargs.get('tenant'):
return self._list_by_tenant(kwargs['tenant'])
else:
raise NotImplementedError(_('Unknown list options.'))
def _list_by_flavor(self, flavor):
return self._list('/flavors/%s/os-flavor-access' % base.getid(flavor),
'flavor_access')
def _list_by_tenant(self, tenant):
"""Print flavor list shared with the given tenant."""
# TODO(uni): need to figure out a proper URI for list_by_tenant
# since current API already provided current tenant_id information
raise NotImplementedError(_('Sorry, query by tenant not supported.'))
def add_tenant_access(self, flavor, tenant):
"""Add a tenant to the given flavor access list."""
info = {'tenant': tenant}
return self._action('addTenantAccess', flavor, info)
def remove_tenant_access(self, flavor, tenant):
"""Remove a tenant from the given flavor access list."""
info = {'tenant': tenant}
return self._action('removeTenantAccess', flavor, info)
def _action(self, action, flavor, info, **kwargs):
"""Perform a flavor action."""
body = {action: info}
self.run_hooks('modify_body_for_action', body, **kwargs)
url = '/flavors/%s/action' % base.getid(flavor)
_resp, body = self.api.client.post(url, body=body)
return [self.resource_class(self, res)
for res in body['flavor_access']]
| 37.43662
| 78
| 0.659142
|
acfc1440e389e94c09a5040127a2080296b13e6b
| 9,074
|
py
|
Python
|
doc/rst_tools/docmodel.py
|
tizenorg/platform.upstream.krb5
|
a98efd0c8f97aba9d71c2130c048f1adc242772e
|
[
"MIT"
] | 1
|
2021-06-16T19:10:54.000Z
|
2021-06-16T19:10:54.000Z
|
doc/rst_tools/docmodel.py
|
tizenorg/platform.upstream.krb5
|
a98efd0c8f97aba9d71c2130c048f1adc242772e
|
[
"MIT"
] | null | null | null |
doc/rst_tools/docmodel.py
|
tizenorg/platform.upstream.krb5
|
a98efd0c8f97aba9d71c2130c048f1adc242772e
|
[
"MIT"
] | null | null | null |
'''
Copyright 2011 by the Massachusetts
Institute of Technology. All Rights Reserved.
Export of this software from the United States of America may
require a specific license from the United States Government.
It is the responsibility of any person or organization contemplating
export to obtain such a license before exporting.
WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
distribute this software and its documentation for any purpose and
without fee is hereby granted, provided that the above copyright
notice appear in all copies and that both that copyright notice and
this permission notice appear in supporting documentation, and that
the name of M.I.T. not be used in advertising or publicity pertaining
to distribution of the software without specific, written prior
permission. Furthermore if you modify this software you must label
your software as modified software and not distribute it in such a
fashion that it might be confused with the original M.I.T. software.
M.I.T. makes no representations about the suitability of
this software for any purpose. It is provided "as is" without express
or implied warranty.
'''
import re
from Cheetah.Template import Template
class Attribute(object):
def __init__(self, **argkw):
self.definition = argkw.get('definition')
self.name = argkw.get('name')
self.type = argkw.get('type')
self.typeId = argkw.get('typeId')
self.short_description = argkw.get('short_description')
self.long_description = argkw.get('long_description')
self.version = argkw.get('version')
def __repr__(self):
result = list()
for (attr,value) in self.__dict__.iteritems():
result.append('%s=%s' % (attr,value))
return 'Attribute: %s' % ','.join(result)
class CompositeType():
def __init__(self, **argkw):
self.category = 'composite'
self.definition = argkw.get('definition')
self.name = argkw.get('name')
self.Id = argkw.get('Id')
self.initializer = argkw.get('initializer')
self.active = argkw.get('active', False)
self.version = argkw.get('version')
self.return_type = argkw.get('return_type')
self.short_description = argkw.get('short_description')
self.long_description = argkw.get('long_description')
self.friends = argkw.get('friends')
self.type = argkw.get('type')
self.attributes = self._setAttributes(argkw.get('attributes'))
def __repr__(self):
result = list()
for (attr,value) in self.__dict__.iteritems():
if attr == 'attributes':
if value is not None:
attributes = ['%s' % a for a in value]
value = '\n %s' % '\n '.join(attributes)
result.append('%s: %s' % (attr,value))
result = '\n'.join(result)
return result
def _setAttributes(self, attributes):
result = None
if attributes is not None:
result = list()
for a in attributes:
result.append(Attribute(**a))
return result
def struct_reference(self, name):
result = re.sub(r'_', '-', name)
result = '_%s-struct' % result
return result
def macro_reference(self, name):
result = re.sub(r'_', '-', name)
result = '_%s-data' % result
return result
class Parameter(object):
def __init__(self, **argkw):
self.seqno = argkw.get('seqno')
self.name = argkw.get('name')
self.direction = argkw.get('direction')
self.type = argkw.get('type')
self.typeId = argkw.get('typeId')
self.description = argkw.get('description')
self.version = argkw.get('version')
def __repr__(self):
content = (self.name,self.direction,self.seqno,self.type,self.typeId,self.description)
return 'Parameter: name=%s,direction=%s,seqno=%s,type=%s,typeId=%s,descr=%s' % content
class Function(object):
def __init__(self, **argkw):
self.category = 'function'
self.name = argkw.get('name')
self.Id = argkw.get('Id')
self.active = argkw.get('active', False)
self.version = argkw.get('version')
self.parameters = self._setParameters(argkw.get('parameters'))
self.return_type = argkw.get('return_type')
self.return_description = argkw.get('return_description')
self.retval_description = argkw.get('retval_description')
self.warn_description = argkw.get('warn_description')
self.sa_description = argkw.get('sa_description')
self.notes_description = argkw.get('notes_description')
self.short_description = argkw.get('short_description')
self.long_description = argkw.get('long_description')
self.deprecated_description = argkw.get('deprecated_description')
self.friends = argkw.get('friends')
def _setParameters(self, parameters):
result = None
if parameters is not None:
result = list()
for p in parameters:
result.append(Parameter(**p))
return result
def getObjectRow(self):
result = [str(self.Id),
self.name,
self.category]
return ','.join(result)
def getObjectDescriptionRow(self):
result = [self.Id,
self.active,
self.version,
self.short_description,
self.long_description]
return ','.join(result)
def getParameterRows(self):
result = list()
for p in self.parameters:
p_row = [self.Id,
p.name,
p.seqno,
p.type,
p.typeId,
p.description,
p.version]
result.append(','.join(p_row))
return '\n'.join(result)
def __repr__(self):
lines = list()
lines.append('Category: %s' % self.category)
lines.append('Function name: %s' % self.name)
lines.append('Function Id: %s' % self.Id)
parameters = [' %s' % p for p in self.parameters]
lines.append('Parameters:\n%s' % '\n'.join(parameters))
lines.append('Function return type: %s' % self.return_type)
lines.append('Function return type description:\n%s' % self.return_description)
lines.append('Function retval description:\n%s' % self.retval_description)
lines.append('Function short description:\n%s' % self.short_description)
lines.append('Function long description:\n%s' % self.long_description)
lines.append('Warning description:\n%s' % self.warn_description)
lines.append('See also description:\n%s' % self.sa_description)
lines.append('NOTE description:\n%s' % self.notes_description)
lines.append('Deprecated description:\n%s' % self.deprecated_description)
result = '\n'.join(lines)
return result
class DocModel(object):
def __init__(self, **argkw):
if len(argkw):
self.name = argkw['name']
if argkw['category'] == 'function':
self.category = 'function'
self.function = Function(**argkw)
elif argkw['category'] == 'composite':
self.category = 'composite'
self.composite = CompositeType(**argkw)
def __repr__(self):
obj = getattr(self,self.category)
print type(obj)
return str(obj)
def signature(self):
param_list = list()
for p in self.function.parameters:
if p.type is "... " :
param_list.append('%s %s' % (p.type,' '))
else:
param_list.append('%s %s' % (p.type, p.name))
param_list = ', '.join(param_list)
result = '%s %s(%s)' % (self.function.return_type,
self.function.name, param_list)
return result
def save(self, path, template_path):
f = open(template_path, 'r')
t = Template(f.read(),self)
out = open(path, 'w')
out.write(str(t))
out.close()
f.close()
class DocModelTest(DocModel):
def __init__(self):
doc_path = '../docutil/example.yml'
argkw = yaml.load(open(doc_path,'r'))
super(DocModelTest,self).__init__(**argkw)
def run_tests(self):
self.test_save()
def test_print(self):
print 'testing'
print self
def test_save(self):
template_path = '../docutil/function2edit.html'
path = '/var/tsitkova/Sources/v10/trunk/documentation/test_doc.html'
self.save(path, template_path)
if __name__ == '__main__':
tester = DocModelTest()
tester.run_tests()
| 36.296
| 94
| 0.59114
|
acfc1443084ce5fca72069c56d48999a3274b3ed
| 216
|
py
|
Python
|
Names/45.Names.py
|
Amirkhaksar/QueraQuestion
|
1042aef6a04cc798b0ed2847124ea1a45a007c47
|
[
"BSD-3-Clause"
] | null | null | null |
Names/45.Names.py
|
Amirkhaksar/QueraQuestion
|
1042aef6a04cc798b0ed2847124ea1a45a007c47
|
[
"BSD-3-Clause"
] | null | null | null |
Names/45.Names.py
|
Amirkhaksar/QueraQuestion
|
1042aef6a04cc798b0ed2847124ea1a45a007c47
|
[
"BSD-3-Clause"
] | null | null | null |
#Language = python
#ID = 2529
#QLink = https://quera.org/problemset/2529/
#Author = AmirKhaksar
n = int(input())
lister = []
for i in range(n):
lister.append(input())
print(max([len(set(i)) for i in lister]))
| 16.615385
| 43
| 0.648148
|
acfc152af8d834767e3b9c2ac9fd980dfb34a673
| 5,539
|
py
|
Python
|
stac_fastapi/sqlalchemy/stac_fastapi/sqlalchemy/types/search.py
|
AsgerPetersen/stac-fastapi
|
27e134589107654920c2f1dba54773c8c85d4e1a
|
[
"MIT"
] | 1
|
2021-12-01T08:20:10.000Z
|
2021-12-01T08:20:10.000Z
|
stac_fastapi/sqlalchemy/stac_fastapi/sqlalchemy/types/search.py
|
AsgerPetersen/stac-fastapi
|
27e134589107654920c2f1dba54773c8c85d4e1a
|
[
"MIT"
] | null | null | null |
stac_fastapi/sqlalchemy/stac_fastapi/sqlalchemy/types/search.py
|
AsgerPetersen/stac-fastapi
|
27e134589107654920c2f1dba54773c8c85d4e1a
|
[
"MIT"
] | null | null | null |
"""stac_fastapi.types.search module.
# TODO: replace with stac-pydantic
"""
import logging
import operator
from dataclasses import dataclass
from enum import auto
from types import DynamicClassAttribute
from typing import Any, Callable, Dict, List, Optional, Set, Union
import sqlalchemy as sa
from pydantic import Field, ValidationError, conint, root_validator
from pydantic.error_wrappers import ErrorWrapper
from stac_pydantic.api import Search
from stac_pydantic.api.extensions.fields import FieldsExtension as FieldsBase
from stac_pydantic.utils import AutoValueEnum
from stac_fastapi.types.config import Settings
logger = logging.getLogger("uvicorn")
logger.setLevel(logging.INFO)
# Be careful: https://github.com/samuelcolvin/pydantic/issues/1423#issuecomment-642797287
NumType = Union[float, int]
class Operator(str, AutoValueEnum):
"""Defines the set of operators supported by the API."""
eq = auto()
ne = auto()
lt = auto()
lte = auto()
gt = auto()
gte = auto()
# TODO: These are defined in the spec but aren't currently implemented by the api
# startsWith = auto()
# endsWith = auto()
# contains = auto()
# in = auto()
@DynamicClassAttribute
def operator(self) -> Callable[[Any, Any], bool]:
"""Return python operator."""
return getattr(operator, self._value_)
class Queryables(str, AutoValueEnum):
"""Queryable fields.
Define an enum of queryable fields and their data type. Queryable fields are explicitly defined for two reasons:
1. So the caller knows which fields they can query by
2. Because JSONB queries with sqlalchemy ORM require casting the type of the field at runtime
(see ``QueryableTypes``)
# TODO: Let the user define these in a config file
"""
orientation = auto()
gsd = auto()
epsg = "proj:epsg"
height = auto()
width = auto()
minzoom = "cog:minzoom"
maxzoom = "cog:maxzoom"
dtype = "cog:dtype"
foo = "foo"
@dataclass
class QueryableTypes:
"""Defines a set of queryable fields.
# TODO: Let the user define these in a config file
# TODO: There is a much better way of defining this field <> type mapping than two enums with same keys
"""
orientation = sa.String
gsd = sa.Float
epsg = sa.Integer
height = sa.Integer
width = sa.Integer
minzoom = sa.Integer
maxzoom = sa.Integer
dtype = sa.String
class FieldsExtension(FieldsBase):
    """FieldsExtension.

    Attributes:
        include: set of fields to include.
        exclude: set of fields to exclude.
    """

    include: Optional[Set[str]] = set()
    exclude: Optional[Set[str]] = set()

    @staticmethod
    def _get_field_dict(fields: Optional[Set[str]]) -> Dict:
        """Pydantic include/excludes notation.

        Internal method to create a dictionary for advanced include or exclude of pydantic fields on model export
        Ref: https://pydantic-docs.helpmanual.io/usage/exporting_models/#advanced-include-and-exclude
        """
        field_dict = {}
        for entry in fields or []:
            if "." in entry:
                # "parent.child" selects one sub-field of a nested model.
                parent, child = entry.split(".")
                field_dict.setdefault(parent, set()).add(child)
            else:
                # Ellipsis means "take the whole field".
                field_dict[entry] = ...  # type:ignore
        return field_dict

    @property
    def filter_fields(self) -> Dict:
        """Create pydantic include/exclude expression.

        Create dictionary of fields to include/exclude on model export based on the included and excluded fields passed
        to the API
        Ref: https://pydantic-docs.helpmanual.io/usage/exporting_models/#advanced-include-and-exclude
        """
        # Always include default_includes, even if they
        # exist in the exclude list.
        requested = (self.include or set()) - (self.exclude or set())
        requested |= Settings.get().default_includes or set()
        return {
            "include": self._get_field_dict(requested),
            "exclude": self._get_field_dict(self.exclude),
        }
class SQLAlchemySTACSearch(Search):
    """Search model.

    Extends the base STAC Search with the queryable fields and operators
    supported by the sqlalchemy backend, and validates query field names
    before the request reaches the database layer.
    """

    # Make collections optional, default to searching all collections if none are provided
    collections: Optional[List[str]] = None
    # Override default field extension to include default fields and pydantic includes/excludes factory
    field: FieldsExtension = Field(FieldsExtension(), alias="fields")
    # Override query extension with supported operators
    query: Optional[Dict[Queryables, Dict[Operator, Any]]]
    # Pagination token from a previous page of results.
    token: Optional[str] = None
    # Page size, capped at 10000.
    limit: Optional[conint(ge=0, le=10000)] = 10

    @root_validator(pre=True)
    def validate_query_fields(cls, values: Dict) -> Dict:
        """Validate query fields.

        Raises a pydantic ValidationError when the raw request queries a field
        that is not declared in ``Queryables``; otherwise returns ``values``
        unchanged.
        """
        logger.debug(f"Validating SQLAlchemySTACSearch {cls} {values}")
        if "query" in values and values["query"]:
            # Queryables mixes in str, so member values compare equal to the
            # raw string keys coming from the request body.
            queryable_fields = Queryables.__members__.values()
            for field_name in values["query"]:
                if field_name not in queryable_fields:
                    raise ValidationError(
                        [
                            ErrorWrapper(
                                ValueError(f"Cannot search on field: {field_name}"),
                                "STACSearch",
                            )
                        ],
                        SQLAlchemySTACSearch,
                    )
        return values
| 32.970238
| 119
| 0.638563
|
acfc15fe2036ec9fb049bcdb3493170db52c09d8
| 628
|
py
|
Python
|
test/test_float32.py
|
lukovnikov/teafacto
|
5e863df8d061106ad705c0837f2d2ca4e08db0e4
|
[
"MIT"
] | 2
|
2016-06-28T23:41:42.000Z
|
2017-01-14T12:41:36.000Z
|
test/test_float32.py
|
lukovnikov/teafacto
|
5e863df8d061106ad705c0837f2d2ca4e08db0e4
|
[
"MIT"
] | 1
|
2016-04-20T20:09:20.000Z
|
2016-08-17T19:02:47.000Z
|
test/test_float32.py
|
lukovnikov/teafacto
|
5e863df8d061106ad705c0837f2d2ca4e08db0e4
|
[
"MIT"
] | 5
|
2016-07-18T17:05:18.000Z
|
2018-10-13T05:40:05.000Z
|
from unittest import TestCase
from teafacto.blocks.basic import IdxToOneHot, VectorEmbed
import theano, numpy as np
class TestFloat32(TestCase):
    """Check that teafacto blocks build their weight matrices as float32
    when ``theano.config.floatX`` is set to "float32"."""

    def setUp(self):
        # Force theano's default float dtype for every test in this case.
        # NOTE(review): the previous floatX value is not restored afterwards,
        # so this setting leaks into later test cases -- confirm acceptable.
        theano.config.floatX = "float32"

    def test_idxtoonehot(self):
        # One-hot embedding should allocate its weight matrix in floatX.
        m = IdxToOneHot(100)
        self.assertEqual(m.W.dtype, "float32")

    def test_vectorembed_init(self):
        # Randomly initialised embedding honours floatX.
        m = VectorEmbed(indim=100, dim=100)
        self.assertEqual(m.W.dtype, "float32")

    def test_vectorembed_cast(self):
        # NOTE(review): the test name suggests a float64 numpy value gets cast
        # down to float32 -- VectorEmbed internals are not visible here.
        md = np.random.random((100, 100))
        m = VectorEmbed(indim=100, dim=100, value=md)
        self.assertEqual(m.W.dtype, "float32")
| 28.545455
| 58
| 0.670382
|
acfc163bbaceb6c95726274df3685628a745ceda
| 8,899
|
py
|
Python
|
chapter2/intogen-arrays/src/mrna/mrna_oncodrive_calc.py
|
chris-zen/phd-thesis
|
1eefdff8e7ca1910304e27ae42551dc64496b101
|
[
"Unlicense"
] | 1
|
2015-12-22T00:53:18.000Z
|
2015-12-22T00:53:18.000Z
|
chapter2/intogen-arrays/src/mrna/mrna_oncodrive_calc.py
|
chris-zen/phd-thesis
|
1eefdff8e7ca1910304e27ae42551dc64496b101
|
[
"Unlicense"
] | null | null | null |
chapter2/intogen-arrays/src/mrna/mrna_oncodrive_calc.py
|
chris-zen/phd-thesis
|
1eefdff8e7ca1910304e27ae42551dc64496b101
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
"""
Calculate oncodrive results for upregulation and downregulation using the cutoffs calculated previously
* Configuration parameters:
- The ones required by intogen.data.entity.EntityManagerFactory
- repositories.data: (optional) The path to the repository where data files are written. Default value = work.path
- overwrite: (optional) Overwrite already existing files ?. Default = no
- bin_paths.gitools: Path to gitools
* Input:
- log2r_tumour_unit_ids: The mrna.log2r_tumour_unit ids to process
* Output:
- oncodrive_results_ids: The ids of the created mrna.oncodrive_probes
* Entities:
- mrna.log2r_tumour_unit, mrna.log2r_cutoff, mrna.oncodrive_probes
"""
import sys
import os
import uuid
import subprocess
from wok.task import Task
from intogen.io import FileReader, FileWriter
from intogen.utils import skip_file
from intogen.repository import rpath
from intogen.repository.server import RepositoryServer
from intogen.data.entity.server import EntityServer
from intogen.data.entity import types
def run_oncodrive(conf, log, oncodrive, cond, matrix_local_path, cmp, cutoff, tmp_path):
    """Run the external gitools-oncodrive binary for one condition.

    :param conf: task configuration; must provide "bin_paths.gitools".
    :param log: logger.
    :param oncodrive: entity whose "id" names the output file.
    :param cond: condition label ("upreg" or "downreg" at the call sites).
    :param matrix_local_path: local path of the data matrix (cdm format).
    :param cmp: binomial cutoff comparator passed to -b (e.g. "gt", "lt").
    :param cutoff: numeric cutoff value.
    :param tmp_path: directory where results are written.
    :return: path of the generated "<id>-<cond>-results.tdm.gz" file.
    :raises Exception: when the subprocess exits with a non-zero code.
    """
    prefix = "-".join((oncodrive["id"], cond))
    results_path = os.path.join(tmp_path, prefix + "-results.tdm.gz")
    results_base_path = os.path.dirname(results_path)
    gitools_oncodrive_bin = os.path.join(conf["bin_paths.gitools"], "bin", "gitools-oncodrive")
    # SECURITY NOTE(review): the command is a single string run with
    # shell=True; entity ids / paths containing shell metacharacters would be
    # interpreted by the shell. Consider an argument list with shell=False.
    cmd = " ".join([
        gitools_oncodrive_bin,
        "-N", prefix, "-w", results_base_path,
        "-t binomial", "-p 1",
        "-d", matrix_local_path, "-df cdm",
        "-b", ",".join((cmp, str(cutoff)))])
    log.debug(cmd)
    retcode = subprocess.call(args = cmd, shell = True)
    # The external tool writes progress to stdout; terminate its line.
    sys.stdout.write("\n")
    sys.stdout.flush()
    if retcode != 0:
        raise Exception("Oncodrive exit code = %i" % retcode)
    return results_path
def read_header(f):
    """Parse the first non-comment line of *f* as a tab-separated header.

    Leading '#' comment lines are skipped. Returns a mapping from column
    name (surrounding double quotes stripped) to its zero-based position.
    """
    line = f.readline().lstrip()
    # Skip comment lines; an empty line (EOF) also ends the scan.
    while line.startswith("#"):
        line = f.readline().lstrip()
    header = {}
    for position, name in enumerate(line.rstrip().split("\t")):
        if len(name) > 2 and name.startswith('"') and name.endswith('"'):
            name = name[1:-1]
        header[name] = position
    return header
def read_data(line, hdr, key_field, fields):
    """Extract the key cell and the named data cells from one TSV line.

    :param line: raw tab-separated line.
    :param hdr: column-name -> index mapping (see ``read_header``).
    :param key_field: column whose value becomes the row key.
    :param fields: column names to collect, in order.
    :return: (key, list of cell values), quotes stripped.
    """
    def unquote(value):
        # Cells may be wrapped in double quotes; strip them.
        if len(value) > 2 and value.startswith('"') and value.endswith('"'):
            return value[1:-1]
        return value

    cells = line.rstrip().split("\t")
    key = unquote(cells[hdr[key_field]])
    data = [unquote(cells[hdr[name]]) for name in fields]
    return key, data
# Fields to retrieve from oncodrive results
# These are the column names expected in each gitools-oncodrive output file;
# they are read once per condition (upreg, then downreg) and concatenated per
# row by read_data_map / write_data_map.
FIELDS = [
    "N", "observed",
    "expected-mean", "expected-stdev", "probability",
    "right-p-value", "corrected-right-p-value"]
def read_data_map(log, upreg_results, downreg_results):
    """Join upreg and downreg oncodrive rows into one in-memory map.

    Reads both result files and returns ``{row_id: values}`` where values are
    the upreg FIELDS followed by the downreg FIELDS. Rows present only in the
    downreg file get "-" placeholders for the upreg columns; rows present only
    in the upreg file keep just the upreg columns (padded later by
    ``write_data_map``).
    """
    dmap = {}

    log.debug("Reading upreg data from {0} ...".format(upreg_results))

    # read upreg data
    uf = FileReader(upreg_results)
    try:
        hdr = read_header(uf)
        count = 0
        for line in uf:
            k, d = read_data(line, hdr, "row", FIELDS)
            dmap[k] = d
            count += 1
    finally:
        uf.close()
    log.debug("Total upreg rows = {0}".format(count))

    log.debug("Reading downreg data from {0} ...".format(downreg_results))

    # read downreg data and join with upreg
    df = FileReader(downreg_results)
    try:
        hdr = read_header(df)
        count = 0
        for line in df:
            k, d = read_data(line, hdr, "row", FIELDS)
            if k not in dmap:
                # downreg-only row: pad the missing upreg columns
                data = ["-"] * len(FIELDS)
            else:
                data = dmap[k]
            data += d
            dmap[k] = data
            count += 1
    finally:
        # Bug fix: the downreg reader was never closed (resource leak).
        df.close()
    log.debug("Total downreg rows = {0}".format(count))

    return dmap
def write_data_map(dmap, path):
    """Write the joined oncodrive map as a tab-separated file at *path*.

    The header is "id" plus upreg_*/downreg_* columns derived from FIELDS.
    Rows that only carried upreg data (len(values) == len(FIELDS)) are padded
    with "-" for the downreg columns.
    """
    rf = FileWriter(path)
    try:
        hdr = ["id"]
        hdr.extend(["_".join(("upreg", f.replace("-", "_").lower())) for f in FIELDS])
        hdr.extend(["_".join(("downreg", f.replace("-", "_").lower())) for f in FIELDS])
        rf.write("\t".join(hdr) + "\n")
        # items() instead of the Python-2-only iteritems().
        for row, values in dmap.items():
            if len(values) == len(FIELDS):
                # Bug fix: the original appended the "-" padding without a tab
                # separator, fusing it onto the last upreg value.
                values = list(values) + ["-"] * len(FIELDS)
            rf.write("\t".join([row] + list(values)))
            rf.write("\n")
    finally:
        rf.close()
def run(task):
    """Wok task entry point.

    For each mrna.log2r_tumour_unit id read from the input port: run
    gitools-oncodrive for up- and down-regulation using the precomputed
    cutoffs, join both result files, store them in the data repository and
    persist one mrna.oncodrive_probes entity, emitting its id on the output
    port.
    """
    # Initialization
    task.check_conf(["entities", "repositories", "bin_paths.gitools"])
    conf = task.conf
    log = task.logger()
    task.check_in_ports(["log2r_tumour_unit_ids"])
    task.check_out_ports(["oncodrive_results_ids"])
    log2r_tumour_unit_port = task.ports["log2r_tumour_unit_ids"]
    oncodrive_results_port = task.ports["oncodrive_results_ids"]
    es = EntityServer(conf["entities"])
    em = es.manager()
    rs = RepositoryServer(conf["repositories"])
    data_repo = rs.repository("data")
    overwrite = conf.get("overwrite", False, dtype=bool)

    # Run

    # Pre-index existing results and cutoffs by (study, platform, topography,
    # morphology) so each unit needs only dictionary lookups.
    log.info("Indexing available oncodrive results for probes ...")
    oncodrive_results_index = em.group_ids(
        ["study_id", "platform_id", "icdo_topography", "icdo_morphology"],
        types.MRNA_ONCODRIVE_PROBES, unique = True)
    log.info("Indexing available mrna log2r cutoffs ...")
    log2r_cutoff_index = em.group_ids(
        ["study_id", "platform_id", "icdo_topography", "icdo_morphology"],
        types.MRNA_LOG2R_CUTOFF, unique = True)
    results_base_path = types.MRNA_ONCODRIVE_PROBES.replace(".", "/")
    for log2r_unit_id in log2r_tumour_unit_port:
        u = em.find(log2r_unit_id, types.MRNA_LOG2R_TUMOUR_UNIT)
        if u is None:
            log.error("{} not found: {}".format(types.MRNA_LOG2R_TUMOUR_UNIT, log2r_unit_id))
            continue
        key = (u["study_id"], u["platform_id"], u["icdo_topography"], u["icdo_morphology"])
        # Reuse an existing results entity for this key, otherwise derive a
        # fresh one from the unit.
        if key in oncodrive_results_index:
            eid = oncodrive_results_index[key][0]
            e = em.find(eid, types.MRNA_ONCODRIVE_PROBES)
            if e is None:
                log.error("{} not found: {}".format(types.MRNA_ONCODRIVE_PROBES, eid))
                continue
        else:
            e = u.transform(["study_id", "platform_id", "icdo_topography", "icdo_morphology"])
            eid = e["id"] = str(uuid.uuid4())
        log.info("Calculating Oncodrive results for {} ({}) [{}] ...".format(types.MRNA_LOG2R_TUMOUR_UNIT, ", ".join(key), log2r_unit_id))
        log.debug("{} id is {}".format(types.MRNA_ONCODRIVE_PROBES, eid))
        # create oncodrive results entity
        e["log2r_tumour_unit_id"] = log2r_unit_id
        results_path = rpath.join(results_base_path, eid + ".tsv.gz")
        if skip_file(overwrite, data_repo, results_path, e.get("results_file")):
            log.warn("Skipping ({}) [{}] as it already exists".format(", ".join(key), eid))
            oncodrive_results_port.write(eid)
            continue
        e["results_file"] = data_repo.url(results_path)
        # data matrix for oncodrive calculation
        file_repo = u["data_file/repo"]
        matrix_repo = rs.repository(file_repo)
        file_path = u["data_file/path"]
        file_name = u["data_file/file"]
        matrix_path = os.path.join(file_path, file_name)
        # Load calculated cutoff
        log.info("Loading mrna cutoff for key ({}) ...".format(", ".join(key)))
        if key not in log2r_cutoff_index:
            log.error("mrna log2r cuttof not found for key ({})".format(", ".join(key)))
            matrix_repo.close()
            continue
        cutoff_id = log2r_cutoff_index[key][0]
        cutoff = em.find(cutoff_id, types.MRNA_LOG2R_CUTOFF)
        if cutoff is None:
            log.error("mrna log2r cuttof for key ({}) [{}] couldn't be loaded".format(", ".join(key), cutoff_id))
            matrix_repo.close()
            continue
        log.debug("{} id is {}".format(types.MRNA_LOG2R_CUTOFF, cutoff_id))
        # Upregulation & downregulation
        # NOTE(review): a failure inside this try re-raises and aborts the
        # whole task (not just this unit); also the finally block references
        # matrix_local_path / tmp_path, which are unbound if mkdtemp or
        # get_local themselves failed -- confirm both are intended.
        try:
            from tempfile import mkdtemp
            tmp_path = mkdtemp(prefix = "mrna_oncodrive_calc_")
            log.debug("Temporary directory: {}".format(tmp_path))
            matrix_local_path = matrix_repo.get_local(matrix_path)
            log.debug("Matrix path: {}".format(matrix_path))
            try:
                log.info("Calculating Upregulation with cutoff {} ...".format(cutoff["upreg/cutoff"]))
                upreg_results = run_oncodrive(
                    conf, log, e, "upreg", matrix_local_path,
                    "gt", cutoff["upreg/cutoff"], tmp_path)
            except:
                log.error("Oncodrive calculation for upreg failed")
                matrix_repo.close_local(matrix_local_path)
                raise
            try:
                log.info("Calculating Downregulation with cutoff {} ...".format(cutoff["downreg/cutoff"]))
                downreg_results = run_oncodrive(
                    conf, log, e, "downreg", matrix_local_path,
                    "lt", cutoff["downreg/cutoff"], tmp_path)
            except:
                log.error("Oncodrive calculation for downreg failed")
                matrix_repo.close_local(matrix_local_path)
                raise
            # Join upreg & downreg results
            log.info("Joining upreg & downreg results into memory ...")
            # the join is done in memory with a map
            dmap = read_data_map(log, upreg_results, downreg_results)
            log.info("Writting joined results to {} ...".format(results_path))
            results_local_path = data_repo.create_local(results_path)
            write_data_map(dmap, results_local_path)
        finally:
            matrix_repo.close_local(matrix_local_path)
            matrix_repo.close()
            if os.path.exists(tmp_path):
                log.debug("Removing temporary directory {} ...".format(tmp_path))
                import shutil
                shutil.rmtree(tmp_path)
        # Publish the joined file and persist the entity before emitting its id.
        data_repo.put_local(results_local_path)
        em.persist(e, types.MRNA_ONCODRIVE_PROBES)
        oncodrive_results_port.write(eid)
    em.close()
    data_repo.close()
    rs.close()
if __name__ == "__main__":
Task(run).start()
| 28.161392
| 132
| 0.692774
|
acfc165d62516803af9cb1108f2f1579c2db937d
| 28,923
|
py
|
Python
|
BOT/lib/cogs/product.py
|
Elijah-glitch/Roblox-Purchasing-Hub
|
d06f9edef6b4fbc22135a856f1f53b25453c7591
|
[
"MIT"
] | 14
|
2021-05-20T19:43:39.000Z
|
2022-03-06T04:07:33.000Z
|
BOT/lib/cogs/product.py
|
Elijah-glitch/Roblox-Purchasing-Hub
|
d06f9edef6b4fbc22135a856f1f53b25453c7591
|
[
"MIT"
] | 3
|
2022-02-18T19:04:01.000Z
|
2022-02-18T19:34:19.000Z
|
BOT/lib/cogs/product.py
|
Elijah-glitch/Roblox-Purchasing-Hub
|
d06f9edef6b4fbc22135a856f1f53b25453c7591
|
[
"MIT"
] | 10
|
2021-09-26T19:30:56.000Z
|
2022-03-28T15:55:36.000Z
|
"""
File: /lib/cogs/product.py
Info: This cog handles all commands related to products
"""
import nextcord
from nextcord import message
from nextcord.components import Button
from nextcord.errors import Forbidden
from nextcord.ext.commands import Cog, command, has_permissions
from nextcord import Embed, Colour, colour, ui, Interaction, SelectOption, ButtonStyle
from datetime import datetime
from nextcord.ui.button import button
from nextcord.ui.select import select
from nextcord.user import BU
from ..utils.api import * # Imports everything from the API util
from ..utils.database import find
from ..utils.util import AreYouSureView
import json
# Module-level option list shared by the select menus below; it is cleared and
# refilled in DeleteView/InitialUpdateView __init__ so the class-level
# ui.select decorators see the current product list.
# NOTE(review): shared mutable state across concurrently open views -- confirm
# this cannot corrupt another user's open menu.
productoptions = []

# Cancel Button
class CancelView(ui.View):
    """Persistent view with a single Cancel button.

    Sets ``canceled`` so the prompt flow that created it can tell the user
    aborted, then stops the view.
    """

    def __init__(self, context):
        super().__init__(timeout=None)
        self.context = context
        # Polled by the prompt flows after wait_for returns.
        self.canceled = False

    @ui.button(label="Cancel", style=ButtonStyle.danger, custom_id="products:cancel")
    async def cancel(self, _, interaction: Interaction):
        """Delete the prompt, confirm ephemerally and stop the view."""
        await interaction.message.delete()
        await interaction.response.send_message("Canceled", ephemeral=True)
        self.canceled = True
        self.stop()
# Delete view
class DeleteView(ui.View):
    """Select menu listing all products; picking one asks for delete confirmation."""

    def __init__(self, context):
        super().__init__(timeout=None)
        self.context = context
        # NOTE(review): the select below is bound to the module-level
        # ``productoptions`` list at class-definition time; refreshing it here
        # relies on nextcord reading the list lazily -- confirm.
        global productoptions
        productoptions.clear()
        for product in getproducts():
            productoptions.append(
                SelectOption(label=product["name"], description=product["price"])
            )

    @ui.select(
        custom_id="products:delete_select",
        options=productoptions,
    )
    async def delete_select(self, _, interaction: Interaction):
        """Replace the menu with an are-you-sure prompt for the chosen product."""
        # interaction.data["values"] is a one-element list of the chosen label;
        # str(...)[2:-2] strips the "['...']" wrapper.
        product = str(interaction.data["values"])[2:-2]
        await interaction.message.delete()
        await interaction.channel.send(
            f"Are you sure you would like to delete {product}?",
            view=AreYouSureView(self.context, "deleteproduct", product),
            reference=self.context.message,
        )
# Update View's
## What to update
class WhatUpdateView(ui.View):
    """Second step of the product-update flow.

    One button per editable attribute (name, description, price, attachments)
    plus a cancel button. Each handler prompts the invoking user for the new
    value, asks for confirmation via ``AreYouSureView`` and then calls the
    ``updateproduct`` API helper.

    Bug fixes relative to the original:
      * ``update_name`` reassigned ``message`` to the confirmation prompt it
        had just sent, then saved that prompt's text as the new product name;
        the confirmation message now uses its own variable.
      * All failure paths referenced ``self.args[0]``, which does not exist on
        this view (AttributeError inside the except); they now report
        ``self.product["name"]``.

    NOTE(review): ``except TimeoutError`` may not catch asyncio.TimeoutError
    on Python < 3.11 -- confirm the target runtime.
    """

    def __init__(self, context, product, bot):
        super().__init__(timeout=600.0)
        self.context = context
        # Resolve the selected product name to its full record up front.
        self.product = getproduct(product)
        self.bot = bot

    @ui.button(
        label="Name", style=ButtonStyle.primary, custom_id="products:update_name"
    )
    async def update_name(self, _, interaction: Interaction):
        """Prompt for a new product name and apply it after confirmation."""
        embed = Embed(
            title=f"Update {self.product['name']}",
            description=f"What would you like to change the name to?",
            colour=Colour.blue(),
            timestamp=nextcord.utils.utcnow(),
        )
        embed.set_footer(
            text='Redon Tech RPH • Say "Cancel" to cancel. • By: parker02311'
        )
        view = CancelView(self.context)
        await interaction.message.edit("", embed=embed, view=None)

        def check(m):
            return m.content and m.author == self.context.author

        try:
            message = await self.bot.wait_for("message", timeout=600.0, check=check)
        except TimeoutError:
            await interaction.message.delete()
            await interaction.response.send_message("Timed Out", ephemeral=True)
            self.stop()

        if not message is None and view.canceled is False:
            if message.content.lower() == "cancel":
                await interaction.message.delete()
                await interaction.response.send_message("Canceled", ephemeral=True)
                self.stop()
            else:
                await interaction.message.delete()
                view = AreYouSureView(self.context)
                # Bug fix: keep the confirmation prompt in its own variable so
                # message.content still holds the user's new name below.
                confirm_message = await self.context.send(
                    f"Are you sure you would like to change {self.product['name']} to {message.content}?",
                    view=view,
                    reference=self.context.message,
                )
                await view.wait()
                if view.Return == None:
                    await confirm_message.delete()
                    await interaction.response.send_message("Timed out", ephemeral=True)
                elif view.Return == False:
                    await confirm_message.delete()
                    await interaction.response.send_message("Canceled update", ephemeral=True)
                elif view.Return == True:
                    try:
                        updateproduct(
                            self.product["name"], message.content, self.product["description"], self.product["price"], self.product["attachments"]
                        )
                        await interaction.message.delete()
                        name = self.product["name"]
                        await interaction.response.send_message(
                            f"Updated {name}.",
                            ephemeral=True,
                        )
                    except:
                        await interaction.message.delete()
                        # Bug fix: self.args does not exist on this view.
                        await interaction.response.send_message(
                            f"Failed to update {self.product['name']}.",
                            ephemeral=True,
                        )

    @ui.button(
        label="Description",
        style=ButtonStyle.primary,
        custom_id="products:update_description",
    )
    async def update_description(self, _, interaction: Interaction):
        """Prompt for a new description and apply it after confirmation."""
        embed = Embed(
            title=f"Update {self.product['name']}",
            description=f"What would you like to change the description to?",
            colour=Colour.blue(),
            timestamp=nextcord.utils.utcnow(),
        )
        embed.set_footer(
            text='Redon Tech RPH • Say "Cancel" to cancel. • By: parker02311'
        )
        view = CancelView(self.context)
        await interaction.message.edit("", embed=embed, view=None)

        def check(m):
            return m.content and m.author == self.context.author

        try:
            message = await self.bot.wait_for("message", timeout=600.0, check=check)
        except TimeoutError:
            await interaction.message.delete()
            await interaction.response.send_message("Timed Out", ephemeral=True)
            self.stop()

        if not message is None and view.canceled is False:
            if message.content.lower() == "cancel":
                await interaction.message.delete()
                await interaction.response.send_message("Canceled", ephemeral=True)
                self.stop()
            else:
                await interaction.message.delete()
                view = AreYouSureView(self.context)
                await self.context.send(
                    f"Are you sure you would like to change {self.product['description']} to {message.content}?",
                    view=view,
                    reference=self.context.message,
                )
                await view.wait()
                if view.Return == None:
                    await message.delete()
                    await interaction.response.send_message("Timed out", ephemeral=True)
                elif view.Return == False:
                    await message.delete()
                    await interaction.response.send_message("Canceled update", ephemeral=True)
                elif view.Return == True:
                    try:
                        updateproduct(
                            self.product["name"], self.product["name"], message.content, self.product["price"], self.product["attachments"]
                        )
                        await interaction.message.delete()
                        name = self.product["name"]
                        await interaction.response.send_message(
                            f"Updated {name}.",
                            ephemeral=True,
                        )
                    except:
                        await interaction.message.delete()
                        # Bug fix: self.args does not exist on this view.
                        await interaction.response.send_message(
                            f"Failed to update {self.product['name']}.",
                            ephemeral=True,
                        )

    @ui.button(
        label="Price", style=ButtonStyle.primary, custom_id="products:update_price"
    )
    async def update_price(self, _, interaction: Interaction):
        """Prompt for a new price (int) and apply it after confirmation."""
        embed = Embed(
            title=f"Update {self.product['name']}",
            description=f"What would you like to change the price to?",
            colour=Colour.blue(),
            timestamp=nextcord.utils.utcnow(),
        )
        embed.set_footer(
            text='Redon Tech RPH • Say "Cancel" to cancel. • By: parker02311'
        )
        view = CancelView(self.context)
        await interaction.message.edit("", embed=embed, view=None)

        def check(m):
            return m.content and m.author == self.context.author

        try:
            message = await self.bot.wait_for("message", timeout=600.0, check=check)
        except TimeoutError:
            await interaction.message.delete()
            await interaction.response.send_message("Timed Out", ephemeral=True)
            self.stop()

        if not message is None and view.canceled is False:
            if message.content.lower() == "cancel":
                await interaction.message.delete()
                await interaction.response.send_message("Canceled", ephemeral=True)
                self.stop()
            else:
                await interaction.message.delete()
                view = AreYouSureView(self.context)
                await self.context.send(
                    f"Are you sure you would like to change {self.product['price']} to {int(message.content)}?",
                    view=view,
                    reference=self.context.message,
                )
                await view.wait()
                if view.Return == None:
                    await message.delete()
                    await interaction.response.send_message("Timed out", ephemeral=True)
                elif view.Return == False:
                    await message.delete()
                    await interaction.response.send_message("Canceled update", ephemeral=True)
                elif view.Return == True:
                    try:
                        updateproduct(
                            self.product["name"], self.product["name"], self.product["description"], int(message.content), self.product["attachments"]
                        )
                        await interaction.message.delete()
                        name = self.product["name"]
                        await interaction.response.send_message(
                            f"Updated {name}.",
                            ephemeral=True,
                        )
                    except:
                        await interaction.message.delete()
                        # Bug fix: self.args does not exist on this view.
                        await interaction.response.send_message(
                            f"Failed to update {self.product['name']}.",
                            ephemeral=True,
                        )

    @ui.button(
        label="Attachments",
        style=ButtonStyle.primary,
        custom_id="products:update_attachments",
    )
    async def update_attachments(self, _, interaction: Interaction):
        """Collect attachment URLs until the user says Done, then apply them."""
        embed = Embed(
            title=f"Update {self.product['name']}",
            description=f'Please post the attachments now. Say "Done" when you are done.',
            colour=Colour.blue(),
            timestamp=nextcord.utils.utcnow(),
        )
        embed.set_footer(
            text='Redon Tech RPH • Say "Cancel" to cancel. • By: parker02311'
        )
        fields = [
            (
                "Attachments",
                "None",
                False,
            )
        ]
        for name, value, inline in fields:
            embed.add_field(name=name, value=value, inline=inline)
        view = CancelView(self.context)
        await interaction.message.edit("", embed=embed, view=None)

        def check(m):
            return m.author == self.context.author

        attachments = []
        while True:
            try:
                message = await self.bot.wait_for("message", timeout=600.0, check=check)
            except TimeoutError:
                await interaction.message.delete()
                await interaction.response.send_message("Timed Out", ephemeral=True)
                self.stop()
            if message.content.lower() == "cancel":
                await interaction.message.delete()
                await self.context.send(
                    "Canceled", reference=self.context.message, delete_after=5.0
                )
                break
            if message.content.lower() == "done":
                break
            elif not message.attachments == [] and message.attachments:
                # Accumulate every attachment URL and refresh the prompt embed.
                for attachment in message.attachments:
                    attachments.append(attachment.url)
                embed = Embed(
                    title=f"Update {self.product['name']}",
                    description=f'Please post the attachments now. Say "Done" when you are done.',
                    colour=Colour.blue(),
                    timestamp=nextcord.utils.utcnow(),
                )
                embed.set_footer(
                    text='Redon Tech RPH • Say "Cancel" to cancel. • By: parker02311'
                )
                fields = [
                    (
                        "Attachments",
                        "\n".join([attachment for attachment in attachments]),
                        False,
                    )
                ]
                for name, value, inline in fields:
                    embed.add_field(name=name, value=value, inline=inline)
                embed.set_footer(text="Pembroke Bot • By: parker02311")
                await interaction.message.edit("", embed=embed, view=None)
                await self.context.send(
                    "It is recommended to not delete this message unless needed.",
                    reference=message,
                )
        if attachments:
            await interaction.message.delete()
            view = AreYouSureView(self.context)
            await self.context.send(
                f"Are you sure you would like to change {self.product['attachments']} to {attachments}?",
                view=view,
                reference=self.context.message,
            )
            await view.wait()
            if view.Return == None:
                await message.delete()
                await interaction.response.send_message("Timed out", ephemeral=True)
            elif view.Return == False:
                await message.delete()
                await interaction.response.send_message("Canceled update", ephemeral=True)
            elif view.Return == True:
                try:
                    updateproduct(
                        self.product["name"], self.product["name"], self.product["description"], self.product["price"], attachments
                    )
                    await interaction.message.delete()
                    name = self.product["name"]
                    await interaction.response.send_message(
                        f"Updated {name}.",
                        ephemeral=True,
                    )
                except:
                    await interaction.message.delete()
                    # Bug fix: self.args does not exist on this view.
                    await interaction.response.send_message(
                        f"Failed to update {self.product['name']}.",
                        ephemeral=True,
                    )

    @ui.button(
        label="cancel", style=ButtonStyle.danger, custom_id="products:update_cancel"
    )
    async def update_cancel(self, _, interaction: Interaction):
        """Abort the update flow."""
        await interaction.message.delete()
        await interaction.response.send_message("Canceled", ephemeral=True)
        self.stop()
## Initial View
class InitialUpdateView(ui.View):
    """First step of the update flow: a select menu to pick the product to edit."""

    def __init__(self, context, bot):
        super().__init__(timeout=600.0)
        self.context = context
        self.bot = bot
        # NOTE(review): the select below is bound to the module-level
        # ``productoptions`` list at class-definition time; refreshing it here
        # relies on nextcord reading the list lazily -- confirm.
        global productoptions
        productoptions.clear()
        for product in getproducts():
            productoptions.append(
                SelectOption(label=product["name"], description=product["price"])
            )

    @ui.select(
        custom_id="products:update_select",
        options=productoptions,
    )
    async def update_select(self, _, interaction: Interaction):
        """Swap this message over to the WhatUpdateView step for the chosen product."""
        # interaction.data["values"] is a one-element list of the chosen label;
        # str(...)[2:-2] strips the "['...']" wrapper.
        product = str(interaction.data["values"])[2:-2]
        embed = Embed(
            title=f"Update {product}",
            description=f"What would you like to change?",
            colour=Colour.blue(),
            timestamp=nextcord.utils.utcnow(),
        )
        embed.set_footer(text="Redon Tech RPH • By: parker02311")
        await interaction.message.edit(
            "", embed=embed, view=WhatUpdateView(self.context, product, self.bot)
        )
class Product(Cog):
    """Cog exposing the product commands: list, retrieve, create, update, delete."""

    def __init__(self, bot):
        self.bot = bot

    # NOTE(review): "catagory" [sic] is passed to every @command below;
    # presumably consumed by a custom help command -- confirm the spelling is
    # what that consumer expects.
    @command(
        name="products",
        aliases=["listproducts", "viewproducts", "allproducts"],
        brief="Sends a list of all products.",
        catagory="product",
    )
    async def getproducts(self, ctx):
        """Reply with an embed listing every product (name, description, price)."""
        # Calls the module-level getproducts() API helper; the method name
        # shadows it only as a class attribute, not in this scope.
        dbresponse = getproducts()
        embed = Embed(
            title="Products",
            description=f"Here is all the products I was able to get for this server!",
            colour=ctx.author.colour,
            timestamp=nextcord.utils.utcnow(),
        )
        fields = []
        for product in dbresponse:
            fields.append(
                (
                    product["name"],
                    "Product Description: "
                    + str(product["description"])
                    + "\nProduct Price: "
                    + str(product["price"]),
                    False,
                )
            )
        for name, value, inline in fields:
            embed.add_field(name=name, value=value, inline=inline)
        embed.set_footer(text="Redon Tech RPH • By: parker02311")
        await ctx.send(embed=embed, reference=ctx.message)

    @command(
        name="retrieve",
        aliases=["retrieveproduct", "getproduct"],
        brief="DM's you the specified product if you own it.",
        catagory="product",
    )
    async def retrieveproduct(self, ctx, *, product: str):
        """DM the caller the attachments of *product* if it is in their purchases."""
        userinfo = getuserfromdiscord(ctx.author.id)
        if userinfo:
            if product in userinfo["purchases"]:
                embed = Embed(
                    title="Thanks for your purchase!",
                    description=f"Thank you for your purchase of {product} please get it by using the links below.",
                    colour=Colour.from_rgb(255, 255, 255),
                    timestamp=nextcord.utils.utcnow(),
                )
                try:
                    # Ensure a DM channel exists before sending.
                    if not ctx.author.dm_channel:
                        await ctx.author.create_dm()
                    await ctx.author.dm_channel.send(embed=embed)
                    for attachment in getproduct(product)["attachments"]:
                        await ctx.author.dm_channel.send(attachment)
                except Forbidden:
                    await ctx.send(
                        "Please open your DM's and try again.", reference=ctx.message
                    )

    @command(
        name="createproduct",
        aliases=["newproduct", "makeproduct"],
        brief="Create a new product.",
        catagory="product",
    )
    @has_permissions(manage_guild=True)
    async def createproduct(self, ctx):
        """Interactive Q&A to create a product (name, description, price, attachments)."""
        # The literal "attachments" entry switches the loop below into the
        # attachment-collection mode instead of a plain text question.
        questions = [
            "What do you want to call this product?",
            "What do you want the description of the product to be?",
            "What do you want the product price to be?",
            "attachments",
        ]
        embedmessages = []
        usermessages = []
        awnsers = []
        attachments = []

        def check(m):
            return m.content and m.author == ctx.author

        # NOTE(review): used as the reaction_add check, so "self" actually
        # receives the reaction object -- works, but the naming is misleading.
        def emojicheck(self, user):
            return user == ctx.author

        def attachmentcheck(m):
            return m.author == ctx.author

        for i, question in enumerate(questions):
            if question == "attachments":
                embed = Embed(
                    title=f"Create Product (Question {i+1})",
                    description='Please post any attachments\nSay "Done" when complete',
                    colour=ctx.author.colour,
                    timestamp=nextcord.utils.utcnow(),
                )
                embed.set_footer(
                    text='Redon Tech RPH • Say "Cancel" to cancel. • By: parker02311'
                )
                fields = [
                    (
                        "Attachments",
                        "None",
                        False,
                    )
                ]
                for name, value, inline in fields:
                    embed.add_field(name=name, value=value, inline=inline)
                embedmessage = await ctx.send(embed=embed)
                embedmessages.append(embedmessage)
                while True:
                    try:
                        message = await self.bot.wait_for(
                            "message", timeout=200.0, check=attachmentcheck
                        )
                    except TimeoutError:
                        await ctx.send("You didn't answer the questions in Time")
                        return
                    if message.content.lower() == "cancel":
                        # Clean up every prompt/answer message on cancel.
                        usermessages.append(message)
                        for message in embedmessages:
                            await message.delete()
                        for message in usermessages:
                            await message.delete()
                        await ctx.message.delete()
                        await ctx.send("Canceled", delete_after=5.0)
                        break
                    if message.content.lower() == "done":
                        usermessages.append(message)
                        break
                    elif not message.attachments == [] and message.attachments:
                        for attachment in message.attachments:
                            attachments.append(attachment.url)
                        # Refresh the prompt embed with the collected URLs.
                        embed = Embed(
                            title=f"Create Product (Question {i+1})",
                            description='Please post any attachments\nSay "Done" when complete',
                            colour=ctx.author.colour,
                            timestamp=nextcord.utils.utcnow(),
                        )
                        embed.set_footer(
                            text='Redon Tech RPH • Say "Cancel" to cancel. • By: parker02311'
                        )
                        fields = [
                            (
                                "Attachments",
                                "\n".join(
                                    [attachment for attachment in attachments]
                                ),
                                False,
                            )
                        ]
                        for name, value, inline in fields:
                            embed.add_field(name=name, value=value, inline=inline)
                        embed.set_footer(text="Pembroke Bot • By: parker02311")
                        await embedmessage.edit(embed=embed)
                        await ctx.send(
                            "It is recommended to not delete this message unless needed.",
                            reference=message,
                        )
            else:
                embed = Embed(
                    title=f"Create Product (Question {i+1})",
                    description=question,
                    colour=ctx.author.colour,
                    timestamp=nextcord.utils.utcnow(),
                )
                embed.set_footer(
                    text='Redon Tech RPH • Say "Cancel" to cancel. • By: parker02311'
                )
                embedmessage = await ctx.send(embed=embed)
                embedmessages.append(embedmessage)
                try:
                    message = await self.bot.wait_for(
                        "message", timeout=200.0, check=check
                    )
                except TimeoutError:
                    await ctx.send("You didn't answer the questions in Time")
                    return
                if message.content.lower() == "cancel":
                    usermessages.append(message)
                    for message in embedmessages:
                        await message.delete()
                    for message in usermessages:
                        await message.delete()
                    await ctx.message.delete()
                    await ctx.send("Canceled", delete_after=5.0)
                    break
                else:
                    usermessages.append(message)
                    awnsers.append(message.content)
        lastbeforefinal = await ctx.send(
            "Creating final message this may take a moment."
        )
        # Remove the Q&A transcript before showing the confirmation summary.
        for message in embedmessages:
            await message.delete()
        for message in usermessages:
            await message.delete()
        embed = Embed(
            title="Confirm Product Creation",
            description="✅ to confirm\n❌ to cancel",
            colour=ctx.author.colour,
            timestamp=nextcord.utils.utcnow(),
        )
        fields = [
            ("Name", awnsers[0], False),
            ("Description", awnsers[1], False),
            ("Price", awnsers[2], False),
            (
                "Attachments",
                "\n".join([attachment for attachment in attachments]),
                False,
            ),
        ]
        for name, value, inline in fields:
            embed.add_field(name=name, value=value, inline=inline)
        embed.set_footer(text="Redon Tech RPH • By: parker02311")
        finalmessage = await ctx.send(embed=embed)
        await finalmessage.add_reaction("✅")
        await finalmessage.add_reaction("❌")
        await lastbeforefinal.delete()
        try:
            reaction, user = await self.bot.wait_for(
                "reaction_add", timeout=200.0, check=emojicheck
            )
        except TimeoutError:
            await ctx.author.send("You didn't respond in time.")
            return
        if str(reaction.emoji) == "✅":
            try:
                # Module-level createproduct() API helper (name shadowing as above).
                createproduct(awnsers[0], awnsers[1], awnsers[2], attachments)
            except:
                await ctx.send(
                    "I was unable to create the product...", delete_after=5.0
                )
                raise
            embed = Embed(
                title="Product Created",
                description="The product was successfully created.",
                colour=ctx.author.colour,
                timestamp=nextcord.utils.utcnow(),
            )
            fields = [
                ("Name", awnsers[0], False),
                ("Description", awnsers[1], False),
                ("Price", awnsers[2], False),
                (
                    "Attachments",
                    "\n".join([attachment for attachment in attachments]),
                    False,
                ),
            ]
            for name, value, inline in fields:
                embed.add_field(name=name, value=value, inline=inline)
            embed.set_footer(text="Redon Tech RPH • By: parker02311")
            await ctx.send(embed=embed)
        await finalmessage.delete()
        await ctx.message.delete()

    @command(
        name="deleteproduct",
        aliases=["removeproduct", "terminateproduct", "fuckoffpieceofshitproduct"],
        brief="Delete's a product.",
        catagory="product",
    )
    @has_permissions(manage_guild=True)
    async def deleteproduct(self, ctx):
        """Open the product-deletion select menu."""
        await ctx.send(
            "Chose a product to delete", view=DeleteView(ctx), reference=ctx.message
        )

    @command(
        name="updateproduct",
        aliases=["changeproduct"],
        brief="Update's a product.",
        catagory="product",
    )
    @has_permissions(manage_guild=True)
    async def updateproduct(self, ctx):
        """Open the product-update select menu."""
        await ctx.send(
            "Chose a product to update.",
            view=InitialUpdateView(ctx, self.bot),
            reference=ctx.message,
        )

    @Cog.listener()
    async def on_ready(self):
        """Report this cog as ready to the bot's startup bookkeeping."""
        if not self.bot.ready:
            self.bot.cogs_ready.ready_up("product")
            await self.bot.stdout.send("`/lib/cogs/product.py` ready")
            print(" /lib/cogs/product.py ready")
def setup(bot):
    """Extension entry point: register the Product cog with the bot.

    Called by the discord.py/nextcord extension loader when this module is
    loaded via ``bot.load_extension``.
    """
    bot.add_cog(Product(bot))
| 37.956693
| 150
| 0.51077
|
acfc197a9618e53ebbc9c7e90e547abdfadd0ccd
| 610
|
py
|
Python
|
twitter/tweets-extract-text.py
|
keelanfh/electionary
|
1c33cc41f2b7357ba45d279d09f13b54026fbba7
|
[
"MIT"
] | null | null | null |
twitter/tweets-extract-text.py
|
keelanfh/electionary
|
1c33cc41f2b7357ba45d279d09f13b54026fbba7
|
[
"MIT"
] | null | null | null |
twitter/tweets-extract-text.py
|
keelanfh/electionary
|
1c33cc41f2b7357ba45d279d09f13b54026fbba7
|
[
"MIT"
] | null | null | null |
# Simple program to output a text file with all the tweets from each of the two accounts
import json
import os
from commonfunctions import commonfunctions as cf
# One JSON dump of tweets per account; each becomes a matching .txt file.
filenames = ['HillaryClintonTweets.json', 'realDonaldTrumpTweets.json']
for filename in filenames:
    source_path = os.path.abspath(os.path.join('twitter', filename))
    with open(source_path, 'r') as f:
        tweets = json.load(f)
    # Replace em-dashes (U+2014) with spaces, force ASCII, then flatten every
    # tweet into one space-separated string.
    joined_text = " ".join(
        cf.unicode_to_ascii(tweet['text'].replace(u'\u2014', ' ')) for tweet in tweets
    )
    # Write alongside the source, swapping the ".json" suffix for ".txt".
    target_path = os.path.abspath(os.path.join('twitter', filename[:-5] + '.txt'))
    with open(target_path, 'w') as f:
        f.write(joined_text)
| 32.105263
| 98
| 0.688525
|
acfc199c67c61717d396c291fe677715202cf25b
| 75
|
py
|
Python
|
mlclas/tree/__init__.py
|
markzy/multi-label-classification
|
e5b10351f2b9f1b3eba7f81b7edb7acad6f313d4
|
[
"MIT"
] | 1
|
2016-05-17T03:36:35.000Z
|
2016-05-17T03:36:35.000Z
|
mlclas/tree/__init__.py
|
markzy/multi-label-classification
|
e5b10351f2b9f1b3eba7f81b7edb7acad6f313d4
|
[
"MIT"
] | null | null | null |
mlclas/tree/__init__.py
|
markzy/multi-label-classification
|
e5b10351f2b9f1b3eba7f81b7edb7acad6f313d4
|
[
"MIT"
] | null | null | null |
from mlclas.tree.ml_dt import MLDecisionTree
__all__ = ['MLDecisionTree']
| 18.75
| 44
| 0.8
|
acfc1ad4c33fab0cb9e3aa7071d1c6fedd4cd51b
| 430
|
py
|
Python
|
tutproject/todo/migrations/0002_alter_taskmodels_type_task.py
|
kosarkarbasi/python_course
|
106f885729fd43e79af3f061c914f02617e5f11f
|
[
"Apache-2.0"
] | 11
|
2021-07-15T11:14:43.000Z
|
2022-02-08T08:19:57.000Z
|
tutproject/todo/migrations/0002_alter_taskmodels_type_task.py
|
kosarkarbasi/python_course
|
106f885729fd43e79af3f061c914f02617e5f11f
|
[
"Apache-2.0"
] | null | null | null |
tutproject/todo/migrations/0002_alter_taskmodels_type_task.py
|
kosarkarbasi/python_course
|
106f885729fd43e79af3f061c914f02617e5f11f
|
[
"Apache-2.0"
] | 39
|
2021-07-15T10:42:31.000Z
|
2021-07-25T13:44:25.000Z
|
# Generated by Django 3.2.5 on 2021-07-12 15:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine the choices on TaskModels.type_task."""
    dependencies = [
        ('todo', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='taskmodels',
            name='type_task',
            # Choices are (stored value, display label); labels are Persian
            # for "reminder" and "task" respectively.
            field=models.CharField(choices=[('reminder', 'یادآور'), ('task', 'تسک')], max_length=10),
        ),
    ]
| 22.631579
| 101
| 0.586047
|
acfc1b40bc5f54b94aa9c3daf316934a38df3e0c
| 17,217
|
py
|
Python
|
kats/models/sarima.py
|
menefotto/Kats
|
3fc8a3f819502d45736734eabb3601f42a6b7759
|
[
"MIT"
] | 1
|
2021-06-22T03:40:33.000Z
|
2021-06-22T03:40:33.000Z
|
kats/models/sarima.py
|
menefotto/Kats
|
3fc8a3f819502d45736734eabb3601f42a6b7759
|
[
"MIT"
] | null | null | null |
kats/models/sarima.py
|
menefotto/Kats
|
3fc8a3f819502d45736734eabb3601f42a6b7759
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from typing import List, Dict, Optional, Tuple, Callable, Union, Any
import kats.models.model as m
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData, Params
from kats.utils.parameter_tuning_utils import (
get_default_sarima_parameter_search_space,
)
from statsmodels.tsa.statespace.sarimax import SARIMAX
class SARIMAParams(Params):
    """Parameter class for SARIMA model
    This is the parameter class for SARIMA model, it contains all necessary parameters as defined in SARIMA model implementation:
    https://www.statsmodels.org/stable/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html.
    Attributes:
        p: An integer for trend autoregressive (AR) order.
        d: An integer for trend difference order.
        q: An integer for trend moving average (MA) order.
        exog: Optional; An array of exogenous regressors.
        seasonal_order: Optional; A tuple for (P,D,Q,s) order of the seasonal component for AR order, difference order, MA order, and periodicity. Default is (0,0,0,0).
        trend: Optional; A string or an iterable for deterministic trend. Can be 'c' (constant), 't' (linear trend with time), 'ct' (both constant and linear trend), or an iterable of integers defining the non-zero polynomial exponents to include. Default is None (not to include trend).
        measurement_error: Optional; A boolean to specify whether or not to assume the observed time series were measured with error. Default is False.
        time_varying_regression: Optional; A boolean to specify whether or not coefficients on the regressors (if provided) are allowed to vary over time. Default is False.
        mle_regression: Optional; A boolean to specify whether or not to estimate coefficients of regressors as part of maximum likelihood estimation or through Kalman filter.
            If time_varying_regression is True, this must be set to False. Default is True.
        simple_differencing: Optional; A boolean to specify whether or not to use partially conditional maximum likelihood estimation.
            See https://www.statsmodels.org/stable/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html for more details. Default is False.
        enforce_stationarity: Optional; A boolean to specify whether or not to transform the AR parameters to enforce stationarity in AR component. Default is True.
        enforce_invertibility: Optional; A boolean to specify whether or not to transform the MA parameters to enforce invertibility in MA component. Default is True.
        hamilton_representation: Optional; A boolean to specify whether or not to use the Hamilton representation or the Harvey representation (if False). Default is False.
        concentrate_scale: Optional; A boolean to specify whether or not to concentrate the scale (variance of the error term) out of the likelihood. Default is False.
        trend_offset: Optional; An integer for the offset at which to start time trend value. Default is 1.
        use_exact_diffuse: Optional; A boolean to specify whether or not to use exact diffuse initialization for non-stationary states. Default is False.
    """
    # NOTE(review): __init__ below assigns many attributes beyond p/d/q, which
    # only works if the Params base class leaves instances with a __dict__ —
    # in that case this __slots__ saves no memory. Confirm against Params.
    __slots__ = ["p", "d", "q"]
    def __init__(
        self,
        p: int,
        d: int,
        q: int,
        exog=None,
        seasonal_order: Tuple = (0, 0, 0, 0),
        trend=None,
        measurement_error: bool = False,
        time_varying_regression: bool = False,
        mle_regression: bool = True,
        simple_differencing: bool = False,
        enforce_stationarity: bool = True,
        enforce_invertibility: bool = True,
        hamilton_representation: bool = False,
        concentrate_scale: bool = False,
        trend_offset: int = 1,
        use_exact_diffuse: bool = False,
        **kwargs
    ) -> None:
        super().__init__()
        # Store every option verbatim; SARIMAModel.fit() forwards them
        # one-for-one to statsmodels' SARIMAX constructor.
        self.p = p
        self.d = d
        self.q = q
        self.exog = exog
        self.seasonal_order = seasonal_order
        self.trend = trend
        self.measurement_error = measurement_error
        self.time_varying_regression = time_varying_regression
        self.mle_regression = mle_regression
        self.simple_differencing = simple_differencing
        self.enforce_stationarity = enforce_stationarity
        self.enforce_invertibility = enforce_invertibility
        self.hamilton_representation = hamilton_representation
        self.concentrate_scale = concentrate_scale
        self.trend_offset = trend_offset
        self.use_exact_diffuse = use_exact_diffuse
        logging.debug(
            "Initialized SARIMAParams with parameters. "
            "p:{p}, d:{d}, q:{q},seasonal_order:{seasonal_order}".format(
                p=p, d=d, q=q, seasonal_order=seasonal_order
            )
        )
    def validate_params(self):
        """Not implemented."""
        # Deliberate no-op: validation is delegated to statsmodels, which
        # raises on invalid orders at SARIMAX construction/fit time.
        logging.info("Method validate_params() is not implemented.")
        pass
class SARIMAModel(m.Model):
    """Model class for SARIMA.
    This class provides fit, predict and plot methods for SARIMA model.
    Attributes:
        data: :class:`kats.consts.TimeSeriesData` object for input time series.
        params: :class:`SARIMAParams` for model parameters.
    """
    def __init__(
        self,
        data: TimeSeriesData,
        params: SARIMAParams,
    ) -> None:
        super().__init__(data, params)
        # SARIMAX models a single endogenous series; a multivariate
        # TimeSeriesData would expose a DataFrame here instead of a Series.
        if not isinstance(self.data.value, pd.Series):
            msg = "Only support univariate time series, but get {type}.".format(
                type=type(self.data.value)
            )
            logging.error(msg)
            raise ValueError(msg)
        # Fit-time options; populated by fit(). Initialized to None/defaults so
        # attribute access is safe before fit() is called.
        self.start_params = None
        self.transformed = None
        self.includes_fixed = None
        self.cov_type = None
        self.cov_kwds = None
        self.method = None
        self.maxiter = None
        self.full_output = None
        self.disp = None
        self.callback = None
        self.return_params = None
        self.optim_score = None
        self.optim_complex_step = None
        self.optim_hessian = None
        self.low_memory = None
        # Fitted statsmodels results object, set by fit().
        self.model = None
        # Prediction state, set by predict().
        self.include_history = False
        self.alpha = 0.05
        self.fcst_df = None
        self.freq = None
    def fit(
        self,
        start_params: Optional[np.ndarray] = None,
        transformed: bool = True,
        includes_fixed: bool = False,
        cov_type: Optional[str] = None,
        cov_kwds: Optional[Dict] = None,
        method: str = "lbfgs",
        maxiter: int = 50,
        full_output: bool = True,
        disp: bool = False,
        callback: Optional[Callable] = None,
        return_params: bool = False,
        optim_score: Optional[str] = None,
        optim_complex_step: bool = True,
        optim_hessian: Optional[str] = None,
        low_memory: bool = False,
    ) -> None:
        """Fit SARIMA model by maximum likelihood via Kalman filter.
        See reference https://www.statsmodels.org/stable/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.fit.html#statsmodels.tsa.statespace.sarimax.SARIMAX.fit for more details.
        Args:
            start_params: Optional; An array_like object for the initial guess of the solution for the loglikelihood maximization.
            transformed: Optional; A boolean to specify whether or not start_params is already transformed. Default is True.
            includes_fixed: Optional; A boolean to specify whether or not start_params includes the fixed parameters in addition to the free parameters. Default is False.
            cov_type: Optional; A string for the method for calculating the covariance matrix of parameter estimates. Can be 'opg' (outer product of gradient estimator), 'oim' (observed information matrix estimato),
                'approx' (observed information matrix estimator), 'robust' (approximate (quasi-maximum likelihood) covariance matrix), or 'robust_approx'. Default is 'opg' when memory conservation is not used, otherwise default is ‘approx’.
            cov_kwds: Optional; A dictionary of arguments for covariance matrix computation. See reference for more details.
            method: Optional; A string for solver from scipy.optimize to be used. Can be 'newton', 'nm', 'bfgs', 'lbfgs', 'powell', 'cg', 'ncg' or 'basinhopping'. Default is 'lbfgs'.
            maxiter: Optional; An integer for the maximum number of iterations to perform. Default is 50.
            full_output: Optional; A boolean to specify whether or not to have all available output in the Results object’s mle_retvals attribute. Default is True.
            disp: Optional; A boolean to specify whether or not to print convergence messages. Default is False.
            callback: Optional; A callable object to be called after each iteration. Default is None.
            return_params: Optional; A boolean to specify whether or not to return only the array of maximizing parameters. Default is False.
            optim_score: Optional; A string for the method by which the score vector is calculated. Can be 'harvey', 'approx' or None. Default is None.
            optim_complex_step: Optional; A boolean to specify whether or not to use complex step differentiation when approximating the score. Default is True.
            optim_hessian: Optional; A string for the method by which the Hessian is numerically approximated. Can be 'opg', 'oim', 'approx'. Default is None.
            low_memory: Optional; A boolean to specify whether or not to reduce memory usage. If True, some features of the results object will not be available. Default is False.
        Returns:
            None.
        """
        logging.debug("Call fit() method")
        # Keep every fit option on the instance for later introspection.
        self.start_params = start_params
        self.transformed = transformed
        self.includes_fixed = includes_fixed
        self.cov_type = cov_type
        self.cov_kwds = cov_kwds
        self.method = method
        self.maxiter = maxiter
        self.full_output = full_output
        self.disp = disp
        self.callback = callback
        self.return_params = return_params
        self.optim_score = optim_score
        self.optim_complex_step = optim_complex_step
        self.optim_hessian = optim_hessian
        self.low_memory = low_memory
        logging.info("Created SARIMA model.")
        # Construction options all come from SARIMAParams; fit options from
        # the arguments captured above.
        sarima = SARIMAX(
            self.data.value,
            order=(self.params.p, self.params.d, self.params.q),
            exog=self.params.exog,
            seasonal_order=self.params.seasonal_order,
            trend=self.params.trend,
            measurement_error=self.params.measurement_error,
            time_varying_regression=self.params.time_varying_regression,
            mle_regression=self.params.mle_regression,
            simple_differencing=self.params.simple_differencing,
            enforce_stationarity=self.params.enforce_stationarity,
            enforce_invertibility=self.params.enforce_invertibility,
            hamilton_representation=self.params.hamilton_representation,
            concentrate_scale=self.params.concentrate_scale,
            trend_offset=self.params.trend_offset,
            use_exact_diffuse=self.params.use_exact_diffuse,
        )
        self.model = sarima.fit(
            start_params=self.start_params,
            transformed=self.transformed,
            includes_fixed=self.includes_fixed,
            cov_type=self.cov_type,
            cov_kwds=self.cov_kwds,
            method=self.method,
            maxiter=self.maxiter,
            full_output=self.full_output,
            disp=self.disp,
            callback=self.callback,
            return_params=self.return_params,
            optim_score=self.optim_score,
            optim_complex_step=self.optim_complex_step,
            optim_hessian=self.optim_hessian,
            low_memory=self.low_memory,
        )
        logging.info("Fitted SARIMA.")
    # pyre-fixme[14]: `predict` overrides method defined in `Model` inconsistently.
    def predict(
        self, steps: int, include_history: bool = False, alpha: float = 0.05, **kwargs
    ) -> pd.DataFrame:
        """Predict with fitted SARIMA model.
        Args:
            steps: An integer for forecast steps.
            include_history: Optional; A boolearn to specify whether to include historical data. Default is False.
            alpha: A float for confidence level. Default is 0.05.
        Returns:
            A :class:`pandas.DataFrame` of forecasts and confidence intervals.
        """
        logging.debug(
            "Call predict() with parameters. "
            "steps:{steps}, kwargs:{kwargs}".format(steps=steps, kwargs=kwargs)
        )
        self.include_history = include_history
        # Frequency may be supplied by the caller; otherwise infer it from the
        # time index of the training data.
        self.freq = kwargs.get("freq", self.data.infer_freq_robust())
        self.alpha = alpha
        fcst = self.model.get_forecast(steps)
        logging.info("Generated forecast data from SARIMA model.")
        logging.debug("Forecast data: {fcst}".format(fcst=fcst))
        # Only reject a forecast when *every* step is NaN; partial NaNs pass
        # through to the caller.
        if fcst.predicted_mean.isna().sum() == steps:
            msg = "SARIMA model fails to generate forecasts, i.e., all forecasts are NaNs."
            logging.error(msg)
            raise ValueError(msg)
        # pyre-fixme[16]: `SARIMAModel` has no attribute `y_fcst`.
        self.y_fcst = fcst.predicted_mean
        pred_interval = fcst.conf_int(alpha)
        # conf_int column order is not guaranteed here; pick lower/upper by
        # comparing the first row so fcst_lower <= fcst_upper always holds.
        if pred_interval.iloc[0, 0] < pred_interval.iloc[0, 1]:
            # pyre-fixme[16]: `SARIMAModel` has no attribute `y_fcst_lower`.
            self.y_fcst_lower = np.array(pred_interval.iloc[:, 0])
            # pyre-fixme[16]: `SARIMAModel` has no attribute `y_fcst_upper`.
            self.y_fcst_upper = np.array(pred_interval.iloc[:, 1])
        else:
            self.y_fcst_lower = np.array(pred_interval.iloc[:, 1])
            self.y_fcst_upper = np.array(pred_interval.iloc[:, 0])
        last_date = self.data.time.max()
        # date_range includes last_date itself, hence steps + 1 periods and the
        # filter below to drop it.
        dates = pd.date_range(start=last_date, periods=steps + 1, freq=self.freq)
        # pyre-fixme[16]: `SARIMAModel` has no attribute `dates`.
        self.dates = dates[dates != last_date]  # Return correct number of periods
        if include_history:
            # generate historical fit
            history_fcst = self.model.get_prediction(0)
            history_ci = history_fcst.conf_int()
            if ("lower" in history_ci.columns[0]) and (
                "upper" in history_ci.columns[1]
            ):
                ci_lower_name, ci_upper_name = (
                    history_ci.columns[0],
                    history_ci.columns[1],
                )
            else:
                msg = (
                    "Error when getting prediction interval from statsmodels SARIMA API"
                )
                logging.error(msg)
                raise ValueError(msg)
            self.fcst_df = pd.DataFrame(
                {
                    "time": np.concatenate(
                        (pd.to_datetime(self.data.time), self.dates)
                    ),
                    "fcst": np.concatenate((history_fcst.predicted_mean, self.y_fcst)),
                    "fcst_lower": np.concatenate(
                        (history_ci[ci_lower_name], self.y_fcst_lower)
                    ),
                    "fcst_upper": np.concatenate(
                        (history_ci[ci_upper_name], self.y_fcst_upper)
                    ),
                }
            )
            # the first k elements of the fcst and lower/upper are not legitmate
            # thus we need to assign np.nan to avoid confusion
            # k = max(p, d, q) + max(P, D, Q) * seasonal_order + 1
            k = (
                max(self.params.p, self.params.d, self.params.q)
                + max(self.params.seasonal_order[0:3]) * self.params.seasonal_order[3]
                + 1
            )
            # NOTE: .loc slicing is end-inclusive, so rows 0..k (k+1 rows) are masked.
            self.fcst_df.loc[0:k, ["fcst", "fcst_lower", "fcst_upper"]] = np.nan
        else:
            self.fcst_df = pd.DataFrame(
                {
                    "time": self.dates,
                    "fcst": self.y_fcst,
                    "fcst_lower": self.y_fcst_lower,
                    "fcst_upper": self.y_fcst_upper,
                }
            )
        logging.debug("Return forecast data: {fcst_df}".format(fcst_df=self.fcst_df))
        return self.fcst_df
    def plot(self):
        """Plot forecasted results from SARIMA model."""
        logging.info("Generating chart for forecast result from SARIMA model.")
        m.Model.plot(self.data, self.fcst_df, include_history=self.include_history)
    def __str__(self):
        # Short human-readable model name used by Kats tooling.
        return "SARIMA"
    @staticmethod
    def get_parameter_search_space() -> List[Dict[str, Union[List[Any], bool, str]]]:
        """Get default SARIMA parameter search space.
        Returns:
            A dictionary representing the default SARIMA parameter search space.
        """
        return get_default_sarima_parameter_search_space()
| 47.299451
| 287
| 0.644421
|
acfc1c0021b94112f9a3b3f2b5b7d76625347d58
| 171,317
|
py
|
Python
|
src/oci/artifacts/artifacts_client.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/artifacts/artifacts_client.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/artifacts/artifacts_client.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from oci._vendor import requests # noqa: F401
from oci._vendor import six
from oci import retry, circuit_breaker # noqa: F401
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel, get_signer_from_authentication_type, AUTHENTICATION_TYPE_FIELD_NAME
from .models import artifacts_type_mapping
missing = Sentinel("Missing")
class ArtifactsClient(object):
"""
API covering the [Registry](/iaas/Content/Registry/Concepts/registryoverview.htm) services.
Use this API to manage resources such as container images and repositories.
"""
    def __init__(self, config, **kwargs):
        """
        Creates a new service client

        :param dict config:
            Configuration keys and values as per `SDK and Tool Configuration <https://docs.cloud.oracle.com/Content/API/Concepts/sdkconfig.htm>`__.
            The :py:meth:`~oci.config.from_file` method can be used to load configuration from a file. Alternatively, a ``dict`` can be passed. You can validate_config
            the dict using :py:meth:`~oci.config.validate_config`

        :param str service_endpoint: (optional)
            The endpoint of the service to call using this client. For example ``https://iaas.us-ashburn-1.oraclecloud.com``. If this keyword argument is
            not provided then it will be derived using the region in the config parameter. You should only provide this keyword argument if you have an explicit
            need to specify a service endpoint.

        :param timeout: (optional)
            The connection and read timeouts for the client. The default values are connection timeout 10 seconds and read timeout 60 seconds. This keyword argument can be provided
            as a single float, in which case the value provided is used for both the read and connection timeouts, or as a tuple of two floats. If
            a tuple is provided then the first value is used as the connection timeout and the second value as the read timeout.
        :type timeout: float or tuple(float, float)

        :param signer: (optional)
            The signer to use when signing requests made by the service client. The default is to use a :py:class:`~oci.signer.Signer` based on the values
            provided in the config parameter.

            One use case for this parameter is for `Instance Principals authentication <https://docs.cloud.oracle.com/Content/Identity/Tasks/callingservicesfrominstances.htm>`__
            by passing an instance of :py:class:`~oci.auth.signers.InstancePrincipalsSecurityTokenSigner` as the value for this keyword argument
        :type signer: :py:class:`~oci.signer.AbstractBaseSigner`

        :param obj retry_strategy: (optional)
            A retry strategy to apply to all calls made by this service client (i.e. at the client level). There is no retry strategy applied by default.
            Retry strategies can also be applied at the operation level by passing a ``retry_strategy`` keyword argument as part of calling the operation.
            Any value provided at the operation level will override whatever is specified at the client level.
            This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
            is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        :param obj circuit_breaker_strategy: (optional)
            A circuit breaker strategy to apply to all calls made by this service client (i.e. at the client level).
            This client uses :py:data:`~oci.circuit_breaker.DEFAULT_CIRCUIT_BREAKER_STRATEGY` as default if no circuit breaker strategy is provided.
            The specifics of circuit breaker strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/circuit_breakers.html>`__.

        :param function circuit_breaker_callback: (optional)
            Callback function to receive any exceptions triggerred by the circuit breaker.

        :param allow_control_chars: (optional)
            allow_control_chars is a boolean to indicate whether or not this client should allow control characters in the response object. By default, the client will not
            allow control characters to be in the response object.
        """
        validate_config(config, signer=kwargs.get('signer'))
        # Signer precedence: explicit `signer` kwarg > authentication-type
        # declared inside the config > default API-key Signer built from config.
        if 'signer' in kwargs:
            signer = kwargs['signer']
        elif AUTHENTICATION_TYPE_FIELD_NAME in config:
            signer = get_signer_from_authentication_type(config)
        else:
            signer = Signer(
                tenancy=config["tenancy"],
                user=config["user"],
                fingerprint=config["fingerprint"],
                private_key_file_location=config.get("key_file"),
                pass_phrase=get_config_value_or_default(config, "pass_phrase"),
                private_key_content=config.get("key_content")
            )
        base_client_init_kwargs = {
            'regional_client': True,
            'service_endpoint': kwargs.get('service_endpoint'),
            'base_path': '/20160918',
            'service_endpoint_template': 'https://artifacts.{region}.oci.{secondLevelDomain}',
            'skip_deserialization': kwargs.get('skip_deserialization', False),
            'circuit_breaker_strategy': kwargs.get('circuit_breaker_strategy', circuit_breaker.GLOBAL_CIRCUIT_BREAKER_STRATEGY)
        }
        if 'timeout' in kwargs:
            base_client_init_kwargs['timeout'] = kwargs.get('timeout')
        # Fall back to the SDK default circuit breaker when neither the caller
        # nor the global setting provided one.
        if base_client_init_kwargs.get('circuit_breaker_strategy') is None:
            base_client_init_kwargs['circuit_breaker_strategy'] = circuit_breaker.DEFAULT_CIRCUIT_BREAKER_STRATEGY
        if 'allow_control_chars' in kwargs:
            base_client_init_kwargs['allow_control_chars'] = kwargs.get('allow_control_chars')
        self.base_client = BaseClient("artifacts", config, signer, artifacts_type_mapping, **base_client_init_kwargs)
        # Client-level retry strategy; individual operations may override it.
        self.retry_strategy = kwargs.get('retry_strategy')
        self.circuit_breaker_callback = kwargs.get('circuit_breaker_callback')
    def change_container_repository_compartment(self, repository_id, change_container_repository_compartment_details, **kwargs):
        """
        Moves a container repository into a different compartment within the same tenancy. For information about moving
        resources between compartments, see
        `Moving Resources to a Different Compartment`__.

        __ https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes


        :param str repository_id: (required)
            The `OCID`__ of the container repository.

            Example: `ocid1.containerrepo.oc1..exampleuniqueID`

            __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm

        :param oci.artifacts.models.ChangeContainerRepositoryCompartmentDetails change_container_repository_compartment_details: (required)
            Change container repository compartment details.

        :param str if_match: (optional)
            For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
            parameter to the value of the etag from a previous GET or POST response for that resource. The resource
            will be updated or deleted only if the etag you provide matches the resource's current etag value.

        :param str opc_request_id: (optional)
            Unique identifier for the request.
            If you need to contact Oracle about a particular request, please provide the request ID.

        :param str opc_retry_token: (optional)
            A token that uniquely identifies a request so it can be retried in case of a timeout or
            server error without risk of executing that same action again. Retry tokens expire after 24
            hours, but can be invalidated before then due to conflicting operations (for example, if a resource
            has been deleted and purged from the system, then a retry of the original creation request
            may be rejected).

        :param obj retry_strategy: (optional)
            A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

            This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
            The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

            To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

        :param bool allow_control_chars: (optional)
            allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
            By default, the response will not allow control characters in strings

        :return: A :class:`~oci.response.Response` object with data of type None
        :rtype: :class:`~oci.response.Response`

        :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/change_container_repository_compartment.py.html>`__ to see an example of how to use change_container_repository_compartment API.
        """
        resource_path = "/container/repositories/{repositoryId}/actions/changeCompartment"
        method = "POST"

        # Don't accept unknown kwargs
        expected_kwargs = [
            "allow_control_chars",
            "retry_strategy",
            "if_match",
            "opc_request_id",
            "opc_retry_token"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "change_container_repository_compartment got unknown kwargs: {!r}".format(extra_kwargs))

        path_params = {
            "repositoryId": repository_id
        }

        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

        # Path parameters may never be empty or whitespace-only: that would
        # produce a malformed resource URL.
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "if-match": kwargs.get("if_match", missing),
            "opc-request-id": kwargs.get("opc_request_id", missing),
            "opc-retry-token": kwargs.get("opc_retry_token", missing)
        }
        # Drop headers the caller did not supply (left at the `missing` sentinel).
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

        # Operation-level retry strategy wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )

        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                # Generate an idempotency token so retried POSTs are safe.
                self.base_client.add_opc_retry_token_if_needed(header_params)
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=change_container_repository_compartment_details)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=change_container_repository_compartment_details)
def change_repository_compartment(self, repository_id, change_repository_compartment_details, **kwargs):
"""
Moves a repository into a different compartment within the same tenancy. For information about moving
resources between compartments, see
`Moving Resources to a Different Compartment`__.
__ https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes
:param str repository_id: (required)
The `OCID`__ of the repository.
Example: `ocid1.artifactrepository.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param oci.artifacts.models.ChangeRepositoryCompartmentDetails change_repository_compartment_details: (required)
Moves a repository into a different compartment.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations (for example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
may be rejected).
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/change_repository_compartment.py.html>`__ to see an example of how to use change_repository_compartment API.
"""
resource_path = "/repositories/{repositoryId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"if_match",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_repository_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"repositoryId": repository_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_repository_compartment_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_repository_compartment_details)
def create_container_image_signature(self, create_container_image_signature_details, **kwargs):
"""
Upload a signature to an image.
:param oci.artifacts.models.CreateContainerImageSignatureDetails create_container_image_signature_details: (required)
Upload container image signature details
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations (for example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
may be rejected).
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.artifacts.models.ContainerImageSignature`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/create_container_image_signature.py.html>`__ to see an example of how to use create_container_image_signature API.
"""
resource_path = "/container/imageSignatures"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"opc_request_id",
"opc_retry_token",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_container_image_signature got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_container_image_signature_details,
response_type="ContainerImageSignature")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_container_image_signature_details,
response_type="ContainerImageSignature")
def create_container_repository(self, create_container_repository_details, **kwargs):
"""
Create a new empty container repository. Avoid entering confidential information.
:param oci.artifacts.models.CreateContainerRepositoryDetails create_container_repository_details: (required)
Create container repository details.
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations (for example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
may be rejected).
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.artifacts.models.ContainerRepository`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/create_container_repository.py.html>`__ to see an example of how to use create_container_repository API.
"""
resource_path = "/container/repositories"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_container_repository got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_container_repository_details,
response_type="ContainerRepository")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_container_repository_details,
response_type="ContainerRepository")
def create_repository(self, create_repository_details, **kwargs):
"""
Creates a new repository for storing artifacts.
:param oci.artifacts.models.CreateRepositoryDetails create_repository_details: (required)
Creates a new repository for storing artifacts.
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations (for example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
may be rejected).
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.artifacts.models.Repository`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/create_repository.py.html>`__ to see an example of how to use create_repository API.
"""
resource_path = "/repositories"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_repository got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_repository_details,
response_type="Repository")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_repository_details,
response_type="Repository")
def delete_container_image(self, image_id, **kwargs):
"""
Delete a container image.
:param str image_id: (required)
The `OCID`__ of the container image.
Example: `ocid1.containerimage.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/delete_container_image.py.html>`__ to see an example of how to use delete_container_image API.
"""
resource_path = "/container/images/{imageId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_container_image got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"imageId": image_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_container_image_signature(self, image_signature_id, **kwargs):
"""
Delete a container image signature.
:param str image_signature_id: (required)
The `OCID`__ of the container image signature.
Example: `ocid1.containersignature.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/delete_container_image_signature.py.html>`__ to see an example of how to use delete_container_image_signature API.
"""
resource_path = "/container/imageSignatures/{imageSignatureId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"opc_request_id",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_container_image_signature got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"imageSignatureId": image_signature_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_container_repository(self, repository_id, **kwargs):
"""
Delete container repository.
:param str repository_id: (required)
The `OCID`__ of the container repository.
Example: `ocid1.containerrepo.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/delete_container_repository.py.html>`__ to see an example of how to use delete_container_repository API.
"""
resource_path = "/container/repositories/{repositoryId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_container_repository got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"repositoryId": repository_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_generic_artifact(self, artifact_id, **kwargs):
"""
Deletes an artifact with a specified `OCID`__.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param str artifact_id: (required)
The `OCID`__ of the artifact.
Example: `ocid1.genericartifact.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/delete_generic_artifact.py.html>`__ to see an example of how to use delete_generic_artifact API.
"""
resource_path = "/generic/artifacts/{artifactId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_generic_artifact got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"artifactId": artifact_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_generic_artifact_by_path(self, repository_id, artifact_path, version, **kwargs):
"""
Deletes an artifact with a specified `artifactPath` and `version`.
:param str repository_id: (required)
The `OCID`__ of the repository.
Example: `ocid1.artifactrepository.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param str artifact_path: (required)
A user-defined path to describe the location of an artifact. You can use slashes to organize the repository, but slashes do not create a directory structure. An artifact path does not include an artifact version.
Example: `project01/my-web-app/artifact-abc`
:param str version: (required)
A user-defined string to describe the artifact version.
Example: `1.1.2` or `1.2-beta-2`
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/delete_generic_artifact_by_path.py.html>`__ to see an example of how to use delete_generic_artifact_by_path API.
"""
resource_path = "/generic/repositories/{repositoryId}/artifactPaths/{artifactPath}/versions/{version}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_generic_artifact_by_path got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"repositoryId": repository_id,
"artifactPath": artifact_path,
"version": version
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_repository(self, repository_id, **kwargs):
"""
Deletes the specified repository. This operation fails unless all associated artifacts are in a DELETED state. You must delete all associated artifacts before deleting a repository.
:param str repository_id: (required)
The `OCID`__ of the repository.
Example: `ocid1.artifactrepository.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/delete_repository.py.html>`__ to see an example of how to use delete_repository API.
"""
resource_path = "/repositories/{repositoryId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_repository got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"repositoryId": repository_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def get_container_configuration(self, compartment_id, **kwargs):
"""
Get container configuration.
:param str compartment_id: (required)
The `OCID`__ of the compartment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.artifacts.models.ContainerConfiguration`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/get_container_configuration.py.html>`__ to see an example of how to use get_container_configuration API.
"""
resource_path = "/container/configuration"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_container_configuration got unknown kwargs: {!r}".format(extra_kwargs))
query_params = {
"compartmentId": compartment_id
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="ContainerConfiguration")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="ContainerConfiguration")
def get_container_image(self, image_id, **kwargs):
"""
Get container image metadata.
:param str image_id: (required)
The `OCID`__ of the container image.
Example: `ocid1.containerimage.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.artifacts.models.ContainerImage`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/get_container_image.py.html>`__ to see an example of how to use get_container_image API.
"""
resource_path = "/container/images/{imageId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_container_image got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"imageId": image_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ContainerImage")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ContainerImage")
def get_container_image_signature(self, image_signature_id, **kwargs):
"""
Get container image signature metadata.
:param str image_signature_id: (required)
The `OCID`__ of the container image signature.
Example: `ocid1.containersignature.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.artifacts.models.ContainerImageSignature`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/get_container_image_signature.py.html>`__ to see an example of how to use get_container_image_signature API.
"""
resource_path = "/container/imageSignatures/{imageSignatureId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_container_image_signature got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"imageSignatureId": image_signature_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ContainerImageSignature")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ContainerImageSignature")
def get_container_repository(self, repository_id, **kwargs):
"""
Get container repository.
:param str repository_id: (required)
The `OCID`__ of the container repository.
Example: `ocid1.containerrepo.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.artifacts.models.ContainerRepository`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/get_container_repository.py.html>`__ to see an example of how to use get_container_repository API.
"""
resource_path = "/container/repositories/{repositoryId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_container_repository got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"repositoryId": repository_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ContainerRepository")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ContainerRepository")
def get_generic_artifact(self, artifact_id, **kwargs):
"""
Gets information about an artifact with a specified `OCID`__.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param str artifact_id: (required)
The `OCID`__ of the artifact.
Example: `ocid1.genericartifact.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.artifacts.models.GenericArtifact`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/get_generic_artifact.py.html>`__ to see an example of how to use get_generic_artifact API.
"""
resource_path = "/generic/artifacts/{artifactId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_generic_artifact got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"artifactId": artifact_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="GenericArtifact")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="GenericArtifact")
def get_generic_artifact_by_path(self, repository_id, artifact_path, version, **kwargs):
"""
Gets information about an artifact with a specified `artifactPath` and `version`.
:param str repository_id: (required)
The `OCID`__ of the repository.
Example: `ocid1.artifactrepository.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param str artifact_path: (required)
A user-defined path to describe the location of an artifact. You can use slashes to organize the repository, but slashes do not create a directory structure. An artifact path does not include an artifact version.
Example: `project01/my-web-app/artifact-abc`
:param str version: (required)
A user-defined string to describe the artifact version.
Example: `1.1.2` or `1.2-beta-2`
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.artifacts.models.GenericArtifact`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/get_generic_artifact_by_path.py.html>`__ to see an example of how to use get_generic_artifact_by_path API.
"""
resource_path = "/generic/repositories/{repositoryId}/artifactPaths/{artifactPath}/versions/{version}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_generic_artifact_by_path got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"repositoryId": repository_id,
"artifactPath": artifact_path,
"version": version
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="GenericArtifact")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="GenericArtifact")
def get_repository(self, repository_id, **kwargs):
"""
Gets the specified repository's information.
:param str repository_id: (required)
The `OCID`__ of the repository.
Example: `ocid1.artifactrepository.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.artifacts.models.Repository`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/get_repository.py.html>`__ to see an example of how to use get_repository API.
"""
resource_path = "/repositories/{repositoryId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_repository got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"repositoryId": repository_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Repository")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Repository")
def list_container_image_signatures(self, compartment_id, **kwargs):
"""
List container image signatures in an image.
:param str compartment_id: (required)
The `OCID`__ of the compartment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param bool compartment_id_in_subtree: (optional)
When set to true, the hierarchy of compartments is traversed
and all compartments and subcompartments in the tenancy are
inspected depending on the the setting of `accessLevel`.
Default is false. Can only be set to true when calling the API
on the tenancy (root compartment).
:param str image_id: (optional)
A filter to return a container image summary only for the specified container image OCID.
:param str repository_id: (optional)
A filter to return container images only for the specified container repository OCID.
:param str repository_name: (optional)
A filter to return container images or container image signatures that match the repository name.
Example: `foo` or `foo*`
:param str image_digest: (optional)
The digest of the container image.
Example: `sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa`
:param str display_name: (optional)
A filter to return only resources that match the given display name exactly.
:param str kms_key_id: (optional)
The `OCID`__ of the kmsKeyVersionId used to sign the container image.
Example: `ocid1.keyversion.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param str kms_key_version_id: (optional)
The `OCID`__ of the kmsKeyVersionId used to sign the container image.
Example: `ocid1.keyversion.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param str signing_algorithm: (optional)
The algorithm to be used for signing. These are the only supported signing algorithms for container images.
Allowed values are: "SHA_224_RSA_PKCS_PSS", "SHA_256_RSA_PKCS_PSS", "SHA_384_RSA_PKCS_PSS", "SHA_512_RSA_PKCS_PSS"
:param int limit: (optional)
For list pagination. The maximum number of results per page, or items to return in a paginated
\"List\" call. For important details about how pagination works, see
`List Pagination`__.
Example: `50`
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str page: (optional)
For list pagination. The value of the `opc-next-page` response header from the previous \"List\"
call. For important details about how pagination works, see
`List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param str sort_by: (optional)
The field to sort by. You can provide one sort order (`sortOrder`). Default order for
TIMECREATED is descending. Default order for DISPLAYNAME is ascending. The DISPLAYNAME
sort order is case sensitive.
**Note:** In general, some \"List\" operations (for example, `ListInstances`) let you
optionally filter by availability domain if the scope of the resource type is within a
single availability domain. If you call one of these \"List\" operations without specifying
an availability domain, the resources are grouped by availability domain, then sorted.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str sort_order: (optional)
The sort order to use, either ascending (`ASC`) or descending (`DESC`). The DISPLAYNAME sort order
is case sensitive.
Allowed values are: "ASC", "DESC"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.artifacts.models.ContainerImageSignatureCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/list_container_image_signatures.py.html>`__ to see an example of how to use list_container_image_signatures API.
"""
resource_path = "/container/imageSignatures"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"compartment_id_in_subtree",
"image_id",
"repository_id",
"repository_name",
"image_digest",
"display_name",
"kms_key_id",
"kms_key_version_id",
"signing_algorithm",
"limit",
"page",
"opc_request_id",
"sort_by",
"sort_order"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_container_image_signatures got unknown kwargs: {!r}".format(extra_kwargs))
if 'signing_algorithm' in kwargs:
signing_algorithm_allowed_values = ["SHA_224_RSA_PKCS_PSS", "SHA_256_RSA_PKCS_PSS", "SHA_384_RSA_PKCS_PSS", "SHA_512_RSA_PKCS_PSS"]
if kwargs['signing_algorithm'] not in signing_algorithm_allowed_values:
raise ValueError(
"Invalid value for `signing_algorithm`, must be one of {0}".format(signing_algorithm_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"compartmentIdInSubtree": kwargs.get("compartment_id_in_subtree", missing),
"compartmentId": compartment_id,
"imageId": kwargs.get("image_id", missing),
"repositoryId": kwargs.get("repository_id", missing),
"repositoryName": kwargs.get("repository_name", missing),
"imageDigest": kwargs.get("image_digest", missing),
"displayName": kwargs.get("display_name", missing),
"kmsKeyId": kwargs.get("kms_key_id", missing),
"kmsKeyVersionId": kwargs.get("kms_key_version_id", missing),
"signingAlgorithm": kwargs.get("signing_algorithm", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="ContainerImageSignatureCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="ContainerImageSignatureCollection")
def list_container_images(self, compartment_id, **kwargs):
"""
List container images in a compartment.
:param str compartment_id: (required)
The `OCID`__ of the compartment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param bool compartment_id_in_subtree: (optional)
When set to true, the hierarchy of compartments is traversed
and all compartments and subcompartments in the tenancy are
inspected depending on the the setting of `accessLevel`.
Default is false. Can only be set to true when calling the API
on the tenancy (root compartment).
:param str display_name: (optional)
A filter to return only resources that match the given display name exactly.
:param str image_id: (optional)
A filter to return a container image summary only for the specified container image OCID.
:param bool is_versioned: (optional)
A filter to return container images based on whether there are any associated versions.
:param str repository_id: (optional)
A filter to return container images only for the specified container repository OCID.
:param str repository_name: (optional)
A filter to return container images or container image signatures that match the repository name.
Example: `foo` or `foo*`
:param str version: (optional)
A filter to return container images that match the version.
Example: `foo` or `foo*`
:param str lifecycle_state: (optional)
A filter to return only resources that match the given lifecycle state name exactly.
:param int limit: (optional)
For list pagination. The maximum number of results per page, or items to return in a paginated
\"List\" call. For important details about how pagination works, see
`List Pagination`__.
Example: `50`
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str page: (optional)
For list pagination. The value of the `opc-next-page` response header from the previous \"List\"
call. For important details about how pagination works, see
`List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param str sort_by: (optional)
The field to sort by. You can provide one sort order (`sortOrder`). Default order for
TIMECREATED is descending. Default order for DISPLAYNAME is ascending. The DISPLAYNAME
sort order is case sensitive.
**Note:** In general, some \"List\" operations (for example, `ListInstances`) let you
optionally filter by availability domain if the scope of the resource type is within a
single availability domain. If you call one of these \"List\" operations without specifying
an availability domain, the resources are grouped by availability domain, then sorted.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str sort_order: (optional)
The sort order to use, either ascending (`ASC`) or descending (`DESC`). The DISPLAYNAME sort order
is case sensitive.
Allowed values are: "ASC", "DESC"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.artifacts.models.ContainerImageCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/list_container_images.py.html>`__ to see an example of how to use list_container_images API.
"""
resource_path = "/container/images"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"compartment_id_in_subtree",
"display_name",
"image_id",
"is_versioned",
"repository_id",
"repository_name",
"version",
"lifecycle_state",
"limit",
"page",
"opc_request_id",
"sort_by",
"sort_order"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_container_images got unknown kwargs: {!r}".format(extra_kwargs))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"compartmentIdInSubtree": kwargs.get("compartment_id_in_subtree", missing),
"compartmentId": compartment_id,
"displayName": kwargs.get("display_name", missing),
"imageId": kwargs.get("image_id", missing),
"isVersioned": kwargs.get("is_versioned", missing),
"repositoryId": kwargs.get("repository_id", missing),
"repositoryName": kwargs.get("repository_name", missing),
"version": kwargs.get("version", missing),
"lifecycleState": kwargs.get("lifecycle_state", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="ContainerImageCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="ContainerImageCollection")
def list_container_repositories(self, compartment_id, **kwargs):
"""
List container repositories in a compartment.
:param str compartment_id: (required)
The `OCID`__ of the compartment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param bool compartment_id_in_subtree: (optional)
When set to true, the hierarchy of compartments is traversed
and all compartments and subcompartments in the tenancy are
inspected depending on the the setting of `accessLevel`.
Default is false. Can only be set to true when calling the API
on the tenancy (root compartment).
:param str repository_id: (optional)
A filter to return container images only for the specified container repository OCID.
:param str display_name: (optional)
A filter to return only resources that match the given display name exactly.
:param bool is_public: (optional)
A filter to return resources that match the isPublic value.
:param str lifecycle_state: (optional)
A filter to return only resources that match the given lifecycle state name exactly.
:param int limit: (optional)
For list pagination. The maximum number of results per page, or items to return in a paginated
\"List\" call. For important details about how pagination works, see
`List Pagination`__.
Example: `50`
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str page: (optional)
For list pagination. The value of the `opc-next-page` response header from the previous \"List\"
call. For important details about how pagination works, see
`List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param str sort_by: (optional)
The field to sort by. You can provide one sort order (`sortOrder`). Default order for
TIMECREATED is descending. Default order for DISPLAYNAME is ascending. The DISPLAYNAME
sort order is case sensitive.
**Note:** In general, some \"List\" operations (for example, `ListInstances`) let you
optionally filter by availability domain if the scope of the resource type is within a
single availability domain. If you call one of these \"List\" operations without specifying
an availability domain, the resources are grouped by availability domain, then sorted.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str sort_order: (optional)
The sort order to use, either ascending (`ASC`) or descending (`DESC`). The DISPLAYNAME sort order
is case sensitive.
Allowed values are: "ASC", "DESC"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.artifacts.models.ContainerRepositoryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/list_container_repositories.py.html>`__ to see an example of how to use list_container_repositories API.
"""
resource_path = "/container/repositories"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"compartment_id_in_subtree",
"repository_id",
"display_name",
"is_public",
"lifecycle_state",
"limit",
"page",
"opc_request_id",
"sort_by",
"sort_order"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_container_repositories got unknown kwargs: {!r}".format(extra_kwargs))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"compartmentIdInSubtree": kwargs.get("compartment_id_in_subtree", missing),
"compartmentId": compartment_id,
"repositoryId": kwargs.get("repository_id", missing),
"displayName": kwargs.get("display_name", missing),
"isPublic": kwargs.get("is_public", missing),
"lifecycleState": kwargs.get("lifecycle_state", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="ContainerRepositoryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="ContainerRepositoryCollection")
def list_generic_artifacts(self, compartment_id, repository_id, **kwargs):
"""
Lists artifacts in the specified repository.
:param str compartment_id: (required)
The `OCID`__ of the compartment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param str repository_id: (required)
A filter to return the artifacts only for the specified repository OCID.
:param str id: (optional)
A filter to return the resources for the specified OCID.
:param str display_name: (optional)
A filter to return only resources that match the given display name exactly.
:param str artifact_path: (optional)
Filter results by a prefix for the `artifactPath` and and return artifacts that begin with the specified prefix in their path.
:param str version: (optional)
Filter results by a prefix for `version` and return artifacts that that begin with the specified prefix in their version.
:param str sha256: (optional)
Filter results by a specified SHA256 digest for the artifact.
:param str lifecycle_state: (optional)
A filter to return only resources that match the given lifecycle state name exactly.
:param int limit: (optional)
For list pagination. The maximum number of results per page, or items to return in a paginated
\"List\" call. For important details about how pagination works, see
`List Pagination`__.
Example: `50`
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str page: (optional)
For list pagination. The value of the `opc-next-page` response header from the previous \"List\"
call. For important details about how pagination works, see
`List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param str sort_by: (optional)
The field to sort by. You can provide one sort order (`sortOrder`). Default order for
TIMECREATED is descending. Default order for DISPLAYNAME is ascending. The DISPLAYNAME
sort order is case sensitive.
**Note:** In general, some \"List\" operations (for example, `ListInstances`) let you
optionally filter by availability domain if the scope of the resource type is within a
single availability domain. If you call one of these \"List\" operations without specifying
an availability domain, the resources are grouped by availability domain, then sorted.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str sort_order: (optional)
The sort order to use, either ascending (`ASC`) or descending (`DESC`). The DISPLAYNAME sort order
is case sensitive.
Allowed values are: "ASC", "DESC"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.artifacts.models.GenericArtifactCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/list_generic_artifacts.py.html>`__ to see an example of how to use list_generic_artifacts API.
"""
resource_path = "/generic/artifacts"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"id",
"display_name",
"artifact_path",
"version",
"sha256",
"lifecycle_state",
"limit",
"page",
"opc_request_id",
"sort_by",
"sort_order"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_generic_artifacts got unknown kwargs: {!r}".format(extra_kwargs))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"compartmentId": compartment_id,
"repositoryId": repository_id,
"id": kwargs.get("id", missing),
"displayName": kwargs.get("display_name", missing),
"artifactPath": kwargs.get("artifact_path", missing),
"version": kwargs.get("version", missing),
"sha256": kwargs.get("sha256", missing),
"lifecycleState": kwargs.get("lifecycle_state", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="GenericArtifactCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="GenericArtifactCollection")
def list_repositories(self, compartment_id, **kwargs):
    """
    Lists repositories in the specified compartment.

    :param str compartment_id: (required)
        The OCID of the compartment.
    :param str id: (optional)
        Return only the resource with the given OCID.
    :param str display_name: (optional)
        Return only resources whose display name matches exactly.
    :param bool is_immutable: (optional)
        Return only resources matching the isImmutable value.
    :param str lifecycle_state: (optional)
        Return only resources in the given lifecycle state.
    :param int limit: (optional)
        Maximum number of results per page for list pagination.
    :param str page: (optional)
        The `opc-next-page` value from a previous "List" call.
    :param str opc_request_id: (optional)
        Unique identifier for the request.
    :param str sort_by: (optional)
        Field to sort by. Allowed values: "TIMECREATED", "DISPLAYNAME".
    :param str sort_order: (optional)
        Sort order to use, either "ASC" or "DESC".
    :param obj retry_strategy: (optional)
        Per-operation retry strategy; overrides the client-level strategy.
    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.artifacts.models.RepositoryCollection`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/repositories"
    method = "GET"

    # Reject any kwargs this operation does not recognize.
    expected_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "id",
        "display_name",
        "is_immutable",
        "lifecycle_state",
        "limit",
        "page",
        "opc_request_id",
        "sort_by",
        "sort_order"
    }
    unknown = [name for name in kwargs if name not in expected_kwargs]
    if unknown:
        raise ValueError(
            "list_repositories got unknown kwargs: {!r}".format(unknown))

    # Validate enum-style parameters before issuing the request.
    for enum_name, allowed in (("sort_by", ["TIMECREATED", "DISPLAYNAME"]),
                               ("sort_order", ["ASC", "DESC"])):
        if enum_name in kwargs and kwargs[enum_name] not in allowed:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, allowed)
            )

    # Build the query string, dropping parameters the caller did not supply.
    query_params = {
        "compartmentId": compartment_id,
        "id": kwargs.get("id", missing),
        "displayName": kwargs.get("display_name", missing),
        "isImmutable": kwargs.get("is_immutable", missing),
        "lifecycleState": kwargs.get("lifecycle_state", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing)
    }
    query_params = {name: value for name, value in query_params.items()
                    if value is not missing and value is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for name, value in header_params.items()
                     if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Shared arguments for both the plain and the retrying call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="RepositoryCollection")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # header_params is shared with call_kwargs, so this mutation is
        # reflected in the eventual call.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def remove_container_version(self, image_id, remove_container_version_details, **kwargs):
    """
    Remove version from container image.

    :param str image_id: (required)
        The OCID of the container image.
    :param oci.artifacts.models.RemoveContainerVersionDetails remove_container_version_details: (required)
        Remove version details.
    :param str if_match: (optional)
        Etag used for optimistic concurrency control.
    :param str opc_request_id: (optional)
        Unique identifier for the request.
    :param str opc_retry_token: (optional)
        Token that makes a retried request safe to repeat.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy; overrides the client-level strategy.
    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.artifacts.models.ContainerImage`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/container/images/{imageId}/actions/removeVersion"
    method = "POST"

    # Reject any kwargs this operation does not recognize.
    expected_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "if_match",
        "opc_request_id",
        "opc_retry_token"
    }
    unknown = [name for name in kwargs if name not in expected_kwargs]
    if unknown:
        raise ValueError(
            "remove_container_version got unknown kwargs: {!r}".format(unknown))

    # Path parameters may never be missing, None, or blank.
    path_params = {"imageId": image_id}
    path_params = {name: value for name, value in path_params.items() if value is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {name: value for name, value in header_params.items()
                     if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Shared arguments for both the plain and the retrying call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=remove_container_version_details,
        response_type="ContainerImage")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # header_params is shared with call_kwargs, so these mutations
        # are reflected in the eventual call.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def restore_container_image(self, image_id, restore_container_image_details, **kwargs):
    """
    Restore a container image.

    :param str image_id: (required)
        The OCID of the container image.
    :param oci.artifacts.models.RestoreContainerImageDetails restore_container_image_details: (required)
        Restore container image details.
    :param str if_match: (optional)
        Etag used for optimistic concurrency control.
    :param str opc_request_id: (optional)
        Unique identifier for the request.
    :param str opc_retry_token: (optional)
        Token that makes a retried request safe to repeat.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy; overrides the client-level strategy.
    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.artifacts.models.ContainerImage`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/container/images/{imageId}/actions/restore"
    method = "POST"

    # Reject any kwargs this operation does not recognize.
    expected_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "if_match",
        "opc_request_id",
        "opc_retry_token"
    }
    unknown = [name for name in kwargs if name not in expected_kwargs]
    if unknown:
        raise ValueError(
            "restore_container_image got unknown kwargs: {!r}".format(unknown))

    # Path parameters may never be missing, None, or blank.
    path_params = {"imageId": image_id}
    path_params = {name: value for name, value in path_params.items() if value is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {name: value for name, value in header_params.items()
                     if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Shared arguments for both the plain and the retrying call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=restore_container_image_details,
        response_type="ContainerImage")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # header_params is shared with call_kwargs, so these mutations
        # are reflected in the eventual call.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_container_configuration(self, compartment_id, update_container_configuration_details, **kwargs):
    """
    Update container configuration.

    :param str compartment_id: (required)
        The OCID of the compartment.
    :param oci.artifacts.models.UpdateContainerConfigurationDetails update_container_configuration_details: (required)
        Update container configuration details.
    :param str if_match: (optional)
        Etag used for optimistic concurrency control.
    :param str opc_request_id: (optional)
        Unique identifier for the request.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy; overrides the client-level strategy.
    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.artifacts.models.ContainerConfiguration`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/container/configuration"
    method = "PUT"

    # Reject any kwargs this operation does not recognize.
    expected_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "if_match",
        "opc_request_id"
    }
    unknown = [name for name in kwargs if name not in expected_kwargs]
    if unknown:
        raise ValueError(
            "update_container_configuration got unknown kwargs: {!r}".format(unknown))

    # Build the query string, dropping unset parameters.
    query_params = {"compartmentId": compartment_id}
    query_params = {name: value for name, value in query_params.items()
                    if value is not missing and value is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for name, value in header_params.items()
                     if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Shared arguments for both the plain and the retrying call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        body=update_container_configuration_details,
        response_type="ContainerConfiguration")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # header_params is shared with call_kwargs, so this mutation is
        # reflected in the eventual call.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_container_repository(self, repository_id, update_container_repository_details, **kwargs):
    """
    Modify the properties of a container repository. Avoid entering confidential information.

    :param str repository_id: (required)
        The OCID of the container repository.
    :param oci.artifacts.models.UpdateContainerRepositoryDetails update_container_repository_details: (required)
        Update container repository details.
    :param str if_match: (optional)
        Etag used for optimistic concurrency control.
    :param str opc_request_id: (optional)
        Unique identifier for the request.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy; overrides the client-level strategy.
    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.artifacts.models.ContainerRepository`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/container/repositories/{repositoryId}"
    method = "PUT"

    # Reject any kwargs this operation does not recognize.
    expected_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "if_match",
        "opc_request_id"
    }
    unknown = [name for name in kwargs if name not in expected_kwargs]
    if unknown:
        raise ValueError(
            "update_container_repository got unknown kwargs: {!r}".format(unknown))

    # Path parameters may never be missing, None, or blank.
    path_params = {"repositoryId": repository_id}
    path_params = {name: value for name, value in path_params.items() if value is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for name, value in header_params.items()
                     if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Shared arguments for both the plain and the retrying call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_container_repository_details,
        response_type="ContainerRepository")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # header_params is shared with call_kwargs, so this mutation is
        # reflected in the eventual call.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_generic_artifact(self, artifact_id, update_generic_artifact_details, **kwargs):
    """
    Updates the artifact with the specified OCID. You can only update the tags of an artifact.

    :param str artifact_id: (required)
        The OCID of the artifact.
    :param oci.artifacts.models.UpdateGenericArtifactDetails update_generic_artifact_details: (required)
        Updates the artifact with the specified OCID. You can only update the tags of an artifact.
    :param str if_match: (optional)
        Etag used for optimistic concurrency control.
    :param str opc_request_id: (optional)
        Unique identifier for the request.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy; overrides the client-level strategy.
    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.artifacts.models.GenericArtifact`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/generic/artifacts/{artifactId}"
    method = "PUT"

    # Reject any kwargs this operation does not recognize.
    expected_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "if_match",
        "opc_request_id"
    }
    unknown = [name for name in kwargs if name not in expected_kwargs]
    if unknown:
        raise ValueError(
            "update_generic_artifact got unknown kwargs: {!r}".format(unknown))

    # Path parameters may never be missing, None, or blank.
    path_params = {"artifactId": artifact_id}
    path_params = {name: value for name, value in path_params.items() if value is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for name, value in header_params.items()
                     if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Shared arguments for both the plain and the retrying call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_generic_artifact_details,
        response_type="GenericArtifact")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # header_params is shared with call_kwargs, so this mutation is
        # reflected in the eventual call.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_generic_artifact_by_path(self, repository_id, artifact_path, version, update_generic_artifact_by_path_details, **kwargs):
    """
    Updates an artifact with a specified `artifactPath` and `version`. You can only update the tags of an artifact.

    :param str repository_id: (required)
        The OCID of the repository.
    :param str artifact_path: (required)
        A user-defined path describing the artifact's location; does not include a version.
    :param str version: (required)
        A user-defined string describing the artifact version.
    :param oci.artifacts.models.UpdateGenericArtifactByPathDetails update_generic_artifact_by_path_details: (required)
        Updates an artifact with a specified `artifactPath` and `version`. You can only update the tags of an artifact.
    :param str if_match: (optional)
        Etag used for optimistic concurrency control.
    :param str opc_request_id: (optional)
        Unique identifier for the request.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy; overrides the client-level strategy.
    :param bool allow_control_chars: (optional)
        Whether control characters are allowed in response strings.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.artifacts.models.GenericArtifact`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/generic/repositories/{repositoryId}/artifactPaths/{artifactPath}/versions/{version}"
    method = "PUT"

    # Reject any kwargs this operation does not recognize.
    expected_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "if_match",
        "opc_request_id"
    }
    unknown = [name for name in kwargs if name not in expected_kwargs]
    if unknown:
        raise ValueError(
            "update_generic_artifact_by_path got unknown kwargs: {!r}".format(unknown))

    # Path parameters may never be missing, None, or blank.
    path_params = {
        "repositoryId": repository_id,
        "artifactPath": artifact_path,
        "version": version
    }
    path_params = {name: value for name, value in path_params.items() if value is not missing}
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for name, value in header_params.items()
                     if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Shared arguments for both the plain and the retrying call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_generic_artifact_by_path_details,
        response_type="GenericArtifact")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # header_params is shared with call_kwargs, so this mutation is
        # reflected in the eventual call.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_repository(self, repository_id, update_repository_details, **kwargs):
        """
        Updates the properties of a repository. You can update the `displayName` and `description` properties.
        :param str repository_id: (required)
            The `OCID`__ of the repository.
            Example: `ocid1.artifactrepository.oc1..exampleuniqueID`
            __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
        :param oci.artifacts.models.UpdateRepositoryDetails update_repository_details: (required)
            Updates the properties of a repository.
        :param str if_match: (optional)
            For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
            parameter to the value of the etag from a previous GET or POST response for that resource. The resource
            will be updated or deleted only if the etag you provide matches the resource's current etag value.
        :param str opc_request_id: (optional)
            Unique identifier for the request.
            If you need to contact Oracle about a particular request, please provide the request ID.
        :param obj retry_strategy: (optional)
            A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
            This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
            The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
            To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
        :param bool allow_control_chars: (optional)
            allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
            By default, the response will not allow control characters in strings
        :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.artifacts.models.Repository`
        :rtype: :class:`~oci.response.Response`
        :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/artifacts/update_repository.py.html>`__ to see an example of how to use update_repository API.
        """
        resource_path = "/repositories/{repositoryId}"
        method = "PUT"
        # Don't accept unknown kwargs
        expected_kwargs = [
            "allow_control_chars",
            "retry_strategy",
            "if_match",
            "opc_request_id"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "update_repository got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "repositoryId": repository_id
        }
        # Drop unset path params, then reject empty/whitespace values which would
        # produce a malformed request URL.
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "if-match": kwargs.get("if_match", missing),
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        # Strip optional headers the caller did not supply.
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry strategy wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=update_repository_details,
                response_type="Repository")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=update_repository_details,
                response_type="Repository")
| 49.946647
| 261
| 0.657751
|
acfc1cdb43b13c9e8271ec41190ff86b013d8e59
| 5,246
|
py
|
Python
|
reporterapp/models.py
|
myles/python-reporter-app
|
006ff2f63671308eaeb16ae4da12cc999e43e14f
|
[
"MIT"
] | 1
|
2017-09-23T01:12:52.000Z
|
2017-09-23T01:12:52.000Z
|
reporterapp/models.py
|
myles/python-reporter-app
|
006ff2f63671308eaeb16ae4da12cc999e43e14f
|
[
"MIT"
] | 2
|
2017-03-15T17:26:57.000Z
|
2021-06-01T21:24:33.000Z
|
reporterapp/models.py
|
myles/python-reporter-app
|
006ff2f63671308eaeb16ae4da12cc999e43e14f
|
[
"MIT"
] | null | null | null |
from dateutil.parser import parse as date_parse
# Human-readable labels for the Reporter App `reportImpetus` integer codes.
REPORT_IMPETUS_DISPLAY = {
    0: 'Report button tapped',
    1: 'Report button tapped while Reporter is asleep',
    2: 'Report triggered by notification',
    3: 'Report triggered by setting app to sleep',
    4: 'Report triggered by waking up app'
}
# Human-readable labels for the `connection` integer codes.
CONNECTION_DISPLAY = {
    0: 'Device is connected via cellular network',
    1: 'Device is connected via WiFi',
    2: 'Device is not connected'
}
# Human-readable labels for the `questionType` integer codes.
QUESTION_TYPE_DISPLAY = {
    0: 'Tokens',
    1: 'Multi-Choice',
    2: 'Yes / No',
    3: 'Location',
    4: 'People',
    5: 'Number',
    6: 'Note'
}
class ResultSet(list):
    """A ``list`` subclass that collects parsed model instances."""

    def __init__(self):
        super(ResultSet, self).__init__()

    @property
    def uniqueIdentifiers(self):
        """Return the ``uniqueIdentifier`` of every item that defines one."""
        return [item.uniqueIdentifier
                for item in self
                if hasattr(item, 'uniqueIdentifier')]
class Model(object):
    """Base class for all Reporter App models."""

    @classmethod
    def parse(cls, json):
        """Parse a JSON object into a model instance (subclass hook)."""
        raise NotImplementedError

    @classmethod
    def parse_list(cls, json_list, *args):
        """Parse a list of JSON objects into a ResultSet of instances.

        Falsy entries in *json_list* are skipped.
        """
        results = ResultSet()
        results.extend(cls.parse(entry, *args) for entry in json_list if entry)
        return results

    def __repr__(self):
        return '{0}({1})'.format(type(self).__name__, self.uniqueIdentifier)
class Snapshot(Model):
    """A Reporter App Snapshot."""

    @classmethod
    def parse(cls, json, questions):
        """Parse a JSON object into a Snapshot instance.

        Every key becomes an attribute; a handful of keys additionally get
        normalized values or derived ``*Display`` attributes.
        """
        snapshot = cls()
        for key, value in json.items():
            setattr(snapshot, key, value)
            if key == 'date':
                # Store a naive datetime (timezone info dropped).
                setattr(snapshot, key, date_parse(value).replace(tzinfo=None))
            elif key == 'connection':
                snapshot.connectionDisplay = CONNECTION_DISPLAY[value]
            elif key == 'reportImpetus':
                snapshot.reportImpetusDisplay = REPORT_IMPETUS_DISPLAY[value]
            elif key == 'battery':
                # Battery arrives as a 0..1 fraction; render as a percentage.
                snapshot.batteryDisplay = '{:.0f}%'.format(value * 100)
            elif key in ('background', 'draft'):
                setattr(snapshot, key, bool(value))
            elif key == 'responses':
                setattr(snapshot, key, Response.parse_list(value, questions))
        return snapshot

    @classmethod
    def parse_list(cls, json_list, questions):
        """Parse a list of JSON objects into a ResultSet of Snapshots."""
        results = ResultSet()
        results.extend(cls.parse(entry, questions)
                       for entry in json_list if entry)
        return results
class QuestionList(ResultSet):
    """A Reporter App Question List."""

    def list_prompt(self):
        """Return the prompt text of every question in the list."""
        return [question.prompt for question in self]

    def get_prompt(self, prompt):
        """Return the last question whose prompt equals *prompt*, else None."""
        found = None
        for question in self:
            if question.prompt == prompt:
                found = question
        return found
class Question(Model):
    """A Reporter App Question."""

    @classmethod
    def parse(cls, json):
        """Parse a JSON object into a Question instance."""
        question = cls()
        for key, value in json.items():
            setattr(question, key, value)
            if key == 'questionType':
                question.questionTypeDisplay = QUESTION_TYPE_DISPLAY[value]
        return question

    @classmethod
    def parse_list(cls, json_list):
        """Parse JSON objects into a QuestionList, skipping duplicate prompts."""
        results = QuestionList()
        for entry in json_list:
            if entry and entry['prompt'] not in results.list_prompt():
                results.append(cls.parse(entry))
        return results

    def __repr__(self):
        return '{0}({1})'.format(type(self).__name__, self.prompt)
class ResponseLocation(Model):
    """A Snapshot Location Response."""

    @classmethod
    def parse(cls, json):
        """Parse a JSON object into a ResponseLocation instance."""
        location = cls()
        for key, value in json.items():
            setattr(location, key, value)
        return location

    @property
    def foursquareUrl(self):
        """The Foursquare venue page URL for this location."""
        return 'https://foursquare.com/v/{0}'.format(self.foursquareVenueId)
class Response(Model):
    """A Snapshot Response."""

    @classmethod
    def parse(cls, json, questions):
        """Parse a JSON object into a Response instance.

        Links the response back to its Question (looked up by prompt) and
        normalizes token and location payloads.
        """
        response = cls()
        for key, value in json.items():
            setattr(response, key, value)
            if key == 'questionPrompt':
                response.question = questions.get_prompt(json['questionPrompt'])
            elif key == 'tokens':
                setattr(response, key, [token['text'] for token in value])
            elif key == 'locationResponse':
                setattr(response, key, ResponseLocation.parse(value))
        return response
| 24.980952
| 77
| 0.559474
|
acfc1d5cd63f85e7cacf5e9c25863ec14925dbe8
| 1,201
|
py
|
Python
|
wave_reader/web/models/create_hook_request_labels.py
|
ztroop/wave-reader-utils
|
21b8fe941888e7ce5c4e3e04e87ee8cc9c2f0cbb
|
[
"MIT"
] | 11
|
2021-02-10T04:32:07.000Z
|
2021-12-29T04:17:06.000Z
|
wave_reader/web/models/create_hook_request_labels.py
|
ztroop/wave-reader
|
21b8fe941888e7ce5c4e3e04e87ee8cc9c2f0cbb
|
[
"MIT"
] | 12
|
2021-01-23T06:45:19.000Z
|
2021-12-29T04:20:53.000Z
|
wave_reader/web/models/create_hook_request_labels.py
|
ztroop/wave-reader
|
21b8fe941888e7ce5c4e3e04e87ee8cc9c2f0cbb
|
[
"MIT"
] | 5
|
2021-02-12T09:15:20.000Z
|
2021-09-13T05:05:40.000Z
|
from typing import Any, Dict, List, Type, TypeVar
import attr
T = TypeVar("T", bound="CreateHookRequestLabels")
@attr.s(auto_attribs=True)
class CreateHookRequestLabels:
    """Free-form string labels for a create-hook request (dict-like wrapper)."""

    additional_properties: Dict[str, str] = attr.ib(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict of the additional properties."""
        serialized: Dict[str, Any] = {}
        serialized.update(self.additional_properties)
        return serialized

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build an instance whose properties are a shallow copy of *src_dict*."""
        labels = cls()
        labels.additional_properties = dict(src_dict)
        return labels

    @property
    def additional_keys(self) -> List[str]:
        """The currently stored property names."""
        return list(self.additional_properties)

    def __getitem__(self, key: str) -> str:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: str) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
| 27.930233
| 77
| 0.674438
|
acfc1e37b2b22bbe628d43228efd745210955f3c
| 1,049
|
py
|
Python
|
Assignments/Assignment1.py
|
BhishmDaslaniya/Python_Programming_Lab
|
9f4c6c4ff5da35cd3a0123a0b088b33ba0f99e9f
|
[
"Apache-2.0"
] | null | null | null |
Assignments/Assignment1.py
|
BhishmDaslaniya/Python_Programming_Lab
|
9f4c6c4ff5da35cd3a0123a0b088b33ba0f99e9f
|
[
"Apache-2.0"
] | null | null | null |
Assignments/Assignment1.py
|
BhishmDaslaniya/Python_Programming_Lab
|
9f4c6c4ff5da35cd3a0123a0b088b33ba0f99e9f
|
[
"Apache-2.0"
] | null | null | null |
# [17CE023] Bhishm Daslaniya
'''
Algorithm:
--> Build a list of tuples such that the string "aaabbc" is squashed down to [("a", 3), ("b", 2), ("c", 1)]
--> Add to the answer all substrings drawn from these tuples that form palindromes made of a single repeated letter
--> Traverse this list to specifically find the second case mentioned in the problem
'''
def substrCount(n, s):
    """Count the special palindromic substrings of ``s[:n]``.

    A substring counts when either (a) all its characters are identical, or
    (b) all characters match except the single middle one.
    """
    # Run-length encode the first n characters: "aaabbc" -> [("a",3),("b",2),("c",1)].
    runs = []
    for idx in range(n):
        ch = s[idx]
        if runs and runs[-1][0] == ch:
            runs[-1] = (ch, runs[-1][1] + 1)
        else:
            runs.append((ch, 1))
    if not runs:
        # Mirror the original behaviour for n == 0 (a single empty run).
        runs.append((None, 0))
    # Case (a): a run of k equal letters yields k*(k+1)/2 substrings.
    total = sum(count * (count + 1) // 2 for _, count in runs)
    # Case (b): a lone character flanked by equal-letter runs contributes
    # min(left run length, right run length) palindromes.
    for j in range(1, len(runs) - 1):
        if runs[j][1] == 1 and runs[j - 1][0] == runs[j + 1][0]:
            total += min(runs[j - 1][1], runs[j + 1][1])
    return total
if __name__ == '__main__':
    # Read the string length and the string itself from stdin, then print
    # the count of special palindromic substrings.
    length = int(input())
    text = input()
    print(substrCount(length, text))
| 24.97619
| 128
| 0.526215
|
acfc1e75c0fb5b3d5709896d7efb64699e0d62d2
| 16,398
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_imperative_basic.py
|
cryoco/Paddle
|
39ac41f137d685af66078adf2f35d65473978b4a
|
[
"Apache-2.0"
] | 3
|
2019-07-17T09:30:31.000Z
|
2021-12-27T03:16:55.000Z
|
python/paddle/fluid/tests/unittests/test_imperative_basic.py
|
cryoco/Paddle
|
39ac41f137d685af66078adf2f35d65473978b4a
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/test_imperative_basic.py
|
cryoco/Paddle
|
39ac41f137d685af66078adf2f35d65473978b4a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid import FC
from test_imperative_base import new_program_scope
class MyLayer(fluid.Layer):
    """Toy layer: relu -> elementwise square -> scalar reduce_sum."""

    def __init__(self, name_scope):
        super(MyLayer, self).__init__(name_scope)

    def forward(self, inputs):
        activated = fluid.layers.relu(inputs)
        # Keep a handle on the intermediate so tests can inspect its gradient.
        self._x_for_debug = activated
        squared = fluid.layers.elementwise_mul(activated, activated)
        return [fluid.layers.reduce_sum(squared)]
class MLP(fluid.Layer):
    """Two stacked FC layers (3 then 4 units) followed by a scalar reduce_sum.

    All weights and biases are constant-initialized to 0.1 so gradients are
    deterministic in the tests below.
    """

    def __init__(self, name_scope):
        super(MLP, self).__init__(name_scope)

        def _const_attr():
            # Fresh ParamAttr per parameter, all constant-initialized to 0.1.
            return fluid.ParamAttr(
                initializer=fluid.initializer.Constant(value=0.1))

        self._fc1 = FC(self.full_name(),
                       3,
                       param_attr=_const_attr(),
                       bias_attr=_const_attr())
        self._fc2 = FC(self.full_name(),
                       4,
                       param_attr=_const_attr(),
                       bias_attr=_const_attr())

    def forward(self, inputs):
        hidden = self._fc1(inputs)
        out = self._fc2(hidden)
        return fluid.layers.reduce_sum(out)
class SimpleRNNCell(fluid.Layer):
    # One step of a vanilla RNN assembled from raw framework ops:
    #   hidden = tanh(input @ W_i2h + pre_hidden @ W_h2h)
    #   out    = reduce_sum(softmax(hidden @ W_h2o))
    # NOTE(review): the ops are appended in a fixed order via the layer helper;
    # do not reorder them.
    def __init__(self, name_scope, step_input_size, hidden_size, output_size,
                 param_attr):
        super(SimpleRNNCell, self).__init__(name_scope)
        self.step_input_size = step_input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self._dtype = core.VarDesc.VarType.FP32
        self.param_attr = param_attr
    def _build_once(self, inputs, pre_hidden):
        # Lazily create the three weight matrices on the first forward call.
        i2h_param_shape = [self.step_input_size, self.hidden_size]
        h2h_param_shape = [self.hidden_size, self.hidden_size]
        h2o_param_shape = [self.output_size, self.hidden_size]
        self._i2h_w = self.create_parameter(
            attr=self.param_attr,
            shape=i2h_param_shape,
            dtype=self._dtype,
            is_bias=False)
        self._h2h_w = self.create_parameter(
            attr=self.param_attr,
            shape=h2h_param_shape,
            dtype=self._dtype,
            is_bias=False)
        self._h2o_w = self.create_parameter(
            attr=self.param_attr,
            shape=h2o_param_shape,
            dtype=self._dtype,
            is_bias=False)
    def forward(self, input, pre_hidden):
        # Pre-create output variables for each appended op.
        tmp_i2h = self.create_variable(dtype=self._dtype)
        tmp_h2h = self.create_variable(dtype=self._dtype)
        hidden = self.create_variable(dtype=self._dtype)
        out = self.create_variable(dtype=self._dtype)
        softmax_out = self.create_variable(dtype=self._dtype)
        reduce_out = self.create_variable(dtype=self._dtype)
        # tmp_i2h = input @ W_i2h
        self._helper.append_op(
            type="mul",
            inputs={"X": input,
                    "Y": self._i2h_w},
            outputs={"Out": tmp_i2h},
            attrs={"x_num_col_dims": 1,
                   "y_num_col_dims": 1})
        # tmp_h2h = pre_hidden @ W_h2h
        self._helper.append_op(
            type="mul",
            inputs={"X": pre_hidden,
                    "Y": self._h2h_w},
            outputs={"Out": tmp_h2h},
            attrs={"x_num_col_dims": 1,
                   "y_num_col_dims": 1})
        # hidden = tanh(tmp_h2h + tmp_i2h)
        self._helper.append_op(
            type="elementwise_add",
            inputs={'X': tmp_h2h,
                    'Y': tmp_i2h},
            outputs={'Out': hidden},
            attrs={'axis': -1,
                   'use_mkldnn': False})
        hidden = self._helper.append_activation(hidden, act='tanh')
        # out = hidden @ W_h2o
        self._helper.append_op(
            type="mul",
            inputs={"X": hidden,
                    "Y": self._h2o_w},
            outputs={"Out": out},
            attrs={"x_num_col_dims": 1,
                   "y_num_col_dims": 1})
        self._helper.append_op(
            type="softmax",
            inputs={"X": out},
            outputs={"Out": softmax_out},
            attrs={"use_cudnn": False})
        # Collapse the softmax output to a scalar for the loss.
        self._helper.append_op(
            type='reduce_sum',
            inputs={'X': softmax_out},
            outputs={'Out': reduce_out},
            attrs={'dim': [],
                   'keep_dim': False,
                   'reduce_all': True})
        return reduce_out, hidden
class SimpleRNN(fluid.Layer):
    """Unrolls a SimpleRNNCell over a fixed-length (4-step) input sequence."""

    def __init__(self, name_scope):
        super(SimpleRNN, self).__init__(name_scope)
        self.seq_len = 4
        self._cell = SimpleRNNCell(
            self.full_name(),
            3,
            3,
            3,
            fluid.ParamAttr(initializer=fluid.initializer.Constant(value=0.1)))

    def forward(self, inputs):
        outs = []
        # NOTE: never appended to; kept only to preserve the return shape.
        pre_hiddens = []
        # Learnable initial hidden state, constant-initialized to 0.1.
        hidden = self.create_parameter(
            attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(value=0.1)),
            shape=[1, 3],
            dtype='float32',
            is_bias=False)
        for step in range(self.seq_len):
            # Slice out time step `step` along axis 1 and flatten to [1, 3].
            step_input = fluid.layers.slice(
                inputs, axes=[1], starts=[step], ends=[step + 1])
            step_input = fluid.layers.reshape(step_input, shape=[1, 3])
            out_softmax, hidden = self._cell(step_input, hidden)
            outs.append(out_softmax)
        return outs, pre_hiddens
class TestImperative(unittest.TestCase):
    # Each test runs the same computation in dygraph (imperative) mode —
    # twice, once with sorted-sum-gradient backward — and in static graph
    # mode, then asserts the outputs and gradients agree.
    def test_sum_op(self):
        # Sum ten copies of a ones tensor; gradient of each input should be 1.
        x = np.ones([2, 2], np.float32)
        with fluid.dygraph.guard():
            inputs = []
            for _ in range(10):
                tmp = fluid.dygraph.base.to_variable(x)
                tmp.stop_gradient = False
                inputs.append(tmp)
            ret = fluid.layers.sums(inputs)
            loss = fluid.layers.reduce_sum(ret)
            loss.backward()
        with fluid.dygraph.guard():
            # Same computation with the sort_sum_gradient backward strategy.
            inputs2 = []
            for _ in range(10):
                tmp = fluid.dygraph.base.to_variable(x)
                tmp.stop_gradient = False
                inputs2.append(tmp)
            ret2 = fluid.layers.sums(inputs2)
            loss2 = fluid.layers.reduce_sum(ret2)
            backward_strategy = fluid.dygraph.BackwardStrategy()
            backward_strategy.sort_sum_gradient = True
            loss2.backward(backward_strategy)
            self.assertTrue(np.allclose(ret.numpy(), x * 10))
            self.assertTrue(np.allclose(inputs[0].gradient(), x))
            self.assertTrue(np.allclose(ret2.numpy(), x * 10))
            a = inputs2[0].gradient()
            self.assertTrue(np.allclose(inputs2[0].gradient(), x))
    def test_layer(self):
        # The raw core.Layer forward is a no-op; the Python fluid.Layer base
        # must raise until a subclass implements forward().
        with fluid.dygraph.guard():
            cl = core.Layer()
            cl.forward([])
            l = fluid.Layer("l")
            self.assertRaises(NotImplementedError, l.forward, [])
    def test_layer_in_out(self):
        # MyLayer output and the gradient of its relu intermediate must match
        # between dygraph (both backward strategies) and static graph mode.
        np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
        with fluid.dygraph.guard():
            var_inp = fluid.dygraph.base.to_variable(np_inp)
            var_inp.stop_gradient = False
            l = MyLayer("my_layer")
            x = l(var_inp)[0]
            self.assertIsNotNone(x)
            dy_out = x.numpy()
            x.backward()
            dy_grad = l._x_for_debug.gradient()
        with fluid.dygraph.guard():
            var_inp2 = fluid.dygraph.base.to_variable(np_inp)
            var_inp2.stop_gradient = False
            l2 = MyLayer("my_layer")
            x2 = l2(var_inp2)[0]
            self.assertIsNotNone(x2)
            dy_out2 = x2.numpy()
            backward_strategy = fluid.dygraph.BackwardStrategy()
            backward_strategy.sort_sum_gradient = True
            x2.backward(backward_strategy)
            dy_grad2 = l2._x_for_debug.gradient()
        with new_program_scope():
            inp = fluid.layers.data(
                name="inp", shape=[3], append_batch_size=False)
            l = MyLayer("my_layer")
            x = l(inp)[0]
            param_grads = fluid.backward.append_backward(
                x, parameter_list=[l._x_for_debug.name])[0]
            exe = fluid.Executor(fluid.CPUPlace(
            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
            static_out, static_grad = exe.run(
                feed={inp.name: np_inp},
                fetch_list=[x.name, param_grads[1].name])
        self.assertTrue(np.allclose(dy_out, static_out))
        self.assertTrue(np.allclose(dy_grad, static_grad))
        self.assertTrue(np.allclose(dy_out2, static_out))
        self.assertTrue(np.allclose(dy_grad2, static_grad))
    def test_mlp(self):
        # MLP output and first-layer weight gradient must match between modes;
        # also checks parameter/sublayer bookkeeping.
        np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
        with fluid.dygraph.guard():
            var_inp = fluid.dygraph.base.to_variable(np_inp)
            mlp = MLP("mlp")
            out = mlp(var_inp)
            dy_out = out.numpy()
            out.backward()
            dy_grad = mlp._fc1._w.gradient()
        with fluid.dygraph.guard():
            var_inp2 = fluid.dygraph.base.to_variable(np_inp)
            mlp2 = MLP("mlp")
            out2 = mlp2(var_inp2)
            dy_out2 = out2.numpy()
            backward_strategy = fluid.dygraph.BackwardStrategy()
            backward_strategy.sort_sum_gradient = True
            out2.backward(backward_strategy)
            dy_grad2 = mlp2._fc1._w.gradient()
        with new_program_scope():
            inp = fluid.layers.data(
                name="inp", shape=[2, 2], append_batch_size=False)
            mlp = MLP("mlp")
            out = mlp(inp)
            param_grads = fluid.backward.append_backward(
                out, parameter_list=[mlp._fc1._w.name])[0]
            exe = fluid.Executor(fluid.CPUPlace(
            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
            exe.run(fluid.default_startup_program())
            static_out, static_grad = exe.run(
                feed={inp.name: np_inp},
                fetch_list=[out.name, param_grads[1].name])
        self.assertTrue(np.allclose(dy_out, static_out))
        self.assertTrue(np.allclose(dy_grad, static_grad))
        self.assertTrue(np.allclose(dy_out2, static_out))
        self.assertTrue(np.allclose(dy_grad2, static_grad))
        # Parameter names follow the layer full-name scoping scheme.
        params = mlp.parameters(True)
        self.assertEqual("mlp/MLP_0/FC_0.w_0", params[0].name)
        self.assertEqual("mlp/MLP_0/FC_0.b_0", params[1].name)
        self.assertEqual("mlp/MLP_0/FC_1.w_0", params[2].name)
        self.assertEqual("mlp/MLP_0/FC_1.b_0", params[3].name)
        self.assertEqual(len(params), 4)
        sublayers = mlp.sublayers(True)
        self.assertEqual(mlp._fc1, sublayers[0])
        self.assertEqual(mlp._fc2, sublayers[1])
        self.assertEqual(len(sublayers), 2)
    def test_dygraph_vs_static(self):
        # Data-dependent control flow: a plain Python `if` in dygraph vs an
        # IfElse block in static mode must produce the same result.
        inp1 = np.random.rand(4, 3, 3)
        inp2 = np.random.rand(4, 3, 3)
        # dynamic graph
        with fluid.dygraph.guard():
            if np.sum(inp1) < np.sum(inp2):
                x = fluid.layers.elementwise_add(inp1, inp2)
            else:
                x = fluid.layers.elementwise_sub(inp1, inp2)
            dygraph_result = x.numpy()
        # static graph
        with new_program_scope():
            inp_data1 = fluid.layers.data(
                name='inp1', shape=[3, 3], dtype=np.float32)
            inp_data2 = fluid.layers.data(
                name='inp2', shape=[3, 3], dtype=np.float32)
            # Broadcast each scalar sum to shape [4, 1] for the comparison.
            a = fluid.layers.expand(
                fluid.layers.reshape(
                    fluid.layers.reduce_sum(inp_data1), [1, 1]), [4, 1])
            b = fluid.layers.expand(
                fluid.layers.reshape(
                    fluid.layers.reduce_sum(inp_data2), [1, 1]), [4, 1])
            cond = fluid.layers.less_than(x=a, y=b)
            ie = fluid.layers.IfElse(cond)
            with ie.true_block():
                d1 = ie.input(inp_data1)
                d2 = ie.input(inp_data2)
                d3 = fluid.layers.elementwise_add(d1, d2)
                ie.output(d3)
            with ie.false_block():
                d1 = ie.input(inp_data1)
                d2 = ie.input(inp_data2)
                d3 = fluid.layers.elementwise_sub(d1, d2)
                ie.output(d3)
            out = ie()
            exe = fluid.Executor(fluid.CPUPlace(
            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
            static_result = exe.run(fluid.default_main_program(),
                                    feed={'inp1': inp1,
                                          'inp2': inp2},
                                    fetch_list=out)[0]
        self.assertTrue(np.allclose(dygraph_result, static_result))
    def test_rnn(self):
        # SimpleRNN final-step output and all three weight gradients must
        # match between dygraph (both backward strategies) and static mode.
        np_inp = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0],
                           [10.0, 11.0, 12.0]])
        np_inp = np_inp.reshape((1, 4, 3))
        np_inp = np_inp.astype(np.float32)
        with fluid.dygraph.guard():
            var_inp = fluid.dygraph.base.to_variable(np_inp)
            var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
            simple_rnn = SimpleRNN("simple_rnn")
            outs, pre_hiddens = simple_rnn.forward(var_inp)
            dy_out = outs[3].numpy()
            outs[3].backward()
            dy_grad_h2o = simple_rnn._cell._h2o_w.gradient()
            dy_grad_h2h = simple_rnn._cell._h2h_w.gradient()
            dy_grad_i2h = simple_rnn._cell._i2h_w.gradient()
        with fluid.dygraph.guard():
            var_inp2 = fluid.dygraph.base.to_variable(np_inp)
            var_inp2 = fluid.layers.reshape(var_inp2, shape=[1, 4, 3])
            simple_rnn2 = SimpleRNN("simple_rnn")
            outs2, pre_hiddens2 = simple_rnn2.forward(var_inp2)
            dy_out2 = outs2[3].numpy()
            backward_strategy = fluid.dygraph.BackwardStrategy()
            backward_strategy.sort_sum_gradient = True
            outs2[3].backward(backward_strategy)
            dy_grad_h2o2 = simple_rnn2._cell._h2o_w.gradient()
            dy_grad_h2h2 = simple_rnn2._cell._h2h_w.gradient()
            dy_grad_i2h2 = simple_rnn2._cell._i2h_w.gradient()
        with new_program_scope():
            inp = fluid.layers.data(
                name="inp", shape=[1, 4, 3], append_batch_size=False)
            simple_rnn = SimpleRNN("simple_rnn")
            outs, pre_hiddens = simple_rnn(inp)
            param_grads = fluid.backward.append_backward(outs[3])
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            static_out, static_grad_h2o, static_grad_h2h, static_grad_i2h = exe.run(
                feed={inp.name: np_inp},
                fetch_list=[
                    outs[3].name, param_grads[0][1].name,
                    param_grads[1][1].name, param_grads[2][1].name
                ])
        self.assertTrue(np.allclose(dy_out, static_out))
        self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o))
        self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h))
        self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h))
        self.assertTrue(np.allclose(dy_out2, static_out))
        self.assertTrue(np.allclose(dy_grad_h2o2, static_grad_h2o))
        self.assertTrue(np.allclose(dy_grad_h2h2, static_grad_h2h))
        self.assertTrue(np.allclose(dy_grad_i2h2, static_grad_i2h))
    def test_layer_attrs(self):
        # Plain attribute assignment on a Layer must round-trip via hasattr.
        layer = fluid.dygraph.Layer("test")
        layer.test_attr = 1
        self.assertFalse(hasattr(layer, "whatever"))
        self.assertTrue(hasattr(layer, "test_attr"))
        self.assertEqual(layer.test_attr, 1)
if __name__ == '__main__':
    # Run the dygraph/static-graph parity tests when executed as a script.
    unittest.main()
| 38.674528
| 84
| 0.572936
|
acfc1ec46c8081eabe25eab0d3b9aab95ecaff91
| 2,341
|
py
|
Python
|
app/src/main/python/full_recommender.py
|
Morata3/Recommendify
|
287151190e9915293cf89fffb51ef093d037ae80
|
[
"MIT"
] | 4
|
2021-01-19T15:07:00.000Z
|
2021-12-02T19:57:27.000Z
|
app/src/main/python/full_recommender.py
|
Hadryan/Recommendify
|
287151190e9915293cf89fffb51ef093d037ae80
|
[
"MIT"
] | null | null | null |
app/src/main/python/full_recommender.py
|
Hadryan/Recommendify
|
287151190e9915293cf89fffb51ef093d037ae80
|
[
"MIT"
] | 4
|
2020-12-12T22:47:17.000Z
|
2021-02-03T11:03:58.000Z
|
from sklearn.neighbors import NearestNeighbors
from fuzzywuzzy import fuzz
import numpy as np
import pandas as pd
import scipy.sparse
from scipy.sparse import csr_matrix
import joblib
import json
from os.path import dirname, join
def recommend(song="Iconography"):
    """Return a comma-separated string of up to 10 titles similar to *song*.

    Loads a precomputed sparse song/feature matrix, a title -> row-index
    mapping, and a pre-fitted NearestNeighbors model from files shipped next
    to this module, fuzzy-matches *song* against the known titles, and
    returns the nearest neighbours' titles (ordered farthest-to-nearest, as
    in the original implementation).

    :param song: title to look up; fuzzy-matched with ratio >= 60.
        Defaults to "Iconography" for backward compatibility.
    :return: comma-separated recommendation titles, or an empty string when
        no known title matches *song*.
    """
    # Load the sparse songs x features matrix.
    mat_songs_features_filename = join(dirname(__file__), "df_mat_songs_features.npz")
    mat_songs_features = scipy.sparse.load_npz(mat_songs_features_filename)
    # Mapping of song title -> row index in the matrix.
    decode_id_song_filename = join(dirname(__file__), 'decode_id_songs.json')
    with open(decode_id_song_filename, 'r') as fp:
        decode_id_song = json.load(fp)
    # Load the pre-fitted k-NN model from disk (trained offline).
    model_filename = join(dirname(__file__), 'user_based_collaborative.sav')
    model = joblib.load(model_filename)
    # Fuzzy-match the requested title against every known title.
    match_tuple = []
    for title, idx in decode_id_song.items():
        ratio = fuzz.ratio(title.lower(), song.lower())
        if ratio >= 60:
            match_tuple.append((title, idx, ratio))
    # Best match (highest ratio) first.
    match_tuple = sorted(match_tuple, key=lambda x: x[2])[::-1]
    if not match_tuple:
        # BUG FIX: previously fell through and raised IndexError on
        # match_tuple[0] below; now bail out gracefully.
        print(f"The recommendation system could not find a match for {song}")
        return ""
    recom_song_id = match_tuple[0][1]
    # Start the recommendation process
    print(f"Starting the recommendation process for {song} ...")
    # Ask for n+1 neighbours because the song is its own nearest neighbour.
    distances, indices = model.kneighbors(mat_songs_features[recom_song_id], n_neighbors=10 + 1)
    # Pair (index, distance), drop element 0 (the query song itself) and
    # reverse, preserving the original farthest-to-nearest output order.
    recommendation_ids = sorted(
        list(zip(indices.squeeze().tolist(), distances.squeeze().tolist())),
        key=lambda x: x[1])[:0:-1]
    # Reverse mapping: row index -> song title.
    recommendations_map = {song_id: song_title for song_title, song_id in decode_id_song.items()}
    recommendations = [recommendations_map[idx] for idx, dist in recommendation_ids]
    print(recommendations)
    return ', '.join(recommendations)
| 33.927536
| 126
| 0.718496
|
acfc1efdf164556a42c02ffbd65b7f7678435be3
| 958
|
py
|
Python
|
bayesian_methods_for_hackers/simulate_messages_ch02.py
|
noelevans/playground
|
da529e967a15bcb217fff091ac0ec5c4dc1821ce
|
[
"MIT"
] | 1
|
2015-04-28T20:36:57.000Z
|
2015-04-28T20:36:57.000Z
|
bayesian_methods_for_hackers/simulate_messages_ch02.py
|
noelevans/playground
|
da529e967a15bcb217fff091ac0ec5c4dc1821ce
|
[
"MIT"
] | 2
|
2021-02-03T21:05:54.000Z
|
2021-03-23T09:25:43.000Z
|
bayesian_methods_for_hackers/simulate_messages_ch02.py
|
noelevans/playground
|
da529e967a15bcb217fff091ac0ec5c4dc1821ce
|
[
"MIT"
] | null | null | null |
import json
import matplotlib
import numpy as np
import pymc as pm
from matplotlib import pyplot as plt
def main():
    # Simulate an 80-day SMS-count dataset with one behaviour switchpoint.
    # NOTE(review): Python 2 syntax (print statements) and PyMC 2 random
    # helpers (pm.r* functions) — this file will not run under Python 3.
    tau = pm.rdiscrete_uniform(0, 80)  # day the behaviour changes
    print tau
    alpha = 1. / 20.  # rate hyperparameter for the exponential priors
    lambda_1, lambda_2 = pm.rexponential(alpha, 2)
    print lambda_1, lambda_2
    # Poisson counts: rate lambda_1 before tau, lambda_2 after, 80 days total.
    data = np.r_[pm.rpoisson(lambda_1, tau), pm.rpoisson(lambda_2, 80 - tau)]
def plot_artificial_sms_dataset():
    # Draw one freshly simulated SMS dataset as a bar chart on the current
    # axes (same generative model as main()).
    tau = pm.rdiscrete_uniform(0, 80)
    alpha = 1. / 20.
    lambda_1, lambda_2 = pm.rexponential(alpha, 2)
    data = np.r_[pm.rpoisson(lambda_1, tau), pm.rpoisson(lambda_2, 80 - tau)]
    plt.bar(np.arange(80), data, color="#348ABD")
    # Highlight the switchpoint day in red.
    plt.bar(tau - 1, data[tau - 1], color="r", label="user behaviour changed")
    plt.xlim(0, 80)
# NOTE(review): this plotting block runs at module import time, not inside
# the __main__ guard — presumably intentional for interactive use; verify.
plt.title("More example of artificial datasets")
# Draw four independent simulated datasets stacked vertically.
for i in range(1, 5):
    plt.subplot(4, 1, i)
    plot_artificial_sms_dataset()
plt.show()
if __name__ == '__main__':
    main()
| 26.611111
| 82
| 0.631524
|
acfc20537378d2e9477ea876851aab1e44f248aa
| 507
|
py
|
Python
|
src/bi/sub.py
|
vanceeasleaf/paper_bi4i4
|
d8ff15514316df43625b084959248f1824d6bbfa
|
[
"MIT"
] | null | null | null |
src/bi/sub.py
|
vanceeasleaf/paper_bi4i4
|
d8ff15514316df43625b084959248f1824d6bbfa
|
[
"MIT"
] | null | null | null |
src/bi/sub.py
|
vanceeasleaf/paper_bi4i4
|
d8ff15514316df43625b084959248f1824d6bbfa
|
[
"MIT"
] | null | null | null |
from aces import Aces
#the origin BP structure is optimized and we use it directly
class sub(Aces):
    # Aces job specification: submits a Green-Kubo / ShengBTE
    # thermal-conductivity calculation for the computed Bi4I4 structure.
    def submit(self):
        # Scheduler / runner options (queue, node count, engine, wall time).
        opt=dict(
            units="metal",
            species="Bi4I4_computed",
            method="greenkubo",
            nodes=4,
            procs=12,
            queue="q1.1",
            runTime=10000000
            ,runner="shengbte"
        )
        # Per-application numerical settings (VASP cutoff, k-point meshes,
        # supercell size, ShengBTE neighbour cutoff).
        app=dict(encut=520,th=True,useMini=True,shengcut=-4,kpoints=[8,8,8],engine='vasp',supercell=[3,2,2],mekpoints=[9,6,4],ekpoints=[3,3,2])
        self.commit(opt,app);
if __name__=='__main__':
    # Build the job description and hand it to the Aces runner.
    sub().run()
| 26.684211
| 138
| 0.65286
|
acfc20f2e06045c213664f4eb8afac1d810f584a
| 6,579
|
py
|
Python
|
test/test_index.py
|
Scartography/mapchete
|
f7d1a74acb4021adfd3053501416d2b974c40af9
|
[
"MIT"
] | 161
|
2016-02-20T15:18:13.000Z
|
2022-03-28T11:55:32.000Z
|
test/test_index.py
|
Scartography/mapchete
|
f7d1a74acb4021adfd3053501416d2b974c40af9
|
[
"MIT"
] | 387
|
2015-08-12T07:16:56.000Z
|
2022-03-30T14:27:12.000Z
|
test/test_index.py
|
Scartography/mapchete
|
f7d1a74acb4021adfd3053501416d2b974c40af9
|
[
"MIT"
] | 20
|
2016-02-22T12:51:54.000Z
|
2022-01-30T22:54:08.000Z
|
import fiona
import numpy as np
import os
import pytest
import rasterio
import mapchete
from mapchete.index import zoom_index_gen
from mapchete.io import get_boto3_bucket
@pytest.mark.remote
def test_remote_indexes(mp_s3_tmpdir, gtiff_s3):
    # Generate GeoJSON/TXT/VRT zoom indexes for an S3-backed output and check
    # that regenerating them over existing files is a no-op (idempotent).
    zoom = 7
    gtiff_s3.dict.update(zoom_levels=zoom)
    def gen_indexes_and_check():
        # generate indexes
        list(
            zoom_index_gen(
                mp=mp,
                zoom=zoom,
                out_dir=mp.config.output.path,
                geojson=True,
                txt=True,
                vrt=True,
            )
        )
        # assert GeoJSON exists
        with fiona.open(
            os.path.join(mp.config.output.path, "%s.geojson" % zoom)
        ) as src:
            assert len(src) == 2
        # assert TXT exists; read it back through boto3 since the path is S3.
        txt_index = os.path.join(mp.config.output.path, "%s.txt" % zoom)
        bucket = get_boto3_bucket(txt_index.split("/")[2])
        key = "/".join(txt_index.split("/")[3:])
        for obj in bucket.objects.filter(Prefix=key):
            if obj.key == key:
                content = obj.get()["Body"].read().decode()
                assert len([l + "\n" for l in content.split("\n") if l]) == 2
        # assert VRT exists
        with rasterio.open(os.path.join(mp.config.output.path, "%s.vrt" % zoom)) as src:
            assert src.read().any()
    with mapchete.open(gtiff_s3.dict) as mp:
        # write output data
        mp.batch_process(zoom=zoom)
        # generate indexes and check
        gen_indexes_and_check()
        # generate indexes again and assert nothing has changes
        gen_indexes_and_check()
def test_vrt(mp_tmpdir, cleantopo_br):
    """Build a VRT index over generated output and compare it against a VRT
    produced by the ``gdalbuildvrt`` CLI; finally assert that re-running index
    generation over an existing VRT works.
    """
    zoom = 8
    with mapchete.open(
        dict(cleantopo_br.dict, zoom_levels=dict(min=0, max=zoom))
    ) as mp:
        # generate output
        mp.batch_process(zoom=zoom)
        # generate index
        list(
            zoom_index_gen(
                mp=mp,
                zoom=zoom,
                out_dir=mp.config.output.path,
                vrt=True,
            )
        )
        output_tiles = list(
            mp.config.output_pyramid.tiles_from_bounds(
                mp.config.bounds_at_zoom(zoom=zoom), zoom=zoom
            )
        )
        # union of all output tile extents (left, bottom, right, top)
        bounds = (
            min([t.left for t in output_tiles]),
            min([t.bottom for t in output_tiles]),
            max([t.right for t in output_tiles]),
            max([t.top for t in output_tiles]),
        )
        # bounds = mp.config.effective_bounds
        vrt_index = os.path.join(mp.config.output.path, "%s.vrt" % zoom)
        with rasterio.open(vrt_index) as vrt:
            assert vrt.driver == "VRT"
            assert vrt.dtypes[0] == "uint16"
            assert vrt.meta["dtype"] == "uint16"
            assert vrt.count == 1
            assert vrt.nodata == 0
            assert vrt.bounds == bounds
            vrt_data = vrt.read()
            assert vrt_data.any()
        # generate a VRT using GDAL and compare
        out_dir = os.path.join(mp_tmpdir, "cleantopo_br")
        temp_vrt = os.path.join(out_dir, str(zoom) + "_gdal.vrt")
        gdalbuildvrt = "gdalbuildvrt %s %s/%s/*/*.tif > /dev/null" % (
            temp_vrt,
            out_dir,
            zoom,
        )
        # NOTE(review): the exit status of gdalbuildvrt is ignored here
        os.system(gdalbuildvrt)
        with rasterio.open(temp_vrt, "r") as gdal_vrt:
            assert gdal_vrt.dtypes[0] == "uint16"
            assert gdal_vrt.meta["dtype"] == "uint16"
            assert gdal_vrt.count == 1
            assert gdal_vrt.nodata == 0
            assert gdal_vrt.bounds == bounds
            gdal_vrt_data = gdal_vrt.read()
            # mapchete's VRT must match GDAL's reference VRT pixel-for-pixel
            assert np.array_equal(vrt_data, gdal_vrt_data)
    # make sure handling an existing VRT works
    with mapchete.open(
        dict(cleantopo_br.dict, zoom_levels=dict(min=0, max=zoom))
    ) as mp:
        # generate output
        mp.batch_process(zoom=zoom)
        # generate index
        list(
            zoom_index_gen(
                mp=mp,
                zoom=zoom,
                out_dir=mp.config.output.path,
                vrt=True,
            )
        )
def test_vrt_mercator(mp_tmpdir, cleantopo_br_mercator):
    """Same as test_vrt but on a Web-Mercator pyramid: build a VRT index,
    compare it against a VRT built by ``gdalbuildvrt``, and assert re-running
    on an existing VRT works.
    """
    zoom = 8
    with mapchete.open(
        dict(cleantopo_br_mercator.dict, zoom_levels=dict(min=0, max=zoom))
    ) as mp:
        # generate output
        mp.batch_process(zoom=zoom)
        # generate index
        list(
            zoom_index_gen(
                mp=mp,
                zoom=zoom,
                out_dir=mp.config.output.path,
                vrt=True,
            )
        )
        output_tiles = list(
            mp.config.output_pyramid.tiles_from_bounds(
                mp.config.bounds_at_zoom(zoom=zoom), zoom=zoom
            )
        )
        # union of all output tile extents (left, bottom, right, top)
        bounds = (
            min([t.left for t in output_tiles]),
            min([t.bottom for t in output_tiles]),
            max([t.right for t in output_tiles]),
            max([t.top for t in output_tiles]),
        )
        # bounds = mp.config.effective_bounds
        vrt_index = os.path.join(mp.config.output.path, "%s.vrt" % zoom)
        with rasterio.open(vrt_index) as vrt:
            assert vrt.driver == "VRT"
            assert vrt.dtypes[0] == "uint16"
            assert vrt.meta["dtype"] == "uint16"
            assert vrt.count == 1
            assert vrt.nodata == 0
            # mercator coordinates are large floats; compare rounded values
            for vrt_b, b in zip(vrt.bounds, bounds):
                assert round(vrt_b, 6) == round(b, 6)
            vrt_data = vrt.read()
            assert vrt_data.any()
        # generate a VRT using GDAL and compare
        out_dir = os.path.join(mp_tmpdir, "cleantopo_br_mercator")
        temp_vrt = os.path.join(out_dir, str(zoom) + "_gdal.vrt")
        gdalbuildvrt = "gdalbuildvrt %s %s/%s/*/*.tif > /dev/null" % (
            temp_vrt,
            out_dir,
            zoom,
        )
        os.system(gdalbuildvrt)
        with rasterio.open(temp_vrt, "r") as gdal_vrt:
            assert gdal_vrt.dtypes[0] == "uint16"
            assert gdal_vrt.meta["dtype"] == "uint16"
            assert gdal_vrt.count == 1
            assert gdal_vrt.nodata == 0
            # BUG FIX: this loop previously re-checked vrt.bounds instead of
            # the GDAL-built VRT's bounds, so gdal_vrt.bounds was never tested.
            for gdal_vrt_b, b in zip(gdal_vrt.bounds, bounds):
                assert round(gdal_vrt_b, 6) == round(b, 6)
            gdal_vrt_data = gdal_vrt.read()
            assert np.array_equal(vrt_data, gdal_vrt_data)
            assert gdal_vrt_data.any()
    # make sure handling an existing VRT works
    with mapchete.open(
        dict(cleantopo_br_mercator.dict, zoom_levels=dict(min=0, max=zoom))
    ) as mp:
        # generate output
        mp.batch_process(zoom=zoom)
        # generate index
        list(
            zoom_index_gen(
                mp=mp,
                zoom=zoom,
                out_dir=mp.config.output.path,
                vrt=True,
            )
        )
| 30.178899
| 88
| 0.556316
|
acfc212566463ac845d376f73914e7b88fd193a6
| 1,900
|
py
|
Python
|
slider-agent/src/main/python/resource_management/core/providers/package/apt.py
|
turningme/incubator-retired-slider
|
1d4f519d763210f46e327338be72efa99e65cb5d
|
[
"Apache-2.0"
] | 60
|
2015-01-05T10:51:11.000Z
|
2018-12-15T03:48:09.000Z
|
slider-agent/src/main/python/resource_management/core/providers/package/apt.py
|
turningme/incubator-retired-slider
|
1d4f519d763210f46e327338be72efa99e65cb5d
|
[
"Apache-2.0"
] | null | null | null |
slider-agent/src/main/python/resource_management/core/providers/package/apt.py
|
turningme/incubator-retired-slider
|
1d4f519d763210f46e327338be72efa99e65cb5d
|
[
"Apache-2.0"
] | 87
|
2015-01-14T05:14:15.000Z
|
2018-12-25T14:14:56.000Z
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Slider Agent
"""
from resource_management.core.providers.package import PackageProvider
from resource_management.core import shell
from resource_management.core.logger import Logger
INSTALL_CMD = "/usr/bin/apt-get --force-yes --assume-yes install %s"
REMOVE_CMD = "/usr/bin/apt-get -y -q remove %s"
CHECK_CMD = "dpkg --get-selections %s | grep -v deinstall"
class AptProvider(PackageProvider):
    """Package provider backed by apt-get/dpkg for Debian-style systems."""

    def install_package(self, name):
        # Skip the apt-get call when dpkg already knows the package.
        if self._check_existence(name):
            Logger.info("Skipping installing existent package %s" % (name))
            return
        cmd = INSTALL_CMD % (name)
        Logger.info("Installing package %s ('%s')" % (name, cmd))
        shell.checked_call(cmd)

    def upgrade_package(self, name):
        # apt-get install upgrades in place, so upgrade delegates to install.
        return self.install_package(name)

    def remove_package(self, name):
        # Removing a package that is not installed is a no-op.
        if not self._check_existence(name):
            Logger.info("Skipping removing non-existent package %s" % (name))
            return
        cmd = REMOVE_CMD % (name)
        Logger.info("Removing package %s ('%s')" % (name, cmd))
        shell.checked_call(cmd)

    def _check_existence(self, name):
        # dpkg exits non-zero when the package is not installed.
        exit_code, _out = shell.call(CHECK_CMD % name)
        return exit_code == 0
| 35.849057
| 72
| 0.736842
|
acfc230121acbd7880178691284b1008f2b95445
| 1,451
|
py
|
Python
|
src/utils/callbacks.py
|
sharath957/ANN-implementation-DLCVNLP-demo
|
c85fdb8734ab9fc73bf59012b47554b85a26cdde
|
[
"MIT"
] | null | null | null |
src/utils/callbacks.py
|
sharath957/ANN-implementation-DLCVNLP-demo
|
c85fdb8734ab9fc73bf59012b47554b85a26cdde
|
[
"MIT"
] | null | null | null |
src/utils/callbacks.py
|
sharath957/ANN-implementation-DLCVNLP-demo
|
c85fdb8734ab9fc73bf59012b47554b85a26cdde
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import os
import numpy as np
import time
def get_timestamp(name):
    """Return *name* suffixed with the current wall-clock time, with spaces
    and colons replaced by underscores so the result is filesystem-safe."""
    stamp = time.asctime()
    for forbidden in (" ", ":"):
        stamp = stamp.replace(forbidden, "_")
    return f"{name}_at_{stamp}"
def get_callbacks(config, X_train):
    """Build the Keras callbacks (TensorBoard, EarlyStopping, ModelCheckpoint)
    described by *config*.

    config: dict with 'logs', 'params' and 'artifacts' sections — assumed
        schema, TODO confirm against the project's config file.
    X_train: training images; samples 10..30 are logged to TensorBoard.
    Returns the list [tensorboard_cb, early_stopping_cb, checkpointing_cb].
    """
    logs = config['logs']
    # Fresh timestamped log dir per run so TensorBoard runs do not collide.
    unique_dir_name = get_timestamp('tb_logs')
    TENSORBOARD_ROOT_LOG_DIR = os.path.join(logs['logs_dir'],logs['TENSORBOARD_ROOT_LOG_DIR'],unique_dir_name)
    os.makedirs(TENSORBOARD_ROOT_LOG_DIR, exist_ok=True)
    tensorboard_cb = tf.keras.callbacks.TensorBoard(log_dir=TENSORBOARD_ROOT_LOG_DIR)
    file_writer = tf.summary.create_file_writer(logdir=TENSORBOARD_ROOT_LOG_DIR)
    with file_writer.as_default():
        # assumes X_train images are 28x28 single-channel (MNIST-style) — TODO confirm
        images = np.reshape(X_train[10:30], (-1, 28, 28, 1)) ### <<< 20, 28, 28, 1
        tf.summary.image("20 handritten digit samples", images, max_outputs=25, step=0)
    params = config['params']
    early_stopping_cb = tf.keras.callbacks.EarlyStopping(
        patience= params['patience'], restore_best_weights=params['restore_best_weights'])
    artifacts = config['artifacts']
    CKPT_dir = os.path.join(artifacts['artifacts_dir'], artifacts['CHECKPOINT_DIR'])
    os.makedirs(CKPT_dir, exist_ok=True)
    CKPT_path = os.path.join(CKPT_dir, "model_ckpt.h5")
    # Keep only the best model seen so far (by validation metric).
    checkpointing_cb = tf.keras.callbacks.ModelCheckpoint(CKPT_path, save_best_only=True)
    return [tensorboard_cb, early_stopping_cb, checkpointing_cb ]
| 37.205128
| 110
| 0.72295
|
acfc23f9ea827b83951d7b7cd523c92769d23ed2
| 5,223
|
py
|
Python
|
npword2vec/HuffmanTree.py
|
qiaoxiu/nlp
|
790234d559ed9d5cae5b10dd5013ebd8052b6db9
|
[
"Apache-2.0"
] | null | null | null |
npword2vec/HuffmanTree.py
|
qiaoxiu/nlp
|
790234d559ed9d5cae5b10dd5013ebd8052b6db9
|
[
"Apache-2.0"
] | null | null | null |
npword2vec/HuffmanTree.py
|
qiaoxiu/nlp
|
790234d559ed9d5cae5b10dd5013ebd8052b6db9
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'multiangle'
# Implements the Huffman-tree machinery used for the hierarchical-softmax
# optimization of word2vec training.
'''
Why hierarchical softmax: with a large vocabulary, a full softmax over every
word is too expensive — too many parameters are updated per step and training
becomes infeasible. Hierarchical softmax only evaluates a handful of sigmoids
along a tree path and updates very few parameters, which speeds up training.

What a Huffman tree is: count word frequencies in the training text and build
the binary tree with minimum weighted path length, so frequent words sit near
the root and rare words sit deeper. Each word's Huffman code identifies one
unique root-to-leaf path and is never a prefix of another word's code.
'''
import numpy as np
class HuffmanTreeNode():
    """One node of the Huffman tree: leaves hold words, inner nodes hold the
    intermediate parameter vectors used by hierarchical softmax."""

    def __init__(self,value,possibility):
        # common part of leaf node and tree node
        # occurrence probability / frequency of the word in the training text
        self.possibility = possibility
        # left and right children (None for leaves)
        self.left = None
        self.right = None
        # value of leaf node will be the word, and be
        # mid vector in tree node
        # i.e. leaves store the word itself; inner nodes store the
        # intermediate parameter vector (theta) for hierarchical softmax
        self.value = value # the value of word
        # Huffman code of this node: the root-to-node path (left="1", right="0")
        self.Huffman = "" # store the huffman code

    def __str__(self):
        return 'HuffmanTreeNode object, value: {v}, possibility: {p}, Huffman: {h}'\
            .format(v=self.value,p=self.possibility,h=self.Huffman)
class HuffmanTree():
    """Huffman tree over a word dictionary, used for hierarchical softmax.

    word_dict maps word -> {'word': ..., 'possibility': ...}; after
    construction every entry additionally receives a 'Huffman' code string.
    """

    def __init__(self, word_dict, vec_len=15000):
        self.vec_len = vec_len # the length of word vector
        self.root = None
        # all vocabulary entries
        word_dict_list = list(word_dict.values())
        # create one leaf node per vocabulary entry
        node_list = [HuffmanTreeNode(x['word'],x['possibility']) for x in word_dict_list]
        # build the Huffman tree
        self.build_tree(node_list)
        # self.build_CBT(node_list)
        # generate the Huffman code for every word in the tree
        self.generate_huffman_code(self.root, word_dict)

    def build_tree(self,node_list):
        """Bottom-up Huffman construction: repeatedly merge the two
        lowest-probability nodes until a single root remains."""
        # node_list.sort(key=lambda x:x.possibility,reverse=True)
        # for i in range(node_list.__len__()-1)[::-1]:
        #     top_node = self.merge(node_list[i],node_list[i+1])
        #     node_list.insert(i,top_node)
        # self.root = node_list[0]
        while node_list.__len__()>1:
            i1 = 0  # i1: index of the smallest-probability node
            i2 = 1  # i2: index of the second-smallest-probability node
            if node_list[i2].possibility < node_list[i1].possibility :
                [i1,i2] = [i2,i1]
            for i in range(2,node_list.__len__()): # find the two smallest nodes
                if node_list[i].possibility<node_list[i2].possibility :
                    i2 = i
                    if node_list[i2].possibility < node_list[i1].possibility :
                        [i1,i2] = [i2,i1]
            # merge the two smallest nodes into a new inner node, which will
            # hold the intermediate parameter vector (theta)
            top_node = self.merge(node_list[i1],node_list[i2])
            # drop the two merged nodes (higher index first so the lower
            # index stays valid) and re-insert the merged node for the next round
            if i1<i2:
                node_list.pop(i2)
                node_list.pop(i1)
            elif i1>i2:
                node_list.pop(i1)
                node_list.pop(i2)
            else:
                raise RuntimeError('i1 should not be equal to i2')
            node_list.insert(0,top_node)
        self.root = node_list[0]

    def build_CBT(self,node_list): # build a complete binary tree
        """Alternative (unused) builder: a complete binary tree ordered by
        frequency instead of a true Huffman tree."""
        node_list.sort(key=lambda x:x.possibility,reverse=True)
        node_num = node_list.__len__()
        before_start = 0
        while node_num>1 :
            for i in range(node_num>>1):
                top_node = self.merge(node_list[before_start+i*2],node_list[before_start+i*2+1])
                node_list.append(top_node)
            # NOTE(review): for an odd count the leftover node is folded into
            # the last merge; `i` is reused from the for loop — verify this
            # handles node_num == 1 within a level correctly.
            if node_num%2==1:
                top_node = self.merge(node_list[before_start+i*2+2],node_list[-1])
                node_list[-1] = top_node
            before_start = before_start + node_num
            node_num = node_num>>1
        self.root = node_list[-1]

    def generate_huffman_code(self, node, word_dict):
        """Iteratively walk the tree and write each word's Huffman code back
        into word_dict[word]['Huffman']."""
        # # use recursion in this edition
        # if node.left==None and node.right==None :
        #     word = node.value
        #     code = node.Huffman
        #     print(word,code)
        #     word_dict[word]['Huffman'] = code
        #     return -1
        #
        # code = node.Huffman
        # if code==None:
        #     code = ""
        # node.left.Huffman = code + "1"
        # node.right.Huffman = code + "0"
        # self.generate_huffman_code(node.left, word_dict)
        # self.generate_huffman_code(node.right, word_dict)
        # use stack butnot recursion in this edition
        # left children get code bit "1", right children "0"; codes accumulate
        # along the path from the root, descending leftmost-first
        stack = [self.root]
        while (stack.__len__()>0):
            node = stack.pop()
            # go along left tree
            while node.left or node.right :
                code = node.Huffman
                node.left.Huffman = code + "1"
                node.right.Huffman = code + "0"
                stack.append(node.right)
                node = node.left
            word = node.value
            code = node.Huffman
            # print(word,'\t',code.__len__(),'\t',node.possibility)
            word_dict[word]['Huffman'] = code

    def merge(self,node1,node2):
        """Create and return the parent of node1/node2: probability is the
        sum of the children's; the higher-probability child becomes the left
        child (code bit "1")."""
        # the new inner node's frequency is the sum of both children
        top_pos = node1.possibility + node2.possibility
        # initialize the inner node's parameter vector with zeros
        top_node = HuffmanTreeNode(np.zeros([1,self.vec_len]), top_pos)
        if node1.possibility >= node2.possibility :
            top_node.left = node1
            top_node.right = node2
        else:
            top_node.left = node2
            top_node.right = node1
        return top_node
| 35.290541
| 118
| 0.573234
|
acfc243793537aba353d90cced1b60722abff595
| 114
|
py
|
Python
|
mdaas/__init__.py
|
shakedhi/MDaaS
|
4a0db31fffc2cba730f8b561a90098f59c8ef871
|
[
"MIT"
] | 2
|
2020-08-20T08:05:28.000Z
|
2020-10-13T02:02:35.000Z
|
mdaas/__init__.py
|
shakedhi/MDaaS
|
4a0db31fffc2cba730f8b561a90098f59c8ef871
|
[
"MIT"
] | null | null | null |
mdaas/__init__.py
|
shakedhi/MDaaS
|
4a0db31fffc2cba730f8b561a90098f59c8ef871
|
[
"MIT"
] | 1
|
2020-10-12T07:34:22.000Z
|
2020-10-12T07:34:22.000Z
|
from gym.envs.registration import register

# Register the environment so callers can instantiate it via
# gym.make("MDaaS-v1"); the implementation lives in mdaas.envs.MdaasEnv.
register(
    id='MDaaS-v1',
    entry_point='mdaas.envs:MdaasEnv',
)
| 16.285714
| 42
| 0.719298
|
acfc246eb580fe44ac1a2f4166019e110ffa131c
| 315
|
py
|
Python
|
conversate/admin.py
|
radiac/django-conversate
|
dd302ce30c345c40b5b482b03bf6962a3663eb6e
|
[
"BSD-3-Clause"
] | 14
|
2015-08-07T16:12:36.000Z
|
2022-01-19T13:10:15.000Z
|
conversate/admin.py
|
radiac/django-conversate
|
dd302ce30c345c40b5b482b03bf6962a3663eb6e
|
[
"BSD-3-Clause"
] | 2
|
2018-12-23T09:44:59.000Z
|
2020-09-13T13:04:38.000Z
|
conversate/admin.py
|
radiac/django-conversate
|
dd302ce30c345c40b5b482b03bf6962a3663eb6e
|
[
"BSD-3-Clause"
] | 3
|
2017-11-22T16:41:44.000Z
|
2021-12-09T00:40:17.000Z
|
from django.contrib import admin
from . import models
class RoomUserAdmin(admin.TabularInline):
    """Inline editor so a Room's memberships can be managed on the Room page."""
    model = models.RoomUser


class RoomAdmin(admin.ModelAdmin):
    """Admin configuration for Room: changelist columns plus inline members."""
    # Columns shown in the Room changelist.
    list_display = [
        "title",
        "slug",
    ]
    inlines = [
        RoomUserAdmin,
    ]


# Expose Room in the Django admin with the configuration above.
admin.site.register(models.Room, RoomAdmin)
| 15
| 43
| 0.650794
|
acfc2587d6877d9604a21d212803c75005bbfd29
| 1,544
|
py
|
Python
|
6 kyu/Hex cipher.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | 6
|
2020-09-03T09:32:25.000Z
|
2020-12-07T04:10:01.000Z
|
6 kyu/Hex cipher.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | 1
|
2021-12-13T15:30:21.000Z
|
2021-12-13T15:30:21.000Z
|
6 kyu/Hex cipher.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | null | null | null |
# Lookup table mapping a two-character hex string (printable-ASCII code
# points 0x09-0x7e) to the character it encodes; used by HexCipher.decode.
dict1={'0c': '\x0c', '6c': 'l', '6d': 'm', '54': 'T', '77': 'w', '79': 'y', '3e': '>', '2f': '/', '59': 'Y', '68': 'h', '6a': 'j', '63': 'c', '45': 'E', '46': 'F', '25': '%', '0a': '\n', '73': 's', '74': 't', '3f': '?', '4c': 'L', '3c': '<', '48': 'H', '3a': ':', '4e': 'N', '26': '&', '72': 'r', '4a': 'J', '62': 'b', '24': '$', '64': 'd', '49': 'I', '71': 'q', '3d': '=', '51': 'Q', '28': '(', '61': 'a', '7d': '}', '37': '7', '52': 'R', '23': '#', '41': 'A', '5a': 'Z', '78': 'x', '3b': ';', '09': '\t', '2a': '*', '65': 'e', '0d': '\r', '50': 'P', '30': '0', '7b': '{', '43': 'C', '4f': 'O', '0b': '\x0b', '32': '2', '7a': 'z', '2c': ',', '42': 'B', '7c': '|', '57': 'W', '47': 'G', '56': 'V', '27': "'", '34': '4', '21': '!', '76': 'v', '36': '6', '5e': '^', '33': '3', '4b': 'K', '29': ')', '2e': '.', '2d': '-', '67': 'g', '35': '5', '38': '8', '53': 'S', '44': 'D', '7e': '~', '6b': 'k', '40': '@', '39': '9', '5b': '[', '70': 'p', '66': 'f', '4d': 'M', '58': 'X', '31': '1', '20': ' ', '6f': 'o', '5f': '_', '69': 'i', '5c': '\\', '55': 'U', '5d': ']', '6e': 'n', '60': '`', '22': '"', '2b': '+', '75': 'u'}
class HexCipher:
    """Repeatedly apply a hex <-> ASCII substitution cipher."""

    @classmethod
    def encode(cls, s, n):
        # One pass replaces every character with its two-digit hex code;
        # n passes are applied in sequence. NOTE(review): relies on the
        # module-level TEXT2HEX table (char -> hex pair), defined elsewhere.
        for _ in range(n):
            s = "".join(TEXT2HEX[ch] for ch in s)
        return s

    @classmethod
    def decode(cls, s, n):
        # Inverse of encode: each pass maps every hex pair back to one
        # character via the module-level dict1 table, halving the string.
        for _ in range(n):
            s = "".join(dict1[s[i:i + 2]] for i in range(0, len(s), 2))
        return s
| 85.777778
| 1,116
| 0.299223
|
acfc25b50984787adb9faf99d1442c7f2a2f365a
| 2,849
|
py
|
Python
|
regression/locally_weighted_regression.py
|
romanorac/ml
|
a2b39e91145e1c5270020e943d3608b582ea2e0c
|
[
"Apache-2.0"
] | 4
|
2018-01-07T06:17:49.000Z
|
2021-03-01T13:01:11.000Z
|
regression/locally_weighted_regression.py
|
romanorac/machine-learning
|
a2b39e91145e1c5270020e943d3608b582ea2e0c
|
[
"Apache-2.0"
] | null | null | null |
regression/locally_weighted_regression.py
|
romanorac/machine-learning
|
a2b39e91145e1c5270020e943d3608b582ea2e0c
|
[
"Apache-2.0"
] | null | null | null |
"""
Locally Weighted Regression
References
----------
CS229 Lecture notes1, Chapter 3 Locally weighted linear regression, Prof. Andrew Ng
http://cs229.stanford.edu/notes/cs229-notes1.pdf
weighted least squares and locally weighted linear regression
http://www.dsplog.com/2012/02/05/weighted-least-squares-and-locally-weighted-linear-regression/
"""
import matplotlib.pyplot as plt
import numpy as np
from datasets import load
class LocallyWeightedRegression:
    """Non-parametric regression: for every query point, solve a weighted
    least-squares problem whose weights fall off with distance from the query
    (CS229 notes, chapter 3)."""

    def __init__(self, samples, targets):
        """
        :param samples: {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        :param targets: array-like, shape = [n_samples]
            Target values.
        """
        self.samples = samples
        self.targets = targets
        self.thetas = []        # per-query parameter vectors
        self.estimation = []    # per-query predictions

    def fit_predict(self, estimation_samples, tau=1):
        """
        fit estimation_samples with locally weighted regression according to samples and target

        :param estimation_samples : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Estimation vectors, where n_samples is the number of samples and
            n_features is the number of features.
        :param tau : float, tau >= 0
            The bandwidth parameter tau controls how quickly the weight of a
            training example falls off with distance of its x(i) from the
            query point x.

        Results are stored in self.thetas / self.estimation; returns ([], [])
        when tau is negative (kept for backward compatibility).
        """
        if tau < 0:
            # BUG FIX: this was a Python 2 `print` statement, which is a
            # SyntaxError on Python 3; the message itself is unchanged.
            print("tau should be greater than 0")
            return [], []
        self.thetas = []
        self.estimation = []
        bandwidth = 2 * tau ** 2  # loop-invariant denominator, hoisted
        for x in estimation_samples:
            # calculate weights that depend on the particular vector x
            weights = np.exp((-(self.samples - x) * (self.samples - x)).sum(axis=1) / bandwidth)
            diagonal_weights = np.diag(weights)  # diagonal matrix with weights
            samples_times_weights = np.dot(self.samples.T, diagonal_weights)
            a = np.dot(samples_times_weights, self.samples)
            b = np.dot(samples_times_weights, self.targets)
            # solve the weighted normal equations a * theta = b
            self.thetas.append(np.linalg.lstsq(a, b)[0])  # calculate thetas for given x with: A^-1 * b
            self.estimation.append(np.dot(x, self.thetas[-1]))  # calculate estimation for given x and thetas
if __name__ == '__main__':
    # Demo: scatter the training data and overlay LWR fits for several
    # bandwidths (small tau = local/wiggly fit, large tau = near-OLS).
    samples, targets, _, _, _ = load.lwlr()
    plt.scatter(samples[:, 1], targets) # plot train data
    lwr = LocallyWeightedRegression(samples, targets)
    taus = [1, 10, 25]
    color = ["r", "g", "b"]
    for i, tau in enumerate(taus):
        lwr.fit_predict(samples, tau=tau)
        plt.plot(samples[:, 1], lwr.estimation, c=color[i]) # plot estimations
    plt.show()
| 37.486842
| 112
| 0.632152
|
acfc27c9ba7d6a34d4aa7bd82ec9887e791cedc4
| 2,869
|
py
|
Python
|
local/HTML/HTML_write.py
|
landongw/pdf-to-code
|
fa7612ec7b1364310d4686b731773cd4e201c7c2
|
[
"MIT"
] | 2
|
2018-11-14T13:21:03.000Z
|
2019-01-04T04:54:32.000Z
|
local/HTML/HTML_write.py
|
landongw/pdf-to-code
|
fa7612ec7b1364310d4686b731773cd4e201c7c2
|
[
"MIT"
] | null | null | null |
local/HTML/HTML_write.py
|
landongw/pdf-to-code
|
fa7612ec7b1364310d4686b731773cd4e201c7c2
|
[
"MIT"
] | null | null | null |
from os import listdir
from numpy import array
from keras.preprocessing.text import Tokenizer, one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model
from keras.models import load_model
from keras.utils import to_categorical
from keras.layers import Embedding, TimeDistributed, RepeatVector, LSTM, concatenate , Input, Reshape, Dense, Flatten
from keras.preprocessing.image import array_to_img, img_to_array, load_img
from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
import numpy as np
# Load the saved model
model = load_model('model1.h5')
# Run the images through inception-resnet and extract the features without the classification layer
IR2 = InceptionResNetV2(weights='imagenet', include_top=False)
# Initialize the function that will create our vocabulary
# (no filtering and no lowercasing so HTML tokens survive tokenization intact)
tokenizer = Tokenizer(filters='', split=" ", lower=False)
# Read a document and return a string
def load_doc(filename):
    """Read a text file and return its entire contents as a single string."""
    # BUG FIX: the original opened the file without a context manager, so the
    # handle leaked if read() raised; `with` guarantees it is closed.
    with open(filename, 'r') as file:
        return file.read()
# Load all the HTML files
X = []
all_filenames = listdir('html/')
all_filenames.sort()  # deterministic file order so runs are reproducible
for filename in all_filenames:
    X.append(load_doc('html/'+filename))
# Create the vocabulary from the html files
tokenizer.fit_on_texts(X)
# map an integer to a word
def word_for_id(integer, tokenizer):
    """Reverse vocabulary lookup: the word mapped to *integer*, or None."""
    matches = (w for w, idx in tokenizer.word_index.items() if idx == integer)
    return next(matches, None)
# generate a description for an image
def generate_desc(model, tokenizer, photo, max_length):
    """Greedy-decode markup for *photo*, printing each predicted token.

    photo: encoder features for one image; max_length: padding length for the
    token window fed to the model. Returns None — output goes to stdout only.
    """
    # seed the generation process
    in_text = 'START'
    # iterate over the whole length of the sequence
    # NOTE(review): the 900-step cap is hard-coded and independent of max_length.
    for i in range(900):
        # integer encode input sequence (only the last 100 tokens are kept)
        sequence = tokenizer.texts_to_sequences([in_text])[0][-100:]
        # pad input
        sequence = pad_sequences([sequence], maxlen=max_length)
        # predict next word
        yhat = model.predict([photo,sequence], verbose=0)
        # convert probability to integer (greedy argmax decoding)
        yhat = np.argmax(yhat)
        # map integer to word
        word = word_for_id(yhat, tokenizer)
        # stop if we cannot map the word
        if word is None:
            break
        # append as input for generating the next word
        in_text += ' ' + word
        # Print the prediction
        print(' ' + word, end='')
        # stop if we predict the end of the sequence
        if word == 'END':
            break
    return
# TODO: Create a separate main.py with below:
# Load and image, preprocess it for IR2, extract features and generate the HTML
test_image = img_to_array(load_img('images/90.jpg', target_size=(299, 299)))
test_image = np.array(test_image, dtype=float)
test_image = preprocess_input(test_image)
# Encode the image once, then decode markup from the resulting features.
test_features = IR2.predict(np.array([test_image]))
generate_desc(model, tokenizer, np.array(test_features), 100)
| 35.8625
| 117
| 0.714883
|
acfc28025dd62c8cb7401e69ccd226470650b607
| 18,328
|
py
|
Python
|
ducktape/tests/test.py
|
stan-confluent/ducktape
|
e32c4347b3b538d2daa8551d13a67afd53b05ee1
|
[
"Apache-2.0"
] | null | null | null |
ducktape/tests/test.py
|
stan-confluent/ducktape
|
e32c4347b3b538d2daa8551d13a67afd53b05ee1
|
[
"Apache-2.0"
] | null | null | null |
ducktape/tests/test.py
|
stan-confluent/ducktape
|
e32c4347b3b538d2daa8551d13a67afd53b05ee1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from contextlib import contextmanager
import logging
import os
import re
import shutil
import sys
import tempfile
from ducktape.cluster.cluster_spec import ClusterSpec
from ducktape.tests.loggermaker import LoggerMaker, close_logger
from ducktape.utils.local_filesystem_utils import mkdir_p
from ducktape.command_line.defaults import ConsoleDefaults
from ducktape.services.service_registry import ServiceRegistry
from ducktape.template import TemplateRenderer
from ducktape.mark.resource import CLUSTER_SPEC_KEYWORD, CLUSTER_SIZE_KEYWORD
from ducktape.tests.status import FAIL
class Test(TemplateRenderer):
    """Base class for tests.

    Provides access to the session cluster and per-test logger, plus helpers
    for freeing service nodes and collecting/compressing service logs.
    """

    def __init__(self, test_context, *args, **kwargs):
        """
        :type test_context: ducktape.tests.test.TestContext
        """
        super(Test, self).__init__(*args, **kwargs)
        self.test_context = test_context

    @property
    def cluster(self):
        """The cluster object shared by the whole test session."""
        return self.test_context.session_context.cluster

    @property
    def logger(self):
        """Per-test logger supplied by the test context."""
        return self.test_context.logger

    def min_cluster_spec(self):
        """
        Returns a specification for the minimal cluster we need to run this test.

        This method replaces the deprecated min_cluster_size. Unlike min_cluster_size, it can handle
        non-Linux operating systems.

        In general, most Tests don't need to override this method. The default implementation
        seen here works well in most cases. However, the default implementation only takes into account
        the services that exist at the time of the call. You may need to override this method if you add
        new services during the course of your test.

        :return: A ClusterSpec object.
        """
        try:
            # If the Test overrode the deprecated min_cluster_size method, we will use that.
            num_linux_nodes = self.min_cluster_size()
            return ClusterSpec.simple_linux(num_linux_nodes)
        except NotImplementedError:
            # Otherwise, ask the service registry what kind of cluster spec we need for currently
            # extant services.
            return self.test_context.services.min_cluster_spec()

    def min_cluster_size(self):
        """
        Returns the number of linux nodes which this test needs.

        THIS METHOD IS DEPRECATED, and provided only for backwards compatibility.
        Please implement min_cluster_spec instead.

        :return: An integer.
        """
        raise NotImplementedError

    def setup(self):
        """Override this for custom setup logic."""
        # for backward compatibility
        self.setUp()

    def teardown(self):
        """Override this for custom teardown logic."""
        # for backward compatibility
        self.tearDown()

    def setUp(self):
        """Deprecated camelCase hook kept for backward compatibility."""
        pass

    def tearDown(self):
        """Deprecated camelCase hook kept for backward compatibility."""
        pass

    def free_nodes(self):
        """Best-effort release of all service nodes back to the cluster."""
        try:
            self.test_context.services.free_all()
        except BaseException as e:
            if isinstance(e, KeyboardInterrupt):
                raise
            # IMPROVEMENT: previously any other failure was swallowed
            # silently; log it so cleanup problems are visible.
            self.test_context.logger.warn("Error freeing nodes: %s" % str(e))

    def compress_service_logs(self, node, service, node_logs):
        """Compress logs on a node corresponding to the given service.

        :param node: The node on which to compress the given logs
        :param service: The service to which the node belongs
        :param node_logs: Paths to logs (or log directories) which will be compressed
        :return: a list of paths to compressed logs.
        """
        compressed_logs = []
        for nlog in node_logs:
            try:
                node.account.ssh(_compress_cmd(nlog))
                # the tarball drops a trailing path separator and gains .tgz
                if nlog.endswith(os.path.sep):
                    nlog = nlog[:-len(os.path.sep)]
                nlog += ".tgz"
                compressed_logs.append(nlog)
            except Exception as e:
                self.test_context.logger.warn(
                    "Error compressing log %s: service %s: %s" % (nlog, service, str(e))
                )
        return compressed_logs

    def copy_service_logs(self, test_status):
        """
        Copy logs from service nodes to the results directory.

        If the test passed, only the default set will be collected. If the the test failed, all logs will be collected.
        """
        for service in self.test_context.services:
            if not hasattr(service, 'logs') or len(service.logs) == 0:
                self.test_context.logger.debug("Won't collect service logs from %s - no logs to collect." %
                                               service.service_id)
                continue
            log_dirs = service.logs
            for node in service.nodes:
                # Gather locations of logs to collect
                node_logs = []
                for log_name in log_dirs.keys():
                    if test_status == FAIL or self.should_collect_log(log_name, service):
                        node_logs.append(log_dirs[log_name]["path"])
                self.test_context.logger.debug("Preparing to copy logs from %s: %s" %
                                               (node.account.hostname, node_logs))
                if self.test_context.session_context.compress:
                    self.test_context.logger.debug("Compressing logs...")
                    node_logs = self.compress_service_logs(node, service, node_logs)
                if len(node_logs) > 0:
                    # Create directory into which service logs will be copied
                    dest = os.path.join(
                        TestContext.results_dir(self.test_context, self.test_context.test_index),
                        service.service_id, node.account.hostname)
                    if not os.path.isdir(dest):
                        mkdir_p(dest)
                    # Try to copy the service logs
                    self.test_context.logger.debug("Copying logs...")
                    for log in node_logs:
                        # BUG FIX: the original handler referenced the stale
                        # loop variable ``log_name``/``log_dirs[log_name]``
                        # (last key iterated, not the failing log) and
                        # ``e.message``, which does not exist on Python 3 and
                        # raised AttributeError inside the handler. Per-log
                        # try also lets remaining logs still be copied.
                        try:
                            node.account.copy_from(log, dest)
                        except Exception as e:
                            self.test_context.logger.warn(
                                "Error copying log %(log)s to %(dest)s. \
                                service %(service)s: %(message)s" %
                                {'log': log,
                                 'dest': dest,
                                 'service': service,
                                 'message': str(e)})

    def mark_for_collect(self, service, log_name=None):
        """Force collection of one log (or all logs) of *service*."""
        if log_name is None:
            # Mark every log for collection
            for log_name in service.logs:
                self.test_context.log_collect[(log_name, service)] = True
        else:
            self.test_context.log_collect[(log_name, service)] = True

    def mark_no_collect(self, service, log_name=None):
        """Disable collection of one log of *service*."""
        self.test_context.log_collect[(log_name, service)] = False

    def should_collect_log(self, log_name, service):
        """True when this log should be collected (explicit toggle wins over
        the service's collect_default)."""
        key = (log_name, service)
        default = service.logs[log_name]["collect_default"]
        val = self.test_context.log_collect.get(key, default)
        return val
def _compress_cmd(log_path):
"""Return bash command which compresses the given path to a tarball."""
compres_cmd = 'cd "$(dirname %s)" && ' % log_path
compres_cmd += 'f="$(basename %s)" && ' % log_path
compres_cmd += 'if [ -e "$f" ]; then tar czf "$f.tgz" "$f"; fi && '
compres_cmd += 'rm -rf %s' % log_path
return compres_cmd
def _escape_pathname(s):
"""Remove fishy characters, replace most with dots"""
# Remove all whitespace completely
s = re.sub(r"\s+", "", s)
# Replace bad characters with dots
blacklist = r"[^\.\-=_\w\d]+"
s = re.sub(blacklist, ".", s)
# Multiple dots -> single dot (and no leading or trailing dot)
s = re.sub(r"[\.]+", ".", s)
return re.sub(r"^\.|\.$", "", s)
def test_logger(logger_name, log_dir, debug):
    """Helper method for getting a test logger object

    Note that if this method is called multiple times with the same ``logger_name``, it returns the same logger object.
    Note also, that for a fixed ``logger_name``, configuration occurs only the first time this function is called.
    """
    # Thin convenience wrapper around TestLoggerMaker.
    return TestLoggerMaker(logger_name, log_dir, debug).logger
class TestLoggerMaker(LoggerMaker):
    """LoggerMaker that writes per-test log files (info + debug) into
    *log_dir* and mirrors warnings (or everything, in debug mode) to stdout."""

    def __init__(self, logger_name, log_dir, debug):
        super(TestLoggerMaker, self).__init__(logger_name)
        self.log_dir = log_dir   # directory receiving test_log.info / test_log.debug
        self.debug = debug       # when True, stream DEBUG to stdout as well

    def configure_logger(self):
        """Set up the logger to log to stdout and files.
        This creates a directory and a few files as a side-effect.
        """
        # Idempotent: the base class tracks whether configuration already ran.
        if self.configured:
            return
        self._logger.setLevel(logging.DEBUG)
        mkdir_p(self.log_dir)
        # Create info and debug level handlers to pipe to log files
        info_fh = logging.FileHandler(os.path.join(self.log_dir, "test_log.info"))
        debug_fh = logging.FileHandler(os.path.join(self.log_dir, "test_log.debug"))
        info_fh.setLevel(logging.INFO)
        debug_fh.setLevel(logging.DEBUG)
        formatter = logging.Formatter(ConsoleDefaults.TEST_LOG_FORMATTER)
        info_fh.setFormatter(formatter)
        debug_fh.setFormatter(formatter)
        self._logger.addHandler(info_fh)
        self._logger.addHandler(debug_fh)
        ch = logging.StreamHandler(sys.stdout)
        ch.setFormatter(formatter)
        if self.debug:
            # If debug flag is set, pipe debug logs to stdout
            ch.setLevel(logging.DEBUG)
        else:
            # default - pipe warning level logging to stdout
            ch.setLevel(logging.WARNING)
        self._logger.addHandler(ch)
class TestContext(object):
    """Wrapper class for state variables needed to properly run a single 'test unit'."""

    def __init__(self, **kwargs):
        """
        :param session_context: shared session-level state object
        :param cluster: the cluster object which will be used by this test
        :param module: name of the module containing the test class/method
        :param cls: class object containing the test method
        :param function: the test method
        :param file: file containing this module
        :param injected_args: a dict containing keyword args which will be passed to the test method
        :param cluster_use_metadata: dict containing information about how this test will use cluster resources
        """
        self.session_context = kwargs.get("session_context")
        self.cluster = kwargs.get("cluster")
        self.module = kwargs.get("module")
        # Normalize to an absolute path so downstream reporting is stable
        # regardless of the cwd the test was discovered from.
        if kwargs.get("file") is not None:
            self.file = os.path.abspath(kwargs.get("file"))
        else:
            self.file = None
        self.cls = kwargs.get("cls")
        self.function = kwargs.get("function")
        self.injected_args = kwargs.get("injected_args")
        self.ignore = kwargs.get("ignore", False)

        # cluster_use_metadata is a dict containing information about how this test will use cluster resources
        self.cluster_use_metadata = copy.copy(kwargs.get("cluster_use_metadata", {}))

        self.services = ServiceRegistry()
        self.test_index = None

        # dict for toggling service log collection on/off
        self.log_collect = {}

        # Lazily created in the `logger` / `local_scratch_dir` properties.
        self._logger = None
        self._local_scratch_dir = None

    def __repr__(self):
        return \
            "<module=%s, cls=%s, function=%s, injected_args=%s, file=%s, ignore=%s, " \
            "cluster_size=%s, cluster_spec=%s>" % \
            (self.module, self.cls_name, self.function_name, str(self.injected_args), str(self.file),
             str(self.ignore), str(self.expected_num_nodes), str(self.expected_cluster_spec))

    def copy(self, **kwargs):
        """Construct a new TestContext object from another TestContext object
        Note that this is not a true copy, since a fresh ServiceRegistry instance will be created.
        """
        ctx_copy = TestContext(**self.__dict__)
        ctx_copy.__dict__.update(**kwargs)
        return ctx_copy

    @property
    def local_scratch_dir(self):
        """This local scratch directory is created/destroyed on the test driver before/after each test is run."""
        if not self._local_scratch_dir:
            self._local_scratch_dir = tempfile.mkdtemp()
        return self._local_scratch_dir

    @property
    def test_metadata(self):
        # NOTE(review): assumes self.file, self.cls and self.function are all
        # set; callers appear to use this only for fully-specified tests.
        return {
            "directory": os.path.dirname(self.file),
            "file_name": os.path.basename(self.file),
            "cls_name": self.cls.__name__,
            "method_name": self.function.__name__,
            "injected_args": self.injected_args
        }

    @staticmethod
    def logger_name(test_context, test_index):
        """Unique logger name for this test run (suffixed by repeat index if any)."""
        if test_index is None:
            return test_context.test_id
        else:
            return "%s-%s" % (test_context.test_id, str(test_index))

    @staticmethod
    def results_dir(test_context, test_index):
        """Results directory: <session results>/<cls>/<method>/<args>/<index>,
        omitting any component that is not set."""
        d = test_context.session_context.results_dir

        if test_context.cls is not None:
            d = os.path.join(d, test_context.cls.__name__)
        if test_context.function is not None:
            d = os.path.join(d, test_context.function.__name__)
        if test_context.injected_args is not None:
            d = os.path.join(d, test_context.injected_args_name)
        if test_index is not None:
            d = os.path.join(d, str(test_index))

        return d

    @property
    def expected_num_nodes(self):
        """
        How many nodes of any type we expect this test to consume when run.

        :return: an integer number of nodes.
        """
        return self.expected_cluster_spec.size()

    @property
    def expected_cluster_spec(self):
        """
        The cluster spec we expect this test to consume when run.

        :return: A ClusterSpec object.
        """
        cluster_spec = self.cluster_use_metadata.get(CLUSTER_SPEC_KEYWORD)
        cluster_size = self.cluster_use_metadata.get(CLUSTER_SIZE_KEYWORD)
        if cluster_spec is not None:
            return cluster_spec
        elif cluster_size is not None:
            return ClusterSpec.simple_linux(cluster_size)
        elif self.cluster is None:
            return ClusterSpec.empty()
        else:
            return self.cluster.all()

    @property
    def globals(self):
        return self.session_context.globals

    @property
    def module_name(self):
        return "" if self.module is None else self.module

    @property
    def cls_name(self):
        return "" if self.cls is None else self.cls.__name__

    @property
    def function_name(self):
        return "" if self.function is None else self.function.__name__

    @property
    def description(self):
        """Description of the test, needed in particular for reporting.

        If the function has a docstring, return that, otherwise return the class docstring or "".
        """
        # Guard against unset function/cls so that reporting never raises
        # AttributeError (mirrors the None handling in cls_name/function_name).
        if self.function is not None and self.function.__doc__:
            return self.function.__doc__
        elif self.cls is not None and self.cls.__doc__ is not None:
            return self.cls.__doc__
        else:
            return ""

    @property
    def injected_args_name(self):
        if self.injected_args is None:
            return ""
        else:
            params = ".".join(["%s=%s" % (k, self.injected_args[k]) for k in self.injected_args])
            return _escape_pathname(params)

    @property
    def test_id(self):
        return self.test_name

    @property
    def test_name(self):
        """
        The fully-qualified name of the test. This is similar to test_id, but does not include the session ID. It
        includes the module, class, and method name.
        """
        name_components = [self.module_name,
                           self.cls_name,
                           self.function_name,
                           self.injected_args_name]
        return ".".join(filter(lambda x: x is not None and len(x) > 0, name_components))

    @property
    def logger(self):
        """Lazily created per-test logger writing into this test's results dir."""
        if self._logger is None:
            self._logger = test_logger(
                TestContext.logger_name(self, self.test_index),
                TestContext.results_dir(self, self.test_index),
                self.session_context.debug)
        return self._logger

    def close(self):
        """Release resources, etc."""
        if hasattr(self, "services"):
            for service in self.services:
                service.close()
            # Remove reference to services. This is important to prevent potential memory leaks if users write services
            # which themselves have references to large memory-intensive objects
            del self.services

        # Remove local scratch directory
        if self._local_scratch_dir and os.path.exists(self._local_scratch_dir):
            shutil.rmtree(self._local_scratch_dir)

        # Release file handles held by logger
        if self._logger:
            close_logger(self._logger)
@contextmanager
def in_dir(path):
    """Temporarily make *path* the working directory.

    The previous working directory is restored on exit, whether the body
    completes normally or raises.
    """
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)
@contextmanager
def in_temp_dir():
    """Run the body with a fresh temporary directory as the working directory.

    Yields the directory's path; the directory is removed on exit and the
    previous working directory restored.
    """
    with _new_temp_dir() as scratch, in_dir(scratch):
        yield scratch
@contextmanager
def _new_temp_dir():
""" Create a temporary directory that is removed automatically """
tmpdir = tempfile.mkdtemp()
try:
yield tmpdir
finally:
shutil.rmtree(tmpdir)
| 35.796875
| 119
| 0.621781
|
acfc2808381eb8fd281187e1afd3c018721530c2
| 1,542
|
py
|
Python
|
tests/integration/aio/test_concurrency.py
|
krisgeus/neo4j-python-driver
|
bdf6631702a4552253ab616055c47f9ab90c5d7a
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/aio/test_concurrency.py
|
krisgeus/neo4j-python-driver
|
bdf6631702a4552253ab616055c47f9ab90c5d7a
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/aio/test_concurrency.py
|
krisgeus/neo4j-python-driver
|
bdf6631702a4552253ab616055c47f9ab90c5d7a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2020 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asyncio import sleep, wait
from random import random
from pytest import mark
async def _run_queries(bolt_pool, d, values):
    """Run one ``RETURN $x`` round-trip per value on a single pooled connection.

    Each echoed value is asserted to match its input and appended to *d* in
    order.  A random sleep before each query encourages task interleaving.
    """
    connection = await bolt_pool.acquire(force_reset=True)
    for value in values:
        await sleep(random())
        result = await connection.run("RETURN $x", {"x": value})
        record = await result.single()
        assert record[0] == value
        d.append(value)
    await bolt_pool.release(connection, force_reset=True)
async def _run_tasks(bolt_pool, n_tasks, n_queries):
    """Spawn *n_tasks* concurrent query runners and check each saw every value
    of ``range(n_queries)`` in order."""
    expected = range(n_queries)
    data = [[] for _ in range(n_tasks)]
    await wait({_run_queries(bolt_pool, bucket, expected) for bucket in data})
    for bucket in data:
        assert bucket == list(expected)
@mark.asyncio
async def test_bolt_pool_should_allow_concurrent_async_usage(bolt_pool):
    # 10 concurrent tasks, each issuing 50 sequential queries on the shared pool.
    await _run_tasks(bolt_pool, 10, 50)
| 29.653846
| 74
| 0.701686
|
acfc2874704c973c61f1a085d8e0340453743e11
| 1,294
|
py
|
Python
|
test/test_api/test_payees.py
|
quinnhosler/ynab-sdk-python
|
4ef8040bb44216212a84c8990329dcf63972e0fa
|
[
"Apache-2.0"
] | null | null | null |
test/test_api/test_payees.py
|
quinnhosler/ynab-sdk-python
|
4ef8040bb44216212a84c8990329dcf63972e0fa
|
[
"Apache-2.0"
] | null | null | null |
test/test_api/test_payees.py
|
quinnhosler/ynab-sdk-python
|
4ef8040bb44216212a84c8990329dcf63972e0fa
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from kgb import SpyAgency
import test.support.fixtures.payees as payee_fixtures
from test.support.dummy_client import DummyClient
from test.support.mock import build_get_mock
from ynab_sdk import YNAB
from ynab_sdk.api.models.responses.payee import PayeeResponse
from ynab_sdk.api.models.responses.payees import PayeesResponse
class PayeesTest(SpyAgency, TestCase):
    """Exercise the payees endpoints against a stubbed HTTP client."""
    ynab: YNAB
    client: DummyClient

    def setUp(self):
        # Every test talks to a dummy client so no real HTTP happens.
        self.client = DummyClient()
        self.ynab = YNAB(client=self.client)

    def test_get_payees_with_success(self):
        get_spy = self.spy_on(
            self.client.get,
            call_fake=build_get_mock(payee_fixtures.VALID_PAYEES),
        )
        response = self.ynab.payees.get_payees('some-budget')
        self.assertTrue(get_spy.called_with('/budgets/some-budget/payees'))
        self.assertIsNotNone(response)
        self.assertIsInstance(response, PayeesResponse)

    def test_get_payee_with_success(self):
        get_spy = self.spy_on(
            self.client.get,
            call_fake=build_get_mock(payee_fixtures.VALID_PAYEE),
        )
        response = self.ynab.payees.get_payee('some-budget', 'some-payee')
        self.assertTrue(get_spy.called_with('/budgets/some-budget/payees/some-payee'))
        self.assertIsNotNone(response)
        self.assertIsInstance(response, PayeeResponse)
| 35.944444
| 97
| 0.750386
|
acfc28779ade7ec8d9a2ae7cf53fd2c8ca0c6880
| 10,374
|
py
|
Python
|
stage/standard/test_postgresql_metadata_processor.py
|
streamsets/datacollector-tests
|
6c3e908768e1d4a586e9183e2141096921ecd5be
|
[
"Apache-2.0"
] | 14
|
2019-03-04T10:12:39.000Z
|
2021-11-24T16:17:09.000Z
|
stage/standard/test_postgresql_metadata_processor.py
|
Pragatibs/datacollector-tests
|
aac53b2f0e056009ef0e437c8430651e3cf4d502
|
[
"Apache-2.0"
] | 48
|
2019-03-08T14:59:06.000Z
|
2021-08-13T14:49:56.000Z
|
stage/standard/test_postgresql_metadata_processor.py
|
Pragatibs/datacollector-tests
|
aac53b2f0e056009ef0e437c8430651e3cf4d502
|
[
"Apache-2.0"
] | 23
|
2018-09-24T20:49:17.000Z
|
2021-11-24T16:17:11.000Z
|
# Copyright 2020 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import string
import json
import decimal
import datetime
import pytest
from streamsets.testframework.markers import database, sdc_min_version
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
# Each tuple: (raw input value, SDC Field Type Converter target type,
#              value expected back in the output record,
#              expected PostgreSQL column type).
DATA_TYPES = [
    ('true', 'BOOLEAN', True, 'boolean'),
    ('a', 'CHAR', 'a', 'character'),
    # ('a', 'BYTE', None, 'something'), # Not supported
    (120, 'SHORT', 120, 'smallint'),
    (120, 'INTEGER', 120, 'integer'),
    (120, 'LONG', 120, 'bigint'),
    (20.1, 'FLOAT', 20.1, 'real'),
    (20.1, 'DOUBLE', 20.1, 'double precision'),
    (20.1, 'DECIMAL', decimal.Decimal('20.10'), 'numeric'),
    ('2020-01-01 10:00:00', 'DATE', datetime.datetime(2020, 1, 1, 10, 0), 'date'),
    ('2020-01-01 10:00:00', 'TIME', datetime.datetime(2020, 1, 1, 10, 0), 'time without time zone'),
    ('2020-01-01 10:00:00', 'DATETIME', datetime.datetime(2020, 1, 1, 10, 0), 'timestamp without time zone'),
    ("2020-01-01T10:00:00+00:00", 'ZONED_DATETIME', '2020-01-01T10:00:00Z', 'timestamp with time zone'),
    ('string', 'STRING', 'string', 'character varying'),
    ('string', 'BYTE_ARRAY', b'string', 'bytea'),
]
@database('postgresql')
@pytest.mark.parametrize('input,converter_type,expected_value,expected_type', DATA_TYPES, ids=[i[1] for i in DATA_TYPES])
def test_data_types(sdc_builder, sdc_executor, database, input, converter_type, expected_value, expected_type, keep_data):
    """For each SDC field type, verify the PostgreSQL Metadata processor
    creates a column of the expected SQL type and passes the record through
    unchanged - both on the table-creating first run and on a second run
    where the table already exists.
    """
    connection = database.engine.connect()
    table_name = get_random_string(string.ascii_letters, 10).lower()

    # Pipeline: raw JSON -> convert /value to the parametrized type ->
    # attach precision/scale field attributes -> PostgreSQL Metadata -> wiretap.
    builder = sdc_builder.get_pipeline_builder()

    origin = builder.add_stage('Dev Raw Data Source')
    origin.data_format = 'JSON'
    origin.stop_after_first_batch = True
    origin.raw_data = json.dumps({"value": input })

    converter = builder.add_stage('Field Type Converter')
    converter.conversion_method = 'BY_FIELD'
    converter.field_type_converter_configs = [{
        'fields': ['/value'],
        'targetType': converter_type,
        'dataLocale': 'en,US',
        'dateFormat': 'YYYY_MM_DD_HH_MM_SS',
        'zonedDateTimeFormat': 'ISO_OFFSET_DATE_TIME',
        'scale': 2
    }]

    # precision/scale attributes are read by the processor when sizing
    # numeric columns.
    expression = builder.add_stage('Expression Evaluator')
    expression.field_attribute_expressions = [{
        "fieldToSet": "/value",
        "attributeToSet": "precision",
        "fieldAttributeExpression": "5"
    },{
        "fieldToSet": "/value",
        "attributeToSet": "scale",
        "fieldAttributeExpression": "5"
    }]

    processor = builder.add_stage('PostgreSQL Metadata')
    processor.table_name = table_name

    wiretap = builder.add_wiretap()

    origin >> converter >> expression >> processor >> wiretap.destination

    pipeline = builder.build().configure_for_environment(database)
    pipeline.configuration["shouldRetry"] = False
    sdc_executor.add_pipeline(pipeline)

    try:
        # 1) Run the pipeline for the first time
        sdc_executor.start_pipeline(pipeline).wait_for_finished()

        # 1.1) We should create table in PostgreSQL
        rs = connection.execute(f"SELECT column_name, data_type FROM information_schema.columns WHERE table_name = '{table_name}'")
        rows = [row for row in rs]
        assert len(rows) == 1
        assert rows[0][0] == 'value'
        assert rows[0][1] == expected_type

        # 1.2) The pipeline should output one record that is unchanged
        output = wiretap.output_records
        assert len(output) == 1
        assert output[0].field['value'] == expected_value

        # Intermezzo - need to reset wiretap
        wiretap.reset()

        # 2) Let's run the pipeline again, this time the table already exists in the database
        sdc_executor.start_pipeline(pipeline).wait_for_finished()

        # 2.1) So the pipeline should just output the same (unchanged) record and be done
        output = wiretap.output_records
        assert len(output) == 1
        assert output[0].field['value'] == expected_value
    finally:
        if not keep_data:
            logger.info('Dropping table %s in %s database ...', table_name, database.type)
            connection.execute(f"DROP TABLE IF EXISTS \"{table_name}\"")
# Identifier rules: https://www.postgresql.org/docs/9.1/sql-syntax-lexical.html
# The processor was written to automatically lowercase all table names; that
# can't easily be changed without breaking backward compatibility.
# Each tuple: (test id, table name, column name).
OBJECT_NAMES = [
    ('keywords', 'table', 'column'),
    ('lowercase', get_random_string(string.ascii_lowercase, 20), get_random_string(string.ascii_lowercase, 20)),
    ('uppercase', get_random_string(string.ascii_uppercase, 20), get_random_string(string.ascii_uppercase, 20)),
    ('mixedcase', get_random_string(string.ascii_letters, 20), get_random_string(string.ascii_letters, 20)),
    ('max_table_name', get_random_string(string.ascii_lowercase, 63), get_random_string(string.ascii_lowercase, 20)),
    ('max_column_name', get_random_string(string.ascii_lowercase, 20), get_random_string(string.ascii_lowercase, 63)),
    ('numbers', get_random_string(string.ascii_lowercase, 5) + "0123456789", get_random_string(string.ascii_lowercase, 5) + "0123456789"),
    ('special', get_random_string(string.ascii_lowercase, 5) + "$_", get_random_string(string.ascii_lowercase, 5) + "$_"),
]
@database('postgresql')
@sdc_min_version('3.20.0')
@pytest.mark.parametrize('test_name,table_name,column_name', OBJECT_NAMES, ids=[i[0] for i in OBJECT_NAMES])
def test_object_names(sdc_builder, sdc_executor, test_name, table_name, column_name, database, keep_data):
    """Verify the processor handles the various table/column identifier shapes
    in OBJECT_NAMES: a table with one integer column is created on the first
    run, and a second run over the existing table produces no error records.
    """
    connection = database.engine.connect()

    builder = sdc_builder.get_pipeline_builder()

    source = builder.add_stage('Dev Raw Data Source')
    source.data_format = 'JSON'
    # One record whose single key is the parametrized column name.
    source.raw_data = f'{{ "{column_name}" : 1 }}'
    source.stop_after_first_batch = True

    processor = builder.add_stage('PostgreSQL Metadata')
    processor.table_name = table_name

    trash = builder.add_stage('Trash')

    source >> processor >> trash

    pipeline = builder.build().configure_for_environment(database)
    try:
        sdc_executor.add_pipeline(pipeline)

        # Run the pipeline for the first time
        sdc_executor.start_pipeline(pipeline).wait_for_finished()

        # 1.1) We should create table in PostgreSQL
        rs = connection.execute(f"SELECT column_name, data_type FROM information_schema.columns WHERE table_name = '{table_name}'")
        rows = [row for row in rs]
        assert len(rows) == 1
        assert rows[0][0] == column_name
        assert rows[0][1] == 'integer'

        # Run the pipeline for the second time
        sdc_executor.start_pipeline(pipeline).wait_for_finished()

        # 2.1) No errors should be generated
        history = sdc_executor.get_pipeline_history(pipeline)
        assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 1
        assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 1
        assert history.latest.metrics.counter('pipeline.batchErrorRecords.counter').count == 0
    finally:
        if not keep_data:
            logger.info('Dropping table %s in %s database ...', table_name, database.type)
            connection.execute(f"DROP TABLE IF EXISTS \"{table_name}\"")
@database('postgresql')
def test_multiple_batches(sdc_builder, sdc_executor, database, keep_data):
    """Run many batches through the processor, fanning records out over 100
    tables (seq % 100), and verify all 100 tables get created.
    """
    table_prefix = get_random_string(string.ascii_letters, 10).lower()
    connection = database.engine.connect()

    builder = sdc_builder.get_pipeline_builder()

    batch_size = 100
    batches = 100

    origin = builder.add_stage('Dev Data Generator')
    origin.batch_size = batch_size
    origin.delay_between_batches = 0
    origin.fields_to_generate = [{
        "type": "LONG_SEQUENCE",
        "field": "seq"
    }]

    # We create 100 tables and iterate over them over and over again
    processor = builder.add_stage('PostgreSQL Metadata')
    processor.table_name = table_prefix + "_${record:value('/seq') % 100}"

    trash = builder.add_stage('Trash')

    origin >> processor >> trash

    pipeline = builder.build().configure_for_environment(database)
    try:
        sdc_executor.add_pipeline(pipeline)

        # Run the pipeline for the first time
        sdc_executor.start_pipeline(pipeline)
        sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', batch_size * batches, timeout_sec=300)
        sdc_executor.stop_pipeline(pipeline)

        # 1.1) We should have 100 tables all with a single column
        # NOTE(review): "table_name column_name" (no comma) aliases table_name
        # AS column_name, so this selects table names only; since each table
        # has exactly one column the count still works - confirm the alias is
        # intentional.
        rs = connection.execute(f"SELECT table_name column_name FROM information_schema.columns WHERE table_name LIKE '{table_prefix}_%%' order by table_name ASC")
        rows = [row[0] for row in rs]
        assert len(rows) == 100
        expected = sorted([f"{table_prefix}_{i}" for i in range(0, 100)])
        assert expected == rows
    finally:
        if not keep_data:
            for i in range(0, 100):
                table_name = table_prefix + "_" + str(i)
                logger.info('Dropping table %s in %s database ...', table_name, database.type)
                connection.execute(f"DROP TABLE IF EXISTS \"{table_name}\"")
@database('postgresql')
def test_dataflow_events(sdc_builder, sdc_executor, database):
    # Standard-suite placeholder: the stage emits no events to test.
    pytest.skip("No events supported in PostgreSQL Metadata Processor at this time.")
@database('postgresql')
def test_data_format(sdc_builder, sdc_executor, database, keep_data):
    # Standard-suite placeholder: the stage operates on records, not raw data formats.
    pytest.skip("PostgreSQL Metadata Processor doesn't deal with data formats")
@database('postgresql')
def test_push_pull(sdc_builder, sdc_executor, database):
    # Both origin styles are already covered by other tests in this file.
    pytest.skip("We haven't re-implemented this test since Dev Data Generator (push) is art of test_multiple_batches and Dev Raw Data Source (pull) is part of test_data_types.")
| 42.516393
| 177
| 0.693561
|
acfc28d4e3f06bd7abd31055dde53b2ec8397f28
| 62
|
py
|
Python
|
pytorch_toolbelt/__init__.py
|
anshulrai/pytorch-toolbelt
|
933d59eb5d0916170b3d467f68af615064dbf7a1
|
[
"MIT"
] | null | null | null |
pytorch_toolbelt/__init__.py
|
anshulrai/pytorch-toolbelt
|
933d59eb5d0916170b3d467f68af615064dbf7a1
|
[
"MIT"
] | null | null | null |
pytorch_toolbelt/__init__.py
|
anshulrai/pytorch-toolbelt
|
933d59eb5d0916170b3d467f68af615064dbf7a1
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
__version__ = "0.3.1"
| 15.5
| 38
| 0.790323
|
acfc2b91e731c16029b8ab364d0951011b09afc6
| 1,748
|
py
|
Python
|
templates/dags/airflow_seed_dag.py
|
audience-platform/terraform-aws-ecs-airflow
|
e8ec5aa203b961ba4d353fa6a55152dd603a3593
|
[
"MIT"
] | null | null | null |
templates/dags/airflow_seed_dag.py
|
audience-platform/terraform-aws-ecs-airflow
|
e8ec5aa203b961ba4d353fa6a55152dd603a3593
|
[
"MIT"
] | null | null | null |
templates/dags/airflow_seed_dag.py
|
audience-platform/terraform-aws-ecs-airflow
|
e8ec5aa203b961ba4d353fa6a55152dd603a3593
|
[
"MIT"
] | null | null | null |
import os
from os import listdir
from os.path import isfile, join
import datetime
from typing import Dict
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators.bash_operator import BashOperator
import boto3
# NOTE(review): the ${...} tokens in this file appear to be template
# placeholders (the file lives under templates/ in a Terraform module),
# presumably substituted before deployment - this file is not valid Python
# until they are rendered; confirm before editing them.

# The bucket name of where DAGs are stored in S3.
S3_BUCKET_NAME = "${BUCKET_NAME}"
# airflow home directory where dags & plugins reside
AIRFLOW_HOME = "${AIRFLOW_HOME}"

args = {
    "start_date": datetime.datetime(${YEAR}, ${MONTH}, ${DAY}),
}

# we prefix the dag with '0' to make it the first dag
with DAG(
        dag_id="0_sync_dags_in_s3_to_local_airflow_dags_folder",
        default_args=args,
        schedule_interval=None
) as dag:
    # Snapshot of the local dags folder before syncing (debug aid).
    list_dags_before = BashOperator(
        task_id="list_dags_before",
        bash_command="find ${AIRFLOW_HOME}/dags -not -path '*__pycache__*'",
    )

    # Mirror the bucket's dags/ prefix into the local dags folder
    # (--delete removes local files no longer present in S3).
    sync_dags = BashOperator(
        task_id="sync_dag_s3_to_airflow",
        bash_command=f"python -m awscli s3 sync --include='*' --size-only --delete s3://{S3_BUCKET_NAME}/dags/ {AIRFLOW_HOME}/dags/"
    )

    # Same for the plugins/ prefix.
    sync_plugins = BashOperator(
        task_id="sync_plugins_s3_to_airflow",
        bash_command=f"python -m awscli s3 sync --include='*' --size-only --delete s3://{S3_BUCKET_NAME}/plugins/ {AIRFLOW_HOME}/plugins/"
    )

    # Instantiate a DagBag so newly synced DAG files get parsed.
    refresh_dag_bag = BashOperator(
        task_id="refresh_dag_bag",
        bash_command="python -c 'from airflow.models import DagBag; d = DagBag();'",
    )

    # Snapshot after syncing, to compare with list_dags_before.
    list_dags_after = BashOperator(
        task_id="list_dags_after",
        bash_command="find ${AIRFLOW_HOME}/dags -not -path '*__pycache__*'",
    )

    (
        list_dags_before >>
        [sync_dags, sync_plugins] >>
        refresh_dag_bag >>
        list_dags_after
    )
| 29.133333
| 138
| 0.688787
|
acfc2bb26840be44da56b789641ea907de293b37
| 414
|
py
|
Python
|
books_toscrape_com/books_toscrape_com/items.py
|
JDTheRipperPC/toscrape-examples
|
83bd357da119a84c8f207dda899e73b1d4cc4f25
|
[
"MIT"
] | null | null | null |
books_toscrape_com/books_toscrape_com/items.py
|
JDTheRipperPC/toscrape-examples
|
83bd357da119a84c8f207dda899e73b1d4cc4f25
|
[
"MIT"
] | null | null | null |
books_toscrape_com/books_toscrape_com/items.py
|
JDTheRipperPC/toscrape-examples
|
83bd357da119a84c8f207dda899e73b1d4cc4f25
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class BookCategory(scrapy.Item):
    """A book category entry scraped from books.toscrape.com."""
    # Category page URL - presumably relative to the site root; confirm
    # against the spider that fills this item.
    href = scrapy.Field()
    name = scrapy.Field()
    category = scrapy.Field()
class BooksToscrapeComItem(scrapy.Item):
    """Placeholder item generated by `scrapy startproject`; no fields defined yet."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
| 19.714286
| 53
| 0.683575
|
acfc2be0c27e3d0160ef5e2ea2b7fe2ad0d97304
| 766
|
py
|
Python
|
baekjoon/python/permutation_cycle_10451.py
|
yskang/AlgorithmPractice
|
31b76e38b4c2f1e3e29fb029587662a745437912
|
[
"MIT"
] | null | null | null |
baekjoon/python/permutation_cycle_10451.py
|
yskang/AlgorithmPractice
|
31b76e38b4c2f1e3e29fb029587662a745437912
|
[
"MIT"
] | 1
|
2019-11-04T06:44:04.000Z
|
2019-11-04T06:46:55.000Z
|
baekjoon/python/permutation_cycle_10451.py
|
yskang/AlgorithmPractice
|
31b76e38b4c2f1e3e29fb029587662a745437912
|
[
"MIT"
] | null | null | null |
# Title: 순열 사이클
# Link: https://www.acmicpc.net/problem/10451
import sys
sys.setrecursionlimit(10 ** 6)
def read_list_int():
    """Read one stdin line and parse it as single-space-separated ints."""
    return [int(token) for token in sys.stdin.readline().strip().split(' ')]
def read_single_int():
    """Read one stdin line and parse it as a single int."""
    line = sys.stdin.readline().strip()
    return int(line)
def get_permutation_count(l, N):
    """Return the number of disjoint cycles in a permutation of 1..N.

    *l* is 1-indexed: l[i] is the image of i (index 0 is a dummy entry).
    """
    seen = [False] * (N + 1)
    cycles = 0
    for start in range(1, N + 1):
        if seen[start]:
            continue
        # New cycle: walk it until we return to an already-seen element.
        cycles += 1
        node = start
        while not seen[node]:
            seen[node] = True
            node = l[node]
    return cycles
if __name__ == '__main__':
    # For each of T test cases: read N and a permutation of 1..N, then print
    # how many disjoint cycles the permutation decomposes into.  A dummy 0 is
    # prepended so list indices line up with the 1-based values.
    T = read_single_int()
    for _ in range(T):
        N = read_single_int()
        l = read_list_int()
        print(get_permutation_count([0]+l, N))
| 20.702703
| 66
| 0.56658
|
acfc2c3a23cc27ecc5e262b02cc5f4248a82ff46
| 3,954
|
py
|
Python
|
pirates/effects/ThrowDirt.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 3
|
2021-02-25T06:38:13.000Z
|
2022-03-22T07:00:15.000Z
|
pirates/effects/ThrowDirt.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | null | null | null |
pirates/effects/ThrowDirt.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-25T06:38:17.000Z
|
2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.effects.ThrowDirt
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
from EffectController import EffectController
from PooledEffect import PooledEffect
import random
class ThrowDirt(PooledEffect, EffectController):
    """Pooled particle effect: a brief shower of dirt/rock sprites thrown up
    and pulled back down by a constant force (decompiled Panda3D code)."""
    __module__ = __name__  # decompiler artifact; kept as-is
    # Multiplier applied to the sprite X/Y scales configured below.
    cardScale = 128.0

    def __init__(self):
        PooledEffect.__init__(self)
        EffectController.__init__(self)
        # Sprite texture for the debris particles.
        model = loader.loadModel('models/effects/particleMaps')
        self.card = model.find('**/particleRockShower')
        self.speed = 20.0
        # Lazily create one shared render node for all ThrowDirt instances,
        # with lighting/fog/color-scale disabled for the particles.
        # NOTE(review): 'particleDummy' is presumably initialized (to a falsy
        # value) on PooledEffect or stripped by the decompiler - confirm.
        if not ThrowDirt.particleDummy:
            ThrowDirt.particleDummy = render.attachNewNode(ModelNode('ThrowDirtParticleDummy'))
            ThrowDirt.particleDummy.setDepthWrite(0)
            ThrowDirt.particleDummy.setLightOff()
            ThrowDirt.particleDummy.setColorScaleOff()
            ThrowDirt.particleDummy.setFogOff()
        # One particle system rendered as sprites, emitted from a disc.
        self.f = ParticleEffect.ParticleEffect('ThrowDirt')
        self.f.reparentTo(self)
        self.p0 = Particles.Particles('particles-1')
        self.p0.setFactory('PointParticleFactory')
        self.p0.setRenderer('SpriteParticleRenderer')
        self.p0.setEmitter('DiscEmitter')
        self.f.addParticles(self.p0)
        # Constant downward force pulling the debris back to the ground.
        self.f0 = ForceGroup.ForceGroup('Grav')
        force0 = LinearVectorForce(Vec3(0.0, -1.0, -20.0), 1.0, 1)
        force0.setVectorMasks(1, 1, 1)
        force0.setActive(1)
        self.f0.addForce(force0)
        self.f.addForceGroup(self.f0)
        # Emission cadence and per-particle lifetime/mass.
        self.p0.setPoolSize(32)
        self.p0.setBirthRate(0.02)
        self.p0.setLitterSize(1)
        self.p0.setLitterSpread(0)
        self.p0.setSystemLifespan(0.0)
        self.p0.setLocalVelocityFlag(1)
        self.p0.setSystemGrowsOlderFlag(0)
        self.p0.factory.setLifespanBase(2.0)
        self.p0.factory.setLifespanSpread(1.0)
        self.p0.factory.setMassBase(0.4)
        self.p0.factory.setMassSpread(0.35)
        self.p0.factory.setTerminalVelocityBase(400.0)
        self.p0.factory.setTerminalVelocitySpread(0.0)
        # Sprite appearance: dirt-brown, fades out, grows over its lifetime.
        self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
        self.p0.renderer.setUserAlpha(1.0)
        self.p0.renderer.setFromNode(self.card)
        self.p0.renderer.setColor(Vec4(0.3, 0.2, 0, 1))
        self.p0.renderer.setXScaleFlag(1)
        self.p0.renderer.setYScaleFlag(1)
        self.p0.renderer.setAnimAngleFlag(0)
        self.p0.renderer.setInitialXScale(0.004 * self.cardScale)
        self.p0.renderer.setFinalXScale(0.008 * self.cardScale)
        self.p0.renderer.setInitialYScale(0.004 * self.cardScale)
        self.p0.renderer.setFinalYScale(0.008 * self.cardScale)
        self.p0.renderer.setNonanimatedTheta(0.0)
        self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
        self.p0.renderer.setAlphaDisable(0)
        # Emitter: radiate outward/upward from the origin.
        self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
        self.p0.emitter.setAmplitude(1.5)
        self.p0.emitter.setAmplitudeSpread(0.0)
        self.p0.emitter.setOffsetForce(Vec3(0.0, 10.0, 20.0))
        self.p0.emitter.setExplicitLaunchVector(Vec3(0.0, 0.0, 0.0))
        self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
        self.p0.emitter.setRadius(1.0)

    def createTrack(self):
        # Burst for 0.3s, then effectively stop emitting (birth rate 100s),
        # wait for the remaining particles to die, then recycle the effect.
        self.track = Sequence(Func(self.p0.setBirthRate, 0.02), Func(self.p0.clearToInitial), Func(self.f.start, self, self.particleDummy), Wait(0.3), Func(self.p0.setBirthRate, 100), Wait(7.0), Func(self.cleanUpEffect))

    def cleanUpEffect(self):
        # Stop the effect and return this instance to the pool for reuse.
        EffectController.cleanUpEffect(self)
        self.checkInEffect(self)

    def destroy(self):
        EffectController.destroy(self)
        PooledEffect.destroy(self)
| 45.448276
| 220
| 0.694487
|
acfc2cc8babe25873276f9f41bb42670cd05216e
| 16,396
|
py
|
Python
|
nova/tests/api/openstack/compute/contrib/test_volumes.py
|
linets/nova
|
936d0a49594e04e3ec08c7a2115784d072e61dee
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/api/openstack/compute/contrib/test_volumes.py
|
linets/nova
|
936d0a49594e04e3ec08c7a2115784d072e61dee
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/api/openstack/compute/contrib/test_volumes.py
|
linets/nova
|
936d0a49594e04e3ec08c7a2115784d072e61dee
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Josh Durgin
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import webob
import nova
from nova.api.openstack.compute.contrib import volumes
from nova.compute import instance_types
from nova import context
import nova.db
from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova import volume
from webob import exc
FLAGS = flags.FLAGS
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID_A = '00000000-aaaa-aaaa-aaaa-000000000000'
FAKE_UUID_B = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
FAKE_UUID_C = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
FAKE_UUID_D = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
IMAGE_UUID = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
def fake_compute_api_create(cls, context, instance_type, image_href, **kwargs):
    """Stub for nova.compute.API.create.

    Records the block_device_mapping it was handed in the module-global
    ``_block_device_mapping_seen`` (so tests can inspect it) and returns a
    single canned server dict plus a ``None`` reservation id.
    """
    global _block_device_mapping_seen
    _block_device_mapping_seen = kwargs.get('block_device_mapping')

    inst_type = instance_types.get_instance_type_by_flavor_id(2)
    server = {
        'id': 1,
        'display_name': 'test_server',
        'uuid': FAKE_UUID,
        'instance_type': dict(inst_type),
        'access_ip_v4': '1.2.3.4',
        'access_ip_v6': 'fead::1234',
        'image_ref': IMAGE_UUID,
        'user_id': 'fake',
        'project_id': 'fake',
        'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
        'updated_at': datetime.datetime(2010, 11, 11, 11, 0, 0),
        'progress': 0,
        'fixed_ips': []
    }
    return ([server], None)
def fake_get_instance(self, context, instance_id):
    """Stub for compute API get(): echo back a minimal instance dict."""
    return {'uuid': instance_id}
def fake_attach_volume(self, context, instance, volume_id, device):
    """Stub for compute API attach_volume(): do nothing."""
    return ()
def fake_detach_volume(self, context, volume_id):
    """Stub for compute API detach_volume(): do nothing."""
    return ()
def fake_get_instance_bdms(self, context, instance):
    """Stub returning two canned block-device mappings for *instance*."""
    def bdm(bdm_id, device_name, volume_id):
        # All mappings share the same boilerplate besides id/device/volume.
        return {'id': bdm_id,
                'instance_uuid': instance['uuid'],
                'device_name': device_name,
                'delete_on_termination': 'False',
                'virtual_name': 'MyNamesVirtual',
                'snapshot_id': None,
                'volume_id': volume_id,
                'volume_size': 1}
    return [bdm(1, '/dev/fake0', FAKE_UUID_A),
            bdm(2, '/dev/fake1', FAKE_UUID_B)]
class BootFromVolumeTest(test.TestCase):
    """Tests for the os-volumes_boot extension (boot server from a volume)."""

    def setUp(self):
        super(BootFromVolumeTest, self).setUp()
        # Route compute create() through the recording stub above.
        self.stubs.Set(nova.compute.API, 'create', fake_compute_api_create)
        fakes.stub_out_nw_api(self.stubs)

    def test_create_root_volume(self):
        """POST a server with a root block_device_mapping; expect 202 and the
        mapping forwarded to compute API create()."""
        body = dict(server=dict(
                name='test_server', imageRef=IMAGE_UUID,
                flavorRef=2, min_count=1, max_count=1,
                block_device_mapping=[dict(
                        volume_id=1,
                        device_name='/dev/vda',
                        virtual='root',
                        delete_on_termination=False,
                        )]
                ))
        # Reset the module-global recorder set by fake_compute_api_create.
        global _block_device_mapping_seen
        _block_device_mapping_seen = None
        req = webob.Request.blank('/v2/fake/os-volumes_boot')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 202)
        server = jsonutils.loads(res.body)['server']
        self.assertEqual(FAKE_UUID, server['id'])
        self.assertEqual(FLAGS.password_length, len(server['adminPass']))
        # The stub must have seen exactly the mapping we posted.
        self.assertEqual(len(_block_device_mapping_seen), 1)
        self.assertEqual(_block_device_mapping_seen[0]['volume_id'], 1)
        self.assertEqual(_block_device_mapping_seen[0]['device_name'],
                         '/dev/vda')
def return_volume(context, volume_id):
    """Stub for nova.db.volume_get: return a bare volume record."""
    volume = {'id': volume_id}
    return volume
class VolumeApiTest(test.TestCase):
    """Tests for the os-volumes extension's volume CRUD endpoints."""
    def setUp(self):
        super(VolumeApiTest, self).setUp()
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        # Replace DB and volume-API entry points with in-memory fakes.
        self.stubs.Set(nova.db, 'volume_get', return_volume)
        self.stubs.Set(volume.api.API, "delete", fakes.stub_volume_delete)
        self.stubs.Set(volume.api.API, "get", fakes.stub_volume_get)
        self.stubs.Set(volume.api.API, "get_all", fakes.stub_volume_get_all)
        self.context = context.get_admin_context()
    def test_volume_create(self):
        """POST /os-volumes echoes the created volume's fields (camelCased)."""
        self.stubs.Set(volume.api.API, "create", fakes.stub_volume_create)
        vol = {"size": 100,
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "zone1:host1"}
        body = {"volume": vol}
        req = webob.Request.blank('/v2/fake/os-volumes')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 200)
        resp_dict = jsonutils.loads(resp.body)
        self.assertTrue('volume' in resp_dict)
        self.assertEqual(resp_dict['volume']['size'],
                         vol['size'])
        self.assertEqual(resp_dict['volume']['displayName'],
                         vol['display_name'])
        self.assertEqual(resp_dict['volume']['displayDescription'],
                         vol['display_description'])
        self.assertEqual(resp_dict['volume']['availabilityZone'],
                         vol['availability_zone'])
    def test_volume_create_no_body(self):
        """POST with an empty body is rejected as unprocessable (422)."""
        req = webob.Request.blank('/v2/fake/os-volumes')
        req.method = 'POST'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 422)
    def test_volume_index(self):
        """GET /os-volumes succeeds."""
        req = webob.Request.blank('/v2/fake/os-volumes')
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 200)
    def test_volume_detail(self):
        """GET /os-volumes/detail succeeds."""
        req = webob.Request.blank('/v2/fake/os-volumes/detail')
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 200)
    def test_volume_show(self):
        """GET of an existing volume id succeeds."""
        req = webob.Request.blank('/v2/fake/os-volumes/123')
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 200)
    def test_volume_show_no_volume(self):
        """GET of a missing volume returns 404."""
        self.stubs.Set(volume.api.API, "get", fakes.stub_volume_get_notfound)
        req = webob.Request.blank('/v2/fake/os-volumes/456')
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 404)
    def test_volume_delete(self):
        """DELETE of an existing volume is accepted (202)."""
        req = webob.Request.blank('/v2/fake/os-volumes/123')
        req.method = 'DELETE'
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 202)
    def test_volume_delete_no_volume(self):
        """DELETE of a missing volume returns 404."""
        self.stubs.Set(volume.api.API, "get", fakes.stub_volume_get_notfound)
        req = webob.Request.blank('/v2/fake/os-volumes/456')
        req.method = 'DELETE'
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 404)
class VolumeAttachTests(test.TestCase):
    """Tests for the VolumeAttachmentController (show/create/delete)."""
    def setUp(self):
        super(VolumeAttachTests, self).setUp()
        # The fake BDM list (fake_get_instance_bdms) contains exactly the
        # volumes FAKE_UUID_A and FAKE_UUID_B.
        self.stubs.Set(nova.compute.API,
                       'get_instance_bdms',
                       fake_get_instance_bdms)
        self.stubs.Set(nova.compute.API, 'get', fake_get_instance)
        self.context = context.get_admin_context()
        self.expected_show = {'volumeAttachment':
            {'device': '/dev/fake0',
             'serverId': FAKE_UUID,
             'id': FAKE_UUID_A,
             'volumeId': FAKE_UUID_A
            }}
    def test_show(self):
        """show() formats the matching fake BDM as a volumeAttachment."""
        attachments = volumes.VolumeAttachmentController()
        req = webob.Request.blank('/v2/fake/os-volumes/show')
        req.method = 'POST'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context
        result = attachments.show(req, FAKE_UUID, FAKE_UUID_A)
        self.assertEqual(self.expected_show, result)
    def test_delete(self):
        """delete() of an attached volume returns 202 Accepted."""
        self.stubs.Set(nova.compute.API, 'detach_volume', fake_detach_volume)
        attachments = volumes.VolumeAttachmentController()
        req = webob.Request.blank('/v2/fake/os-volumes/delete')
        req.method = 'POST'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context
        result = attachments.delete(req, FAKE_UUID, FAKE_UUID_A)
        self.assertEqual('202 Accepted', result.status)
    def test_delete_vol_not_found(self):
        """delete() of a volume absent from the BDM list raises HTTPNotFound."""
        self.stubs.Set(nova.compute.API, 'detach_volume', fake_detach_volume)
        attachments = volumes.VolumeAttachmentController()
        req = webob.Request.blank('/v2/fake/os-volumes/delete')
        req.method = 'POST'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context
        # FAKE_UUID_C is not among the fake BDMs, so lookup must fail.
        self.assertRaises(exc.HTTPNotFound,
                          attachments.delete,
                          req,
                          FAKE_UUID,
                          FAKE_UUID_C)
    def test_attach_volume(self):
        """create() attaches a volume and returns the attachment id."""
        self.stubs.Set(nova.compute.API, 'attach_volume', fake_attach_volume)
        attachments = volumes.VolumeAttachmentController()
        body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
                                     'device': '/dev/fake'}}
        req = webob.Request.blank('/v2/fake/os-volumes/attach')
        req.method = 'POST'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context
        result = attachments.create(req, FAKE_UUID, body)
        self.assertEqual(result['volumeAttachment']['id'],
                         '00000000-aaaa-aaaa-aaaa-000000000000')
class VolumeSerializerTest(test.TestCase):
    """XML serializer tests for volumes and volume attachments.

    Fix: the original used Python 2 ``print text`` statements, which are a
    syntax error under Python 3; ``print(text)`` behaves identically on
    Python 2 (single argument) and keeps the file importable on Python 3.
    """
    def _verify_volume_attachment(self, attach, tree):
        """Check every attachment attribute appears on the XML node."""
        for attr in ('id', 'volumeId', 'serverId', 'device'):
            self.assertEqual(str(attach[attr]), tree.get(attr))
    def _verify_volume(self, vol, tree):
        """Check a serialized <volume> element, its attachments and metadata."""
        self.assertEqual(tree.tag, 'volume')
        for attr in ('id', 'status', 'size', 'availabilityZone', 'createdAt',
                     'displayName', 'displayDescription', 'volumeType',
                     'snapshotId'):
            self.assertEqual(str(vol[attr]), tree.get(attr))
        for child in tree:
            self.assertTrue(child.tag in ('attachments', 'metadata'))
            if child.tag == 'attachments':
                self.assertEqual(1, len(child))
                self.assertEqual('attachment', child[0].tag)
                self._verify_volume_attachment(vol['attachments'][0], child[0])
            elif child.tag == 'metadata':
                # Every metadata key must appear exactly once, no extras.
                not_seen = set(vol['metadata'].keys())
                for gr_child in child:
                    self.assertTrue(gr_child.tag in not_seen)
                    self.assertEqual(str(vol['metadata'][gr_child.tag]),
                                     gr_child.text)
                    not_seen.remove(gr_child.tag)
                self.assertEqual(0, len(not_seen))
    def test_attach_show_create_serializer(self):
        """Serialize a single volumeAttachment and verify the XML."""
        serializer = volumes.VolumeAttachmentTemplate()
        raw_attach = dict(
            id='vol_id',
            volumeId='vol_id',
            serverId='instance_uuid',
            device='/foo')
        text = serializer.serialize(dict(volumeAttachment=raw_attach))
        print(text)
        tree = etree.fromstring(text)
        self.assertEqual('volumeAttachment', tree.tag)
        self._verify_volume_attachment(raw_attach, tree)
    def test_attach_index_serializer(self):
        """Serialize a volumeAttachments list and verify each child."""
        serializer = volumes.VolumeAttachmentsTemplate()
        raw_attaches = [dict(
                id='vol_id1',
                volumeId='vol_id1',
                serverId='instance1_uuid',
                device='/foo1'),
            dict(
                id='vol_id2',
                volumeId='vol_id2',
                serverId='instance2_uuid',
                device='/foo2')]
        text = serializer.serialize(dict(volumeAttachments=raw_attaches))
        print(text)
        tree = etree.fromstring(text)
        self.assertEqual('volumeAttachments', tree.tag)
        self.assertEqual(len(raw_attaches), len(tree))
        for idx, child in enumerate(tree):
            self.assertEqual('volumeAttachment', child.tag)
            self._verify_volume_attachment(raw_attaches[idx], child)
    def test_volume_show_create_serializer(self):
        """Serialize a full volume (attachments + metadata) and verify it."""
        serializer = volumes.VolumeTemplate()
        raw_volume = dict(
            id='vol_id',
            status='vol_status',
            size=1024,
            availabilityZone='vol_availability',
            createdAt=timeutils.utcnow(),
            attachments=[dict(
                id='vol_id',
                volumeId='vol_id',
                serverId='instance_uuid',
                device='/foo')],
            displayName='vol_name',
            displayDescription='vol_desc',
            volumeType='vol_type',
            snapshotId='snap_id',
            metadata=dict(
                foo='bar',
                baz='quux',
                ),
            )
        text = serializer.serialize(dict(volume=raw_volume))
        print(text)
        tree = etree.fromstring(text)
        self._verify_volume(raw_volume, tree)
    def test_volume_index_detail_serializer(self):
        """Serialize a list of volumes and verify every entry."""
        serializer = volumes.VolumesTemplate()
        raw_volumes = [dict(
                id='vol1_id',
                status='vol1_status',
                size=1024,
                availabilityZone='vol1_availability',
                createdAt=timeutils.utcnow(),
                attachments=[dict(
                    id='vol1_id',
                    volumeId='vol1_id',
                    serverId='instance_uuid',
                    device='/foo1')],
                displayName='vol1_name',
                displayDescription='vol1_desc',
                volumeType='vol1_type',
                snapshotId='snap1_id',
                metadata=dict(
                    foo='vol1_foo',
                    bar='vol1_bar',
                    ),
                ),
            dict(
                id='vol2_id',
                status='vol2_status',
                size=1024,
                availabilityZone='vol2_availability',
                createdAt=timeutils.utcnow(),
                attachments=[dict(
                    id='vol2_id',
                    volumeId='vol2_id',
                    serverId='instance_uuid',
                    device='/foo2')],
                displayName='vol2_name',
                displayDescription='vol2_desc',
                volumeType='vol2_type',
                snapshotId='snap2_id',
                metadata=dict(
                    foo='vol2_foo',
                    bar='vol2_bar',
                    ),
                )]
        text = serializer.serialize(dict(volumes=raw_volumes))
        print(text)
        tree = etree.fromstring(text)
        self.assertEqual('volumes', tree.tag)
        self.assertEqual(len(raw_volumes), len(tree))
        for idx, child in enumerate(tree):
            self._verify_volume(raw_volumes[idx], child)
| 37.43379
| 79
| 0.591303
|
acfc2d8e90820d8f4c9fbffd5ccec2a21ab602b4
| 819
|
py
|
Python
|
app/util/configure.py
|
360ls/360ls-stitcher
|
554d063810a762beaaa019c4a91284c36bdc7312
|
[
"MIT"
] | 10
|
2018-10-27T23:53:28.000Z
|
2022-02-10T06:53:47.000Z
|
app/util/configure.py
|
360ls/360ls-stitcher
|
554d063810a762beaaa019c4a91284c36bdc7312
|
[
"MIT"
] | 1
|
2016-09-26T21:09:30.000Z
|
2016-09-26T21:09:30.000Z
|
app/util/configure.py
|
360ls/360ls-stitcher
|
554d063810a762beaaa019c4a91284c36bdc7312
|
[
"MIT"
] | 5
|
2017-02-12T00:18:29.000Z
|
2021-09-14T11:53:27.000Z
|
"""
Module responsible for configuration based on instructions in the profile.yml config file.
"""
from __future__ import absolute_import, division, print_function
from .configuration import Configuration
from .textformatter import TextFormatter
def main():
    """Entry point: load the standard profile and report on its validity."""
    get_configuration()
def get_configuration(config_profile="config/profiles/standard.yml"):
    """Load *config_profile* (a YAML profile) and return its configuration.

    On a ValueError from parsing/validation an error message is printed and
    the function falls through, returning None implicitly.
    """
    try:
        profile = Configuration(config_profile)
        TextFormatter.print_info("Profile is valid and parsed properly.")
        return profile.get()
    except ValueError:
        TextFormatter.print_error("Profile was parsed, but it was invalid.")
# Script entry point: load and validate the standard configuration profile.
if __name__ == "__main__":
    main()
| 28.241379
| 90
| 0.728938
|
acfc2da33f1b45d126969d13c0b57174ef7fea47
| 586
|
py
|
Python
|
Labs/Topic06-functions/menuerepeat.py
|
olgarozhdestvina/pands-problems-2020
|
5d61bf6a86fbbe18adf00f547ce2380a39903e6e
|
[
"MIT"
] | null | null | null |
Labs/Topic06-functions/menuerepeat.py
|
olgarozhdestvina/pands-problems-2020
|
5d61bf6a86fbbe18adf00f547ce2380a39903e6e
|
[
"MIT"
] | null | null | null |
Labs/Topic06-functions/menuerepeat.py
|
olgarozhdestvina/pands-problems-2020
|
5d61bf6a86fbbe18adf00f547ce2380a39903e6e
|
[
"MIT"
] | null | null | null |
# Olga Rozhdestvina
# Function that keeps displaying the menu until the user picks q.
def menue():
    """Print the action menu and return the user's (stripped) choice."""
    menu_lines = (
        "What would you like to do?",
        "\t(a) Add new students",
        "\t(v) View students",
        "\t(q) Quit",
    )
    for line in menu_lines:
        print(line)
    return input("Type one letter (a/v/q): ").strip()
def doAdd():
    """Placeholder handler for the 'add students' action."""
    message = "in adding"
    print(message)
def doView():
    """Placeholder handler for the 'view students' action."""
    message = "in viewing"
    print(message)
# Dispatch loop: keep showing the menu until the user picks 'q'.
choice = menue()
while choice != 'q':
    if choice == 'a':
        doAdd()
    elif choice == 'v':
        doView()
    else:
        # Any input other than a/v/q: warn, then re-prompt below.
        print("\n\nplease select either a, v or q")
    choice = menue()
| 21.703704
| 66
| 0.578498
|
acfc2eda70361840ea38c0191d3f9a3ca5e537b8
| 498
|
py
|
Python
|
plotly/validators/layout/scene/annotation/_align.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/layout/scene/annotation/_align.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | null | null | null |
plotly/validators/layout/scene/annotation/_align.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class AlignValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for layout.scene.annotation.align
    (allowed values: 'left', 'center', 'right').

    NOTE(review): this file follows plotly.py's auto-generated validator
    pattern; prefer regenerating over hand-editing.
    """
    def __init__(
        self,
        plotly_name='align',
        parent_name='layout.scene.annotation',
        **kwargs
    ):
        super(AlignValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type='calc',
            role='style',
            values=['left', 'center', 'right'],
            **kwargs
        )
| 24.9
| 71
| 0.584337
|
acfc2f8fcd913d11056e649933173f979d2fbc5a
| 3,845
|
py
|
Python
|
gunicorn/reloader.py
|
Mattlk13/gunicorn
|
fa23cab8a221bb40d4e2afd4848de99ce095eb33
|
[
"MIT"
] | 2
|
2020-09-06T03:59:16.000Z
|
2021-02-21T16:32:28.000Z
|
gunicorn/reloader.py
|
Mattlk13/gunicorn
|
fa23cab8a221bb40d4e2afd4848de99ce095eb33
|
[
"MIT"
] | 10
|
2020-06-06T00:31:51.000Z
|
2022-03-12T00:04:46.000Z
|
gunicorn/reloader.py
|
Mattlk13/gunicorn
|
fa23cab8a221bb40d4e2afd4848de99ce095eb33
|
[
"MIT"
] | 2
|
2019-12-02T14:51:18.000Z
|
2019-12-07T11:59:15.000Z
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
# pylint: disable=no-else-continue
import os
import os.path
import re
import sys
import time
import threading
COMPILED_EXT_RE = re.compile(r'py[co]$')
class Reloader(threading.Thread):
    """Poll-based auto-reloader daemon thread.

    Watches the source file of every loaded module, plus any registered
    extra files, and invokes *callback* with the filename whenever a
    watched file's mtime increases.
    """
    def __init__(self, extra_files=None, interval=1, callback=None):
        super().__init__()
        # `setDaemon()` is deprecated since Python 3.10; assigning the
        # `daemon` attribute is the supported, behavior-identical form.
        self.daemon = True
        self._extra_files = set(extra_files or ())
        self._extra_files_lock = threading.RLock()
        self._interval = interval
        self._callback = callback

    def add_extra_file(self, filename):
        """Register an additional file to watch (thread-safe)."""
        with self._extra_files_lock:
            self._extra_files.add(filename)

    def get_files(self):
        """Return source paths of all loaded modules plus the extra files.

        Compiled extensions (.pyc/.pyo) are mapped back to their .py source.
        """
        fnames = [
            COMPILED_EXT_RE.sub('py', module.__file__)
            for module in tuple(sys.modules.values())
            if getattr(module, '__file__', None)
        ]
        with self._extra_files_lock:
            fnames.extend(self._extra_files)
        return fnames

    def run(self):
        """Poll watched files every `interval` seconds, forever."""
        mtimes = {}
        while True:
            for filename in self.get_files():
                try:
                    mtime = os.stat(filename).st_mtime
                except OSError:
                    # File vanished or is unreadable; skip this round.
                    continue
                old_time = mtimes.get(filename)
                if old_time is None:
                    # First sighting: record baseline, don't fire.
                    mtimes[filename] = mtime
                    continue
                elif mtime > old_time:
                    # NOTE(review): the recorded mtime is deliberately not
                    # updated here, so the callback re-fires every poll once
                    # a file changed — presumably the callback restarts the
                    # process; confirm before changing.
                    if self._callback:
                        self._callback(filename)
            time.sleep(self._interval)
# Feature detection: the inotify-based reloader is only offered on Linux
# and only when the third-party `inotify` package is importable.
has_inotify = False
if sys.platform.startswith('linux'):
    try:
        from inotify.adapters import Inotify
        import inotify.constants
        has_inotify = True
    except ImportError:
        pass
if has_inotify:
    class InotifyReloader(threading.Thread):
        """Event-driven reloader: fires *callback* on filesystem events in
        the directories of all loaded modules and registered extra files."""
        # Filesystem events that should trigger a reload.
        event_mask = (inotify.constants.IN_CREATE | inotify.constants.IN_DELETE
                      | inotify.constants.IN_DELETE_SELF | inotify.constants.IN_MODIFY
                      | inotify.constants.IN_MOVE_SELF | inotify.constants.IN_MOVED_FROM
                      | inotify.constants.IN_MOVED_TO)
        def __init__(self, extra_files=None, callback=None):
            super().__init__()
            self.setDaemon(True)
            self._callback = callback
            self._dirs = set()
            self._watcher = Inotify()
            for extra_file in extra_files:
                self.add_extra_file(extra_file)
        def add_extra_file(self, filename):
            """Watch the directory containing *filename* (dirs are deduped)."""
            dirname = os.path.dirname(filename)
            if dirname in self._dirs:
                return
            self._watcher.add_watch(dirname, mask=self.event_mask)
            self._dirs.add(dirname)
        def get_dirs(self):
            """Return the set of directories holding loaded modules' sources."""
            fnames = [
                os.path.dirname(COMPILED_EXT_RE.sub('py', module.__file__))
                for module in tuple(sys.modules.values())
                if getattr(module, '__file__', None)
            ]
            return set(fnames)
        def run(self):
            """Watch all module directories and forward each event's filename."""
            self._dirs = self.get_dirs()
            for dirname in self._dirs:
                self._watcher.add_watch(dirname, mask=self.event_mask)
            for event in self._watcher.event_gen():
                if event is None:
                    continue
                # event is a tuple; index 3 holds the affected filename.
                filename = event[3]
                self._callback(filename)
else:
    class InotifyReloader(object):
        """Stub that fails loudly when inotify support is unavailable."""
        def __init__(self, callback=None):
            raise ImportError('You must have the inotify module installed to '
                              'use the inotify reloader')
# Registry mapping the `reload_engine` setting to a reloader class;
# 'auto' prefers inotify when available, falling back to polling.
preferred_reloader = InotifyReloader if has_inotify else Reloader
reloader_engines = {
    'auto': preferred_reloader,
    'poll': Reloader,
    'inotify': InotifyReloader,
}
| 28.69403
| 88
| 0.580234
|
acfc2fdb1647f8625f887b1f13129f24288be985
| 243
|
py
|
Python
|
jp.atcoder/abc053/arc068_b/8207697.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc053/arc068_b/8207697.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc053/arc068_b/8207697.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
# Number of cards (input-format requirement; only the values below are used).
n = int(input())
cards = [int(a) for a in input().split()]
# The distinct values present among the cards.
pairwise_distincts = set(cards)
# len(cards) - len(distinct) counts duplicate cards. When that count is
# even, all distinct values can be kept; when odd, one fewer is kept
# (duplicates are evidently discarded in pairs per the problem's rule).
if (len(cards) - len(pairwise_distincts)) % 2 == 0:
    ans = len(pairwise_distincts)
else:
    ans = len(pairwise_distincts) - 1
print(ans)
| 22.090909
| 52
| 0.633745
|
acfc2fff450851194e4fb7331d69721187bed6a5
| 1,758
|
py
|
Python
|
pluginsinterface/TypeExtension.py
|
lonelyion/TweetToBot-Docker
|
ea91a9d93bad2b757c2ba0923ae9f1cd0f5ac278
|
[
"MIT"
] | null | null | null |
pluginsinterface/TypeExtension.py
|
lonelyion/TweetToBot-Docker
|
ea91a9d93bad2b757c2ba0923ae9f1cd0f5ac278
|
[
"MIT"
] | 1
|
2020-09-22T02:30:40.000Z
|
2020-09-22T02:30:40.000Z
|
pluginsinterface/TypeExtension.py
|
lonelyion/TweetToBot-Docker
|
ea91a9d93bad2b757c2ba0923ae9f1cd0f5ac278
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
from enum import Enum
"""
插件相关类
"""
class NoInstance(type):
#通过__call___方法控制访问
def __call__(self, *args, **kwargs):
raise TypeError('禁止实例化')
# Message types: private, group, tempprivate, tempgroup — one-to-one chat,
# group chat, temporary one-to-one chat, temporary group chat.
class PlugMsgTypeEnum(metaclass=NoInstance):
    """
    Plugin message-type definition class (bit flags).

    Not instantiable. Combine multiple filters with | (e.g.
    PlugMsgTypeEnum.private | PlugMsgTypeEnum.group).

    none         ignore everything (used for authorization)
    unknown      unknown type (only used while events are being built)
    private      one-to-one chat
    group        group chat
    tempprivate  temporary one-to-one chat
    tempgroup    temporary group chat
    plugadmin    plugin administrator (only used to recognize the admin
                 during message filtering)
    """
    none = 0
    unknown = 0
    private = 1
    group = 2
    tempprivate = 4
    tempgroup = 8
    plugadmin = 16  #插件管理者
    allowall = 1 | 2 | 4 | 8 | 16
    @staticmethod
    def getAllowlist(code) -> list:
        """Return the names of every concrete type whose bit is set in *code*."""
        l = []
        for key, value in PlugMsgTypeEnum.__dict__.items():
            if not key.startswith('__') and key not in ('getAllowlist',
                                                        'getMsgtype',
                                                        'allowall', 'none',
                                                        'plugadmin'):
                if code & value:
                    l.append(key)
        return l
    @staticmethod
    def getMsgtype(code) -> str:
        """Return the first type name whose bit is set in *code* (implicitly
        None when no bit matches)."""
        for key, value in PlugMsgTypeEnum.__dict__.items():
            if not key.startswith('__') and key not in (
                    'getAllowlist', 'getMsgtype', 'allowall', 'plugadmin'):
                if code & value:
                    return key
class PlugMsgReturn(Enum):
    """
    Plugin return-value definition class.
    Not instantiable directly.

    Ignore     message ignored (handling continues)
    Intercept  message intercepted (handling stops)
    """
    Ignore = 1
    Intercept = 2
    # Enum members with duplicate values become aliases: Allow is an alias
    # of Ignore and Refuse an alias of Intercept.
    Allow = 1
    Refuse = 2
| 25.852941
| 78
| 0.506826
|
acfc303bbc19047625fe34dfdfb1deda2104e3b2
| 2,242
|
py
|
Python
|
oteapi/strategies/download/sftp.py
|
TorgeirUstad/oteapi-core
|
60432a5e8a511cca8a9c52197a247de50e808096
|
[
"MIT"
] | null | null | null |
oteapi/strategies/download/sftp.py
|
TorgeirUstad/oteapi-core
|
60432a5e8a511cca8a9c52197a247de50e808096
|
[
"MIT"
] | 13
|
2022-01-31T10:15:56.000Z
|
2022-03-28T05:18:26.000Z
|
oteapi/strategies/download/sftp.py
|
TorgeirUstad/oteapi-core
|
60432a5e8a511cca8a9c52197a247de50e808096
|
[
"MIT"
] | null | null | null |
"""Strategy class for sftp/ftp"""
# pylint: disable=unused-argument
from dataclasses import dataclass
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING
import pysftp
from oteapi.datacache import DataCache
if TYPE_CHECKING: # pragma: no cover
from typing import Any, Dict, Optional
from oteapi.models import ResourceConfig
@dataclass
class SFTPStrategy:
    """Strategy for retrieving data via sftp.
    **Registers strategies**:
    - `("scheme", "ftp")`
    - `("scheme", "sftp")`
    """
    # Resource configuration: supplies downloadUrl and datacache settings.
    download_config: "ResourceConfig"
    def initialize(
        self, session: "Optional[Dict[str, Any]]" = None
    ) -> "Dict[str, Any]":
        """Initialize strategy: no session state is needed; return empty dict."""
        return {}
    def get(self, session: "Optional[Dict[str, Any]]" = None) -> "Dict[str, Any]":
        """Download via sftp"""
        cache = DataCache(self.download_config.configuration)
        # Reuse a prior download when an accessKey is configured and the
        # cache already holds an entry for it.
        if cache.config.accessKey and cache.config.accessKey in cache:
            key = cache.config.accessKey
        else:
            # Setup connection options
            cnopts = pysftp.CnOpts()
            # NOTE(review): hostkeys=None disables host-key verification,
            # which permits MITM — confirm this is intentional.
            cnopts.hostkeys = None
            if not self.download_config.downloadUrl:
                raise ValueError("downloadUrl is not defined in configuration.")
            # open connection and store data locally
            with pysftp.Connection(
                host=self.download_config.downloadUrl.host,
                username=self.download_config.downloadUrl.user,
                password=self.download_config.downloadUrl.password,
                port=self.download_config.downloadUrl.port,
                cnopts=cnopts,
            ) as sftp:
                # Because of insane locking on Windows, we have to close
                # the downloaded file before adding it to the cache
                with NamedTemporaryFile(prefix="oteapi-sftp-", delete=False) as handle:
                    localpath = Path(handle.name).resolve()
                try:
                    sftp.get(self.download_config.downloadUrl.path, localpath=localpath)
                    key = cache.add(localpath.read_bytes())
                finally:
                    # Always remove the temporary file, even on failure.
                    localpath.unlink()
        return {"key": key}
| 32.492754
| 88
| 0.611954
|
acfc30791aeb782077a7b465babc3f2d2a1b5eaa
| 426
|
py
|
Python
|
Taller de Estrucuras de Control Repeticion/punto 9.py
|
JFEscobarM/Clase-de-algoritmos
|
4fcf81b0bc55917efc202e0319442dac24f630c2
|
[
"MIT"
] | null | null | null |
Taller de Estrucuras de Control Repeticion/punto 9.py
|
JFEscobarM/Clase-de-algoritmos
|
4fcf81b0bc55917efc202e0319442dac24f630c2
|
[
"MIT"
] | null | null | null |
Taller de Estrucuras de Control Repeticion/punto 9.py
|
JFEscobarM/Clase-de-algoritmos
|
4fcf81b0bc55917efc202e0319442dac24f630c2
|
[
"MIT"
] | null | null | null |
"""
Datos de entrada
Numero de consumidores de combustible-->int-->f
Datos de salida
Todos los datos recolectados-->int-->f
Gasolina-->int-->g
Alcool-->int-->a
Diesel-->int-->d
"""
a=0
d=0
g=0
while True:
f=int(input(""))
if(f==1):
a=a+1
elif(f==2):
g=g+1
elif(f==3):
d=d+1
elif(f==4):
break
print(f"MUITO OBRIGADO\nAlcool: {a} \nGasolina: {g} \nDiesel: {d}")
| 16.384615
| 67
| 0.539906
|
acfc31f08ac0d970ab144987b113cdf1ed815b5e
| 3,400
|
py
|
Python
|
src/unicon/plugins/gaia/__init__.py
|
nielsvanhooy/unicon.plugins
|
3416fd8223f070cbb67a2cbe604e3c5d13584318
|
[
"Apache-2.0"
] | 18
|
2019-11-23T23:14:53.000Z
|
2022-01-10T01:17:08.000Z
|
src/unicon/plugins/gaia/__init__.py
|
nielsvanhooy/unicon.plugins
|
3416fd8223f070cbb67a2cbe604e3c5d13584318
|
[
"Apache-2.0"
] | 12
|
2020-11-09T20:39:25.000Z
|
2022-03-22T12:46:59.000Z
|
src/unicon/plugins/gaia/__init__.py
|
nielsvanhooy/unicon.plugins
|
3416fd8223f070cbb67a2cbe604e3c5d13584318
|
[
"Apache-2.0"
] | 32
|
2020-02-12T15:42:22.000Z
|
2022-03-15T16:42:10.000Z
|
'''
Author: Sam Johnson
Contact: samuel.johnson@gmail.com
https://github.com/TestingBytes
Contents largely inspired by sample Unicon repo:
https://github.com/CiscoDevNet/pyats-plugin-examples/tree/master/unicon_plugin_example/src/unicon_plugin_example
'''
from unicon.plugins.generic.connection_provider import GenericSingleRpConnectionProvider
from unicon.plugins.generic import GenericSingleRpConnection, ServiceList
from unicon.plugins.generic import service_implementation as svc
from unicon.plugins.linux import service_implementation as linux_svc
from unicon.plugins.gaia import service_implementation as gaia_svc
from unicon.plugins.gaia.statemachine import GaiaStateMachine
from unicon.plugins.gaia.settings import GaiaSettings
from time import sleep
class GaiaConnectionProvider(GenericSingleRpConnectionProvider):
    """Connection provider that normalizes Gaia's clish/expert shell modes."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # used for tracking the initial state - it impacts the commands used
        # for state changes
        self.initial_state = ''
    def init_handle(self):
        """Record the login shell, patch state-machine paths if the default
        shell is 'expert', move to clish, and run init commands."""
        con = self.connection
        self.initial_state = con.state_machine.current_state
        # The state machine path commands are different depending on the
        # initial state. If the default shell is configured to be 'expert'
        # mode the path commands are:
        # 'clish' for expert -> clish
        # 'exit' for clish -> expert
        # If the initial state is determined to be 'expert' mode, the
        # commands are updated and the switchto service is used to put
        # the gateway into clish mode.
        if self.initial_state == 'expert':
            path = con.state_machine.get_path('clish', 'expert')
            path.command = 'exit'
            path = con.state_machine.get_path('expert', 'clish')
            path.command = 'clish'
            # switch to clish if in expert on connect
            con.switchto('clish')
        if self.connection.goto_enable:
            con.state_machine.go_to('clish',
                                    self.connection.spawn,
                                    context=self.connection.context,
                                    prompt_recovery=self.prompt_recovery,
                                    timeout=self.connection.connection_timeout)
        self.execute_init_commands()
    def disconnect(self):
        """ Logout and disconnect from the device
        """
        con = self.connection
        if con.connected:
            con.log.info('disconnecting...')
            # Return to the shell mode the session started in before exiting.
            con.switchto(self.initial_state)
            con.sendline('exit')
            # Give the device a moment to process the logout.
            sleep(2)
            con.log.info('closing connection...')
            con.spawn.close()
class GaiaServiceList(ServiceList):
    """Service list for Gaia OS connections: Gaia-specific execute,
    traceroute and switchto; generic sendline; ping reused from Linux."""
    def __init__(self):
        super().__init__()
        self.execute = gaia_svc.GaiaExecute
        self.sendline = svc.Sendline
        self.ping = linux_svc.Ping
        self.traceroute = gaia_svc.GaiaTraceroute
        self.switchto = gaia_svc.GaiaSwitchTo
class GaiaConnection(GenericSingleRpConnection):
    """
    Connection class for Gaia OS connections.

    Wires together the Gaia state machine, connection provider, service
    list and settings for unicon's plugin registration (os='gaia').
    """
    os = 'gaia'
    platform = None
    chassis_type = 'single_rp'
    state_machine_class = GaiaStateMachine
    connection_provider_class = GaiaConnectionProvider
    subcommand_list = GaiaServiceList
    settings = GaiaSettings()
| 33.663366
| 112
| 0.661176
|
acfc339f2dc5e1c62fff0a3c0f09e3270748dba4
| 5,590
|
py
|
Python
|
musicbot/gpm.py
|
sarisia/MusicBot-GPM
|
39163d9c15a66779e6f77b6672e3f5b2307dec58
|
[
"MIT"
] | null | null | null |
musicbot/gpm.py
|
sarisia/MusicBot-GPM
|
39163d9c15a66779e6f77b6672e3f5b2307dec58
|
[
"MIT"
] | null | null | null |
musicbot/gpm.py
|
sarisia/MusicBot-GPM
|
39163d9c15a66779e6f77b6672e3f5b2307dec58
|
[
"MIT"
] | null | null | null |
import asyncio
import sqlite3
import subprocess
import os
import sys
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from pathlib import Path
from logging import getLogger
# I NEED ASYNCHRONOUS GPM LIBRARY!
allow_requests = True
from gmusicapi import Musicmanager
from .exceptions import ExtractionError
log = getLogger(__name__)
class GPMClient():
    """Google Play Music (uploaded-library) client for the bot.

    Runs the blocking gmusicapi Musicmanager calls on a small thread pool
    and keeps a local sqlite index of uploaded tracks in config/gpm.
    """
    def __init__(self, loop):
        self.loop = loop
        # Two workers so a download and a DB operation can overlap.
        self.tpool = ThreadPoolExecutor(max_workers=2)
        self.client = Musicmanager(debug_logging=False)
        self.bot_dir = Path.cwd()
        self.dl_dir = self.bot_dir/"audio_cache"
        self.gpm_config_dir = self.bot_dir/"config"/"gpm"
        self.gpm_config_dir.mkdir(exist_ok=True)
        # Use a stored OAuth credential file when present, else the default.
        self.credential = None
        if (self.gpm_config_dir/"credential").is_file():
            self.credential = str(self.gpm_config_dir/"credential")
        self.logged_in = False
        # Throws exception
        self.logged_in = self.client.login(self.credential)
        self.ffprobe = self._find_ffprobe()
    # Just wrap blocking functions to run in other thread.
    async def update_db(self):
        """Rebuild the local track DB; returns the track count (or None)."""
        return await self.loop.run_in_executor(self.tpool, partial(self._update_db))
    async def download(self, entry):
        """Download *entry*'s audio; returns (success, target_path_or_None)."""
        return await self.loop.run_in_executor(self.tpool, partial(self._download, entry))
    async def search(self, args):
        """Search the local DB for *args*; returns a list of GPMTrack."""
        return await self.loop.run_in_executor(self.tpool, partial(self._search, args))
    # This is a native coroutine
    async def play(self, player, trackinfo, **meta):
        return await player.playlist.add_gpm_entry(trackinfo, **meta)
    async def play_from_id(self, player, gpmid):
        """Look up *gpmid* in the local DB and enqueue the matching track.

        Raises ExtractionError when no track matches.
        """
        trackinfo = await self.loop.run_in_executor(self.tpool, partial(self._get_trackinfo, gpmid))
        if not trackinfo:
            raise ExtractionError("Failed to get trackinfo matches given GPMID.")
        await player.playlist.add_gpm_entry(trackinfo)
    def _update_db(self):
        """Blocking: fetch the uploaded-song list and rewrite the gpm table."""
        tracklist = self.client.get_uploaded_songs()
        if not tracklist:
            return None
        db = sqlite3.connect(str(self.gpm_config_dir/"track.db"))
        # Full rebuild: drop and recreate rather than diffing rows.
        db.execute("DROP TABLE IF EXISTS gpm")
        db.execute("CREATE TABLE IF NOT EXISTS gpm(title, artist, album, gpmid)")
        db.executemany("INSERT INTO gpm VALUES (:title, :artist, :album, :id)", tracklist)
        db.commit()
        db.close()
        return len(tracklist)
    def _download(self, entry):
        """Blocking: download entry.gpmid into audio_cache (up to 3 tries)."""
        target = self.dl_dir/entry.expected_filename
        # Let it try 3 times
        for _ in range(3):
            _, abyte = self.client.download_song(entry.gpmid)
            if abyte:
                break
        if not abyte:
            return False, None
        with open(target, "wb") as f:
            f.write(abyte)
        return True, target
    def _get_duration(self, audio_file):
        """Blocking: probe *audio_file*'s duration via ffprobe.

        Returns the duration as an int (ffprobe's format=duration field),
        or None when ffprobe is unavailable.
        """
        if not self.ffprobe:
            return
        target = str(audio_file)
        cmd = self.ffprobe + " -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 " + target
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        stdout, __ = proc.communicate()
        log.debug("ffprobe stdout says: {}".format(stdout.decode("utf-8")))
        # S**T
        # Ensure with regular expression
        return int(float(stdout.decode("utf-8").strip()))
    def _search(self, args):
        """Blocking: LIKE-match the joined *args* against title/artist/album."""
        db = sqlite3.connect(str(self.gpm_config_dir/"track.db"))
        db.execute("CREATE TABLE IF NOT EXISTS gpm(title, artist, album, gpmid)")
        # Need better way to search DB...
        query = "%" + "%".join(args) + "%"
        cur = db.execute("SELECT * FROM gpm WHERE title||' '||artist||' '||album LIKE ?", [query, ])
        result = cur.fetchall()
        db.close()
        res = []
        for item in result:
            res.append(GPMTrack(item))
        return res
    def _get_trackinfo(self, gpmid):
        """Blocking: look up one track by the third ':'-separated id field."""
        db = sqlite3.connect(str(self.gpm_config_dir/"track.db"))
        db.execute("CREATE TABLE IF NOT EXISTS gpm(title, artist, album, gpmid)")
        true_gpmid = gpmid.split(":")[2]
        if not true_gpmid:
            return
        cur = db.execute("SELECT * FROM gpm WHERE gpmid = ?", [true_gpmid, ])
        result = cur.fetchone()
        db.close()
        return GPMTrack(result) if result else None
    def _find_ffprobe(self):
        """Locate the ffprobe executable; returns its path or None."""
        program = "ffprobe"
        # Original: musicbot/player.py
        def is_exe(fpath):
            # A candidate counts if it exists and is executable; on Windows
            # also try the ".exe"-suffixed name.
            found = os.path.isfile(fpath) and os.access(fpath, os.X_OK)
            if not found and sys.platform == 'win32':
                fpath = fpath + ".exe"
                found = os.path.isfile(fpath) and os.access(fpath, os.X_OK)
            return found
        fpath, __ = os.path.split(program)
        if fpath:
            if is_exe(program):
                return program
        else:
            # Bare name: scan every PATH entry.
            for path in os.environ["PATH"].split(os.pathsep):
                path = path.strip('"')
                exe_file = os.path.join(path, program)
                if is_exe(exe_file):
                    return exe_file
        log.debug("Failed to get ffprobe.")
        return None
class GPMTrack():
    """Lightweight record for one row of the local GPM track table."""
    def __init__(self, item):
        # *item* is any 4-field indexable row: (title, artist, album, gpmid).
        self.title, self.artist, self.album, self.gpmid = (
            item[0], item[1], item[2], item[3])
| 33.076923
| 120
| 0.594633
|
acfc34981cf6274ac6df8b8ef67385c7c5456b57
| 430
|
py
|
Python
|
schoolport/app_core/migrations/0012_auto_20210511_0019.py
|
yotink522/schoolport
|
c6cfd0230ca05fb44f77c2f27c7e200828547bd5
|
[
"MIT"
] | null | null | null |
schoolport/app_core/migrations/0012_auto_20210511_0019.py
|
yotink522/schoolport
|
c6cfd0230ca05fb44f77c2f27c7e200828547bd5
|
[
"MIT"
] | null | null | null |
schoolport/app_core/migrations/0012_auto_20210511_0019.py
|
yotink522/schoolport
|
c6cfd0230ca05fb44f77c2f27c7e200828547bd5
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-05-10 16:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter tb_student.name to a nullable CharField(255)
    with verbose name 'Student Name'. Do not hand-edit applied migrations."""
    dependencies = [
        ('app_core', '0011_auto_20210511_0018'),
    ]
    operations = [
        migrations.AlterField(
            model_name='tb_student',
            name='name',
            field=models.CharField(max_length=255, null=True, verbose_name='Student Name'),
        ),
    ]
| 22.631579
| 91
| 0.618605
|
acfc34d03aa6afc3bedb292f4ea69c8bcd5e3ca4
| 264
|
py
|
Python
|
nameko_zipkin/constants.py
|
zsiciarz/nameko-zipkin
|
e4f73bb3fe2754e240779d3a5f5085ba223b1e79
|
[
"Apache-2.0"
] | 6
|
2017-07-02T07:56:30.000Z
|
2020-12-26T19:43:21.000Z
|
nameko_zipkin/constants.py
|
zsiciarz/nameko-zipkin
|
e4f73bb3fe2754e240779d3a5f5085ba223b1e79
|
[
"Apache-2.0"
] | 1
|
2019-09-16T11:56:22.000Z
|
2019-09-16T11:56:22.000Z
|
nameko_zipkin/constants.py
|
zsiciarz/nameko-zipkin
|
e4f73bb3fe2754e240779d3a5f5085ba223b1e79
|
[
"Apache-2.0"
] | 6
|
2017-05-15T15:22:45.000Z
|
2020-02-13T16:29:35.000Z
|
# Zipkin B3 trace-propagation HTTP header names.
TRACE_ID_HEADER = 'X-B3-TraceId'
SPAN_ID_HEADER = 'X-B3-SpanId'
PARENT_SPAN_ID_HEADER = 'X-B3-ParentSpanId'
FLAGS_HEADER = 'X-B3-Flags'
SAMPLED_HEADER = 'X-B3-Sampled'
# Keys read from the service configuration.
ZIPKIN_CONFIG_SECTION = 'ZIPKIN'
HANDLER_KEY = 'HANDLER'
HANDLER_PARAMS_KEY = 'HANDLER_PARAMS'
| 26.4
| 43
| 0.776515
|
acfc353dbd230f2cc9028fba86f6c1529f339c6e
| 33,928
|
py
|
Python
|
Python/python2_version/klampt/model/coordinates.py
|
smeng9/Klampt
|
7ff91bead90ac04280eff310623338fd10aaba79
|
[
"BSD-3-Clause"
] | 238
|
2015-01-09T15:21:27.000Z
|
2022-03-30T22:48:45.000Z
|
Python/klampt/model/coordinates.py
|
tcrapse/Klampt
|
d5a334e73f1f24ba4c606e03f49915b353799a57
|
[
"BSD-3-Clause"
] | 89
|
2015-08-26T16:56:42.000Z
|
2022-03-29T23:45:46.000Z
|
Python/klampt/model/coordinates.py
|
tcrapse/Klampt
|
d5a334e73f1f24ba4c606e03f49915b353799a57
|
[
"BSD-3-Clause"
] | 84
|
2015-01-10T18:41:52.000Z
|
2022-03-30T03:32:50.000Z
|
"""A module to help manage coordinate frames and objects attached to them.
Similar to the tf module in ROS.
You may attach points / vectors to frames and determine relative or world
coordinates in a straightforward, object-oriented way.
The ``coordinates`` module is set up with a default coordinate manager so that
if you call ``coordinates.[X]``, where ``[X]`` is a method of
:class:`klampt.model.coordinates.Manager`, such as ``setWorldModel()``,
``addPoint()``, ``addFrame()``, etc., then the default ``Manager``
instance gets called.
Advanced users might create their own ``Manager``, or swap top-level managers
in/out using :meth:`setManager`.
"""
from ..math import so3,se3,vectorops
from ..robotsim import RobotModelLink,RigidObjectModel
import ik
from collections import defaultdict
class Frame:
    """A named coordinate frame in space.

    A frame stores both its world coordinates and its coordinates relative
    to an optional parent frame, each as an se3 element.  Whichever of the
    two is not supplied at construction is derived from the other using the
    parent's current world coordinates.
    """
    def __init__(self,name,worldCoordinates=se3.identity(),
                 parent=None,relativeCoordinates=None):
        self._name = name
        self._parent = parent
        self._data = None   # optional world-model element attached to this frame
        if relativeCoordinates is None:
            if worldCoordinates is None:
                raise ValueError("One of relativeCoordinates or worldCoordinates must be provided")
            if parent is None:
                relativeCoordinates = worldCoordinates
            else:
                relativeCoordinates = se3.mul(se3.inv(parent.worldCoordinates()),worldCoordinates)
        elif worldCoordinates is None:
            if parent is None:
                worldCoordinates = relativeCoordinates
            else:
                worldCoordinates = se3.mul(parent.worldCoordinates(),relativeCoordinates)
        self._worldCoordinates = worldCoordinates
        self._relativeCoordinates = relativeCoordinates
    def name(self):
        """Returns the name of this frame"""
        return self._name
    def data(self):
        """Returns the data attached to this frame, if any"""
        return self._data
    def worldOrigin(self):
        """Returns the R^3 position of this frame's origin in world coordinates"""
        return self._worldCoordinates[1]
    def relativeOrigin(self):
        """Returns the R^3 position of this frame's origin relative to its parent"""
        return self._relativeCoordinates[1]
    def worldRotation(self):
        """Returns the SO(3) rotation taking this frame to world coordinates"""
        return self._worldCoordinates[0]
    def relativeRotation(self):
        """Returns the SO(3) rotation taking this frame to its parent"""
        return self._relativeCoordinates[0]
    def worldCoordinates(self):
        """Returns the SE(3) transform taking this frame to world coordinates"""
        return self._worldCoordinates
    def relativeCoordinates(self):
        """Returns the SE(3) transform taking this frame to its parent"""
        return self._relativeCoordinates
    def parent(self):
        """Returns the parent Frame, or None if this frame is given directly
        in world coordinates."""
        return self._parent
class Transform:
    """A transform from one Frame (source) to another (destination). The
    destination may be None, in which case the transform is the world transform
    of the source.

    The difference between a Transform and a relative Frame (i.e., one with
    a parent) is that a Transform is a sort of "read-only" structure whose
    coordinates change as the frames' coordinates change."""
    def __init__(self,source,destination=None):
        assert isinstance(source,Frame)
        if destination is not None:
            assert isinstance(destination,Frame)
        self._name = None
        self._source = source
        self._destination = destination
    def source(self):
        """Returns the source Frame"""
        return self._source
    def destination(self):
        """Returns the destination Frame (None means the world frame)"""
        return self._destination
    def coordinates(self):
        """Returns the SE(3) coordinates that transform elements from the
        source to the destination Frame."""
        if self._destination==None:
            return self._source.worldCoordinates()
        return se3.mul(se3.inv(self._destination.worldCoordinates()),self._source.worldCoordinates())
    def translationCoordinates(self):
        """Returns the coordinates of the origin of this frame in R^3, relative
        to its destination"""
        if self._destination==None:
            return self._source.worldOrigin()
        return se3.apply(se3.inv(self._destination.worldCoordinates()),self._source.worldOrigin())
    def rotationCoordinates(self):
        """Returns the SO(3) coordinates that rotate elements from the source
        to the destination Frame"""
        if self._destination==None:
            return self._source.worldRotation()
        return so3.mul(so3.inv(self._destination.worldRotation()),self._source.worldRotation())
    def toWorld(self):
        """Returns a Transform designating the transformation from the
        source frame to the world frame."""
        #BUGFIX: self.source is the accessor method; passing it (instead of
        #the Frame self._source) tripped the isinstance assert in __init__
        return Transform(self._source,None)
    def to(self,frame):
        """Returns a Transform designating the transformation from the
        source frame to the given frame."""
        #BUGFIX: same bound-method bug as toWorld
        return Transform(self._source,frame)
class Point:
    """Represents a point in 3D space. It is attached to a frame, so if the
    frame is changed then its world coordinates will also change.  A frame
    of None means the point is given directly in world coordinates."""
    def __init__(self,localCoordinates=[0,0,0],frame=None):
        if frame is not None:
            assert isinstance(frame,Frame)
        self._name = None
        self._localCoordinates = localCoordinates
        self._frame = frame
    def localCoordinates(self):
        """Returns a copy of the coordinates of this point in its parent Frame"""
        return self._localCoordinates[:]
    def worldCoordinates(self):
        """Returns the coordinates of this point in the world Frame"""
        if self._frame ==None:
            return self._localCoordinates[:]
        return se3.apply(self._frame.worldCoordinates(),self._localCoordinates)
    def frame(self):
        """Returns the frame to which this Point is attached"""
        return self._frame
    def toWorld(self):
        """Returns a Point representing the same point in space, but
        in the world reference frame"""
        return Point(self.worldCoordinates(),None)
    def to(self,newframe):
        """Returns a Point representing the same point in space, but
        in a different reference frame"""
        if newframe == None or newframe=='world':
            return self.toWorld()
        newlocal = se3.apply(se3.inv(newframe.worldCoordinates()),self.worldCoordinates())
        return Point(newlocal,newframe)
    def localOffset(self,dir):
        """Offsets this point by a vector given in local coordinates"""
        self._localCoordinates = vectorops.add(self._localCoordinates,dir)
    def worldOffset(self,dir):
        """Offsets this point by a vector given in world coordinates"""
        if self._frame == None:
            self._localCoordinates = vectorops.add(self._localCoordinates,dir)
        else:
            #BUGFIX: rotate the world-space offset into the local frame and add
            #it to the local position.  The original rotated the local position
            #and added the raw world offset, corrupting the point.
            self._localCoordinates = vectorops.add(self._localCoordinates,so3.apply(so3.inv(self._frame.worldCoordinates()[0]),dir))
class Direction:
    """Represents a directional quantity in 3D space. It is attached to a
    frame, so if the frame is rotated then its world coordinates will also
    change.  Only the frame's rotation affects a Direction; its translation
    does not."""
    def __init__(self,localCoordinates=[0,0,0],frame=None):
        if frame is not None:
            assert isinstance(frame,Frame)
        self._name = None
        self._localCoordinates = localCoordinates
        self._frame = frame
    def localCoordinates(self):
        """Returns a copy of this direction's coordinates in its parent Frame"""
        return self._localCoordinates[:]
    def worldCoordinates(self):
        """Returns this direction's coordinates in the world Frame"""
        if self._frame ==None:
            return self._localCoordinates[:]
        return so3.apply(self._frame.worldCoordinates()[0],self._localCoordinates)
    def frame(self):
        """Returns the frame to which this Direction is attached"""
        return self._frame
    def toWorld(self):
        """Returns a Direction representing the same direction in space, but
        in the world reference frame"""
        return Direction(self.worldCoordinates(),None)
    def to(self,newframe):
        """Returns a Direction representing the same direction in space, but
        in a different reference frame"""
        if newframe == None or newframe=='world':
            return self.toWorld()
        newlocal = so3.apply(so3.inv(newframe.worldCoordinates()[0]),self.worldCoordinates())
        return Direction(newlocal,newframe)
    def scale(self,amount):
        """Scales this direction by a scalar amount"""
        self._localCoordinates = vectorops.mul(self._localCoordinates,amount)
    def localOffset(self,dir):
        """Offsets this direction by a vector given in local coordinates"""
        self._localCoordinates = vectorops.add(self._localCoordinates,dir)
    def worldOffset(self,dir):
        """Offsets this direction by a vector given in world coordinates"""
        if self._frame == None:
            self._localCoordinates = vectorops.add(self._localCoordinates,dir)
        else:
            #BUGFIX: rotate the world-space offset into the local frame and add
            #it to the local direction (the original rotated the local vector
            #and added the raw world offset -- same defect as Point.worldOffset)
            self._localCoordinates = vectorops.add(self._localCoordinates,so3.apply(so3.inv(self._frame.worldCoordinates()[0]),dir))
class Group:
"""A collection of Frames, Points, Directions, and sub-Groups.
All groups have a privileged frame called 'root'.
The default manager is a Group with a privileged frame called 'world'
which is just an alias for 'root'.
Subgroup items can be accessed using the syntax [group]:[itemname].
Subgroups can also be nested.
Attributes:
frames (dict): a map from frame names to Frame objects
childLists (dict): a map from frame names to lists of children
points (dict): a map from point names to Point objects
directions (dict): a map from direction names to Direction objects
subgroups (dict): a map from subgroup names to Group objects
"""
def __init__(self):
self._name = None
self.destroy()
def rootFrame(self):
return self.frames.get('root',None)
def destroy(self):
"""Call this to destroy a group cleanly"""
self.frames = {}
self.childLists = defaultdict(list)
self.frames['root'] = Frame('root')
self.points = {}
self.directions = {}
self.subgroups = {}
def setWorldModel(self,worldModel):
"""Sets this group to contain all entities of a world model"""
for i in xrange(worldModel.numRobots()):
rgroup = self.addGroup(worldModel.robot(i).getName())
rgroup.setRobotModel(worldModel.robot(i))
for i in xrange(worldModel.numRigidObjects()):
try:
f = self.addFrame(worldModel.rigidObject(i).getName(),worldCoordinates=worldModel.rigidObject(i).getTransform())
f._data = worldModel.rigidObject(i)
except ValueError:
f = self.addFrame("%s[%d]"%(worldModel.rigidObject(i).getName(),i),worldCoordinates=worldModel.rigidObject(i).getTransform())
f._data = worldModel.rigidObject(i)
for i in xrange(worldModel.numTerrains()):
try:
f = self.addFrame(worldModel.terrain(i).getName(),worldCoordinates=se3.identity())
f._data = worldModel.terrain(i)
except ValueError:
f = self.addFrame("%s[%d]"%(worldModel.terrain(i).getName(),i),worldCoordinates=se3.identity())
f._data = worldModel.terrain(i)
return
def setRobotModel(self,robotModel):
"""Sets this group to contain all links of a robot model"""
root = self.frames['root']
for i in xrange(robotModel.numLinks()):
p = robotModel.link(i).getParent()
if p >= 0:
Fp = self.frames[robotModel.link(p).getName()]
else:
Fp = root
f = self.addFrame(robotModel.link(i).getName(),worldCoordinates=robotModel.link(i).getTransform(),parent=Fp)
f._data = robotModel.link(i)
return
def setController(self,controller):
"""Given a robotController, sets this group to contain all sensed
and commanded frames."""
root = self.frames['root']
robot = controller.robot()
robot.setConfig(controller.getCommandedConfig())
for i in xrange(robot.numLinks()):
if p >= 0:
Fp = self.frames[robotModel.link(p).getName()+"_commanded"]
else:
Fp = root
f = self.addFrame(robot.link(i).getName()+"_commanded",worldCoordinates=robot.link(i).getTransform(),parent=Fp)
f._data = (controller,i,'commanded')
robot.setConfig(controller.getSensedConfig())
for i in xrange(robot.numLinks()):
if p >= 0:
Fp = self.frames[robotModel.link(p).getName()+"_commanded"]
else:
Fp = root
f = self.addFrame(robot.link(i).getName()+"_sensed",worldCoordinates=robot.link(i).getTransform(),parent=Fp)
f._data = (controller,i,'sensed')
return
def setSimBody(self,name,simBody):
"""Sets this group to be attached to a simBody"""
f = self.addFrame(name,worldCoordinates=simBody.getTransform())
f._data = simBody
return
def updateFromWorld(self):
"""For any frames with associated world elements, updates the
transforms from the world elements."""
for (n,f) in self.frames.iteritems():
if f._data == None:
continue
if hasattr(f._data,'getTransform'):
worldCoordinates = f._data.getTransform()
if hasattr(f._data,'getParent'):
p = f._data.getParent()
if p >= 0:
plink = f._data.robot().link(p)
parentCoordinates = plink.getTransform()
f._relativeCoordinates = se3.mul(se3.inv(parentCoordinates),worldCoordinates)
else:
f._relativeCoordinates = worldCoordinates
else:
f._relativeCoordinates = worldCoordinates
f._worldCoordinates = worldCoordinates
#update downstream non-link items
for c in self.childLists[f._name]:
if c._data == None or not hasattr(c._data,'getTransform'):
c._worldCoordinates = se3.mul(f._worldCoordinates,c._relativeCoordinates)
self.updateDependentFrames(c)
if isinstance(f._data,tuple) and isinstance(f._data[0],SimRobotController):
controller,index,itemtype = f._data
#TODO: update the frame from the controller data
for (n,g) in self.subgroups.iteritems():
g.updateFromWorld()
def updateToWorld(self):
"""For any frames with associated world elements, updates the
transforms of the world elements. Note: this does NOT perform inverse
kinematics!"""
for (n,f) in self.frames.iteritems():
if f.data == None: continue
if hasattr(f.data,'setTransform'):
f.data.setTransform(*f.worldCoordinates())
for (n,g) in self.subgroups.iteritems():
g.updateToWorld()
def addFrame(self,name,worldCoordinates=None,parent=None,relativeCoordinates=None):
"""Adds a new named Frame, possibly with a parent. 'parent' may either be a string
identifying another named Frame in this Group, or it can be a Frame object. (Warning:
unknown behavior may result from specifying a Frame not in this Group).
Either worldCoordinates or relativeCoordinates must be given. If worldCoordinates is given,
then the frame's initial relative transform is determined by the current coordinates of the
parent. If all parameters are left as default, the frame is placed directly at the origin
of the parent"""
if name in self.frames:
raise ValueError("Frame "+name+" already exists")
if parent==None:
parent = 'root'
if isinstance(parent,str):
parent = self.frames[parent]
if worldCoordinates == None and relativeCoordinates == None:
relativeCoordinates = se3.identity()
self.frames[name] = Frame(name,worldCoordinates=worldCoordinates,parent=parent,relativeCoordinates=relativeCoordinates)
self.childLists[parent._name].append(self.frames[name])
return self.frames[name]
def addPoint(self,name,coordinates=[0,0,0],frame='root'):
if name in self.points:
raise ValueError("Point "+name+" already exists")
res = self.point(coordinates,frame)
res._name = name
self.points[name] = res
return res
def addDirection(self,name,coordinates=[0,0,0],frame='root'):
if name in self.direction:
raise ValueError("Direction "+name+" already exists")
res = self.direction(coordinates,frame)
res._name = name
self.directions[name] = res
return res
def addGroup(self,name,group=None,parentFrame='root'):
"""Adds a subgroup to this group. If parentFrame is given,
then the group is attached relative to the given frame.
Otherwise, it is assumed attached to the root frame. """
if group==None:
group = Group()
if name in self.subgroups:
raise ValueError("Subgroup "+name+" already exists")
group._name = name
self.subgroups[name] = group
group.frames['root']._parent = self.frame(parentFrame)
return group
def deleteFrame(self,name):
"""Deletes the named frame. All items that refer to this frame
will be automatically converted to be relative to the root coordinate
system"""
assert name != 'root',"Root frame may not be deleted"
if name not in self.frames:
raise ValueError("Invalid frame to delete")
f = self.frames[name]
f._parent = None
if f._parent != None:
self.childLists[f._parent._name].remove(f)
for (n,p) in self.points.iteritems():
if p._parent == f:
p._localCoordinates = p.worldCoordinates()
p._parent = self.frames['root']
for (n,p) in self.directions.iteritems():
if p._parent == f:
p._localCoordinates = p.worldCoordinates()
p._parent = self.frames['root']
for c in self.childLists[name]:
p._relativeCoordinates = p._worldCoordinates
p._parent = self.frames['root']
del self.frames[name]
del self.childLists[name]
def deletePoint(self,name):
del self.points[name]
def deleteDirection(self,name):
del self.directions[name]
def deleteGroup(self,name):
del self.subgroups[name]
def setFrameCoordinates(self,name,coordinates,parent='relative'):
"""Sets the coordinates of the frame, given as an se3 element.
The coordinates can be given either in 'relative' mode, where the
coordinates are the natural coordinates of the frame relative to
its parent, or in 'world' mode, where the coordinates are the
global world coordinates, or they can be given relative to any
other frame in this coordinate Group. If None, this defaults
to the root frame of this Group."""
f = self.frame(name)
if parent==None:
parent = 'root'
if isinstance(parent,str):
if parent=='relative':
parent = f._parent
elif parent=='world':
parent = None
else:
parent = self.frames[parent]
if parent:
worldCoordinates = se3.mul(parent._worldCoordinates,coordinates)
else:
worldCoordinates = coordinates
if parent == f._parent:
f._relativeCoordinates = coordinates
else:
f._relativeCoordinates = se3.mul(se3.inv(f._parent._worldCoordinates),worldCoordinates)
f._worldCoordinates = worldCoordinates
self.updateDependentFrames(f)
def updateDependentFrames(self,frame):
"""Whenever Frame's world coordinates are updated, call this to update
the downstream frames. This will be called automatically via
setFrameCoordinates but not if you change a Frame's coordinates
manually."""
for c in self.childLists[frame._name]:
c._worldCoordinates = se3.mul(frame.worldCoordinates(),c._relativeCoordinates)
self.updateDependentFrames(c)
def frame(self,name):
"""Retrieves a named Frame."""
if isinstance(name,Frame): return name
try:
return self.frames[name]
except KeyError:
#try looking through groups
splits = name.split(":",1)
if len(splits)==1:
raise ValueError("Frame "+name+" does not exist")
if splits[0] not in self.subgroups:
raise ValueError("Frame "+name+" or subgroup "+splits[0]+" do not exist")
return self.subgroups[splits[0]].frame(splits[1])
def getPoint(self,name):
"""Retrieves a named Point."""
if isinstance(name,Point): return name
try:
return self.points[name]
except KeyError:
#try looking through groups
splits = name.split(":",1)
if len(splits)==1:
raise ValueError("Point "+name+" does not exist")
if splits[0] not in self.subgroups:
raise ValueError("Point "+name+" or subgroup "+splits[0]+" do not exist")
return self.subgroups[splits[0]].getPoint(splits[1])
def getDirection(self,name):
"""Retrieves a named Direction."""
if isinstance(name,Direction): return name
try:
return self.directions[name]
except KeyError:
#try looking through groups
splits = name.split(":",1)
if len(splits)==1:
raise ValueError("Direction "+name+" does not exist")
if splits[0] not in self.subgroups:
raise ValueError("Direction "+name+" or subgroup "+splits[0]+" do not exist")
return self.subgroups[splits[0]].getDirection(splits[1])
def toWorld(self,object):
"""Converts a Transform, Point, or Direction to have coordinates
relative to the world frame."""
return object.toWorld()
def to(self,object,frame):
"""Converts a Transform, Point, or Direction to have coordinates
relative to the given frame 'frame'."""
return object.to(self.frame(frame))
def transform(self,sourceFrame,destFrame='root'):
"""Makes a Transform object from the source frame to the destination
frame. """
return Transform(self.frame(sourceFrame),self.frame(testFrame))
def point(self,coordinates=[0,0,0],frame='root'):
"""Makes a Point object with the given local coordinates in the given
frame. Does not add it to the list of managed points."""
return Point(coordinates,self.frame(frame))
def direction(self,coordinates=[0,0,0],frame='root'):
"""Makes a Direction object with the given local coordinates in the
given frame. Does not add it to the list of managed points."""
return Direction(coordinates,self.frame(frame))
def pointFromWorld(self,worldCoordinates=[0,0,0],frame='root'):
"""Alias for to(point(worldCoordinates,'root'),frame)"""
f = self.frame(frame)
local = se3.apply(se3.inv(f._worldCoordinates),worldCoordinates)
return Point(local,f)
def directionFromWorld(self,worldCoordinates=[0,0,0],frame='world'):
"""Alias for to(direction(worldCoordinates,'root'),frame)"""
f = self.frame(frame)
local = so3.apply(so3.inv(f._worldCoordinates[0]),worldCoordinates)
return Direction(local,f)
def listFrames(self,indent=0):
"""Prints all the frames in this group and subgroups"""
for k,f in self.frames.iteritems():
if indent > 0:
print " "*(indent-1),
if f._parent == None:
print k
else:
print k,"(%s)"%(f._parent._name,)
for n,g in self.subgroups.iteritems():
if indent > 0:
print " "*(indent-1),
print n,":"
g.listFrames(indent+2)
def listItems(self,indent=0):
"""Prints all the items in this group"""
if len(self.frames) > 0:
if indent > 0:
print " "*(indent-1),
print "Frames:"
for k,f in self.frames.iteritems():
if indent > 0:
print " "*(indent+1),
if f._parent == None:
print k
else:
print k,"(%s)"%(f._parent._name,)
if len(self.points) > 0:
if indent > 0:
print " "*(indent-1),
print "Points:"
for k in self.points.iterkeys():
if indent > 0:
print " "*(indent+1),
print k
if len(self.directions) > 0:
if indent > 0:
print " "*(indent-1),
print "Directions:"
for k in self.directions.iterkeys():
if indent > 0:
print " "*(indent+1),
print k
if len(self.subgroups) > 0:
if indent > 0:
print " "*(indent-1),
print "Subgroups:"
for n,g in self.subgroups.iteritems():
if indent > 0:
print " "*(indent+1),
print n,":"
g.listItems(indent+2)
class Manager(Group):
    """A top-level manager of coordinate frames.  Its root frame is aliased
    under the name 'world'."""
    def __init__(self):
        Group.__init__(self)
        self._name = "world_group"
        self.frames['world'] = self.frames['root']
    def worldFrame(self):
        """Returns the privileged world frame (alias of the root frame)."""
        return self.frames.get('world',None)
    def destroy(self):
        Group.destroy(self)
        #re-establish the 'world' alias lost by Group.destroy (it rebuilds
        #the frames dict with only 'root')
        self.frames['world'] = self.frames['root']
    def deleteFrame(self,name):
        assert name != 'world',"World frame may not be deleted"
        #BUGFIX: the original only performed the assert and never actually
        #deleted the frame
        Group.deleteFrame(self,name)
    def setFrameCoordinates(self,name,coordinates,parent='relative'):
        assert name != 'world',"World frame must stay fixed at identity"
        Group.setFrameCoordinates(self,name,coordinates,parent)
#create defaults so you can just call coordinates.addFrame() etc.
_defaultManager = Manager()

def _callfn(name):
    """Builds a forwarder that dispatches the named method to whatever
    Manager is currently installed as the module default."""
    def _forward(*args,**kwargs):
        return getattr(_defaultManager,name)(*args,**kwargs)
    return _forward

def manager():
    """Retrieves the default top-level manager"""
    return _defaultManager

def setManager(manager):
    """Sets the new top-level manager to a new Manager instance, and
    returns the old top-level manager."""
    assert isinstance(manager,Manager),"setManager must be called with a Manager instance"
    global _defaultManager
    previous = _defaultManager
    _defaultManager = manager
    return previous

#module-level aliases that forward onto the default manager
destroy = _callfn("destroy")
setWorldModel = _callfn("setWorldModel")
setRobotModel = _callfn("setRobotModel")
setController = _callfn("setController")
setSimBody = _callfn("setSimBody")
updateFromWorld = _callfn("updateFromWorld")
updateToWorld = _callfn("updateToWorld")
addFrame = _callfn("addFrame")
addPoint = _callfn("addPoint")
addDirection = _callfn("addDirection")
addGroup = _callfn("addGroup")
deleteFrame = _callfn("deleteFrame")
deletePoint = _callfn("deletePoint")
deleteDirection = _callfn("deleteDirection")
deleteGroup = _callfn("deleteGroup")
setFrameCoordinates = _callfn("setFrameCoordinates")
frame = _callfn("frame")
getPoint = _callfn("getPoint")
getDirection = _callfn("getDirection")
toWorld = _callfn("toWorld")
to = _callfn("to")
transform = _callfn("transform")
point = _callfn("point")
direction = _callfn("direction")
pointFromWorld = _callfn("pointFromWorld")
directionFromWorld = _callfn("directionFromWorld")
listFrames = _callfn("listFrames")
listItems = _callfn("listItems")
def _ancestor_with_link(frame):
"""Returns the nearest ancestor of the given frame attached to a robot
link or rigid object"""
while frame and (frame._data == None or not isinstance(frame._data,(RobotModelLink,RigidObjectModel))):
frame = frame._parent
return frame
def ik_objective(obj,target):
    """Returns an IK objective that attempts to fix the given
    klampt.coordinates object 'obj' at given target object 'target'.

    Arguments:
    obj: An instance of one of the {Point,Direction,Transform,Frame} classes.
    target: If 'obj' is a Point, Direction, or Frame objects, this
    must be an object of the same type of 'obj' denoting the target to
    which 'obj' should be fixed.  In other words, the local coordinates
    of 'obj' relative to 'target's parent frame will be equal to 'target's
    local coordinates.
    If obj is a Transform object, this element is an se3 object.

    Returns:
    IKObjective: An IK objective to be used with the klampt.ik module.
    Since the klampt.ik module is not aware about custom frames, an
    ancestor of the object must be attached to a RobotModelLink or a
    RigidObjectModel, or else None will be returned.  The same goes for
    target, if provided.

    TODO: support lists of objects to fix.
    TODO: support Direction constraints.
    """
    body = None
    coords = None
    ref = None
    # Normalize the four supported input types to (body frame, ref frame,
    # target coordinates).
    if isinstance(obj,Frame):
        assert isinstance(target,Frame),"ik_objective: target must be of same type as obj"
        body = obj
        ref = target.parent()
        coords = target.relativeCoordinates()
    elif isinstance(obj,Transform):
        # NOTE(review): ref was initialized to None just above, so this branch
        # is dead code -- it looks like a 'ref' keyword parameter was once
        # intended in the signature; confirm before relying on the warning
        if ref != None: print "ik_objective: Warning, ref argument passed with Transform object, ignoring"
        body = obj.source()
        ref = obj.destination()
        coords = target
    elif isinstance(obj,(Point,Direction)):
        assert type(target)==type(obj),"ik_objective: target must be of same type as obj"
        body = obj.frame()
        ref = target.frame()
        coords = target.localCoordinates()
    else:
        raise ValueError("Argument to ik_objective must be an object from the coordinates module")
    # the ik module only understands robot links / rigid objects, so climb to
    # the nearest ancestor frame attached to one
    linkframe = _ancestor_with_link(body)
    if linkframe == None:
        print "Warning: object provided to ik_objective is not attached to a robot link or rigid object, returning None"
        return None
    linkbody = linkframe._data
    #find the movable frame attached to ref
    refframe = _ancestor_with_link(ref) if ref != None else None
    refbody = (refframe._data if refframe!=None else None)
    if isinstance(obj,(Frame,Transform)):
        #figure out the desired transform T[linkbody->refbody], given
        #coords = T[obj->ref], T[obj->linkbody], T[ref->refbody]
        #result = (T[ref->refbody] * coords * T[obj->linkbody]^-1)
        if linkframe != body: coords = se3.mul(coords,Transform(linkframe,body).coordinates())
        if refframe != ref: coords = se3.mul(Transform(ref,refframe).coordinates(),coords)
        return ik.objective(linkbody,ref=refbody,R=coords[0],t=coords[1])
    elif isinstance(obj,Point):
        #figure out the local and world points
        local = obj.to(linkframe).localCoordinates()
        world = target.to(refframe).localCoordinates()
        return ik.objective(linkbody,local=[local],world=[world])
    elif isinstance(obj,Direction):
        raise ValueError("Axis constraints are not yet supported in the klampt.ik module")
    return None
def ik_fixed_objective(obj,ref=None):
"""Returns an IK objective that attempts to fix the given
klampt.coordinates object at its current pose. If ref=None,
its pose is fixed in world coordinates. Otherwise, its pose is fixed
relative to the reference frame ref.
Arguments:
obj: An instance of one of the {Point,Direction,Transform,Frame} classes.
ref (optional): either None, or a Frame object denoting the reference frame
to which the object should be fixed. (If obj is a Transform object,
its destination frame is used as the reference frame, and this argument
is ignored.)
Returns:
IKObjective: An IK objective to be used with the klampt.ik module. For
Point, Direction, and Frame objects this objective fixes the
object coordinates relative to the ref frame, or the world if None frame
is provided. For Transform objects the source frame is fixed
relative to the destination frame.
Since the klampt.ik module is not aware about custom frames, an
ancestor of the object must be attached to a RobotModelLink or a
RigidObjectModel, or else None will be returned. The same goes for ref,
if provided.
TODO: support lists of objects to fix.
TODO: support Direction constraints.
"""
if isinstance(obj,(Point,Direction)):
return ik_objective(obj,obj.to(ref))
elif isinstance(obj,Frame):
return ik_fixed_objective(Transform(obj,ref))
elif isinstance(obj,Transform):
if ref != None: print "ik_fixed_objective: Warning, ref argument passed with Transform object, ignoring"
return ik_objective(obj,obj.coordinates())
else:
raise ValueError("Argument to ik_fixed_objective must be an object from the coordinates module")
| 44.759894
| 141
| 0.636171
|
acfc35815e908ee90edf64de931d84ddc5d8befc
| 894
|
py
|
Python
|
Fracktory3-3.0_b11/plugins/PerObjectSettingsTool/__init__.py
|
ganeshmev/Fracktory3-3.0_b11_KLE
|
16066e6993b96a880aa1a2f044a27930cbd0787d
|
[
"MIT"
] | 1
|
2018-10-19T10:08:45.000Z
|
2018-10-19T10:08:45.000Z
|
misc/zip/Cura-master/plugins/PerObjectSettingsTool/__init__.py
|
criscola/G-Gen
|
293d4f46cb40d7917a10a95921040a14a086efc1
|
[
"MIT"
] | null | null | null |
misc/zip/Cura-master/plugins/PerObjectSettingsTool/__init__.py
|
criscola/G-Gen
|
293d4f46cb40d7917a10a95921040a14a086efc1
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from . import PerObjectSettingsTool
from . import PerObjectSettingVisibilityHandler
from PyQt5.QtQml import qmlRegisterType
from UM.i18n import i18nCatalog
i18n_catalog = i18nCatalog("cura")
def getMetaData():
    """Returns the metadata dictionary Cura uses to register this tool."""
    tool_metadata = {
        "name": i18n_catalog.i18nc("@label", "Per Model Settings"),
        "description": i18n_catalog.i18nc("@info:tooltip", "Configure Per Model Settings"),
        "icon": "tool_icon.svg",
        "tool_panel": "PerObjectSettingsPanel.qml",
        "weight": 3
    }
    return {"tool": tool_metadata}
def register(app):
    """Registers the per-object setting visibility handler with QML and
    returns the tool instance for the plugin registry."""
    qmlRegisterType(
        PerObjectSettingVisibilityHandler.PerObjectSettingVisibilityHandler,
        "Cura", 1, 0,
        "PerObjectSettingVisibilityHandler")
    return {"tool": PerObjectSettingsTool.PerObjectSettingsTool()}
| 34.384615
| 102
| 0.683445
|
acfc3586e1c0545d92d2555502c142e4b72bc379
| 44,199
|
py
|
Python
|
Lib/smtplib.py
|
ShivayStark/cpython
|
37aa35e9eaca4947d8a298a3247469c66f4891f9
|
[
"PSF-2.0"
] | 1
|
2020-04-25T07:55:21.000Z
|
2020-04-25T07:55:21.000Z
|
Lib/smtplib.py
|
ShivayStark/cpython
|
37aa35e9eaca4947d8a298a3247469c66f4891f9
|
[
"PSF-2.0"
] | null | null | null |
Lib/smtplib.py
|
ShivayStark/cpython
|
37aa35e9eaca4947d8a298a3247469c66f4891f9
|
[
"PSF-2.0"
] | 1
|
2020-10-02T03:34:00.000Z
|
2020-10-02T03:34:00.000Z
|
#! /usr/bin/env python3
'''SMTP/ESMTP client class.
This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP
Authentication) and RFC 2487 (Secure SMTP over TLS).
Notes:
Please remember, when doing ESMTP, that the names of the SMTP service
extensions are NOT the same thing as the option keywords for the RCPT
and MAIL commands!
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> print(s.help())
This is Sendmail version 8.8.4
Topics:
HELO EHLO MAIL RCPT DATA
RSET NOOP QUIT HELP VRFY
EXPN VERB ETRN DSN
For more info use "HELP <topic>".
To report bugs in the implementation send email to
sendmail-bugs@sendmail.org.
For local information send email to Postmaster at your site.
End of HELP info
>>> s.putcmd("vrfy","someone@here")
>>> s.getreply()
(250, "Somebody OverHere <somebody@here.my.org>")
>>> s.quit()
'''
# Author: The Dragon De Monsyne <dragondm@integral.org>
# ESMTP support, test code and doc fixes added by
# Eric S. Raymond <esr@thyrsus.com>
# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data)
# by Carey Evans <c.evans@clear.net.nz>, for picky mail servers.
# RFC 2554 (authentication) support by Gerhard Haering <gerhard@bigfoot.de>.
#
# This was modified from the Python 1.5 library HTTP lib.
import socket
import io
import re
import email.utils
import email.message
import email.generator
import base64
import hmac
import copy
import datetime
import sys
from email.base64mime import body_encode as encode_base64
__all__ = ["SMTPException", "SMTPServerDisconnected", "SMTPResponseException",
"SMTPSenderRefused", "SMTPRecipientsRefused", "SMTPDataError",
"SMTPConnectError", "SMTPHeloError", "SMTPAuthenticationError",
"quoteaddr", "quotedata", "SMTP"]
SMTP_PORT = 25
SMTP_SSL_PORT = 465
CRLF = "\r\n"
bCRLF = b"\r\n"
_MAXLINE = 8192 # more than 8 times larger than RFC 821, 4.5.3
OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
# Exception classes used by this module.
class SMTPException(OSError):
    """Root of the exception hierarchy used by this module."""
class SMTPNotSupportedError(SMTPException):
    """Raised when a command, or a command with a particular option, is
    attempted but the server does not support it."""
class SMTPServerDisconnected(SMTPException):
    """Raised when the server drops the connection unexpectedly, or when an
    SMTP instance is used before it has been connected to a server."""
class SMTPResponseException(SMTPException):
    """Base class for exceptions that carry an SMTP error code.

    Raised in some instances when the SMTP server returns an error code;
    the numeric code is stored in `smtp_code' and the server's message in
    `smtp_error'.
    """
    def __init__(self, code, msg):
        self.args = (code, msg)
        self.smtp_code = code
        self.smtp_error = msg
class SMTPSenderRefused(SMTPResponseException):
"""Sender address refused.
In addition to the attributes set by on all SMTPResponseException
exceptions, this sets `sender' to the string that the SMTP refused.
"""
def __init__(self, code, msg, sender):
self.smtp_code = code
self.smtp_error = msg
self.sender = sender
self.args = (code, msg, sender)
class SMTPRecipientsRefused(SMTPException):
"""All recipient addresses refused.
The errors for each recipient are accessible through the attribute
'recipients', which is a dictionary of exactly the same sort as
SMTP.sendmail() returns.
"""
def __init__(self, recipients):
self.recipients = recipients
self.args = (recipients,)
class SMTPDataError(SMTPResponseException):
"""The SMTP server didn't accept the data."""
class SMTPConnectError(SMTPResponseException):
"""Error during connection establishment."""
class SMTPHeloError(SMTPResponseException):
"""The server refused our HELO reply."""
class SMTPAuthenticationError(SMTPResponseException):
"""Authentication error.
Most probably the server didn't accept the username/password
combination provided.
"""
def quoteaddr(addrstring):
    """Quote a subset of the email addresses defined by RFC 821.

    Should be able to handle anything email.utils.parseaddr can handle.
    """
    parsed = email.utils.parseaddr(addrstring)
    if parsed == ('', ''):
        # parseaddr couldn't make sense of the input: pass it through as-is,
        # adding angle brackets only if it doesn't already appear bracketed.
        if addrstring.strip().startswith('<'):
            return addrstring
        return "<%s>" % addrstring
    return "<%s>" % parsed[1]
def _addr_only(addrstring):
displayname, addr = email.utils.parseaddr(addrstring)
if (displayname, addr) == ('', ''):
# parseaddr couldn't parse it, so use it as is.
return addrstring
return addr
# Legacy method kept for backward compatibility.
def quotedata(data):
    """Quote data for email.

    Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
    Internet CRLF end-of-line.
    """
    # Two passes: normalize every line ending to CRLF first, then apply the
    # RFC 821 transparency rule (double any '.' that begins a line).
    normalized = re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data)
    return re.sub(r'(?m)^\.', '..', normalized)
def _quote_periods(bindata):
return re.sub(br'(?m)^\.', b'..', bindata)
def _fix_eols(data):
    """Normalize lone '\\n' and lone '\\r' (but not '\\r\\n' pairs) to CRLF."""
    return CRLF.join(re.split(r'(?:\r\n|\n|\r(?!\n))', data))
# SSL support is optional: record whether the ssl module is importable so
# SMTP.starttls() and the SMTP_SSL class can be guarded at runtime.
try:
    import ssl
except ImportError:
    _have_ssl = False
else:
    _have_ssl = True
class SMTP:
    """This class manages a connection to an SMTP or ESMTP server.
    SMTP Objects:
    SMTP objects have the following attributes:
    helo_resp
        This is the message given by the server in response to the
        most recent HELO command.
    ehlo_resp
        This is the message given by the server in response to the
        most recent EHLO command. This is usually multiline.
    does_esmtp
        This is a True value _after you do an EHLO command_, if the
        server supports ESMTP.
    esmtp_features
        This is a dictionary, which, if the server supports ESMTP,
        will _after you do an EHLO command_, contain the names of the
        SMTP service extensions this server supports, and their
        parameters (if any).
        Note, all extension names are mapped to lower case in the
        dictionary.
    See each method's docstrings for details.  In general, there is a
    method of the same name to perform each SMTP command.  There is also a
    method called 'sendmail' that will do an entire mail transaction.
    """
    # Class-level defaults; instances overwrite these as the session evolves.
    debuglevel = 0
    sock = None
    file = None
    helo_resp = None
    ehlo_msg = "ehlo"  # LMTP subclass overrides this with "lhlo"
    ehlo_resp = None
    does_esmtp = 0
    default_port = SMTP_PORT

    def __init__(self, host='', port=0, local_hostname=None,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                 source_address=None):
        """Initialize a new instance.
        If specified, `host' is the name of the remote host to which to
        connect.  If specified, `port' specifies the port to which to connect.
        By default, smtplib.SMTP_PORT is used.  If a host is specified the
        connect method is called, and if it returns anything other than a
        success code an SMTPConnectError is raised.  If specified,
        `local_hostname` is used as the FQDN of the local host in the HELO/EHLO
        command.  Otherwise, the local hostname is found using
        socket.getfqdn(). The `source_address` parameter takes a 2-tuple (host,
        port) for the socket to bind to as its source address before
        connecting. If the host is '' and port is 0, the OS default behavior
        will be used.
        """
        self._host = host
        self.timeout = timeout
        self.esmtp_features = {}
        self.command_encoding = 'ascii'
        self.source_address = source_address
        if host:
            (code, msg) = self.connect(host, port)
            if code != 220:
                self.close()
                raise SMTPConnectError(code, msg)
        if local_hostname is not None:
            self.local_hostname = local_hostname
        else:
            # RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and
            # if that can't be calculated, that we should use a domain literal
            # instead (essentially an encoded IP address like [A.B.C.D]).
            fqdn = socket.getfqdn()
            if '.' in fqdn:
                self.local_hostname = fqdn
            else:
                # We can't find an fqdn hostname, so use a domain literal
                addr = '127.0.0.1'
                try:
                    addr = socket.gethostbyname(socket.gethostname())
                except socket.gaierror:
                    pass
                self.local_hostname = '[%s]' % addr

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # Politely QUIT on context exit; tolerate a server that already hung
        # up, but always close the underlying socket.
        try:
            code, message = self.docmd("QUIT")
            if code != 221:
                raise SMTPResponseException(code, message)
        except SMTPServerDisconnected:
            pass
        finally:
            self.close()

    def set_debuglevel(self, debuglevel):
        """Set the debug output level.
        A non-false value results in debug messages for connection and for all
        messages sent to and received from the server.
        """
        self.debuglevel = debuglevel

    def _print_debug(self, *args):
        # Level 2 and above prefixes each debug line with a timestamp.
        if self.debuglevel > 1:
            print(datetime.datetime.now().time(), *args, file=sys.stderr)
        else:
            print(*args, file=sys.stderr)

    def _get_socket(self, host, port, timeout):
        # This makes it simpler for SMTP_SSL to use the SMTP connect code
        # and just alter the socket connection bit.
        if self.debuglevel > 0:
            self._print_debug('connect: to', (host, port), self.source_address)
        return socket.create_connection((host, port), timeout,
                                        self.source_address)

    def connect(self, host='localhost', port=0, source_address=None):
        """Connect to a host on a given port.
        If the hostname ends with a colon (`:') followed by a number, and
        there is no port specified, that suffix will be stripped off and the
        number interpreted as the port number to use.
        Note: This method is automatically invoked by __init__, if a host is
        specified during instantiation.
        """
        if source_address:
            self.source_address = source_address
        # Only treat a single ':' as host:port; IPv6 literals contain several.
        if not port and (host.find(':') == host.rfind(':')):
            i = host.rfind(':')
            if i >= 0:
                host, port = host[:i], host[i + 1:]
                try:
                    port = int(port)
                except ValueError:
                    raise OSError("nonnumeric port")
        if not port:
            port = self.default_port
        if self.debuglevel > 0:
            self._print_debug('connect:', (host, port))
        self.sock = self._get_socket(host, port, self.timeout)
        self.file = None
        (code, msg) = self.getreply()
        if self.debuglevel > 0:
            self._print_debug('connect:', repr(msg))
        return (code, msg)

    def send(self, s):
        """Send `s' to the server."""
        if self.debuglevel > 0:
            self._print_debug('send:', repr(s))
        if self.sock:
            if isinstance(s, str):
                # send is used by the 'data' command, where command_encoding
                # should not be used, but 'data' needs to convert the string to
                # binary itself anyway, so that's not a problem.
                s = s.encode(self.command_encoding)
            try:
                self.sock.sendall(s)
            except OSError:
                self.close()
                raise SMTPServerDisconnected('Server not connected')
        else:
            raise SMTPServerDisconnected('please run connect() first')

    def putcmd(self, cmd, args=""):
        """Send a command to the server."""
        # Local renamed from 'str' to avoid shadowing the builtin.
        if args == "":
            line = '%s%s' % (cmd, CRLF)
        else:
            line = '%s %s%s' % (cmd, args, CRLF)
        self.send(line)

    def getreply(self):
        """Get a reply from the server.
        Returns a tuple consisting of:
          - server response code (e.g. '250', or such, if all goes well)
            Note: returns -1 if it can't read response code.
          - server response string corresponding to response code (multiline
            responses are converted to a single, multiline string).
        Raises SMTPServerDisconnected if end-of-file is reached.
        """
        resp = []
        if self.file is None:
            self.file = self.sock.makefile('rb')
        while 1:
            try:
                # Read one more byte than allowed so an over-long line is
                # detectable below.
                line = self.file.readline(_MAXLINE + 1)
            except OSError as e:
                self.close()
                raise SMTPServerDisconnected("Connection unexpectedly closed: "
                                             + str(e))
            if not line:
                self.close()
                raise SMTPServerDisconnected("Connection unexpectedly closed")
            if self.debuglevel > 0:
                self._print_debug('reply:', repr(line))
            if len(line) > _MAXLINE:
                self.close()
                raise SMTPResponseException(500, "Line too long.")
            resp.append(line[4:].strip(b' \t\r\n'))
            code = line[:3]
            # Check that the error code is syntactically correct.
            # Don't attempt to read a continuation line if it is broken.
            try:
                errcode = int(code)
            except ValueError:
                errcode = -1
                break
            # Check if multiline response (4th char is '-' per RFC 5321).
            if line[3:4] != b"-":
                break
        errmsg = b"\n".join(resp)
        if self.debuglevel > 0:
            self._print_debug('reply: retcode (%s); Msg: %a' % (errcode, errmsg))
        return errcode, errmsg

    def docmd(self, cmd, args=""):
        """Send a command, and return its response code."""
        self.putcmd(cmd, args)
        return self.getreply()

    # std smtp commands

    def helo(self, name=''):
        """SMTP 'helo' command.
        Hostname to send for this command defaults to the FQDN of the local
        host.
        """
        self.putcmd("helo", name or self.local_hostname)
        (code, msg) = self.getreply()
        self.helo_resp = msg
        return (code, msg)

    def ehlo(self, name=''):
        """ SMTP 'ehlo' command.
        Hostname to send for this command defaults to the FQDN of the local
        host.
        """
        self.esmtp_features = {}
        self.putcmd(self.ehlo_msg, name or self.local_hostname)
        (code, msg) = self.getreply()
        # According to RFC1869 some (badly written)
        # MTA's will disconnect on an ehlo. Toss an exception if
        # that happens -ddm
        if code == -1 and len(msg) == 0:
            self.close()
            raise SMTPServerDisconnected("Server not connected")
        self.ehlo_resp = msg
        if code != 250:
            return (code, msg)
        self.does_esmtp = 1
        #parse the ehlo response -ddm
        assert isinstance(self.ehlo_resp, bytes), repr(self.ehlo_resp)
        resp = self.ehlo_resp.decode("latin-1").split('\n')
        del resp[0]  # first line is the greeting, not a feature
        for each in resp:
            # To be able to communicate with as many SMTP servers as possible,
            # we have to take the old-style auth advertisement into account,
            # because:
            # 1) Else our SMTP feature parser gets confused.
            # 2) There are some servers that only advertise the auth methods we
            #    support using the old style.
            auth_match = OLDSTYLE_AUTH.match(each)
            if auth_match:
                # This doesn't remove duplicates, but that's no problem
                self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \
                        + " " + auth_match.groups(0)[0]
                continue
            # RFC 1869 requires a space between ehlo keyword and parameters.
            # It's actually stricter, in that only spaces are allowed between
            # parameters, but were not going to check for that here.  Note
            # that the space isn't present if there are no parameters.
            m = re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?', each)
            if m:
                feature = m.group("feature").lower()
                params = m.string[m.end("feature"):].strip()
                if feature == "auth":
                    self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \
                            + " " + params
                else:
                    self.esmtp_features[feature] = params
        return (code, msg)

    def has_extn(self, opt):
        """Does the server support a given SMTP service extension?"""
        return opt.lower() in self.esmtp_features

    def help(self, args=''):
        """SMTP 'help' command.
        Returns help text from server."""
        self.putcmd("help", args)
        return self.getreply()[1]

    def rset(self):
        """SMTP 'rset' command -- resets session."""
        self.command_encoding = 'ascii'
        return self.docmd("rset")

    def _rset(self):
        """Internal 'rset' command which ignores any SMTPServerDisconnected error.
        Used internally in the library, since the server disconnected error
        should appear to the application when the *next* command is issued, if
        we are doing an internal "safety" reset.
        """
        try:
            self.rset()
        except SMTPServerDisconnected:
            pass

    def noop(self):
        """SMTP 'noop' command -- doesn't do anything :>"""
        return self.docmd("noop")

    def mail(self, sender, options=()):
        """SMTP 'mail' command -- begins mail xfer session.
        This method may raise the following exceptions:
         SMTPNotSupportedError  The options parameter includes 'SMTPUTF8'
                                but the SMTPUTF8 extension is not supported by
                                the server.
        """
        optionlist = ''
        if options and self.does_esmtp:
            if any(x.lower()=='smtputf8' for x in options):
                if self.has_extn('smtputf8'):
                    # Subsequent commands may carry non-ASCII addresses.
                    self.command_encoding = 'utf-8'
                else:
                    raise SMTPNotSupportedError(
                        'SMTPUTF8 not supported by server')
            optionlist = ' ' + ' '.join(options)
        self.putcmd("mail", "FROM:%s%s" % (quoteaddr(sender), optionlist))
        return self.getreply()

    def rcpt(self, recip, options=()):
        """SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
        optionlist = ''
        if options and self.does_esmtp:
            optionlist = ' ' + ' '.join(options)
        self.putcmd("rcpt", "TO:%s%s" % (quoteaddr(recip), optionlist))
        return self.getreply()

    def data(self, msg):
        """SMTP 'DATA' command -- sends message data to server.
        Automatically quotes lines beginning with a period per rfc821.
        Raises SMTPDataError if there is an unexpected reply to the
        DATA command; the return value from this method is the final
        response code received when the all data is sent.  If msg
        is a string, lone '\\r' and '\\n' characters are converted to
        '\\r\\n' characters.  If msg is bytes, it is transmitted as is.
        """
        self.putcmd("data")
        (code, repl) = self.getreply()
        if self.debuglevel > 0:
            self._print_debug('data:', (code, repl))
        if code != 354:
            raise SMTPDataError(code, repl)
        else:
            if isinstance(msg, str):
                msg = _fix_eols(msg).encode('ascii')
            q = _quote_periods(msg)
            if q[-2:] != bCRLF:
                q = q + bCRLF
            # Terminate DATA with the "<CRLF>.<CRLF>" sentinel (RFC 821).
            q = q + b"." + bCRLF
            self.send(q)
            (code, msg) = self.getreply()
            if self.debuglevel > 0:
                self._print_debug('data:', (code, msg))
            return (code, msg)

    def verify(self, address):
        """SMTP 'verify' command -- checks for address validity."""
        self.putcmd("vrfy", _addr_only(address))
        return self.getreply()
    # a.k.a.
    vrfy = verify

    def expn(self, address):
        """SMTP 'expn' command -- expands a mailing list."""
        self.putcmd("expn", _addr_only(address))
        return self.getreply()

    # some useful methods

    def ehlo_or_helo_if_needed(self):
        """Call self.ehlo() and/or self.helo() if needed.
        If there has been no previous EHLO or HELO command this session, this
        method tries ESMTP EHLO first.
        This method may raise the following exceptions:
         SMTPHeloError            The server didn't reply properly to
                                  the helo greeting.
        """
        if self.helo_resp is None and self.ehlo_resp is None:
            if not (200 <= self.ehlo()[0] <= 299):
                (code, resp) = self.helo()
                if not (200 <= code <= 299):
                    raise SMTPHeloError(code, resp)

    def auth(self, mechanism, authobject, *, initial_response_ok=True):
        """Authentication command - requires response processing.
        'mechanism' specifies which authentication mechanism is to
        be used - the valid values are those listed in the 'auth'
        element of 'esmtp_features'.
        'authobject' must be a callable object taking a single argument:
                data = authobject(challenge)
        It will be called to process the server's challenge response; the
        challenge argument it is passed will be a bytes.  It should return
        an ASCII string that will be base64 encoded and sent to the server.
        Keyword arguments:
            - initial_response_ok: Allow sending the RFC 4954 initial-response
              to the AUTH command, if the authentication methods supports it.
        """
        # RFC 4954 allows auth methods to provide an initial response.  Not all
        # methods support it.  By definition, if they return something other
        # than None when challenge is None, then they do.  See issue #15014.
        mechanism = mechanism.upper()
        initial_response = (authobject() if initial_response_ok else None)
        if initial_response is not None:
            response = encode_base64(initial_response.encode('ascii'), eol='')
            (code, resp) = self.docmd("AUTH", mechanism + " " + response)
        else:
            (code, resp) = self.docmd("AUTH", mechanism)
        # If server responds with a challenge, send the response.
        if code == 334:
            challenge = base64.decodebytes(resp)
            response = encode_base64(
                authobject(challenge).encode('ascii'), eol='')
            # The continuation is sent as a bare line, not as a new command.
            (code, resp) = self.docmd(response)
        if code in (235, 503):
            return (code, resp)
        raise SMTPAuthenticationError(code, resp)

    def auth_cram_md5(self, challenge=None):
        """ Authobject to use with CRAM-MD5 authentication. Requires self.user
        and self.password to be set."""
        # CRAM-MD5 does not support initial-response.
        if challenge is None:
            return None
        return self.user + " " + hmac.HMAC(
            self.password.encode('ascii'), challenge, 'md5').hexdigest()

    def auth_plain(self, challenge=None):
        """ Authobject to use with PLAIN authentication. Requires self.user and
        self.password to be set."""
        return "\0%s\0%s" % (self.user, self.password)

    def auth_login(self, challenge=None):
        """ Authobject to use with LOGIN authentication. Requires self.user and
        self.password to be set."""
        if challenge is None:
            return self.user
        else:
            return self.password

    def login(self, user, password, *, initial_response_ok=True):
        """Log in on an SMTP server that requires authentication.
        The arguments are:
            - user:         The user name to authenticate with.
            - password:     The password for the authentication.
        Keyword arguments:
            - initial_response_ok: Allow sending the RFC 4954 initial-response
              to the AUTH command, if the authentication methods supports it.
        If there has been no previous EHLO or HELO command this session, this
        method tries ESMTP EHLO first.
        This method will return normally if the authentication was successful.
        This method may raise the following exceptions:
         SMTPHeloError            The server didn't reply properly to
                                  the helo greeting.
         SMTPAuthenticationError  The server didn't accept the username/
                                  password combination.
         SMTPNotSupportedError    The AUTH command is not supported by the
                                  server.
         SMTPException            No suitable authentication method was
                                  found.
        """
        self.ehlo_or_helo_if_needed()
        if not self.has_extn("auth"):
            raise SMTPNotSupportedError(
                "SMTP AUTH extension not supported by server.")
        # Authentication methods the server claims to support
        advertised_authlist = self.esmtp_features["auth"].split()
        # Authentication methods we can handle in our preferred order:
        preferred_auths = ['CRAM-MD5', 'PLAIN', 'LOGIN']
        # We try the supported authentications in our preferred order, if
        # the server supports them.
        authlist = [auth for auth in preferred_auths
                    if auth in advertised_authlist]
        if not authlist:
            raise SMTPException("No suitable authentication method found.")
        # Some servers advertise authentication methods they don't really
        # support, so if authentication fails, we continue until we've tried
        # all methods.
        self.user, self.password = user, password
        for authmethod in authlist:
            method_name = 'auth_' + authmethod.lower().replace('-', '_')
            try:
                (code, resp) = self.auth(
                    authmethod, getattr(self, method_name),
                    initial_response_ok=initial_response_ok)
                # 235 == 'Authentication successful'
                # 503 == 'Error: already authenticated'
                if code in (235, 503):
                    return (code, resp)
            except SMTPAuthenticationError as e:
                last_exception = e
        # We could not login successfully.  Return result of last attempt.
        raise last_exception

    def starttls(self, keyfile=None, certfile=None, context=None):
        """Puts the connection to the SMTP server into TLS mode.
        If there has been no previous EHLO or HELO command this session, this
        method tries ESMTP EHLO first.
        If the server supports TLS, this will encrypt the rest of the SMTP
        session. If you provide the keyfile and certfile parameters,
        the identity of the SMTP server and client can be checked. This,
        however, depends on whether the socket module really checks the
        certificates.
        This method may raise the following exceptions:
         SMTPHeloError            The server didn't reply properly to
                                  the helo greeting.
        """
        self.ehlo_or_helo_if_needed()
        if not self.has_extn("starttls"):
            raise SMTPNotSupportedError(
                "STARTTLS extension not supported by server.")
        (resp, reply) = self.docmd("STARTTLS")
        if resp == 220:
            if not _have_ssl:
                raise RuntimeError("No SSL support included in this Python")
            if context is not None and keyfile is not None:
                raise ValueError("context and keyfile arguments are mutually "
                                 "exclusive")
            if context is not None and certfile is not None:
                raise ValueError("context and certfile arguments are mutually "
                                 "exclusive")
            if keyfile is not None or certfile is not None:
                import warnings
                warnings.warn("keyfile and certfile are deprecated, use a"
                              "custom context instead", DeprecationWarning, 2)
            if context is None:
                context = ssl._create_stdlib_context(certfile=certfile,
                                                     keyfile=keyfile)
            self.sock = context.wrap_socket(self.sock,
                                            server_hostname=self._host)
            self.file = None
            # RFC 3207:
            # The client MUST discard any knowledge obtained from
            # the server, such as the list of SMTP service extensions,
            # which was not obtained from the TLS negotiation itself.
            self.helo_resp = None
            self.ehlo_resp = None
            self.esmtp_features = {}
            self.does_esmtp = 0
        else:
            # RFC 3207:
            # 501 Syntax error (no parameters allowed)
            # 454 TLS not available due to temporary reason
            raise SMTPResponseException(resp, reply)
        return (resp, reply)

    def sendmail(self, from_addr, to_addrs, msg, mail_options=(),
                 rcpt_options=()):
        """This command performs an entire mail transaction.
        The arguments are:
            - from_addr    : The address sending this mail.
            - to_addrs     : A list of addresses to send this mail to.  A bare
                             string will be treated as a list with 1 address.
            - msg          : The message to send.
            - mail_options : List of ESMTP options (such as 8bitmime) for the
                             mail command.
            - rcpt_options : List of ESMTP options (such as DSN commands) for
                             all the rcpt commands.
        msg may be a string containing characters in the ASCII range, or a byte
        string.  A string is encoded to bytes using the ascii codec, and lone
        \\r and \\n characters are converted to \\r\\n characters.
        If there has been no previous EHLO or HELO command this session, this
        method tries ESMTP EHLO first.  If the server does ESMTP, message size
        and each of the specified options will be passed to it.  If EHLO
        fails, HELO will be tried and ESMTP options suppressed.
        This method will return normally if the mail is accepted for at least
        one recipient.  It returns a dictionary, with one entry for each
        recipient that was refused.  Each entry contains a tuple of the SMTP
        error code and the accompanying error message sent by the server.
        This method may raise the following exceptions:
         SMTPHeloError          The server didn't reply properly to
                                the helo greeting.
         SMTPRecipientsRefused  The server rejected ALL recipients
                                (no mail was sent).
         SMTPSenderRefused      The server didn't accept the from_addr.
         SMTPDataError          The server replied with an unexpected
                                error code (other than a refusal of
                                a recipient).
         SMTPNotSupportedError  The mail_options parameter includes 'SMTPUTF8'
                                but the SMTPUTF8 extension is not supported by
                                the server.
        Note: the connection will be open even after an exception is raised.
        """
        self.ehlo_or_helo_if_needed()
        esmtp_opts = []
        if isinstance(msg, str):
            msg = _fix_eols(msg).encode('ascii')
        if self.does_esmtp:
            if self.has_extn('size'):
                esmtp_opts.append("size=%d" % len(msg))
            for option in mail_options:
                esmtp_opts.append(option)
        (code, resp) = self.mail(from_addr, esmtp_opts)
        if code != 250:
            # 421 means the server is shutting down; anything else gets a
            # best-effort RSET so the session stays usable.
            if code == 421:
                self.close()
            else:
                self._rset()
            raise SMTPSenderRefused(code, resp, from_addr)
        senderrs = {}
        if isinstance(to_addrs, str):
            to_addrs = [to_addrs]
        for each in to_addrs:
            (code, resp) = self.rcpt(each, rcpt_options)
            if (code != 250) and (code != 251):
                senderrs[each] = (code, resp)
            if code == 421:
                self.close()
                raise SMTPRecipientsRefused(senderrs)
        if len(senderrs) == len(to_addrs):
            # the server refused all our recipients
            self._rset()
            raise SMTPRecipientsRefused(senderrs)
        (code, resp) = self.data(msg)
        if code != 250:
            if code == 421:
                self.close()
            else:
                self._rset()
            raise SMTPDataError(code, resp)
        #if we got here then somebody got our mail
        return senderrs

    def send_message(self, msg, from_addr=None, to_addrs=None,
                     mail_options=(), rcpt_options=()):
        """Converts message to a bytestring and passes it to sendmail.
        The arguments are as for sendmail, except that msg is an
        email.message.Message object.  If from_addr is None or to_addrs is
        None, these arguments are taken from the headers of the Message as
        described in RFC 2822 (a ValueError is raised if there is more than
        one set of 'Resent-' headers).  Regardless of the values of from_addr and
        to_addr, any Bcc field (or Resent-Bcc field, when the Message is a
        resent) of the Message object won't be transmitted.  The Message
        object is then serialized using email.generator.BytesGenerator and
        sendmail is called to transmit the message.  If the sender or any of
        the recipient addresses contain non-ASCII and the server advertises the
        SMTPUTF8 capability, the policy is cloned with utf8 set to True for the
        serialization, and SMTPUTF8 and BODY=8BITMIME are asserted on the send.
        If the server does not support SMTPUTF8, an SMTPNotSupported error is
        raised.  Otherwise the generator is called without modifying the
        policy.
        """
        # 'Resent-Date' is a mandatory field if the Message is resent (RFC 2822
        # Section 3.6.6). In such a case, we use the 'Resent-*' fields.  However,
        # if there is more than one 'Resent-' block there's no way to
        # unambiguously determine which one is the most recent in all cases,
        # so rather than guess we raise a ValueError in that case.
        #
        # TODO implement heuristics to guess the correct Resent-* block with an
        # option allowing the user to enable the heuristics.  (It should be
        # possible to guess correctly almost all of the time.)
        self.ehlo_or_helo_if_needed()
        resent = msg.get_all('Resent-Date')
        if resent is None:
            header_prefix = ''
        elif len(resent) == 1:
            header_prefix = 'Resent-'
        else:
            raise ValueError("message has more than one 'Resent-' header block")
        if from_addr is None:
            # Prefer the sender field per RFC 2822:3.6.2.
            from_addr = (msg[header_prefix + 'Sender']
                           if (header_prefix + 'Sender') in msg
                           else msg[header_prefix + 'From'])
            from_addr = email.utils.getaddresses([from_addr])[0][1]
        if to_addrs is None:
            addr_fields = [f for f in (msg[header_prefix + 'To'],
                                       msg[header_prefix + 'Bcc'],
                                       msg[header_prefix + 'Cc'])
                           if f is not None]
            to_addrs = [a[1] for a in email.utils.getaddresses(addr_fields)]
        # Make a local copy so we can delete the bcc headers.
        msg_copy = copy.copy(msg)
        del msg_copy['Bcc']
        del msg_copy['Resent-Bcc']
        international = False
        try:
            ''.join([from_addr, *to_addrs]).encode('ascii')
        except UnicodeEncodeError:
            if not self.has_extn('smtputf8'):
                raise SMTPNotSupportedError(
                    "One or more source or delivery addresses require"
                    " internationalized email support, but the server"
                    " does not advertise the required SMTPUTF8 capability")
            international = True
        with io.BytesIO() as bytesmsg:
            if international:
                g = email.generator.BytesGenerator(
                    bytesmsg, policy=msg.policy.clone(utf8=True))
                mail_options = (*mail_options, 'SMTPUTF8', 'BODY=8BITMIME')
            else:
                g = email.generator.BytesGenerator(bytesmsg)
            g.flatten(msg_copy, linesep='\r\n')
            flatmsg = bytesmsg.getvalue()
        return self.sendmail(from_addr, to_addrs, flatmsg, mail_options,
                             rcpt_options)

    def close(self):
        """Close the connection to the SMTP server."""
        # Close the reader file first, then the socket, even if file.close()
        # raises.
        try:
            file = self.file
            self.file = None
            if file:
                file.close()
        finally:
            sock = self.sock
            self.sock = None
            if sock:
                sock.close()

    def quit(self):
        """Terminate the SMTP session."""
        res = self.docmd("quit")
        # A new EHLO is required after reconnecting with connect()
        self.ehlo_resp = self.helo_resp = None
        self.esmtp_features = {}
        self.does_esmtp = False
        self.close()
        return res
# SMTP_SSL only exists when the ssl module is available.
if _have_ssl:
    class SMTP_SSL(SMTP):
        """ This is a subclass derived from SMTP that connects over an SSL
        encrypted socket (to use this class you need a socket module that was
        compiled with SSL support). If host is not specified, '' (the local
        host) is used. If port is omitted, the standard SMTP-over-SSL port
        (465) is used.  local_hostname and source_address have the same meaning
        as they do in the SMTP class.  keyfile and certfile are also optional -
        they can contain a PEM formatted private key and certificate chain file
        for the SSL connection. context also optional, can contain a
        SSLContext, and is an alternative to keyfile and certfile; If it is
        specified both keyfile and certfile must be None.
        """
        default_port = SMTP_SSL_PORT
        def __init__(self, host='', port=0, local_hostname=None,
                     keyfile=None, certfile=None,
                     timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                     source_address=None, context=None):
            # keyfile/certfile are a deprecated alternative to passing a
            # ready-made SSLContext; the two styles cannot be mixed.
            if context is not None and keyfile is not None:
                raise ValueError("context and keyfile arguments are mutually "
                                 "exclusive")
            if context is not None and certfile is not None:
                raise ValueError("context and certfile arguments are mutually "
                                 "exclusive")
            if keyfile is not None or certfile is not None:
                import warnings
                warnings.warn("keyfile and certfile are deprecated, use a"
                              "custom context instead", DeprecationWarning, 2)
            self.keyfile = keyfile
            self.certfile = certfile
            if context is None:
                context = ssl._create_stdlib_context(certfile=certfile,
                                                     keyfile=keyfile)
            self.context = context
            # SMTP.__init__ may call connect(), which uses self.context via
            # the _get_socket override below, so context must be set first.
            SMTP.__init__(self, host, port, local_hostname, timeout,
                          source_address)
        def _get_socket(self, host, port, timeout):
            # Same as SMTP._get_socket, but wraps the connection in TLS
            # immediately (implicit TLS, as opposed to STARTTLS).
            if self.debuglevel > 0:
                self._print_debug('connect:', (host, port))
            new_socket = socket.create_connection((host, port), timeout,
                                                  self.source_address)
            new_socket = self.context.wrap_socket(new_socket,
                                                  server_hostname=self._host)
            return new_socket
    __all__.append("SMTP_SSL")
#
# LMTP extension
#
LMTP_PORT = 2003  # conventional LMTP TCP port
class LMTP(SMTP):
    """LMTP - Local Mail Transfer Protocol
    The LMTP protocol, which is very similar to ESMTP, is heavily based
    on the standard SMTP client. It's common to use Unix sockets for
    LMTP, so our connect() method must support that as well as a regular
    host:port server.  local_hostname and source_address have the same
    meaning as they do in the SMTP class.  To specify a Unix socket,
    you must use an absolute path as the host, starting with a '/'.
    Authentication is supported, using the regular SMTP mechanism. When
    using a Unix socket, LMTP generally don't support or require any
    authentication, but your mileage might vary."""
    # LMTP uses LHLO instead of EHLO (RFC 2033).
    ehlo_msg = "lhlo"
    def __init__(self, host='', port=LMTP_PORT, local_hostname=None,
                 source_address=None):
        """Initialize a new instance."""
        SMTP.__init__(self, host, port, local_hostname=local_hostname,
                      source_address=source_address)
    def connect(self, host='localhost', port=0, source_address=None):
        """Connect to the LMTP daemon, on either a Unix or a TCP socket."""
        # A leading '/' marks a Unix-domain socket path; anything else is
        # handled by the regular TCP connect in the base class.
        if host[0] != '/':
            return SMTP.connect(self, host, port, source_address=source_address)
        # Handle Unix-domain sockets.
        try:
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.file = None
            self.sock.connect(host)
        except OSError:
            if self.debuglevel > 0:
                self._print_debug('connect fail:', host)
            if self.sock:
                self.sock.close()
            self.sock = None
            raise
        (code, msg) = self.getreply()
        if self.debuglevel > 0:
            self._print_debug('connect:', msg)
        return (code, msg)
# Test the sendmail method, which tests most of the others.
# Note: This always sends to localhost.
if __name__ == '__main__':
    def prompt(prompt):
        # Read one line from stdin after showing a prompt on stdout.
        sys.stdout.write(prompt + ": ")
        sys.stdout.flush()
        return sys.stdin.readline().strip()
    fromaddr = prompt("From")
    toaddrs = prompt("To").split(',')
    print("Enter message, end with ^D:")
    # Accumulate the message body until EOF on stdin.
    msg = ''
    while 1:
        line = sys.stdin.readline()
        if not line:
            break
        msg = msg + line
    print("Message length is %d" % len(msg))
    server = SMTP('localhost')
    server.set_debuglevel(1)
    server.sendmail(fromaddr, toaddrs, msg)
    server.quit()
| 39.49866
| 89
| 0.589199
|
acfc375958b73faff737d95b94dda8d37f0baca2
| 391
|
py
|
Python
|
Gallery/wsgi.py
|
georgealusa/Personal_Gallery
|
cf9430194c2bcf958fa0eac3ef12d46371894607
|
[
"MIT"
] | null | null | null |
Gallery/wsgi.py
|
georgealusa/Personal_Gallery
|
cf9430194c2bcf958fa0eac3ef12d46371894607
|
[
"MIT"
] | null | null | null |
Gallery/wsgi.py
|
georgealusa/Personal_Gallery
|
cf9430194c2bcf958fa0eac3ef12d46371894607
|
[
"MIT"
] | null | null | null |
"""
WSGI config for Gallery project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI application;
# setdefault keeps any value already set in the environment.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Gallery.settings')
application = get_wsgi_application()
| 23
| 78
| 0.785166
|
acfc378634bac84aed7734ccbc747ef4fbf829e6
| 8,624
|
py
|
Python
|
experiments/tcn/tcn_train.py
|
kolaszko/haptic_transformer
|
51804d8301c0ba312a4ca0f23b296236acbb7f42
|
[
"MIT"
] | 17
|
2021-03-04T11:43:47.000Z
|
2022-02-17T12:47:24.000Z
|
experiments/tcn/tcn_train.py
|
kolaszko/haptic_transformer
|
51804d8301c0ba312a4ca0f23b296236acbb7f42
|
[
"MIT"
] | null | null | null |
experiments/tcn/tcn_train.py
|
kolaszko/haptic_transformer
|
51804d8301c0ba312a4ca0f23b296236acbb7f42
|
[
"MIT"
] | null | null | null |
import argparse
import json
import os
import socket
import time
from datetime import datetime
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
import utils
from data import HapticDataset
from models import TemporalConvNet
def main(args):
    """Train, validate and test a TCN classifier on the haptic dataset.

    Per run, saves under a timestamped ``tcn_runs/`` directory:
    hyper-parameters (params.json), best val/test model checkpoints,
    TensorBoard scalars, confusion statistics and an inference-time
    benchmark (inference_time.json).

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI flags (see the ``__main__`` block for the full list).
    """
    # Hyper-parameters pulled from the CLI; also dumped to params.json below.
    params = {
        'num_classes': args.num_classes,
        'sequence_length': args.sequence_length,
        'feed_forward': args.feed_forward,
        'levels': args.levels,
        'nhid': args.nhid,
        'dropout': args.dropout,
        'lr': args.lr,
        'gamma': args.gamma,
        'weight_decay': args.weight_decay,
        'batch_size': args.batch_size
    }
    print(params)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)

    # Three fixed splits from the same HDF5 file; signals are cropped to
    # sequence_length samples starting at 0.
    train_ds = HapticDataset(args.dataset_path, 'train_ds', signal_start=0, signal_length=params['sequence_length'])
    val_ds = HapticDataset(args.dataset_path, 'val_ds', signal_start=0, signal_length=params['sequence_length'])
    test_ds = HapticDataset(args.dataset_path, 'test_ds', signal_start=0, signal_length=params['sequence_length'])

    train_dataloader = DataLoader(train_ds, batch_size=params['batch_size'], shuffle=True)
    val_dataloader = DataLoader(val_ds, batch_size=params['batch_size'], shuffle=True)
    test_dataloader = DataLoader(test_ds, batch_size=params['batch_size'], shuffle=True)

    results = {}
    # Seed set after dataset construction; makes model init reproducible.
    torch.manual_seed(42)

    current_time = datetime.now().strftime('%b%d_%H-%M-%S')
    log_dir = os.path.join(
        'tcn_runs', current_time + '_' + socket.gethostname())

    # One residual level per entry, all with the same hidden width.
    channel_sizes = [args.nhid] * args.levels
    model = TemporalConvNet(6, channel_sizes, dropout=params['dropout'], ff=params['feed_forward'],
                            num_classes=params['num_classes'])
    model.to(device)
    summary(model, input_size=(160, 6))

    optimizer = torch.optim.AdamW(model.parameters(), lr=params['lr'], weight_decay=params['weight_decay'])
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=5e-6)

    # Class-balancing weights provided by the dataset itself.
    weight = torch.Tensor(train_ds.weights)
    w = weight.to(device)
    criterion = nn.CrossEntropyLoss(weight=w)

    best_acc_val = 0
    best_acc_test = 0

    with SummaryWriter(log_dir=log_dir) as writer:
        print('======== LOG ========')
        print(writer.log_dir)
        print('======== ========')

        with open(os.path.join(log_dir, 'params.json'), 'w') as f:
            f.write(json.dumps(params))

        # Predictions/labels captured at the best-accuracy epochs, used for
        # the confusion statistics saved after training.
        y_pred_val = []
        y_true_val = []
        y_pred_test = []
        y_true_test = []

        for epoch in range(args.epochs):
            print(f'Epoch: {epoch}')

            # Training
            mean_loss = 0.0
            correct = 0
            model.train(True)
            for step, data in enumerate(train_dataloader):
                s, labels = data[0].to(device), data[1].to(device)
                optimizer.zero_grad()
                out = model(s.float())
                loss = criterion(out, labels)
                loss.backward()
                optimizer.step()
                _, predicted = torch.max(out.data, 1)
                correct += (predicted == labels).sum().item()
                mean_loss += loss.item()
                print(f'Running loss training: {loss.item()} in step: {step}')

            # NOTE(review): mean_loss sums per-batch means, then divides by
            # the dataset size rather than the number of batches.
            writer.add_scalar('loss/train', mean_loss / len(train_ds), epoch)
            writer.add_scalar('accuracy/train', (100 * correct / len(train_ds)), epoch)
            writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
            scheduler.step()

            # Validation
            mean_loss = 0.0
            correct = 0
            y_pred = []
            y_true = []
            model.train(False)
            with torch.no_grad():
                for step, data in enumerate(val_dataloader):
                    s, labels = data[0].to(device), data[1].to(device)
                    out = model(s.float())
                    loss = criterion(out, labels)
                    _, predicted = torch.max(out.data, 1)
                    y_pred.extend(predicted.data.cpu().numpy())
                    y_true.extend(labels.data.cpu().numpy())
                    correct += (predicted == labels).sum().item()
                    mean_loss += loss.item()
                    print(f'Running loss validation: {loss.item()} in step: {step}')

            acc = (100 * correct / len(val_ds))
            # Checkpoint the whole model whenever validation accuracy improves.
            if acc > best_acc_val:
                best_acc_val = acc
                torch.save(model, os.path.join(writer.log_dir, 'val_model'))
                results['val'] = best_acc_val
                y_pred_val = y_pred
                y_true_val = y_true
                print('========== ACC ==========')
                print(best_acc_val)
                print(f'Epoch: {epoch}')
                print('========== === ==========')

            writer.add_scalar('loss/val', mean_loss / len(val_ds), epoch)
            writer.add_scalar('accuracy/val', acc, epoch)

            # Test (evaluated every epoch; best-by-test checkpoint kept too)
            mean_loss = 0.0
            correct = 0
            y_pred = []
            y_true = []
            model.train(False)
            with torch.no_grad():
                for step, data in enumerate(test_dataloader):
                    s, labels = data[0].to(device), data[1].to(device)
                    out = model(s.float())
                    loss = criterion(out, labels)
                    _, predicted = torch.max(out.data, 1)
                    y_pred.extend(predicted.data.cpu().numpy())
                    y_true.extend(labels.data.cpu().numpy())
                    correct += (predicted == labels).sum().item()
                    mean_loss += loss.item()
                    print(f'Running loss test: {loss.item()} in step: {step}')

            acc = (100 * correct / len(test_ds))
            if acc > best_acc_test:
                best_acc_test = acc
                torch.save(model, os.path.join(writer.log_dir, 'test_model'))
                results['test'] = best_acc_test
                y_pred_test = y_pred
                y_true_test = y_true
                print('========== ACC ==========')
                print(best_acc_test)
                print(f'Epoch: {epoch}')
                print('========== === ==========')

            writer.add_scalar('loss/test', mean_loss / len(test_ds), epoch)
            writer.add_scalar('accuracy/test', acc, epoch)

        # Confusion matrices / per-class stats for the best epochs.
        utils.log.save_statistics(y_true_val, y_pred_val, model, os.path.join(log_dir, 'val'), (160, 6))
        utils.log.save_statistics(y_true_test, y_pred_test, model, os.path.join(log_dir, 'test'), (160, 6))
        writer.flush()

    with open(os.path.join(log_dir, 'results.json'), 'w') as f:
        f.write(json.dumps(results))

    # Inference-time benchmark on a single dummy sample.
    results_timer = {}
    dummy_input = torch.randn(1, 160, 6, dtype=torch.float).to(device)
    repetitions = 300
    timings = np.zeros((repetitions, 1))

    # GPU-WARM-UP
    for _ in range(10):
        _ = model(dummy_input)

    # NOTE(review): wall-clock timing without torch.cuda.synchronize() — on
    # a CUDA device these numbers may under-report actual latency; confirm
    # whether async kernel launch skews them before citing the results.
    with torch.no_grad():
        for rep in range(repetitions):
            start_time = time.time()
            out = model(dummy_input.float())
            _, predicted = torch.max(out.data, 1)
            end_time = time.time()
            timings[rep] = (end_time - start_time) * 1000

    mean_syn = np.sum(timings) / repetitions
    std_syn = np.std(timings)
    print(mean_syn)
    results_timer['mean'] = mean_syn
    results_timer['std'] = std_syn
    results_timer['unit'] = 'ms'
    with open(os.path.join(log_dir, 'inference_time.json'), 'w') as f:
        f.write(json.dumps(results_timer))
if __name__ == '__main__':
    # CLI entry point: every hyper-parameter is exposed as a flag.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset-path', type=str, required=True)
    parser.add_argument('--epochs', type=int, default=500)
    parser.add_argument('--batch-size', type=int, default=256)
    parser.add_argument('--num-classes', type=int, default=8)
    parser.add_argument('--levels', type=int, default=16)
    parser.add_argument('--sequence-length', type=int, default=160)
    parser.add_argument('--nhid', type=int, default=25)
    parser.add_argument('--feed-forward', type=int, default=256)
    parser.add_argument('--dropout', type=float, default=.2)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--gamma', type=float, default=0.999)
    parser.add_argument('--weight-decay', type=float, default=1e-3)
    # parse_known_args tolerates extra flags injected by launchers.
    args, _ = parser.parse_known_args()
    main(args)
| 33.55642
| 116
| 0.571545
|
acfc37e1767014ecf43943b6423859230993fd3e
| 1,216
|
py
|
Python
|
lib/surface/compute/vpn_tunnels/__init__.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/compute/vpn_tunnels/__init__.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 11
|
2020-02-29T02:51:12.000Z
|
2022-03-30T23:20:08.000Z
|
lib/surface/compute/vpn_tunnels/__init__.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 1
|
2020-07-24T18:47:35.000Z
|
2020-07-24T18:47:35.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for reading and manipulating VPN Gateways."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class VpnTunnels(base.Group):
    """Read and manipulate Google Compute Engine VPN Tunnels."""
    # Placeholder to indicate that a detailed_help field exists and should
    # be set outside the class definition.
    detailed_help = None

# Surface metadata attached after the class body, as the placeholder
# comment above dictates.
VpnTunnels.category = base.NETWORKING_CATEGORY
VpnTunnels.detailed_help = {
    'brief': 'Read and manipulate Google Compute Engine VPN Tunnels'
}
| 32.864865
| 74
| 0.768914
|
acfc39ffb948171bfc5634b228ff6c47de8ae7bb
| 827
|
py
|
Python
|
external/workload-automation/wa/framework/configuration/__init__.py
|
qais-yousef/lisa
|
8343e26bf0565589928a69ccbe67b1be03403db7
|
[
"Apache-2.0"
] | 159
|
2016-01-25T11:08:39.000Z
|
2022-03-28T05:20:41.000Z
|
external/workload-automation/wa/framework/configuration/__init__.py
|
qais-yousef/lisa
|
8343e26bf0565589928a69ccbe67b1be03403db7
|
[
"Apache-2.0"
] | 656
|
2016-01-25T11:16:56.000Z
|
2022-03-23T16:03:28.000Z
|
external/workload-automation/wa/framework/configuration/__init__.py
|
qais-yousef/lisa
|
8343e26bf0565589928a69ccbe67b1be03403db7
|
[
"Apache-2.0"
] | 127
|
2015-03-11T16:36:17.000Z
|
2022-02-15T02:26:43.000Z
|
# Copyright 2013-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wa.framework.configuration.core import (settings,
RunConfiguration,
JobGenerator,
ConfigurationPoint)
| 43.526316
| 74
| 0.646917
|
acfc3b075e2ff047c3ccffd4a34b8cd72aea5140
| 1,872
|
py
|
Python
|
scripts/study_case/ID_13/torch_geometric/transforms/polar.py
|
kzbnb/numerical_bugs
|
bc22e72bcc06df6ce7889a25e0aeed027bde910b
|
[
"Apache-2.0"
] | 8
|
2021-06-30T06:55:14.000Z
|
2022-03-18T01:57:14.000Z
|
scripts/study_case/ID_13/torch_geometric/transforms/polar.py
|
kzbnb/numerical_bugs
|
bc22e72bcc06df6ce7889a25e0aeed027bde910b
|
[
"Apache-2.0"
] | 1
|
2021-06-30T03:08:15.000Z
|
2021-06-30T03:08:15.000Z
|
scripts/study_case/ID_13/torch_geometric/transforms/polar.py
|
kzbnb/numerical_bugs
|
bc22e72bcc06df6ce7889a25e0aeed027bde910b
|
[
"Apache-2.0"
] | 2
|
2021-11-17T11:19:48.000Z
|
2021-11-18T03:05:58.000Z
|
from math import pi as PI
import torch
class Polar(object):
    r"""Stores the 2-D spatial relation of connected nodes as polar
    pseudo-coordinates in ``data.edge_attr``.

    For every edge ``(row, col)`` the attribute is ``(rho, theta)``:
    ``rho`` is the Euclidean edge length divided by the largest edge
    length (so it lies in :math:`[0, 1]`) and ``theta`` is the angle of
    the displacement mapped from :math:`[-\pi, \pi)` onto :math:`[0, 1)`.

    Args:
        cat (bool, optional): If ``True`` (default), concatenate the polar
            coordinates to existing edge attributes instead of replacing
            them.
    """

    def __init__(self, cat=True):
        self.cat = cat

    def __call__(self, data):
        (row, col), pos, pseudo = data.edge_index, data.pos, data.edge_attr
        assert pos.dim() == 2 and pos.size(1) == 2

        # Displacement of each edge's target node relative to its source.
        offset = pos[col] - pos[row]

        # Radius, globally normalised to [0, 1].
        radius = torch.norm(offset, p=2, dim=-1)
        radius = radius / radius.max()

        # Angle measured in turns; shift negatives into [0, 1).
        angle = torch.atan2(offset[..., 1], offset[..., 0]) / (2 * PI)
        angle += (angle < 0).type_as(angle)

        coords = torch.stack([radius, angle], dim=1)

        if pseudo is not None and self.cat:
            if pseudo.dim() == 1:
                pseudo = pseudo.view(-1, 1)
            data.edge_attr = torch.cat([pseudo, coords.type_as(pos)], dim=-1)
        else:
            data.edge_attr = coords

        return data

    def __repr__(self):
        return '{}(cat={})'.format(self.__class__.__name__, self.cat)
| 29.25
| 77
| 0.567842
|
acfc3b089df27a23ee1799bf979b5b4e06eaa3f6
| 6,833
|
py
|
Python
|
retrieval/evaluate_recall_testrank.py
|
mikacuy/deformation_aware_embedding
|
7a2cef54328c51d2bfc582fdd5b119a24e19a9ca
|
[
"MIT"
] | 40
|
2020-09-11T01:17:19.000Z
|
2022-03-08T23:22:45.000Z
|
retrieval/evaluate_recall_testrank.py
|
star-cold/deformation_aware_embedding
|
d5982209f072015bdc16abf281cb0f045b928720
|
[
"MIT"
] | 2
|
2021-02-16T21:41:00.000Z
|
2021-02-25T04:06:59.000Z
|
retrieval/evaluate_recall_testrank.py
|
star-cold/deformation_aware_embedding
|
d5982209f072015bdc16abf281cb0f045b928720
|
[
"MIT"
] | 11
|
2020-09-26T08:42:02.000Z
|
2022-01-12T09:29:03.000Z
|
import os
import sys
import numpy as np
import random
import json
import argparse
import pickle
libpath = os.path.dirname(os.path.abspath(__file__))
sys.path.append(libpath + '/../pyRender/lib')
sys.path.append(libpath + '/../pyRender/src')
sys.path.append(libpath + '/..')
import objloader
import mesh_utils
import time
import h5py
import math
import tensorflow as tf
sys.path.append(os.path.join(libpath, 'tf_ops/nn_distance'))
import tf_nndistance
# Fix the NumPy RNG so any sampling below is reproducible across runs.
np.random.seed(0)
# Wall-clock reference for the total running time printed at the end.
start_time = time.time()
##############h5 file handles
def save_dataset(fname, pcs):
    """Write an iterable of equally-shaped point clouds to *fname* as a
    gzip-compressed float32 HDF5 dataset named 'data'.

    Fixes: h5py.File() with no mode is deprecated (and an error in modern
    h5py); the handle was also leaked if create_dataset raised.
    """
    cloud = np.stack(list(pcs))
    # Explicit write mode + context manager: deterministic close even on error.
    with h5py.File(fname, 'w') as fout:
        fout.create_dataset('data', data=cloud, compression='gzip',
                            dtype='float32')
def load_h5(h5_filename):
    """Return the full 'data' array stored in an HDF5 file.

    Fixes: the original never closed the file handle and relied on h5py's
    deprecated default open mode; read inside a context manager instead.
    """
    with h5py.File(h5_filename, 'r') as f:
        return f['data'][:]
################################

# Train/val/test split metadata for ShapeNetCore v2 (per-category model lists).
with open('../shapenetcore_v2_split.json') as json_file:
    data = json.load(json_file)

SHAPENET_BASEDIR = '/orion/group/ShapeNetManifold_10000_simplified/'

parser = argparse.ArgumentParser()
parser.add_argument('--category', default='table', help='Which class')
parser.add_argument('--data_split', default = "test", help='which data split to use')
parser.add_argument('--dump_dir', default='dump_table_regloss_euclidean_testrank/', help='dump folder path [dump]')
#
parser.add_argument('--fitting_dump_dir', default='test_rank_point2mesh/', help='dump folder path after fitting')
# parser.add_argument('--fitting_dump_dir', default='deformation_parallel_newcost_2cd/', help='dump folder path after fitting')
parser.add_argument('--to_deform', default=True, help='with or without deformation')
parser.add_argument('--num_neighbors', type=int, default=3, help='Number of neighbors to retrieve')
parser.add_argument('--K', type=int, default=5, help='Top K are considered as true positives')
FLAGS = parser.parse_args()

OBJ_CAT = FLAGS.category
DATA_SPLIT = FLAGS.data_split
NUM_NEIGHBORS = FLAGS.num_neighbors
DUMP_DIR = str(FLAGS.dump_dir)
print(DUMP_DIR)
K = FLAGS.K
# exit()

# car/airplane live in an alternative split file; this re-read replaces
# the metadata loaded above.
if (OBJ_CAT == "car" or OBJ_CAT == "airplane"):
    with open('../shapenetcore_v2_split2.json') as json_file:
        data = json.load(json_file)

# Per-run output folder and log file receiving the recall numbers below.
FITTING_DUMP_DIR = os.path.join(DUMP_DIR, FLAGS.fitting_dump_dir)
if not os.path.exists(FITTING_DUMP_DIR): os.mkdir(FITTING_DUMP_DIR)
LOG_FOUT = open(os.path.join(FITTING_DUMP_DIR, 'log_recall_'+str(K)+'.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')

# NOTE(review): TO_DEFORM is only printed, never branched on — evaluation
# below always uses the pre-computed deformed distances; confirm intent.
TO_DEFORM = FLAGS.to_deform
print("Deform "+str(TO_DEFORM))
def log_string(out_str):
    """Append *out_str* (plus newline) to the run log file and echo it."""
    LOG_FOUT.write('%s\n' % out_str)
    LOG_FOUT.flush()
    print(out_str)
# Narrow the split metadata to the requested data split and find the
# numeric index of the requested object category.
data = data[DATA_SPLIT]
num_categories = len(list(data.keys()))
cat_idx = -1
for i in range(num_categories):
    if (data[str(i)]["category"] == OBJ_CAT):
        cat_idx = str(i)
        break

#Retrieved neighbor indices
# (context manager also fixes the handle leak of the original open()).
with open(os.path.join(DUMP_DIR, "neighbors.pickle"), "rb") as pickle_in:
    neighbors_idxs = pickle.load(pickle_in)

shapes = data[str(cat_idx)]
synsetid = shapes["synsetid"]
model_names = shapes["model_names"]
num_samples = shapes["num_samples"]

NUM_POINT = 2048

####Get candidates
with open('../candidate_generation/candidates_'+DATA_SPLIT+'_'+OBJ_CAT+'_testrank.pickle', "rb") as pickle_in:
    database_candidate_idxs = pickle.load(pickle_in)
NUM_CANDIDATES = len(database_candidate_idxs[0])

####Get pre-computed deformed chamfer distance
FOL = "chamfer_distance_deformed_candidates/"
# BUG FIX: these two files were opened in text mode; pickle.load requires
# binary mode on Python 3 (the neighbors pickle above already uses "rb").
with open(os.path.join(FOL, "testrank_candidates_"+DATA_SPLIT +"_"+OBJ_CAT+"_point2mesh.pickle"), "rb") as pickle_in:
    database_deformedCD_costs = pickle.load(pickle_in)
with open(os.path.join(FOL, "testrank_candidates_"+DATA_SPLIT +"_"+OBJ_CAT+"_point2mesh_undeformed.pickle"), "rb") as pickle_in:
    database_CD_costs = pickle.load(pickle_in)
def chamfer_loss(pc1, pc2):
    """ pred: BxNx3,
        label: BxNx3, """
    # Symmetric chamfer distance: nearest-neighbour distances in both
    # directions, summed per point and averaged over each cloud's points,
    # yielding one scalar per batch element.
    dists_forward,_,dists_backward,_ = tf_nndistance.nn_distance(pc1, pc2)
    # loss = dists_forward+dists_backward
    loss = tf.reduce_mean(dists_forward+dists_backward, axis=1)
    return loss
with tf.Graph().as_default():
    with tf.device('/gpu:0'):
        # Placeholders for two aligned batches of candidate point clouds.
        pointclouds_pl_1 = tf.placeholder(tf.float32, shape=(NUM_CANDIDATES, NUM_POINT, 3))
        pointclouds_pl_2 = tf.placeholder(tf.float32, shape=(NUM_CANDIDATES, NUM_POINT, 3))
        chamfer_distance = chamfer_loss(pointclouds_pl_1, pointclouds_pl_2)

    # Create a session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.log_device_placement = False
    sess = tf.Session(config=config)

    # Init variables
    init = tf.global_variables_initializer()
    sess.run(init)

    # NOTE(review): this graph/session is never run below — the recall
    # evaluation uses only the pre-computed distances; confirm whether the
    # TF setup can be removed.
    ops = {'pointclouds_pl_1': pointclouds_pl_1,
           'pointclouds_pl_2': pointclouds_pl_2,
           'chamfer_distance': chamfer_distance,
           }

# fname = DATA_SPLIT +"_"+OBJ_CAT+"_meshsampled.h5"
fname = '../candidate_generation/' + DATA_SPLIT +"_"+OBJ_CAT + '.h5'
OBJ_POINTCLOUDS = load_h5(fname)
print(OBJ_POINTCLOUDS.shape)

all_cd = []
all_deformed_cd = []
recall = [0]*NUM_NEIGHBORS
num_evaluated = 0
for i in range(len(model_names)):
    chamfer_distances = database_CD_costs[i]

    ##Deformed CD
    deformed_chamfer_distances = database_deformedCD_costs[i]
    # -1 marks an invalid deformation; push those to effectively infinite
    # cost so they can never be counted as true neighbours.
    deformed_chamfer_distances[np.argwhere(deformed_chamfer_distances==-1)] = 1e16 #those with invalid deformation

    ###Index of retrieved neighbors out of the 150 candidates
    retrieved_neighbors_idx = neighbors_idxs[i]
    retrieved_chamfer_distances = chamfer_distances[retrieved_neighbors_idx]
    retrieved_deformed_chamfer_distances = deformed_chamfer_distances[retrieved_neighbors_idx]

    # Skip queries whose retrieved set contains an invalid deformation.
    if (np.max(retrieved_deformed_chamfer_distances) > 1000):
        continue

    chamfer_distance_idx_sorted = np.argsort(chamfer_distances)
    deformed_chamfer_distance_idx_sorted = np.argsort(deformed_chamfer_distances)

    num_evaluated += 1
    # A retrieval at rank j is a hit if it lies in the top-K candidates
    # ranked by deformed chamfer distance; only the first hit counts.
    true_neighbors = deformed_chamfer_distance_idx_sorted[:K]
    for j in range(NUM_NEIGHBORS):
        if retrieved_neighbors_idx[j] in true_neighbors:
            recall[j]+=1
            break

    all_cd.append(retrieved_chamfer_distances)
    all_deformed_cd.append(retrieved_deformed_chamfer_distances)

all_cd = np.array(all_cd)
all_deformed_cd = np.array(all_deformed_cd)
# Cumulative sum converts per-rank first-hit counts into recall@1..N (%).
recall=(np.cumsum(recall)/float(num_evaluated))*100
print(recall)

for i in range(NUM_NEIGHBORS):
    i_mean_cd = np.mean(all_cd[:,i])
    i_mean_deformed_cd = np.mean(all_deformed_cd[:,i])
    log_string("Rank "+ str(i+1) + " retrieved mean CD error: "+str(i_mean_cd))
    log_string("Rank "+ str(i+1) + " retrieved mean deformed CD error: "+str(i_mean_deformed_cd))
    log_string(" ")

log_string(" ")
log_string("Recall")
log_string("K= "+str(K))
log_string(" ")
for i in range(NUM_NEIGHBORS):
    log_string("Recall@"+ str(i+1) + ": "+str(recall[i]))
log_string(" ")

print("Total running time: "+str(time.time()-start_time)+" sec")
LOG_FOUT.close()
| 31.059091
| 127
| 0.74067
|
acfc3b864827c156f58af8af6ee16c18eefeee1e
| 20,913
|
py
|
Python
|
menpofit/error/base.py
|
yuxiang-zhou/menpofit
|
a74d87df8979c683019ea518bcf7729a76cc603d
|
[
"BSD-3-Clause"
] | 124
|
2015-01-12T07:39:18.000Z
|
2022-03-31T05:27:07.000Z
|
menpofit/error/base.py
|
yuxiang-zhou/menpofit
|
a74d87df8979c683019ea518bcf7729a76cc603d
|
[
"BSD-3-Clause"
] | 80
|
2015-01-05T16:17:39.000Z
|
2020-11-22T13:42:00.000Z
|
menpofit/error/base.py
|
yuxiang-zhou/menpofit
|
a74d87df8979c683019ea518bcf7729a76cc603d
|
[
"BSD-3-Clause"
] | 64
|
2015-02-02T15:11:38.000Z
|
2022-02-28T06:19:31.000Z
|
from functools import wraps, partial
import numpy as np
from menpo.shape import PointCloud
def pointcloud_to_points(wrapped):
    """Decorator converting every `menpo.shape.PointCloud` argument
    (positional or keyword) to its raw ``points`` array before invoking
    the wrapped function."""
    @wraps(wrapped)
    def wrapper(*args, **kwargs):
        converted = [a.points if isinstance(a, PointCloud) else a
                     for a in args]
        converted_kw = {k: (v.points if isinstance(v, PointCloud) else v)
                        for k, v in kwargs.items()}
        return wrapped(*converted, **converted_kw)
    return wrapper
# BOUNDING BOX NORMALISERS
def bb_area(shape):
    r"""Area of the axis-aligned bounding box of *shape*, i.e.

    .. math::

        h w

    with :math:`h` and :math:`w` the bounding-box height and width.

    Parameters
    ----------
    shape : `menpo.shape.PointCloud` or `subclass`
        The input shape.

    Returns
    -------
    bb_area : `float`
        The area of the bounding box.
    """
    # ptp == max - min per axis, i.e. the bounding-box extents.
    height, width = np.ptp(shape, axis=0)
    return height * width
def bb_perimeter(shape):
    r"""Perimeter of the axis-aligned bounding box of *shape*, i.e.

    .. math::

        2(h + w)

    with :math:`h` and :math:`w` the bounding-box height and width.

    Parameters
    ----------
    shape : `menpo.shape.PointCloud` or `subclass`
        The input shape.

    Returns
    -------
    bb_perimeter : `float`
        The perimeter of the bounding box.
    """
    height, width = np.ptp(shape, axis=0)
    return 2 * (height + width)
def bb_avg_edge_length(shape):
    r"""Average edge length of the axis-aligned bounding box of *shape*,
    i.e.

    .. math::

        \frac{h + w}{2}

    with :math:`h` and :math:`w` the bounding-box height and width.

    Parameters
    ----------
    shape : `menpo.shape.PointCloud` or `subclass`
        The input shape.

    Returns
    -------
    bb_avg_edge_length : `float`
        The average edge length of the bounding box.
    """
    height, width = np.ptp(shape, axis=0)
    return 0.5 * (height + width)
def bb_diagonal(shape):
    r"""Diagonal length of the axis-aligned bounding box of *shape*, i.e.

    .. math::

        \sqrt{h^2 + w^2}

    with :math:`h` and :math:`w` the bounding-box height and width.

    Parameters
    ----------
    shape : `menpo.shape.PointCloud` or `subclass`
        The input shape.

    Returns
    -------
    bb_diagonal : `float`
        The diagonal of the bounding box.
    """
    height, width = np.ptp(shape, axis=0)
    return (height ** 2 + width ** 2) ** 0.5
# Registry mapping a ``norm_type`` string to its bounding-box measure
# function; consumed by the ``norm_type`` parameter of
# ``bb_normalised_error`` below.
bb_norm_types = {
    'avg_edge_length': bb_avg_edge_length,
    'perimeter': bb_perimeter,
    'diagonal': bb_diagonal,
    'area': bb_area
}
# EUCLIDEAN AND ROOT MEAN SQUARE ERRORS
@pointcloud_to_points
def root_mean_square_error(shape, gt_shape):
    r"""Root mean square error between two shapes, i.e.

    .. math::

        \sqrt{\frac{1}{N}\sum_{i=1}^N(s_i-s^*_i)^2}

    where :math:`s_i` and :math:`s^*_i` are the coordinates of the
    :math:`i`'th points of the two shapes and :math:`N` the total number
    of points.

    Parameters
    ----------
    shape : `menpo.shape.PointCloud`
        The input shape (e.g. the final shape of a fitting procedure).
    gt_shape : `menpo.shape.PointCloud`
        The ground truth shape.

    Returns
    -------
    root_mean_square_error : `float`
        The root mean square error.
    """
    diff = shape.ravel() - gt_shape.ravel()
    return np.sqrt(np.mean(diff ** 2))
@pointcloud_to_points
def euclidean_error(shape, gt_shape):
    r"""Mean Euclidean (per-point) error between two shapes, i.e.

    .. math::

        \frac{1}{N}\sum_{i=1}^N\sqrt{(s_{i,x}-s^*_{i,x})^2 + (s_{i,y}-s^*_{i,y})^2}

    where :math:`(s_{i,x}, s_{i,y})` and :math:`(s^*_{i,x}, s^*_{i,y})`
    are the coordinates of the :math:`i`'th points of the final and
    ground truth shapes, and :math:`N` the total number of points.

    Parameters
    ----------
    shape : `menpo.shape.PointCloud`
        The input shape (e.g. the final shape of a fitting procedure).
    gt_shape : `menpo.shape.PointCloud`
        The ground truth shape.

    Returns
    -------
    root_mean_square_error : `float`
        The Euclidean error.
    """
    per_point = np.sqrt(np.sum((shape - gt_shape) ** 2, axis=-1))
    return np.mean(per_point)
# DISTANCE NORMALISER
def distance_two_indices(index1, index2, shape):
    r"""Euclidean distance between two indexed points of a shape, i.e.

    .. math::

        \sqrt{(s_{i,x}-s_{j,x})^2 + (s_{i,y}-s_{j,y})^2}

    for points :math:`i` (`index1`) and :math:`j` (`index2`) of *shape*.

    Parameters
    ----------
    index1 : `int`
        The index of the first point.
    index2 : `int`
        The index of the second point.
    shape : `menpo.shape.PointCloud`
        The input shape.

    Returns
    -------
    distance_two_indices : `float`
        The Euclidean distance between the points.
    """
    # Delegates to euclidean_error on the two single points.
    return euclidean_error(shape[index1], shape[index2])
# GENERIC NORMALISED ERROR FUNCTIONS
@pointcloud_to_points
def bb_normalised_error(shape_error_f, shape, gt_shape,
                        norm_shape=None, norm_type='avg_edge_length'):
    r"""Error between two shapes normalised by a bounding-box measure, i.e.

    .. math::

        \frac{\mathcal{F}(s,s^*)}{\mathcal{N}(s^*)}

    where :math:`\mathcal{F}` is *shape_error_f* applied to the final
    shape :math:`s` and ground truth :math:`s^*`, and :math:`\mathcal{N}`
    is a bounding-box measure of the normalising shape.

    Parameters
    ----------
    shape_error_f : `callable`
        The function to be used for computing the error.
    shape : `menpo.shape.PointCloud`
        The input shape (e.g. the final shape of a fitting procedure).
    gt_shape : `menpo.shape.PointCloud`
        The ground truth shape.
    norm_shape : `menpo.shape.PointCloud` or ``None``, optional
        The shape whose bounding box defines the normaliser; the ground
        truth shape is used when ``None``.
    norm_type : ``{'area', 'perimeter', 'avg_edge_length', 'diagonal'}``, optional
        Which bounding-box measure to use (see ``bb_norm_types``).

    Returns
    -------
    normalised_error : `float`
        The computed normalised error.

    Raises
    ------
    ValueError
        If *norm_type* is not a recognised measure name.
    """
    if norm_type not in bb_norm_types:
        raise ValueError('norm_type must be one of '
                         '{avg_edge_length, perimeter, diagonal, area}.')
    reference = gt_shape if norm_shape is None else norm_shape
    error = shape_error_f(shape, gt_shape)
    return error / bb_norm_types[norm_type](reference)
@pointcloud_to_points
def distance_normalised_error(shape_error_f, distance_norm_f, shape, gt_shape):
    r"""Error between two shapes normalised by a distance measure, i.e.

    .. math::

        \frac{\mathcal{F}(s,s^*)}{\mathcal{N}(s,s^*)}

    where :math:`\mathcal{F}` is *shape_error_f* applied to the final
    shape :math:`s` and ground truth :math:`s^*`, and :math:`\mathcal{N}`
    is *distance_norm_f* applied to the same pair.

    Parameters
    ----------
    shape_error_f : `callable`
        The function to be used for computing the error.
    distance_norm_f : `callable`
        The function to be used for computing the normalisation distance
        metric.
    shape : `menpo.shape.PointCloud`
        The input shape (e.g. the final shape of a fitting procedure).
    gt_shape : `menpo.shape.PointCloud`
        The ground truth shape.

    Returns
    -------
    normalised_error : `float`
        The computed normalised error.
    """
    error = shape_error_f(shape, gt_shape)
    return error / distance_norm_f(shape, gt_shape)
@pointcloud_to_points
def distance_indexed_normalised_error(shape_error_f, index1, index2, shape,
                                      gt_shape):
    r"""Error between two shapes normalised by the distance between two
    indexed points of the ground truth shape, i.e.

    .. math::

        \frac{\mathcal{F}(s,s^*)}{\mathcal{N}(s^*)}

    where :math:`\mathcal{F}` is *shape_error_f* applied to the final
    shape :math:`s` and ground truth :math:`s^*`, and :math:`\mathcal{N}`
    is the distance between points *index1* and *index2* of :math:`s^*`.

    Parameters
    ----------
    shape_error_f : `callable`
        The function to be used for computing the error.
    index1 : `int`
        The index of the first point.
    index2 : `int`
        The index of the second point.
    shape : `menpo.shape.PointCloud`
        The input shape (e.g. the final shape of a fitting procedure).
    gt_shape : `menpo.shape.PointCloud`
        The ground truth shape.

    Returns
    -------
    normalised_error : `float`
        The computed normalised error.
    """
    error = shape_error_f(shape, gt_shape)
    return error / distance_two_indices(index1, index2, gt_shape)
# EUCLIDEAN AND ROOT MEAN SQUARE NORMALISED ERRORS
def root_mean_square_bb_normalised_error(shape, gt_shape, norm_shape=None,
                                         norm_type='avg_edge_length'):
    r"""Root mean square error between two shapes, normalised by a
    bounding-box measure of the normalising shape, i.e.

    .. math::

        \frac{\sqrt{\frac{1}{N}\sum_{i=1}^N(s_i-s^*_i)^2}}{\mathcal{N}(s^*)}

    where :math:`s_i`, :math:`s^*_i` are the coordinates of the
    :math:`i`'th points of the final and ground truth shapes, :math:`N`
    the total number of points and :math:`\mathcal{N}` the chosen
    bounding-box measure.

    Parameters
    ----------
    shape : `menpo.shape.PointCloud`
        The input shape (e.g. the final shape of a fitting procedure).
    gt_shape : `menpo.shape.PointCloud`
        The ground truth shape.
    norm_shape : `menpo.shape.PointCloud` or ``None``, optional
        The shape whose bounding box defines the normaliser; the ground
        truth shape is used when ``None``.
    norm_type : ``{'area', 'perimeter', 'avg_edge_length', 'diagonal'}``, optional
        Which bounding-box measure to use (see ``bb_norm_types``).

    Returns
    -------
    error : `float`
        The computed root mean square normalised error.
    """
    # Convenience wrapper: RMS error with bounding-box normalisation.
    return bb_normalised_error(shape_error_f=root_mean_square_error,
                               shape=shape, gt_shape=gt_shape,
                               norm_shape=norm_shape, norm_type=norm_type)
def root_mean_square_distance_normalised_error(shape, gt_shape,
                                               distance_norm_f):
    r"""
    Root mean square error between ``shape`` and ``gt_shape``, normalised by
    a distance metric computed between the two shapes.

    Thin wrapper that plugs :map:`root_mean_square_error` into
    :map:`distance_normalised_error`.

    Parameters
    ----------
    shape : `menpo.shape.PointCloud`
        The input shape (e.g. the final shape of a fitting procedure).
    gt_shape : `menpo.shape.PointCloud`
        The ground truth shape.
    distance_norm_f : `callable`
        Function computing the normalisation distance metric.

    Returns
    -------
    error : `float`
        The computed root mean square normalised error.
    """
    return distance_normalised_error(shape_error_f=root_mean_square_error,
                                     shape=shape, gt_shape=gt_shape,
                                     distance_norm_f=distance_norm_f)
def root_mean_square_distance_indexed_normalised_error(shape, gt_shape,
                                                       index1, index2):
    r"""
    Root mean square error between ``shape`` and ``gt_shape``, normalised by
    the distance between two indexed points of the ground truth shape.

    Thin wrapper that plugs :map:`root_mean_square_error` into
    :map:`distance_indexed_normalised_error`.

    Parameters
    ----------
    shape : `menpo.shape.PointCloud`
        The input shape (e.g. the final shape of a fitting procedure).
    gt_shape : `menpo.shape.PointCloud`
        The ground truth shape.
    index1 : `int`
        The index of the first point.
    index2 : `int`
        The index of the second point.

    Returns
    -------
    error : `float`
        The computed root mean square normalised error.
    """
    return distance_indexed_normalised_error(
        shape_error_f=root_mean_square_error, shape=shape, gt_shape=gt_shape,
        index1=index1, index2=index2)
def euclidean_bb_normalised_error(shape, gt_shape, norm_shape=None,
                                  norm_type='avg_edge_length'):
    r"""
    Euclidean error between ``shape`` and ``gt_shape``, normalised by a
    measure of a bounding box.

    Thin wrapper that plugs :map:`euclidean_error` into
    :map:`bb_normalised_error`.

    Parameters
    ----------
    shape : `menpo.shape.PointCloud`
        The input shape (e.g. the final shape of a fitting procedure).
    gt_shape : `menpo.shape.PointCloud`
        The ground truth shape.
    norm_shape : `menpo.shape.PointCloud` or ``None``, optional
        Shape whose bounding box defines the normaliser; the ground truth
        shape is used when ``None``.
    norm_type : ``{'area', 'perimeter', 'avg_edge_length', 'diagonal'}``, optional
        Which bounding box measure to normalise by.

    Returns
    -------
    error : `float`
        The computed Euclidean normalised error.
    """
    return bb_normalised_error(shape_error_f=euclidean_error,
                               norm_type=norm_type, norm_shape=norm_shape,
                               shape=shape, gt_shape=gt_shape)
def euclidean_distance_normalised_error(shape, gt_shape, distance_norm_f):
    r"""
    Euclidean error between ``shape`` and ``gt_shape``, normalised by a
    distance metric computed between the two shapes.

    Thin wrapper that plugs :map:`euclidean_error` into
    :map:`distance_normalised_error`.

    Parameters
    ----------
    shape : `menpo.shape.PointCloud`
        The input shape (e.g. the final shape of a fitting procedure).
    gt_shape : `menpo.shape.PointCloud`
        The ground truth shape.
    distance_norm_f : `callable`
        Function computing the normalisation distance metric.

    Returns
    -------
    error : `float`
        The computed Euclidean normalised error.
    """
    return distance_normalised_error(shape_error_f=euclidean_error,
                                     shape=shape, gt_shape=gt_shape,
                                     distance_norm_f=distance_norm_f)
def euclidean_distance_indexed_normalised_error(shape, gt_shape, index1,
                                                index2):
    r"""
    Euclidean error between ``shape`` and ``gt_shape``, normalised by the
    distance between two indexed points of the ground truth shape.

    Thin wrapper that plugs :map:`euclidean_error` into
    :map:`distance_indexed_normalised_error`.

    Parameters
    ----------
    shape : `menpo.shape.PointCloud`
        The input shape (e.g. the final shape of a fitting procedure).
    gt_shape : `menpo.shape.PointCloud`
        The ground truth shape.
    index1 : `int`
        The index of the first point.
    index2 : `int`
        The index of the second point.

    Returns
    -------
    error : `float`
        The computed Euclidean normalised error.
    """
    return distance_indexed_normalised_error(
        shape_error_f=euclidean_error, shape=shape, gt_shape=gt_shape,
        index1=index1, index2=index2)
| 34.681592
| 103
| 0.591068
|
acfc3c31b47c44fd7ede7a6c340204588397c91c
| 1,303
|
py
|
Python
|
djangomom/djangomom/urls.py
|
emiamar/d
|
abfd0ca81224a1259fdfac92ed21ad771d901e18
|
[
"BSD-3-Clause"
] | null | null | null |
djangomom/djangomom/urls.py
|
emiamar/d
|
abfd0ca81224a1259fdfac92ed21ad771d901e18
|
[
"BSD-3-Clause"
] | 2
|
2018-02-27T07:56:18.000Z
|
2018-03-09T12:45:48.000Z
|
djangomom/djangomom/urls.py
|
emiamar/d
|
abfd0ca81224a1259fdfac92ed21ad771d901e18
|
[
"BSD-3-Clause"
] | 2
|
2018-02-21T07:43:04.000Z
|
2018-11-10T18:09:26.000Z
|
"""djangomom URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^projects/', include('project.urls',
namespace='projects')),
url(r'^apps/', include('app.urls',
namespace='apps')),
url(r'^models/', include('modeller.urls',
namespace='models')),
url(r'^account/', include('account.urls',
namespace='account')),
url(r'^', include('core.urls',
namespace='core')),
url(r'^endpoint/', include('endpoint.urls',
namespace='endpoint')),
url(r'^serializer/', include('serializer.urls',
namespace='serializer')),
]
| 35.216216
| 79
| 0.653108
|
acfc3c82201ba2fb2ebf759c30cd1fac3a5da74b
| 823
|
py
|
Python
|
test-cms/manage.py
|
DianaHov/some-test-rep
|
0902c70a44f925aae2121f419ae5edd650af48f4
|
[
"MIT"
] | null | null | null |
test-cms/manage.py
|
DianaHov/some-test-rep
|
0902c70a44f925aae2121f419ae5edd650af48f4
|
[
"MIT"
] | null | null | null |
test-cms/manage.py
|
DianaHov/some-test-rep
|
0902c70a44f925aae2121f419ae5edd650af48f4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Django's command-line utility for administrative tasks (stock manage.py).
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before anything imports them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cms.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Dispatch to the requested management command (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| 35.782609
| 78
| 0.623329
|
acfc3d42e0c66664a94d9140c9ad5082173721e2
| 1,195
|
py
|
Python
|
code/analysis/variationalbayes/torch_hessian.py
|
dmytrov/stochasticcontrol
|
a289d5c0953c4a328b2177f51168588248c00f2c
|
[
"MIT"
] | null | null | null |
code/analysis/variationalbayes/torch_hessian.py
|
dmytrov/stochasticcontrol
|
a289d5c0953c4a328b2177f51168588248c00f2c
|
[
"MIT"
] | null | null | null |
code/analysis/variationalbayes/torch_hessian.py
|
dmytrov/stochasticcontrol
|
a289d5c0953c4a328b2177f51168588248c00f2c
|
[
"MIT"
] | null | null | null |
import torch
from torch.autograd import grad
def hessian(loss, params):
    """Dense Hessian of a scalar ``loss`` w.r.t. a list of parameter tensors.

    All first-order gradients are flattened and concatenated into a single
    vector; one extra backward pass per entry then yields the corresponding
    Hessian row. Returns a square tensor of shape (D, D) where D is the total
    number of scalar parameters.
    """
    first_order = grad(loss, params, create_graph=True)
    flat_grad = torch.cat([g.contiguous().view(-1) for g in first_order])
    rows = []
    for idx in range(flat_grad.size(0)):
        # Differentiate one gradient component to obtain one Hessian row.
        second_order = grad(flat_grad[idx], params, create_graph=True)
        rows.append(torch.cat([g.contiguous().view(-1) for g in second_order]))
    return torch.stack(rows)
def function_hessian(y, x):
    """Hessian of a scalar ``y`` w.r.t. a single 1-D tensor ``x``.

    Prints the first-order gradient (kept from the original debug output),
    then builds one Hessian row per component of ``x``.
    """
    # compute hessian
    first = grad(y, x, create_graph=True)[0]
    print("x_1grad:", first)
    rows = [grad(first[i], x, create_graph=True)[0]
            for i in range(first.size(0))]
    return torch.stack(rows)
# Demo: build a twice-differentiable scalar from two parameter vectors and
# print its Hessian with respect to (a, b).
a = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
b = torch.tensor([4.0, 5.0, 6.0], requires_grad=True)
print("a:", a)
print("b:", b)
# y = sum_i a_i^2 + sum_{i,j} a_i * b_j  (outer product via broadcasting)
y = torch.sum(a*a + a[:, None]*b[None, :])
print("y:", y)
hy = hessian(y, [a, b])
print("hy:", hy)
| 26.555556
| 106
| 0.571548
|
acfc3d9a9e82275f4f589d63f6fc3061e394b9c9
| 702
|
py
|
Python
|
tripadvisor/forms.py
|
baajarmeh/tripadvisor-scraper
|
e5dd7bf0864e4f87ff909e57e1ed531eeb30f9dd
|
[
"Apache-2.0"
] | 7
|
2018-06-26T14:02:32.000Z
|
2022-01-14T01:42:19.000Z
|
tripadvisor/forms.py
|
baajarmeh/tripadvisor-scraper
|
e5dd7bf0864e4f87ff909e57e1ed531eeb30f9dd
|
[
"Apache-2.0"
] | null | null | null |
tripadvisor/forms.py
|
baajarmeh/tripadvisor-scraper
|
e5dd7bf0864e4f87ff909e57e1ed531eeb30f9dd
|
[
"Apache-2.0"
] | 1
|
2020-03-27T15:48:11.000Z
|
2020-03-27T15:48:11.000Z
|
# -*- coding: utf-8 -*-
from django import forms
from tripadvisor.models import Destination, Link
class DestinationForm(forms.ModelForm):
    """Form for ``Destination`` whose ``parent`` choices are restricted to
    top-level (root) destinations."""

    class Meta:
        model = Destination
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super(DestinationForm, self).__init__(*args, **kwargs)
        # A parent must itself be a root destination (no parent of its own).
        root_destinations = Destination.objects.filter(parent__isnull=True)
        self.fields['parent'].queryset = root_destinations
class LinkForm(forms.ModelForm):
    """Form for ``Link`` that hides the ``executed`` flag and only offers
    child (non-root) destinations."""

    class Meta:
        model = Link
        exclude = ['executed']

    def __init__(self, *args, **kwargs):
        super(LinkForm, self).__init__(*args, **kwargs)
        # Links attach to destinations that have a parent (i.e. not roots).
        child_destinations = Destination.objects.filter(parent__isnull=False)
        self.fields['destination'].queryset = child_destinations
| 28.08
| 94
| 0.669516
|
acfc3dff2a4480e2ff3bcc602f2394905c23d7a0
| 3,603
|
py
|
Python
|
cogs/Twitch.py
|
JegSnakkerTysk/MornBot
|
350cab8223037306540af7f0b934deccb9efdc9a
|
[
"MIT"
] | null | null | null |
cogs/Twitch.py
|
JegSnakkerTysk/MornBot
|
350cab8223037306540af7f0b934deccb9efdc9a
|
[
"MIT"
] | null | null | null |
cogs/Twitch.py
|
JegSnakkerTysk/MornBot
|
350cab8223037306540af7f0b934deccb9efdc9a
|
[
"MIT"
] | null | null | null |
from discord.ext import commands
import discord
from codecs import open
from json import load as json_load
import locale
from requests import get
from cogs.utils import Defaults, LBlend_utils
# Load bot configuration once at import time; the command prefix and the
# Twitch API key come from config.json in the working directory.
with open('config.json', 'r', encoding='utf8') as f:
    config = json_load(f)
prefix = config['prefix']
twitch_api_key = config['twitch_api_key']
# Use the system locale so follower/viewer counts get locale-aware grouping.
locale.setlocale(locale.LC_ALL, '')
class Twitch(commands.Cog):
    """Cog exposing a single ``twitch`` command that looks up a Twitch user
    via the (legacy) Kraken v5 API and posts an embed with their profile —
    plus live-stream details when the channel is currently broadcasting.

    NOTE(review): the command docstring below doubles as the bot's
    user-facing help text (Norwegian), so it is left untranslated.
    """
    def __init__(self, bot):
        # Reference to the running bot instance.
        self.bot = bot
    @commands.bot_has_permissions(embed_links=True)
    @commands.cooldown(1, 5, commands.BucketType.guild)
    @commands.command(aliases=['twitchuser', 'twitchstream'])
    async def twitch(self, ctx, bruker):
        """Viser informasjon om en Twitch-bruker"""
        async with ctx.channel.typing():
            # Sanitize the user-supplied channel name before using it in URLs.
            bruker = await LBlend_utils.input_sanitizer(bruker)
            # Three Kraken calls: profile, follower count, live-stream state.
            # NOTE(review): requests.get is blocking inside an async command —
            # consider aiohttp; also Kraken v5 is deprecated upstream.
            user_data = get(f'https://api.twitch.tv/kraken/users/{bruker}?client_id={twitch_api_key}').json()
            follow_count_data = get(f'https://api.twitch.tv/kraken/channels/{bruker}/follows?' +
                                    f'client_id={twitch_api_key}').json()
            livestream_data = get(f'https://api.twitch.tv/kraken/streams/{bruker}?client_id={twitch_api_key}').json()
            try:
                profile_pic = user_data['logo']
            except KeyError:
                # Missing 'logo' key means the lookup returned no user.
                return await Defaults.error_fatal_send(ctx, text='Fant ikke bruker!\n\n' +
                                                       f'Skriv `{prefix}help {ctx.command}` for hjelp')
            username = user_data['display_name']
            name = user_data['name']
            bio = user_data['bio']
            creation_date = user_data['created_at']
            # Reformat ISO 'YYYY-MM-DD...' into 'DD.MM.YYYY'.
            creation_date_formatted = f'{creation_date[8:10]}.{creation_date[5:7]}.{creation_date[:4]}'
            user_url = f'https://twitch.tv/{name}'
            follow_count = follow_count_data['_total']
            follow_count = locale.format_string('%d', follow_count, grouping=True)
            embed = discord.Embed(title=username, color=0x392E5C, url=user_url)
            embed.set_author(name='Twitch', icon_url='http://www.gamergiving.org/wp-content/' +
                                                     'uploads/2016/03/twitch11.png')
            embed.set_thumbnail(url=profile_pic)
            embed.add_field(name='📝 Bio', value=bio, inline=False)
            embed.add_field(name='👥 Følgere', value=follow_count)
            embed.add_field(name='📅 Opprettet', value=creation_date_formatted)
            await Defaults.set_footer(ctx, embed)
            try:
                livestream_title = livestream_data['stream']['channel']['status']
                livestream_game = livestream_data['stream']['game']
                livestream_preview = livestream_data['stream']['preview']['large']
                views = livestream_data['stream']['viewers']
                views = locale.format_string('%d', views, grouping=True)
            except TypeError:
                # 'stream' is None when the channel is offline: subscripting
                # None raises TypeError, so send the profile-only embed.
                return await ctx.send(embed=embed)
            embed.add_field(name='🔴 Sender direkte nå', value=f'**Antall som ser på:**\n{views}\n\n' +
                                                              f'**Tittel:**\n{livestream_title}\n\n' +
                                                              f'**Spill:**\n{livestream_game}', inline=False)
            embed.set_image(url=livestream_preview)
            await ctx.send(embed=embed)
def setup(bot):
    """discord.py extension entry point: register the Twitch cog."""
    cog = Twitch(bot)
    bot.add_cog(cog)
| 45.0375
| 121
| 0.572856
|
acfc3ef6b775fbe919bf98e8e109da97bdbd6f84
| 716
|
py
|
Python
|
nVision/tests/test_pca.py
|
OlivierRynne/nVision
|
b8c3280e1e1bfa5fbc88dfcea3e3d8453157b865
|
[
"BSD-2-Clause"
] | 1
|
2019-05-30T13:52:14.000Z
|
2019-05-30T13:52:14.000Z
|
nVision/tests/test_pca.py
|
OlivierRynne/nVision
|
b8c3280e1e1bfa5fbc88dfcea3e3d8453157b865
|
[
"BSD-2-Clause"
] | 1
|
2019-05-29T15:39:02.000Z
|
2019-05-29T15:39:02.000Z
|
nVision/tests/test_pca.py
|
OlivierRynne/nVision
|
b8c3280e1e1bfa5fbc88dfcea3e3d8453157b865
|
[
"BSD-2-Clause"
] | null | null | null |
import numpy as np
import sys
import pandas as pd
import numpy.testing as npt
from pandas.util.testing import assert_frame_equal
from nVision import pca
def test_pca_analysis():
    """Two antipodal 2-D points should project to +/-sqrt(2) on PC1."""
    data = pd.DataFrame(np.array([[2, 0], [0, 2]]))
    projected, pca_model, comps = pca.pca_analysis(data=data)
    want = np.array([[-np.sqrt(2), 0], [np.sqrt(2), 0]])
    npt.assert_almost_equal(want, projected)
def test_interaction_features():
    """interaction_features should append every pairwise product column."""
    frame = pd.DataFrame({'A': [1, 1, 1], 'B': [2, 2, 2], 'C': [3, 3, 3]})
    want = pd.DataFrame({'A': [1, 1, 1], 'B': [2, 2, 2], 'C': [3, 3, 3],
                         'A:A': [1, 1, 1], 'A:B': [2, 2, 2], 'A:C': [3, 3, 3],
                         'B:B': [4, 4, 4], 'B:C': [6, 6, 6], 'C:C': [9, 9, 9]})
    got = pca.interaction_features(frame)
    assert_frame_equal(want, got)
| 34.095238
| 150
| 0.638268
|
acfc3f58424f0a1f5675e9bf091d7cba287115ff
| 39,263
|
py
|
Python
|
venv/lib/python3.8/site-packages/flask_sqlalchemy/__init__.py
|
RonnyLincoln/Rome231-Blogs
|
1eb38b4e8184efe64d925091c760431444f5ac69
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/flask_sqlalchemy/__init__.py
|
RonnyLincoln/Rome231-Blogs
|
1eb38b4e8184efe64d925091c760431444f5ac69
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/flask_sqlalchemy/__init__.py
|
RonnyLincoln/Rome231-Blogs
|
1eb38b4e8184efe64d925091c760431444f5ac69
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import functools
import os
import sys
import time
import warnings
from math import ceil
from operator import itemgetter
from threading import Lock
import sqlalchemy
from flask import _app_ctx_stack, abort, current_app, request
from flask.signals import Namespace
from sqlalchemy import event, inspect, orm
from sqlalchemy.engine.url import make_url
from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base
from sqlalchemy.orm.exc import UnmappedClassError
from sqlalchemy.orm.session import Session as SessionBase
from flask_sqlalchemy.model import Model
from ._compat import itervalues, string_types, xrange
from .model import DefaultMeta
from . import utils
__version__ = "2.4.4"
# the best timer function for the platform
if sys.platform == 'win32':
if sys.version_info >= (3, 3):
_timer = time.perf_counter
else:
_timer = time.clock
else:
_timer = time.time
_signals = Namespace()
models_committed = _signals.signal('models-committed')
before_models_committed = _signals.signal('before-models-committed')
def _make_table(db):
    """Build the ``db.Table`` factory.

    The factory behaves like ``sqlalchemy.Table`` except that the metadata
    argument may be omitted (``db.metadata`` is spliced in when the second
    positional argument is already a Column) and every table is guaranteed a
    ``bind_key`` entry in its ``info`` dict.
    """
    def _make_table(*args, **kwargs):
        if len(args) > 1 and isinstance(args[1], db.Column):
            # No MetaData was passed: insert the shared db.metadata.
            args = (args[0], db.metadata) + args[1:]
        info = kwargs.pop('info', None)
        if not info:
            info = {}
        info.setdefault('bind_key', None)
        kwargs['info'] = info
        return sqlalchemy.Table(*args, **kwargs)
    return _make_table
def _set_default_query_class(d, cls):
if 'query_class' not in d:
d['query_class'] = cls
def _wrap_with_default_query_class(fn, cls):
    """Wrap ``fn`` (a relationship-style constructor) so that its
    ``query_class`` — and that of any ``backref`` — defaults to ``cls``."""
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        _set_default_query_class(kwargs, cls)
        try:
            backref = kwargs["backref"]
        except KeyError:
            pass
        else:
            if isinstance(backref, string_types):
                # A plain-string backref is normalised to (name, options).
                backref = (backref, {})
            _set_default_query_class(backref[1], cls)
        return fn(*args, **kwargs)
    return wrapped
def _include_sqlalchemy(obj, cls):
    """Mirror the public sqlalchemy and sqlalchemy.orm namespaces onto
    ``obj``, then install the wrapped Table/relationship helpers."""
    for module in (sqlalchemy, sqlalchemy.orm):
        for name in module.__all__:
            if hasattr(obj, name):
                continue
            setattr(obj, name, getattr(module, name))
    # Note: obj.Table does not attempt to be a SQLAlchemy Table class.
    obj.Table = _make_table(obj)
    obj.relationship = _wrap_with_default_query_class(obj.relationship, cls)
    obj.relation = _wrap_with_default_query_class(obj.relation, cls)
    obj.dynamic_loader = _wrap_with_default_query_class(obj.dynamic_loader, cls)
    obj.event = event
class _DebugQueryTuple(tuple):
statement = property(itemgetter(0))
parameters = property(itemgetter(1))
start_time = property(itemgetter(2))
end_time = property(itemgetter(3))
context = property(itemgetter(4))
@property
def duration(self):
return self.end_time - self.start_time
def __repr__(self):
return '<query statement="%s" parameters=%r duration=%.03f>' % (
self.statement,
self.parameters,
self.duration
)
def _calling_context(app_path):
frm = sys._getframe(1)
while frm.f_back is not None:
name = frm.f_globals.get('__name__')
if name and (name == app_path or name.startswith(app_path + '.')):
funcname = frm.f_code.co_name
return '%s:%s (%s)' % (
frm.f_code.co_filename,
frm.f_lineno,
funcname
)
frm = frm.f_back
return '<unknown>'
class SignallingSession(SessionBase):
    """The signalling session is the default session that Flask-SQLAlchemy
    uses. It extends the default session system with bind selection and
    modification tracking.
    If you want to use a different session you can override the
    :meth:`SQLAlchemy.create_session` function.
    .. versionadded:: 2.0
    .. versionadded:: 2.1
        The `binds` option was added, which allows a session to be joined
        to an external transaction.
    """
    def __init__(self, db, autocommit=False, autoflush=True, **options):
        #: The application that this session belongs to.
        self.app = app = db.get_app()
        track_modifications = app.config['SQLALCHEMY_TRACK_MODIFICATIONS']
        bind = options.pop('bind', None) or db.engine
        binds = options.pop('binds', db.get_binds(app))
        # Register change tracking unless it is explicitly disabled (False);
        # None (the default) still registers.
        if track_modifications is None or track_modifications:
            _SessionSignalEvents.register(self)
        SessionBase.__init__(
            self, autocommit=autocommit, autoflush=autoflush,
            bind=bind, binds=binds, **options
        )
    def get_bind(self, mapper=None, clause=None):
        """Return the engine or connection for a given model or
        table, using the ``__bind_key__`` if it is set.
        """
        # mapper is None if someone tries to just get a connection
        if mapper is not None:
            try:
                # SA >= 1.3
                persist_selectable = mapper.persist_selectable
            except AttributeError:
                # SA < 1.3
                persist_selectable = mapper.mapped_table
            info = getattr(persist_selectable, 'info', {})
            bind_key = info.get('bind_key')
            if bind_key is not None:
                # A bound table: resolve its engine through the extension.
                state = get_state(self.app)
                return state.db.get_engine(self.app, bind=bind_key)
        # No bind key: fall back to SQLAlchemy's default resolution.
        return SessionBase.get_bind(self, mapper, clause)
class _SessionSignalEvents(object):
    """Session event listeners that accumulate per-object changes and fire
    the ``before_models_committed`` / ``models_committed`` signals."""
    @classmethod
    def register(cls, session):
        # The per-session change buffer: {identity: (obj, operation)}.
        if not hasattr(session, '_model_changes'):
            session._model_changes = {}
        event.listen(session, 'before_flush', cls.record_ops)
        event.listen(session, 'before_commit', cls.record_ops)
        event.listen(session, 'before_commit', cls.before_commit)
        event.listen(session, 'after_commit', cls.after_commit)
        event.listen(session, 'after_rollback', cls.after_rollback)
    @classmethod
    def unregister(cls, session):
        if hasattr(session, '_model_changes'):
            del session._model_changes
        event.remove(session, 'before_flush', cls.record_ops)
        event.remove(session, 'before_commit', cls.record_ops)
        event.remove(session, 'before_commit', cls.before_commit)
        event.remove(session, 'after_commit', cls.after_commit)
        event.remove(session, 'after_rollback', cls.after_rollback)
    @staticmethod
    def record_ops(session, flush_context=None, instances=None):
        try:
            d = session._model_changes
        except AttributeError:
            return
        for targets, operation in ((session.new, 'insert'), (session.dirty, 'update'), (session.deleted, 'delete')):
            for target in targets:
                state = inspect(target)
                # Persistent objects are keyed by identity key; pending ones
                # (no identity yet) fall back to id().
                key = state.identity_key if state.has_identity else id(target)
                d[key] = (target, operation)
    @staticmethod
    def before_commit(session):
        try:
            d = session._model_changes
        except AttributeError:
            return
        if d:
            before_models_committed.send(session.app, changes=list(d.values()))
    @staticmethod
    def after_commit(session):
        try:
            d = session._model_changes
        except AttributeError:
            return
        if d:
            models_committed.send(session.app, changes=list(d.values()))
            # Changes were delivered; start a fresh buffer for the next txn.
            d.clear()
    @staticmethod
    def after_rollback(session):
        try:
            d = session._model_changes
        except AttributeError:
            return
        # Rolled-back changes never happened: discard them silently.
        d.clear()
class _EngineDebuggingSignalEvents(object):
    """Sets up handlers for two events that let us track the execution time of
    queries."""
    def __init__(self, engine, import_name):
        self.engine = engine
        # Root package name of the app, used to locate the calling frame.
        self.app_package = import_name
    def register(self):
        event.listen(
            self.engine, 'before_cursor_execute', self.before_cursor_execute
        )
        event.listen(
            self.engine, 'after_cursor_execute', self.after_cursor_execute
        )
    def before_cursor_execute(
        self, conn, cursor, statement, parameters, context, executemany
    ):
        # Only time queries issued inside an application context.
        if current_app:
            context._query_start_time = _timer()
    def after_cursor_execute(
        self, conn, cursor, statement, parameters, context, executemany
    ):
        if current_app:
            # Append a _DebugQueryTuple to the per-app-context query list
            # (created lazily on first use).
            try:
                queries = _app_ctx_stack.top.sqlalchemy_queries
            except AttributeError:
                queries = _app_ctx_stack.top.sqlalchemy_queries = []
            queries.append(_DebugQueryTuple((
                statement, parameters, context._query_start_time, _timer(),
                _calling_context(self.app_package)
            )))
def get_debug_queries():
    """Return the queries recorded for the current application context.

    Recording happens while the app is in debug mode, or when
    ``SQLALCHEMY_RECORD_QUERIES`` is enabled (it defaults on under
    ``TESTING``). Each entry is a named tuple exposing ``statement``,
    ``parameters``, ``start_time`` / ``end_time`` (platform-dependent timer
    values, only useful for sorting/comparing), ``duration`` in seconds and
    ``context`` — a rough "file:line (function)" hint of where in the
    application the query was issued.

    An empty list is returned when nothing was recorded (or outside an
    application context).
    """
    try:
        return _app_ctx_stack.top.sqlalchemy_queries
    except AttributeError:
        # No app context, or no query has been recorded on it yet.
        return []
class Pagination(object):
    """Internal helper class returned by :meth:`BaseQuery.paginate`. You
    can also construct it from any other SQLAlchemy query object if you are
    working with other libraries. Additionally it is possible to pass `None`
    as query object in which case the :meth:`prev` and :meth:`next` will
    no longer work.
    """
    def __init__(self, query, page, per_page, total, items):
        #: the unlimited query object that was used to create this
        #: pagination object.
        self.query = query
        #: the current page number (1 indexed)
        self.page = page
        #: the number of items to be displayed on a page.
        self.per_page = per_page
        #: the total number of items matching the query
        self.total = total
        #: the items for the current page
        self.items = items
    @property
    def pages(self):
        """The total number of pages"""
        # Guard against ZeroDivisionError when per_page is 0.
        if self.per_page == 0:
            pages = 0
        else:
            pages = int(ceil(self.total / float(self.per_page)))
        return pages
    def prev(self, error_out=False):
        """Returns a :class:`Pagination` object for the previous page."""
        assert self.query is not None, 'a query object is required ' \
                                       'for this method to work'
        return self.query.paginate(self.page - 1, self.per_page, error_out)
    @property
    def prev_num(self):
        """Number of the previous page."""
        if not self.has_prev:
            return None
        return self.page - 1
    @property
    def has_prev(self):
        """True if a previous page exists"""
        return self.page > 1
    def next(self, error_out=False):
        """Returns a :class:`Pagination` object for the next page."""
        assert self.query is not None, 'a query object is required ' \
                                       'for this method to work'
        return self.query.paginate(self.page + 1, self.per_page, error_out)
    @property
    def has_next(self):
        """True if a next page exists."""
        return self.page < self.pages
    @property
    def next_num(self):
        """Number of the next page"""
        if not self.has_next:
            return None
        return self.page + 1
    def iter_pages(self, left_edge=2, left_current=2,
                   right_current=5, right_edge=2):
        """Iterates over the page numbers in the pagination.  The four
        parameters control the thresholds how many numbers should be produced
        from the sides.  Skipped page numbers are represented as `None`.
        This is how you could render such a pagination in the templates:
        .. sourcecode:: html+jinja
            {% macro render_pagination(pagination, endpoint) %}
              <div class=pagination>
              {%- for page in pagination.iter_pages() %}
                {% if page %}
                  {% if page != pagination.page %}
                    <a href="{{ url_for(endpoint, page=page) }}">{{ page }}</a>
                  {% else %}
                    <strong>{{ page }}</strong>
                  {% endif %}
                {% else %}
                  <span class=ellipsis>…</span>
                {% endif %}
              {%- endfor %}
              </div>
            {% endmacro %}
        """
        last = 0
        for num in xrange(1, self.pages + 1):
            # Emit a page number when it falls in the left edge window, the
            # window around the current page, or the right edge window.
            if num <= left_edge or \
                    (num > self.page - left_current - 1 and
                     num < self.page + right_current) or \
                    num > self.pages - right_edge:
                if last + 1 != num:
                    # A gap in the emitted sequence: yield None as ellipsis.
                    yield None
                yield num
                last = num
class BaseQuery(orm.Query):
    """SQLAlchemy :class:`~sqlalchemy.orm.query.Query` subclass with convenience methods for querying in a web application.
    This is the default :attr:`~Model.query` object used for models, and exposed as :attr:`~SQLAlchemy.Query`.
    Override the query class for an individual model by subclassing this and setting :attr:`~Model.query_class`.
    """
    def get_or_404(self, ident, description=None):
        """Like :meth:`get` but aborts with 404 if not found instead of returning ``None``."""
        rv = self.get(ident)
        if rv is None:
            abort(404, description=description)
        return rv
    def first_or_404(self, description=None):
        """Like :meth:`first` but aborts with 404 if not found instead of returning ``None``."""
        rv = self.first()
        if rv is None:
            abort(404, description=description)
        return rv
    def paginate(self, page=None, per_page=None, error_out=True, max_per_page=None):
        """Returns ``per_page`` items from page ``page``.
        If ``page`` or ``per_page`` are ``None``, they will be retrieved from
        the request query. If ``max_per_page`` is specified, ``per_page`` will
        be limited to that value. If there is no request or they aren't in the
        query, they default to 1 and 20 respectively.
        When ``error_out`` is ``True`` (default), the following rules will
        cause a 404 response:
        * No items are found and ``page`` is not 1.
        * ``page`` is less than 1, or ``per_page`` is negative.
        * ``page`` or ``per_page`` are not ints.
        When ``error_out`` is ``False``, ``page`` and ``per_page`` default to
        1 and 20 respectively.
        Returns a :class:`Pagination` object.
        """
        # Resolve missing page/per_page from the request's query string when
        # running inside a request context; otherwise use the defaults.
        if request:
            if page is None:
                try:
                    page = int(request.args.get('page', 1))
                except (TypeError, ValueError):
                    if error_out:
                        abort(404)
                    page = 1
            if per_page is None:
                try:
                    per_page = int(request.args.get('per_page', 20))
                except (TypeError, ValueError):
                    if error_out:
                        abort(404)
                    per_page = 20
        else:
            if page is None:
                page = 1
            if per_page is None:
                per_page = 20
        if max_per_page is not None:
            per_page = min(per_page, max_per_page)
        # Out-of-range values 404 (when error_out) or snap to the defaults.
        if page < 1:
            if error_out:
                abort(404)
            else:
                page = 1
        if per_page < 0:
            if error_out:
                abort(404)
            else:
                per_page = 20
        items = self.limit(per_page).offset((page - 1) * per_page).all()
        if not items and page != 1 and error_out:
            abort(404)
        # Drop the ordering before counting for a cheaper COUNT query.
        total = self.order_by(None).count()
        return Pagination(self, page, per_page, total, items)
class _QueryProperty(object):
    """Descriptor that builds a query object bound to a model's mapper and
    the extension's current session (presumably installed as ``Model.query``
    by the SQLAlchemy class — defined further down this file)."""
    def __init__(self, sa):
        # Back-reference to the SQLAlchemy extension, for session access.
        self.sa = sa
    def __get__(self, obj, type):
        try:
            mapper = orm.class_mapper(type)
            if mapper:
                return type.query_class(mapper, session=self.sa.session())
        except UnmappedClassError:
            # Not a mapped class: no query available.
            return None
        # NOTE: falls through (returning None implicitly) when the mapper is
        # falsy as well.
def _record_queries(app):
if app.debug:
return True
rq = app.config['SQLALCHEMY_RECORD_QUERIES']
if rq is not None:
return rq
return bool(app.config.get('TESTING'))
class _EngineConnector(object):
    """Lazily creates and caches the engine for one (app, bind) pair.

    ``get_engine`` rebuilds the engine when the URI or echo setting changed
    since the last call; creation is serialised with a lock.
    """
    def __init__(self, sa, app, bind=None):
        self._sa = sa
        self._app = app
        self._engine = None
        # (uri, echo) the cached engine was built for; None until built.
        self._connected_for = None
        self._bind = bind
        self._lock = Lock()
    def get_uri(self):
        # The default bind uses the main database URI; named binds must be
        # declared in SQLALCHEMY_BINDS.
        if self._bind is None:
            return self._app.config['SQLALCHEMY_DATABASE_URI']
        binds = self._app.config.get('SQLALCHEMY_BINDS') or ()
        assert self._bind in binds, \
            'Bind %r is not specified. Set it in the SQLALCHEMY_BINDS ' \
            'configuration variable' % self._bind
        return binds[self._bind]
    def get_engine(self):
        with self._lock:
            uri = self.get_uri()
            echo = self._app.config['SQLALCHEMY_ECHO']
            # Reuse the cached engine while the config is unchanged.
            if (uri, echo) == self._connected_for:
                return self._engine
            sa_url = make_url(uri)
            options = self.get_options(sa_url, echo)
            self._engine = rv = self._sa.create_engine(sa_url, options)
            if _record_queries(self._app):
                _EngineDebuggingSignalEvents(self._engine,
                                             self._app.import_name).register()
            self._connected_for = (uri, echo)
            return rv
    def get_options(self, sa_url, echo):
        options = {}
        self._sa.apply_pool_defaults(self._app, options)
        self._sa.apply_driver_hacks(self._app, sa_url, options)
        if echo:
            options['echo'] = echo
        # Give the config options set by a developer explicitly priority
        # over decisions FSA makes.
        options.update(self._app.config['SQLALCHEMY_ENGINE_OPTIONS'])
        # Give options set in SQLAlchemy.__init__() ultimate priority
        options.update(self._sa._engine_options)
        return options
def get_state(app):
    """Gets the state for the application.

    :param app: a Flask application that has had ``init_app`` called on it.
    :return: the :class:`_SQLAlchemyState` stored in ``app.extensions``.
    """
    # NOTE: this assert is stripped under ``python -O``; callers are expected
    # to have called init_app() before reaching this point.
    assert 'sqlalchemy' in app.extensions, \
        'The sqlalchemy extension was not registered to the current ' \
        'application. Please make sure to call init_app() first.'
    return app.extensions['sqlalchemy']
class _SQLAlchemyState(object):
    """Remembers configuration for the (db, app) tuple."""
    def __init__(self, db):
        # The SQLAlchemy extension instance this state belongs to.
        self.db = db
        # Maps bind key -> _EngineConnector for engines created for this app.
        self.connectors = {}
class SQLAlchemy(object):
    """This class is used to control the SQLAlchemy integration to one
    or more Flask applications.  Depending on how you initialize the
    object it is usable right away or will attach as needed to a
    Flask application.

    There are two usage modes which work very similarly.  One is binding
    the instance to a very specific Flask application::

        app = Flask(__name__)
        db = SQLAlchemy(app)

    The second possibility is to create the object once and configure the
    application later to support it::

        db = SQLAlchemy()

        def create_app():
            app = Flask(__name__)
            db.init_app(app)
            return app

    The difference between the two is that in the first case methods like
    :meth:`create_all` and :meth:`drop_all` will work all the time but in
    the second case a :meth:`flask.Flask.app_context` has to exist.

    By default Flask-SQLAlchemy will apply some backend-specific settings
    to improve your experience with them.

    As of SQLAlchemy 0.6 SQLAlchemy
    will probe the library for native unicode support.  If it detects
    unicode it will let the library handle that, otherwise do that itself.
    Sometimes this detection can fail in which case you might want to set
    ``use_native_unicode`` (or the ``SQLALCHEMY_NATIVE_UNICODE`` configuration
    key) to ``False``.  Note that the configuration key overrides the
    value you pass to the constructor.  Direct support for ``use_native_unicode``
    and SQLALCHEMY_NATIVE_UNICODE are deprecated as of v2.4 and will be removed
    in v3.0.  ``engine_options`` and ``SQLALCHEMY_ENGINE_OPTIONS`` may be used
    instead.

    This class also provides access to all the SQLAlchemy functions and classes
    from the :mod:`sqlalchemy` and :mod:`sqlalchemy.orm` modules.  So you can
    declare models like this::

        class User(db.Model):
            username = db.Column(db.String(80), unique=True)
            pw_hash = db.Column(db.String(80))

    You can still use :mod:`sqlalchemy` and :mod:`sqlalchemy.orm` directly, but
    note that Flask-SQLAlchemy customizations are available only through an
    instance of this :class:`SQLAlchemy` class.  Query classes default to
    :class:`BaseQuery` for `db.Query`, `db.Model.query_class`, and the default
    query_class for `db.relationship` and `db.backref`.  If you use these
    interfaces through :mod:`sqlalchemy` and :mod:`sqlalchemy.orm` directly,
    the default query class will be that of :mod:`sqlalchemy`.

    .. admonition:: Check types carefully

       Don't perform type or `isinstance` checks against `db.Table`, which
       emulates `Table` behavior but is not a class. `db.Table` exposes the
       `Table` interface, but is a function which allows omission of metadata.

    The ``session_options`` parameter, if provided, is a dict of parameters
    to be passed to the session constructor.  See
    :class:`~sqlalchemy.orm.session.Session` for the standard options.

    The ``engine_options`` parameter, if provided, is a dict of parameters
    to be passed to create engine.  See :func:`~sqlalchemy.create_engine`
    for the standard options.  The values given here will be merged with and
    override anything set in the ``'SQLALCHEMY_ENGINE_OPTIONS'`` config
    variable or othewise set by this library.

    .. versionadded:: 0.10
       The `session_options` parameter was added.

    .. versionadded:: 0.16
       `scopefunc` is now accepted on `session_options`. It allows specifying
       a custom function which will define the SQLAlchemy session's scoping.

    .. versionadded:: 2.1
       The `metadata` parameter was added. This allows for setting custom
       naming conventions among other, non-trivial things.

       The `query_class` parameter was added, to allow customisation
       of the query class, in place of the default of :class:`BaseQuery`.

       The `model_class` parameter was added, which allows a custom model
       class to be used in place of :class:`Model`.

    .. versionchanged:: 2.1
       Utilise the same query class across `session`, `Model.query` and `Query`.

    .. versionadded:: 2.4
       The `engine_options` parameter was added.

    .. versionchanged:: 2.4
       The `use_native_unicode` parameter was deprecated.

    .. versionchanged:: 2.4.3
        ``COMMIT_ON_TEARDOWN`` is deprecated and will be removed in
        version 3.1. Call ``db.session.commit()`` directly instead.
    """
    #: Default query class used by :attr:`Model.query` and other queries.
    #: Customize this by passing ``query_class`` to :func:`SQLAlchemy`.
    #: Defaults to :class:`BaseQuery`.
    Query = None
    def __init__(self, app=None, use_native_unicode=True, session_options=None,
                 metadata=None, query_class=BaseQuery, model_class=Model,
                 engine_options=None):
        self.use_native_unicode = use_native_unicode
        self.Query = query_class
        # The scoped session must exist before the declarative base, since
        # Model.query resolves through it.
        self.session = self.create_scoped_session(session_options)
        self.Model = self.make_declarative_base(model_class, metadata)
        self._engine_lock = Lock()
        self.app = app
        self._engine_options = engine_options or {}
        # Copy sqlalchemy/sqlalchemy.orm names onto this instance (db.Column,
        # db.relationship, ...).
        _include_sqlalchemy(self, query_class)
        if app is not None:
            self.init_app(app)
    @property
    def metadata(self):
        """The metadata associated with ``db.Model``."""
        return self.Model.metadata
    def create_scoped_session(self, options=None):
        """Create a :class:`~sqlalchemy.orm.scoping.scoped_session`
        on the factory from :meth:`create_session`.

        An extra key ``'scopefunc'`` can be set on the ``options`` dict to
        specify a custom scope function.  If it's not provided, Flask's app
        context stack identity is used. This will ensure that sessions are
        created and removed with the request/response cycle, and should be fine
        in most cases.

        :param options: dict of keyword arguments passed to session class  in
            ``create_session``
        """
        if options is None:
            options = {}
        # Default scope: one session per app-context (request) identity.
        scopefunc = options.pop('scopefunc', _app_ctx_stack.__ident_func__)
        options.setdefault('query_cls', self.Query)
        return orm.scoped_session(
            self.create_session(options), scopefunc=scopefunc
        )
    def create_session(self, options):
        """Create the session factory used by :meth:`create_scoped_session`.

        The factory **must** return an object that SQLAlchemy recognizes as a session,
        or registering session events may raise an exception.

        Valid factories include a :class:`~sqlalchemy.orm.session.Session`
        class or a :class:`~sqlalchemy.orm.session.sessionmaker`.

        The default implementation creates a ``sessionmaker`` for
        :class:`SignallingSession`.

        :param options: dict of keyword arguments passed to session class
        """
        return orm.sessionmaker(class_=SignallingSession, db=self, **options)
    def make_declarative_base(self, model, metadata=None):
        """Creates the declarative base that all models will inherit from.

        :param model: base model class (or a tuple of base classes) to pass
            to :func:`~sqlalchemy.ext.declarative.declarative_base`. Or a class
            returned from ``declarative_base``, in which case a new base class
            is not created.
        :param metadata: :class:`~sqlalchemy.MetaData` instance to use, or
            none to use SQLAlchemy's default.

        .. versionchanged 2.3.0::
            ``model`` can be an existing declarative base in order to support
            complex customization such as changing the metaclass.
        """
        if not isinstance(model, DeclarativeMeta):
            model = declarative_base(
                cls=model,
                name='Model',
                metadata=metadata,
                metaclass=DefaultMeta
            )
        # if user passed in a declarative base and a metaclass for some reason,
        # make sure the base uses the metaclass
        if metadata is not None and model.metadata is not metadata:
            model.metadata = metadata
        if not getattr(model, 'query_class', None):
            model.query_class = self.Query
        # Model.query becomes a live query bound to the scoped session.
        model.query = _QueryProperty(self)
        return model
    def init_app(self, app):
        """This callback can be used to initialize an application for the
        use with this database setup.  Never use a database in the context
        of an application not initialized that way or connections will
        leak.
        """
        if (
            'SQLALCHEMY_DATABASE_URI' not in app.config and
            'SQLALCHEMY_BINDS' not in app.config
        ):
            warnings.warn(
                'Neither SQLALCHEMY_DATABASE_URI nor SQLALCHEMY_BINDS is set. '
                'Defaulting SQLALCHEMY_DATABASE_URI to "sqlite:///:memory:".'
            )
        # Fill in defaults for every config key this extension reads.
        app.config.setdefault('SQLALCHEMY_DATABASE_URI', 'sqlite:///:memory:')
        app.config.setdefault('SQLALCHEMY_BINDS', None)
        app.config.setdefault('SQLALCHEMY_NATIVE_UNICODE', None)
        app.config.setdefault('SQLALCHEMY_ECHO', False)
        app.config.setdefault('SQLALCHEMY_RECORD_QUERIES', None)
        app.config.setdefault('SQLALCHEMY_POOL_SIZE', None)
        app.config.setdefault('SQLALCHEMY_POOL_TIMEOUT', None)
        app.config.setdefault('SQLALCHEMY_POOL_RECYCLE', None)
        app.config.setdefault('SQLALCHEMY_MAX_OVERFLOW', None)
        app.config.setdefault('SQLALCHEMY_COMMIT_ON_TEARDOWN', False)
        track_modifications = app.config.setdefault(
            'SQLALCHEMY_TRACK_MODIFICATIONS', True
        )
        app.config.setdefault('SQLALCHEMY_ENGINE_OPTIONS', {})
        if track_modifications is None:
            warnings.warn(FSADeprecationWarning(
                'SQLALCHEMY_TRACK_MODIFICATIONS adds significant overhead and '
                'will be disabled by default in the future.  Set it to True '
                'or False to suppress this warning.'
            ))
        # Deprecation warnings for config keys that should be replaced by SQLALCHEMY_ENGINE_OPTIONS.
        utils.engine_config_warning(app.config, '3.0', 'SQLALCHEMY_POOL_SIZE', 'pool_size')
        utils.engine_config_warning(app.config, '3.0', 'SQLALCHEMY_POOL_TIMEOUT', 'pool_timeout')
        utils.engine_config_warning(app.config, '3.0', 'SQLALCHEMY_POOL_RECYCLE', 'pool_recycle')
        utils.engine_config_warning(app.config, '3.0', 'SQLALCHEMY_MAX_OVERFLOW', 'max_overflow')
        app.extensions['sqlalchemy'] = _SQLAlchemyState(self)
        @app.teardown_appcontext
        def shutdown_session(response_or_exc):
            # Runs at the end of every app context: optionally commit
            # (deprecated), then return the session to the pool.
            if app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN']:
                warnings.warn(
                    "'COMMIT_ON_TEARDOWN' is deprecated and will be"
                    " removed in version 3.1. Call"
                    " 'db.session.commit()'` directly instead.",
                    DeprecationWarning,
                )
                if response_or_exc is None:
                    self.session.commit()
            self.session.remove()
            return response_or_exc
    def apply_pool_defaults(self, app, options):
        """Copy the deprecated ``SQLALCHEMY_POOL_*`` config keys into the
        engine ``options`` dict (only when they are set)."""
        def _setdefault(optionkey, configkey):
            value = app.config[configkey]
            if value is not None:
                options[optionkey] = value
        _setdefault('pool_size', 'SQLALCHEMY_POOL_SIZE')
        _setdefault('pool_timeout', 'SQLALCHEMY_POOL_TIMEOUT')
        _setdefault('pool_recycle', 'SQLALCHEMY_POOL_RECYCLE')
        _setdefault('max_overflow', 'SQLALCHEMY_MAX_OVERFLOW')
    def apply_driver_hacks(self, app, sa_url, options):
        """This method is called before engine creation and used to inject
        driver specific hacks into the options.  The `options` parameter is
        a dictionary of keyword arguments that will then be used to call
        the :func:`sqlalchemy.create_engine` function.

        The default implementation provides some saner defaults for things
        like pool sizes for MySQL and sqlite.  Also it injects the setting of
        `SQLALCHEMY_NATIVE_UNICODE`.
        """
        if sa_url.drivername.startswith('mysql'):
            # MySQL needs an explicit charset and a pool that recycles
            # connections before the server-side timeout drops them.
            sa_url.query.setdefault('charset', 'utf8')
            if sa_url.drivername != 'mysql+gaerdbms':
                options.setdefault('pool_size', 10)
                options.setdefault('pool_recycle', 7200)
        elif sa_url.drivername == 'sqlite':
            pool_size = options.get('pool_size')
            detected_in_memory = False
            if sa_url.database in (None, '', ':memory:'):
                # In-memory sqlite must keep a single shared connection or
                # every checkout would see an empty database.
                detected_in_memory = True
                from sqlalchemy.pool import StaticPool
                options['poolclass'] = StaticPool
                if 'connect_args' not in options:
                    options['connect_args'] = {}
                options['connect_args']['check_same_thread'] = False
                # we go to memory and the pool size was explicitly set
                # to 0 which is fail.  Let the user know that
                if pool_size == 0:
                    raise RuntimeError('SQLite in memory database with an '
                                       'empty queue not possible due to data '
                                       'loss.')
            # if pool size is None or explicitly set to 0 we assume the
            # user did not want a queue for this sqlite connection and
            # hook in the null pool.
            elif not pool_size:
                from sqlalchemy.pool import NullPool
                options['poolclass'] = NullPool
            # if it's not an in memory database we make the path absolute.
            if not detected_in_memory:
                sa_url.database = os.path.join(app.root_path, sa_url.database)
        unu = app.config['SQLALCHEMY_NATIVE_UNICODE']
        if unu is None:
            unu = self.use_native_unicode
        if not unu:
            options['use_native_unicode'] = False
        # Both native-unicode knobs are deprecated; warn when either is used.
        if app.config['SQLALCHEMY_NATIVE_UNICODE'] is not None:
            warnings.warn(
                "The 'SQLALCHEMY_NATIVE_UNICODE' config option is deprecated and will be removed in"
                " v3.0.  Use 'SQLALCHEMY_ENGINE_OPTIONS' instead.",
                DeprecationWarning
            )
        if not self.use_native_unicode:
            warnings.warn(
                "'use_native_unicode' is deprecated and will be removed in v3.0."
                "  Use the 'engine_options' parameter instead.",
                DeprecationWarning
            )
    @property
    def engine(self):
        """Gives access to the engine.  If the database configuration is bound
        to a specific application (initialized with an application) this will
        always return a database connection.  If however the current application
        is used this might raise a :exc:`RuntimeError` if no application is
        active at the moment.
        """
        return self.get_engine()
    def make_connector(self, app=None, bind=None):
        """Creates the connector for a given state and bind."""
        return _EngineConnector(self, self.get_app(app), bind)
    def get_engine(self, app=None, bind=None):
        """Returns a specific engine."""
        app = self.get_app(app)
        state = get_state(app)
        with self._engine_lock:
            # One cached connector per bind key, stored on the app's state.
            connector = state.connectors.get(bind)
            if connector is None:
                connector = self.make_connector(app, bind)
                state.connectors[bind] = connector
            return connector.get_engine()
    def create_engine(self, sa_url, engine_opts):
        """
        Override this method to have final say over how the SQLAlchemy engine
        is created.

        In most cases, you will want to use ``'SQLALCHEMY_ENGINE_OPTIONS'``
        config variable or set ``engine_options`` for :func:`SQLAlchemy`.
        """
        return sqlalchemy.create_engine(sa_url, **engine_opts)
    def get_app(self, reference_app=None):
        """Helper method that implements the logic to look up an
        application."""
        # Resolution order: explicit argument, current app context, bound app.
        if reference_app is not None:
            return reference_app
        if current_app:
            return current_app._get_current_object()
        if self.app is not None:
            return self.app
        raise RuntimeError(
            'No application found. Either work inside a view function or push'
            ' an application context. See'
            ' http://flask-sqlalchemy.pocoo.org/contexts/.'
        )
    def get_tables_for_bind(self, bind=None):
        """Returns a list of all tables relevant for a bind."""
        result = []
        for table in itervalues(self.Model.metadata.tables):
            # A table belongs to a bind when its info dict carries a matching
            # 'bind_key' (None means the default bind).
            if table.info.get('bind_key') == bind:
                result.append(table)
        return result
    def get_binds(self, app=None):
        """Returns a dictionary with a table->engine mapping.

        This is suitable for use of sessionmaker(binds=db.get_binds(app)).
        """
        app = self.get_app(app)
        binds = [None] + list(app.config.get('SQLALCHEMY_BINDS') or ())
        retval = {}
        for bind in binds:
            engine = self.get_engine(app, bind)
            tables = self.get_tables_for_bind(bind)
            retval.update(dict((table, engine) for table in tables))
        return retval
    def _execute_for_all_tables(self, app, bind, operation, skip_tables=False):
        # Shared driver for create_all/drop_all/reflect.  ``bind`` may be
        # '__all__' (every configured bind), a single key/None, or a list.
        app = self.get_app(app)
        if bind == '__all__':
            binds = [None] + list(app.config.get('SQLALCHEMY_BINDS') or ())
        elif isinstance(bind, string_types) or bind is None:
            binds = [bind]
        else:
            binds = bind
        for bind in binds:
            extra = {}
            if not skip_tables:
                tables = self.get_tables_for_bind(bind)
                extra['tables'] = tables
            op = getattr(self.Model.metadata, operation)
            op(bind=self.get_engine(app, bind), **extra)
    def create_all(self, bind='__all__', app=None):
        """Creates all tables.

        .. versionchanged:: 0.12
           Parameters were added
        """
        self._execute_for_all_tables(app, bind, 'create_all')
    def drop_all(self, bind='__all__', app=None):
        """Drops all tables.

        .. versionchanged:: 0.12
           Parameters were added
        """
        self._execute_for_all_tables(app, bind, 'drop_all')
    def reflect(self, bind='__all__', app=None):
        """Reflects tables from the database.

        .. versionchanged:: 0.12
           Parameters were added
        """
        self._execute_for_all_tables(app, bind, 'reflect', skip_tables=True)
    def __repr__(self):
        # Only touch self.engine when an app is available; otherwise the
        # engine property would raise RuntimeError.
        return '<%s engine=%r>' % (
            self.__class__.__name__,
            self.engine.url if self.app or current_app else None
        )
class _BoundDeclarativeMeta(DefaultMeta):
    """Deprecated alias kept for backwards compatibility; use ``DefaultMeta``."""
    def __init__(cls, name, bases, d):
        warnings.warn(FSADeprecationWarning(
            '"_BoundDeclarativeMeta" has been renamed to "DefaultMeta". The'
            ' old name will be removed in 3.0.'
        ), stacklevel=3)
        super(_BoundDeclarativeMeta, cls).__init__(name, bases, d)
class FSADeprecationWarning(DeprecationWarning):
    """Warning category for Flask-SQLAlchemy's own deprecations."""
    pass
# Always surface our deprecation warnings, even though DeprecationWarning is
# normally hidden by default.
warnings.simplefilter('always', FSADeprecationWarning)
| 36.422078
| 123
| 0.622342
|
acfc403920d343838e4f54552dc898502c747638
| 519
|
py
|
Python
|
src/tandlr/scheduled_classes/migrations/0012_auto_20160805_1954.py
|
shrmoud/schoolapp
|
7349ce18f56658d67daedf5e1abb352b5c15a029
|
[
"Apache-2.0"
] | null | null | null |
src/tandlr/scheduled_classes/migrations/0012_auto_20160805_1954.py
|
shrmoud/schoolapp
|
7349ce18f56658d67daedf5e1abb352b5c15a029
|
[
"Apache-2.0"
] | null | null | null |
src/tandlr/scheduled_classes/migrations/0012_auto_20160805_1954.py
|
shrmoud/schoolapp
|
7349ce18f56658d67daedf5e1abb352b5c15a029
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (2016-08-05) redefining
    ``ClassBill.promo_code`` to the ForeignKey declaration below
    (nullable, blank, related_name ``class_bills``)."""
    dependencies = [
        ('scheduled_classes', '0011_auto_20160804_1801'),
    ]
    operations = [
        migrations.AlterField(
            model_name='classbill',
            name='promo_code',
            field=models.ForeignKey(related_name='class_bills', verbose_name=b'promotion code', blank=True, to='promotions.PromotionCode', null=True),
        ),
    ]
| 25.95
| 150
| 0.655106
|
acfc4097ab0502a45e775d4a986189adfcfb55a2
| 4,892
|
py
|
Python
|
api-client/python-client-lambda/assume-role.py
|
kyhau/aws-cross-account-private-api-demo
|
cb10fcef138a8b11c9180288952425e794007a07
|
[
"MIT-0"
] | 14
|
2020-12-03T01:34:42.000Z
|
2022-03-22T13:27:55.000Z
|
api-client/python-client-lambda/assume-role.py
|
kyhau/aws-cross-account-private-api-demo
|
cb10fcef138a8b11c9180288952425e794007a07
|
[
"MIT-0"
] | null | null | null |
api-client/python-client-lambda/assume-role.py
|
kyhau/aws-cross-account-private-api-demo
|
cb10fcef138a8b11c9180288952425e794007a07
|
[
"MIT-0"
] | 4
|
2020-12-06T01:51:28.000Z
|
2021-11-19T13:58:53.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import json
import boto3
import re
from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest
import requests
from botocore.exceptions import ClientError
from boto3 import session
from aws_lambda_powertools import Logger
logger = Logger(service="python-api-client")
# Required runtime configuration, injected via Lambda environment variables.
AWS_REGION = os.environ['AWS_REGION']
API_HOST = os.environ['API_HOST']
API_PREFIX = os.environ['API_PREFIX']
VPCE_DNS_NAMES = os.environ['VPCE_DNS_NAMES']
API_TIMEOUT = float(os.environ['API_TIMEOUT'])
PRIVATE_DNS_ENABLED = os.environ['PRIVATE_DNS_ENABLED']
API_URL = "https://" + API_HOST + API_PREFIX
if PRIVATE_DNS_ENABLED.lower() == 'false':
    # Use the appropriate VPC endpoint DNS name instead.
    # Look for the endpoint name that is not AZ specific. The list is not
    # ordered so we need to search through each name.
    # Each entry looks like "<dist>:<dns-name>"; only the DNS name is used.
    for endpoint in VPCE_DNS_NAMES.split(','):
        dist,name = endpoint.split(':', maxsplit=1)
        if re.match(r'^vpce-[a-z0-9]+-[a-z0-9]+\.execute-api\.', name):
            VPCE_DNS = name
            break
    # NOTE(review): if no entry matches the pattern, VPCE_DNS is never bound
    # and the next line raises NameError at import time — confirm
    # VPCE_DNS_NAMES always contains a non-AZ-specific endpoint name.
    API_URL = "https://" + VPCE_DNS + API_PREFIX
sts_client = boto3.client('sts')
def lambda_handler(event, context):
    """Lambda entry point: sign a GET request with SigV4 and call the API.

    :param event: Lambda invocation event (unused).
    :param context: Lambda context object (unused).
    :return: dict with ``statusCode`` and ``body`` mirroring the API response.
    """
    # Initialise a request object to use for signing.
    # Make sure we're targetting the right API gateway host in the HTTP header,
    # especially required if the VPC endpoint DNS name is being used.
    logger.info("initialising API request to %s (host %s)", API_URL, API_HOST)
    request = AWSRequest(method="GET", url=API_URL, headers={'host':API_HOST})
    # Obtain credentials and use them to sign the request
    credentials = get_api_credentials()
    sigv4 = SigV4Auth(credentials, 'execute-api', AWS_REGION)
    sigv4.add_auth(request)
    # Freeze the signed request, then send it with the signed headers intact.
    prepreq = request.prepare()
    logger.info("making request to url %s", prepreq.url)
    response = requests.get(prepreq.url, headers=prepreq.headers, timeout=API_TIMEOUT)
    logger.info("response code: %d", response.status_code)
    logger.info("response text: %s", response.text)
    return {'statusCode': response.status_code, 'body': response.text}
def get_api_credentials():
    """Return botocore credentials for signing the API request.

    If the ``ROLE_TO_ASSUME`` environment variable holds a valid IAM role
    ARN, assume that role via STS (15 minute session) and return the
    temporary credentials; otherwise fall back to the current session's
    (Lambda execution role's) credentials.

    :return: a botocore credentials object usable with SigV4Auth.
    :raises botocore.exceptions.ClientError: if the STS assume_role call fails.
    """
    # Retrieve credentials based on the given role or from the current session
    # if there is no role_to_assume environment variable.
    try:
        role_arn = None
        sess = session.Session()
        if 'ROLE_TO_ASSUME' in os.environ:
            # Check the supplied role is a valid ARN
            if re.match(r'^arn:aws:iam:.*?:\d{12}:role\/.*', os.environ['ROLE_TO_ASSUME']):
                role_arn = os.environ['ROLE_TO_ASSUME']
        if role_arn:
            # Assume role in another account
            # Environment variable AWS_STS_REGIONAL_ENDPOINTS should be set to "regional"
            # to use the STS private endpoint. This is done via the Lambda function
            # environment configuration.
            logger.info('assuming role {}'.format(role_arn))
            role_object = sts_client.assume_role(
                RoleArn=role_arn,
                RoleSessionName="APIClient-Session",
                DurationSeconds=900
            )
            # Establish a boto3 session with the temporary credentials
            creds = role_object['Credentials']
            sess = boto3.Session(
                aws_access_key_id=creds['AccessKeyId'],
                aws_secret_access_key=creds['SecretAccessKey'],
                aws_session_token=creds['SessionToken']
            )
        else:
            # No role given, use the current session privileges.
            # Fix: Logger.warn() is the deprecated alias of Logger.warning().
            logger.warning('valid assume role ARN not supplied, using current session')
        credentials = sess.get_credentials()
        return credentials
    except ClientError as e:
        logger.error('STS assume_role failed: {}: {}'.format(type(e), e))
        raise
| 44.880734
| 92
| 0.679068
|
acfc40b4a3bbca0d4dead3a9d16b11735f41bf6d
| 1,111
|
py
|
Python
|
client/setup.py
|
HFragnaud/neural-activity-resource
|
c2829eede347d3f21eca6f0dd089fbee69e89789
|
[
"Apache-2.0"
] | null | null | null |
client/setup.py
|
HFragnaud/neural-activity-resource
|
c2829eede347d3f21eca6f0dd089fbee69e89789
|
[
"Apache-2.0"
] | null | null | null |
client/setup.py
|
HFragnaud/neural-activity-resource
|
c2829eede347d3f21eca6f0dd089fbee69e89789
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
from codecs import open
from os import path
# Directory containing this setup.py; used to locate README.md reliably
# regardless of the current working directory.
here = path.abspath(path.dirname(__file__))
# Use the README as the long description shown on package indexes.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='neural-activity-resource',
    version='0.1.0',
    description='Python API for the Human Brain Project Neural Activity Resource',
    long_description=long_description,
    url='https://github.com/apdavison/neural-activity-resource',
    author='Andrew P. Davison',
    author_email='andrew.davison@unic.cnrs-gif.fr',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='nar hbp metadata electrophysiology nexus shacl',
    packages=find_packages(),
    #install_requires=['pyxus']
)
| 33.666667
| 82
| 0.669667
|
acfc40f1571a2d6ee8938594ee8224c17eb608ff
| 5,147
|
py
|
Python
|
flaskr/blog.py
|
roblee357/Clarios_mold_OCR
|
3ef34bc4d81392bf5907fd6ea0d4236fa2197867
|
[
"MIT"
] | null | null | null |
flaskr/blog.py
|
roblee357/Clarios_mold_OCR
|
3ef34bc4d81392bf5907fd6ea0d4236fa2197867
|
[
"MIT"
] | null | null | null |
flaskr/blog.py
|
roblee357/Clarios_mold_OCR
|
3ef34bc4d81392bf5907fd6ea0d4236fa2197867
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
from flask import flash
from flask import g
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from flask import Response
from flask import send_from_directory
from werkzeug.exceptions import abort
from flaskr.auth import login_required
from flaskr.db import get_db
from datetime import datetime
import base64, json
from io import BytesIO
import os
from PIL import Image
import re
from io import StringIO
import pyautogui
bp = Blueprint("blog", __name__, static_folder='static')
basedir = os.path.abspath(os.path.dirname(__file__))
@bp.route("/")
def index():
    """Show all the posts, most recent first."""
    db = get_db()
    # Join with user so each row also carries the author's username.
    posts = db.execute(
        "SELECT p.id, title, body, created, author_id, username"
        " FROM post p JOIN user u ON p.author_id = u.id"
        " ORDER BY created DESC"
    ).fetchall()
    return render_template("blog/index.html", posts=posts)
# @bp.after_request
# def add_header(r):
# """
# Add headers to both force latest IE rendering engine or Chrome Frame,
# and also to cache the rendered page for 10 minutes.
# """
# r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
# r.headers["Pragma"] = "no-cache"
# r.headers["Expires"] = "0"
# r.headers['Cache-Control'] = 'public, max-age=0'
# return r
def get_post(id, check_author=True):
    """Get a post and its author by id.

    Checks that the id exists and optionally that the current user is
    the author.

    :param id: id of post to get
    :param check_author: require the current user to be the author
    :return: the post with author information
    :raise 404: if a post with the given id doesn't exist
    :raise 403: if the current user isn't the author
    """
    post = (
        get_db()
        .execute(
            "SELECT p.id, title, body, created, author_id, username"
            " FROM post p JOIN user u ON p.author_id = u.id"
            " WHERE p.id = ?",
            (id,),
        )
        .fetchone()
    )
    if post is None:
        abort(404, "Post id {0} doesn't exist.".format(id))
    # Only the author may modify a post; g.user is set by the auth blueprint.
    if check_author and post["author_id"] != g.user["id"]:
        abort(403)
    return post
@bp.route('/uploadImage/output.png', methods=['GET', 'POST'])
def parse_request():
    """Decode a JSON-posted base64 data-URL image and save it as a
    timestamped PNG under images/.

    Expects a body like ``{"image": "data:image/png;base64,..."}``.
    """
    now = datetime.now()
    ts = now.strftime("%Y_%m_%d___%H_%M_%S")
    data = json.loads(request.data)["image"]
    # Strip the "data:image/png;base64," data-URL prefix before decoding.
    data = data.split('base64,')[1]
    im = Image.open(BytesIO(base64.b64decode(data)))
    im.save(basedir + '/images/' + ts +'.png', 'PNG')
    # need posted data here
    # pyautogui.press('enter')
    return Response("{'a':'b'}", status=200, mimetype='application/json')
@bp.route('/uploadImage/live.png', methods=['GET', 'POST'])
def parse_request_live():
    """Decode a JSON-posted base64 data-URL image and overwrite
    images/live.png so the latest frame is always at a fixed path.

    Expects a body like ``{"image": "data:image/png;base64,..."}``.
    """
    data = json.loads(request.data)["image"]
    # Strip the "data:image/png;base64," data-URL prefix before decoding.
    data = data.split('base64,')[1]
    im = Image.open(BytesIO(base64.b64decode(data)))
    im.save(basedir + '/images/live.png', 'PNG')
    # Fix: the original assigned ``request = None`` here, which made
    # ``request`` a local name for the whole function and raised
    # UnboundLocalError on the first line before anything ran.  The flask
    # request proxy must not be rebound; the assignment is removed.
    return Response("{'a':'b'}", status=200, mimetype='application/json')
# @bp.route('/static/manifest.json')
# def send_manafest():
# return send_from_directory('js', '/static/manifest.json')
@bp.route('/<path:filename>')
def send_file(filename):
    """Serve ``filename`` from the blueprint's static folder.

    Acts as a catch-all route for static assets (manifest, JS, images).
    """
    return send_from_directory(bp.static_folder, filename)
@bp.route("/create", methods=("GET", "POST"))
@login_required
def create():
    """Create a new post for the current user.

    GET renders the form; POST validates the title, inserts the post for
    ``g.user`` and redirects to the index on success.
    """
    if request.method == "POST":
        title = request.form["title"]
        body = request.form["body"]
        error = None
        if not title:
            error = "Title is required."
        if error is not None:
            # Re-render the form with the validation message flashed.
            flash(error)
        else:
            db = get_db()
            db.execute(
                "INSERT INTO post (title, body, author_id) VALUES (?, ?, ?)",
                (title, body, g.user["id"]),
            )
            db.commit()
            return redirect(url_for("blog.index"))
    return render_template("blog/create.html")
@bp.route("/<int:id>/update", methods=("GET", "POST"))
@login_required
def update(id):
    """Update a post if the current user is the author.

    ``get_post`` enforces existence (404) and authorship (403) before the
    form is processed.
    """
    post = get_post(id)
    if request.method == "POST":
        title = request.form["title"]
        body = request.form["body"]
        error = None
        if not title:
            error = "Title is required."
        if error is not None:
            flash(error)
        else:
            db = get_db()
            db.execute(
                "UPDATE post SET title = ?, body = ? WHERE id = ?", (title, body, id)
            )
            db.commit()
            return redirect(url_for("blog.index"))
    return render_template("blog/update.html", post=post)
@bp.route("/<int:id>/delete", methods=("POST",))
@login_required
def delete(id):
    """Delete a post.

    Ensures that the post exists and that the logged in user is the
    author of the post.
    """
    # get_post aborts with 404/403 if the post is missing or not ours.
    get_post(id)
    db = get_db()
    db.execute("DELETE FROM post WHERE id = ?", (id,))
    db.commit()
    return redirect(url_for("blog.index"))
| 29.079096
| 85
| 0.613561
|
acfc419e02aac0dccc7521910278f9c9224ce24c
| 4,475
|
py
|
Python
|
bitglitter/config/palettemodels.py
|
MarkMichon1/BitGlitter-Python
|
0dcd5ec420d42f911189a5968a20ef4bc8381e60
|
[
"MIT"
] | 11
|
2021-07-30T14:35:57.000Z
|
2021-12-30T10:27:18.000Z
|
bitglitter/config/palettemodels.py
|
MarkMichon1/BitGlitter-Python
|
0dcd5ec420d42f911189a5968a20ef4bc8381e60
|
[
"MIT"
] | null | null | null |
bitglitter/config/palettemodels.py
|
MarkMichon1/BitGlitter-Python
|
0dcd5ec420d42f911189a5968a20ef4bc8381e60
|
[
"MIT"
] | 1
|
2021-10-21T00:22:51.000Z
|
2021-10-21T00:22:51.000Z
|
from sqlalchemy import Boolean, Column, Float, Integer, String, UniqueConstraint
import base64
import math
import time
from bitglitter.config.config import engine, SQLBaseClass
from bitglitter.utilities.palette import BitsToColor, ColorsToBits, convert_hex_to_rgb, get_color_distance, \
get_palette_id_from_hash
class Palette(SQLBaseClass):
    """A stored color palette used for encoding bits as colors."""
    __tablename__ = 'palettes'
    __abstract__ = False
    # True for the special full-RGB palette (no explicit color set stored).
    is_24_bit = Column(Boolean, default=False)
    is_custom = Column(Boolean, default=True)
    is_included_with_repo = Column(Boolean, default=False) # for differentiating other people's colors & our fancy ones
    # Unique identifier; for custom palettes it is a hash of name/description/
    # timestamp/colors (see _initialize_colors).
    palette_id = Column(String, unique=True, nullable=False)
    name = Column(String, unique=True, nullable=False)
    description = Column(String)
    nickname = Column(String, nullable=True, unique=True)
    # Colors serialized as 'r,g,b|r,g,b|...' (see convert_colors_to_tuple).
    color_set = Column(String)
    color_distance = Column(Float, default=0, nullable=False)
    number_of_colors = Column(Integer, default=0, nullable=False)
    # Bits encodable per symbol: log2(number_of_colors); 0 when invalid.
    bit_length = Column(Integer, default=0, nullable=False)
    time_created = Column(Integer, default=time.time)
    # Base64 export blob assembled at creation time for custom palettes.
    base64_string = Column(String)
    is_valid = Column(Boolean)
    @classmethod
    def create(cls, color_set, **kwargs):
        """Create a palette, derive its color math, and (for custom palettes)
        build the shareable base64 export string.

        :param color_set: sequence of colors (hex strings or RGB tuples —
            converted in ``_initialize_colors``).
        """
        object_ = super().create(**kwargs)
        object_._initialize_colors(color_set)
        if object_.is_custom:
            # Export format: fields joined by literal '\\' separators, then
            # base64-encoded for easy sharing.
            assembled_string = '\\\\'.join(
                [object_.palette_id, object_.name, object_.description, str(object_.time_created),
                 str(object_.convert_colors_to_tuple())]) + '\\\\'
            object_.base64_string = base64.b64encode(assembled_string.encode()).decode()
        object_.save()
        return object_
    __table_args__ = (
        UniqueConstraint('palette_id'),
    )
def __str__(self):
palette_type = 'Custom' if self.is_custom else 'Default'
return f'{palette_type} Palette - {self.name} - {self.number_of_colors} Colors'
def _calculate_palette_math(self, color_set, save=True):
"""Runs during model creation and when color set is updated."""
self.color_distance = get_color_distance(color_set)
self.number_of_colors = len(color_set)
is_valid = math.log2(self.number_of_colors).is_integer()
if is_valid:
self.bit_length = int(math.log(self.number_of_colors, 2))
else:
self.bit_length = 0
self.is_valid = is_valid
if save: # Added to prevent repetitive saves if used in other methods
self.save()
def convert_colors_to_tuple(self):
"""Since all of their colors are stored as a single string for speed, this function retrieves it and returns
them in a more usable list format.
"""
if not self.is_24_bit:
string_split = self.color_set.split('|')
returned_list = []
for piece in string_split:
channels = piece.split(',')
channels = [int(channel) for channel in channels]
returned_list.append((channels[0], channels[1], channels[2]))
return returned_list
else:
return None
def _initialize_colors(self, color_set):
"""An internal method that blindly accepts tuples. Use palettefunctions functions for prior necessary
validation of values.
"""
color_set_cleaned = convert_hex_to_rgb(color_set) if color_set else None
if not self.is_24_bit:
self._calculate_palette_math(color_set_cleaned, save=False)
string_list = []
for color in color_set_cleaned:
to_string = [str(channel) for channel in color]
string_list.append(','.join(to_string))
self.color_set = '|'.join(string_list)
else:
self.bit_length = 24
self.color_distance = 0
self.number_of_colors = 16777216
if self.is_custom:
self.palette_id = get_palette_id_from_hash(self.name, self.description, self.time_created,
color_set_cleaned)
def return_encoder(self):
color_set_tupled = self.convert_colors_to_tuple()
return BitsToColor(color_set_tupled, self.bit_length, self.name)
def return_decoder(self):
color_set_tupled = self.convert_colors_to_tuple()
return ColorsToBits(color_set_tupled, self.bit_length, self.name)
# Ensure the palettes table exists as soon as this module is imported.
SQLBaseClass.metadata.create_all(engine)
| 38.577586
| 120
| 0.660559
|
acfc420fb906ad781dcad9d046381bdcf7a1d5cb
| 326
|
py
|
Python
|
wowawards/migrations/0002_remove_image_image_comments.py
|
jameskomo/awwwards
|
fd72059828969ddaa734d7095502cb34624675cf
|
[
"MIT"
] | null | null | null |
wowawards/migrations/0002_remove_image_image_comments.py
|
jameskomo/awwwards
|
fd72059828969ddaa734d7095502cb34624675cf
|
[
"MIT"
] | 9
|
2020-02-12T00:22:55.000Z
|
2022-02-10T09:38:15.000Z
|
wowawards/migrations/0002_remove_image_image_comments.py
|
jameskomo/awwwards
|
fd72059828969ddaa734d7095502cb34624675cf
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.1 on 2019-05-25 11:13
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the ``image_comments`` field from Image."""

    dependencies = [
        ('wowawards', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='image',
            name='image_comments',
        ),
    ]
| 18.111111
| 47
| 0.588957
|
acfc4271ad2ad7e24c0c9e9e5e18f0543db86181
| 1,305
|
py
|
Python
|
2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/06-Objects-and-Classes/02_Exercises/06-Inventory.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/06-Objects-and-Classes/02_Exercises/06-Inventory.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/06-Objects-and-Classes/02_Exercises/06-Inventory.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
# 6. Inventory
# Create a class Inventory. The __init__ method should accept only the capacity of the inventory.
# The capacity should be a private attribute (__capacity). You can read more about private attributes here.
# Each inventory should also have an attribute called items, where all the items will be stored. The class should also have 3 methods:
# • add_item(item) - adds the item in the inventory if there is space for it. Otherwise, returns
# "not enough room in the inventory"
# • get_capacity() - returns the value of __capacity
# • __repr__() - returns "Items: {items}.\nCapacity left: {left_capacity}". The items should be separated by ", "
class Inventory:
    """A fixed-size container of item names.

    The maximum item count is private (name-mangled); read it through
    ``get_capacity``.  Stored names live in the public ``items`` list.
    """

    def __init__(self, capacity):
        # Private capacity per the exercise spec; items start empty.
        self.__capacity = capacity
        self.items = []

    def add_item(self, item):
        """Store *item* if room remains; otherwise return an error message."""
        if len(self.items) >= self.__capacity:
            return "not enough room in the inventory"
        self.items.append(item)

    def get_capacity(self):
        """Return the inventory's maximum item count."""
        return self.__capacity

    def __repr__(self):
        listing = ", ".join(self.items)
        remaining = self.__capacity - len(self.items)
        return f"Items: {listing}.\nCapacity left: {remaining}"
# Demo from the exercise: fill a 2-slot inventory, then overflow it.
inventory = Inventory(2)
inventory.add_item("potion")
inventory.add_item("sword")
print(inventory.add_item("bottle"))  # third add fails -> error message
print(inventory.get_capacity())      # 2
print(inventory)                     # __repr__: items and remaining space
| 38.382353
| 134
| 0.704215
|
acfc43a8108752249a71fe667cf4f6fc752dab66
| 670
|
py
|
Python
|
tests/playlist_test.py
|
evalexpr/reddit-spotify-playlist-generator
|
f7d32caff065740e4d8ab7c42b4b69256cedeaa2
|
[
"MIT"
] | null | null | null |
tests/playlist_test.py
|
evalexpr/reddit-spotify-playlist-generator
|
f7d32caff065740e4d8ab7c42b4b69256cedeaa2
|
[
"MIT"
] | null | null | null |
tests/playlist_test.py
|
evalexpr/reddit-spotify-playlist-generator
|
f7d32caff065740e4d8ab7c42b4b69256cedeaa2
|
[
"MIT"
] | null | null | null |
import unittest
from collections import namedtuple
from subreddit_to_playlist.playlist import Playlist
class PlaylistTest(unittest.TestCase):
    """Unit tests for the private helpers of ``Playlist``."""

    def test_format_songs(self):
        # _format_songs yields Spotify search-query strings built from each
        # song's artist/track attributes.
        track = namedtuple('track', ['artist', 'track'])
        songs = [track('foo', 'bar')]
        playlist = Playlist(songs)
        self.assertEqual(next(playlist._format_songs()), "artist:'foo' track:'bar'")

    def test_generate_track_ids(self):
        # _generate_track_ids extracts the track URIs out of raw Spotify
        # search-result dictionaries.
        results = [
            {'tracks': {'items': [{'uri': 'spotify:track:foo'}]}}
        ]
        playlist = Playlist([])
        ids = playlist._generate_track_ids(results)
        self.assertEqual(ids, ['spotify:track:foo'])
| 26.8
| 84
| 0.635821
|
acfc43bc3fa0b0558967a21e3163e7bb540950fd
| 16,179
|
py
|
Python
|
tests/test_form.py
|
TheYoungSimba/MechanicalSoup
|
5926d0efd887097f9da56241ae945c4d5a81d8ba
|
[
"MIT"
] | 1
|
2018-09-24T09:20:24.000Z
|
2018-09-24T09:20:24.000Z
|
tests/test_form.py
|
TheYoungSimba/MechanicalSoup
|
5926d0efd887097f9da56241ae945c4d5a81d8ba
|
[
"MIT"
] | null | null | null |
tests/test_form.py
|
TheYoungSimba/MechanicalSoup
|
5926d0efd887097f9da56241ae945c4d5a81d8ba
|
[
"MIT"
] | null | null | null |
import setpath # noqa:F401, must come before 'import mechanicalsoup'
import mechanicalsoup
import bs4
from utils import setup_mock_browser
import sys
import pytest
def test_construct_form_fail():
    """Form objects must be constructed from form html elements."""
    # A non-<form> tag is still accepted, but constructing a Form from it
    # must emit a DeprecationWarning.
    soup = bs4.BeautifulSoup('<notform>This is not a form</notform>', 'lxml')
    tag = soup.find('notform')
    assert isinstance(tag, bs4.element.Tag)
    pytest.deprecated_call(mechanicalsoup.Form, tag)
def test_submit_online(httpbin):
"""Complete and submit the pizza form at http://httpbin.org/forms/post """
browser = mechanicalsoup.Browser()
page = browser.get(httpbin + "/forms/post")
form = mechanicalsoup.Form(page.soup.form)
input_data = {"custname": "Philip J. Fry"}
form.input(input_data)
check_data = {"size": "large", "topping": ["cheese"]}
form.check(check_data)
check_data = {"size": "medium", "topping": "onion"}
form.check(check_data)
form.textarea({"comments": "warm"})
form.textarea({"comments": "actually, no, not warm"})
form.textarea({"comments": "freezer"})
response = browser.submit(form, page.url)
# helpfully the form submits to http://httpbin.org/post which simply
# returns the request headers in json format
json = response.json()
data = json["form"]
assert data["custname"] == "Philip J. Fry"
assert data["custtel"] == "" # web browser submits "" for input left blank
assert data["size"] == "medium"
assert data["topping"] == ["cheese", "onion"]
assert data["comments"] == "freezer"
def test_submit_set(httpbin):
"""Complete and submit the pizza form at http://httpbin.org/forms/post """
browser = mechanicalsoup.Browser()
page = browser.get(httpbin + "/forms/post")
form = mechanicalsoup.Form(page.soup.form)
form["custname"] = "Philip J. Fry"
form["size"] = "medium"
form["topping"] = ("cheese", "onion")
form["comments"] = "freezer"
response = browser.submit(form, page.url)
# helpfully the form submits to http://httpbin.org/post which simply
# returns the request headers in json format
json = response.json()
data = json["form"]
assert data["custname"] == "Philip J. Fry"
assert data["custtel"] == "" # web browser submits "" for input left blank
assert data["size"] == "medium"
assert data["topping"] == ["cheese", "onion"]
assert data["comments"] == "freezer"
@pytest.mark.parametrize("expected_post", [
pytest.param(
[
('comment', 'Testing preview page'),
('preview', 'Preview Page'),
('text', 'Setting some text!')
], id='preview'),
pytest.param(
[
('comment', 'Created new page'),
('save', 'Submit changes'),
('text', '= Heading =\n\nNew page here!\n')
], id='save'),
pytest.param(
[
('comment', 'Testing choosing cancel button'),
('cancel', 'Cancel'),
('text', '= Heading =\n\nNew page here!\n')
], id='cancel'),
])
def test_choose_submit(expected_post):
    """Only the chosen submit element's name/value pair is posted.

    ``expected_post`` layout (from the parametrize above):
    [0] = ('comment', ...), [1] = (submit name, value), [2] = ('text', ...).
    """
    browser, url = setup_mock_browser(expected_post=expected_post)
    browser.open(url)
    form = browser.select_form('#choose-submit-form')
    browser['text'] = expected_post[2][1]
    browser['comment'] = expected_post[0][1]
    form.choose_submit(expected_post[1][0])
    res = browser.submit_selected()
    # The mock server replies 'Success!' only when the posted pairs match.
    assert(res.status_code == 200 and res.text == 'Success!')
@pytest.mark.parametrize("value", [
pytest.param('continue', id='first'),
pytest.param('cancel', id='second'),
])
def test_choose_submit_from_selector(value):
"""Test choose_submit by passing a CSS selector argument."""
text = """
<form method="post" action="mock://form.com/post">
<input type="submit" name="do" value="continue" />
<input type="submit" name="do" value="cancel" />
</form>"""
browser, url = setup_mock_browser(expected_post=[('do', value)], text=text)
browser.open(url)
form = browser.select_form()
submits = form.form.select('input[value="{}"]'.format(value))
assert len(submits) == 1
form.choose_submit(submits[0])
res = browser.submit_selected()
assert res.status_code == 200 and res.text == 'Success!'
choose_submit_fail_form = '''
<html>
<form id="choose-submit-form">
<input type="submit" name="test_submit" value="Test Submit" />
</form>
</html>
'''
@pytest.mark.parametrize("select_name", [
pytest.param({'name': 'does_not_exist', 'fails': True}, id='not found'),
pytest.param({'name': 'test_submit', 'fails': False}, id='found'),
])
def test_choose_submit_fail(select_name):
browser = mechanicalsoup.StatefulBrowser()
browser.open_fake_page(choose_submit_fail_form)
form = browser.select_form('#choose-submit-form')
if select_name['fails']:
with pytest.raises(mechanicalsoup.utils.LinkNotFoundError):
form.choose_submit(select_name['name'])
else:
form.choose_submit(select_name['name'])
def test_choose_submit_twice():
"""Test that calling choose_submit twice fails."""
text = '''
<form>
<input type="submit" name="test1" value="Test1" />
<input type="submit" name="test2" value="Test2" />
</form>
'''
soup = bs4.BeautifulSoup(text, 'lxml')
form = mechanicalsoup.Form(soup.form)
form.choose_submit('test1')
expected_msg = 'Submit already chosen. Cannot change submit!'
with pytest.raises(Exception, match=expected_msg):
form.choose_submit('test2')
choose_submit_multiple_match_form = '''
<html>
<form id="choose-submit-form">
<input type="submit" name="test_submit" value="First Submit" />
<input type="submit" name="test_submit" value="Second Submit" />
</form>
</html>
'''
def test_choose_submit_multiple_match():
browser = mechanicalsoup.StatefulBrowser()
browser.open_fake_page(choose_submit_multiple_match_form)
form = browser.select_form('#choose-submit-form')
with pytest.raises(mechanicalsoup.utils.LinkNotFoundError):
form.choose_submit('test_submit')
submit_form_noaction = '''
<html>
<body>
<form id="choose-submit-form">
<input type="text" name="text1" value="someValue1" />
<input type="text" name="text2" value="someValue2" />
<input type="submit" name="save" />
</form>
</body>
</html>
'''
def test_form_noaction():
browser, url = setup_mock_browser()
browser.open_fake_page(submit_form_noaction, url=url)
form = browser.select_form('#choose-submit-form')
form['text1'] = 'newText1'
res = browser.submit_selected()
assert(res.status_code == 200 and browser.get_url() == url)
submit_form_action = '''
<html>
<body>
<form id="choose-submit-form" action="mock://form.com">
<input type="text" name="text1" value="someValue1" />
<input type="text" name="text2" value="someValue2" />
<input type="submit" name="save" />
</form>
</body>
</html>
'''
def test_form_action():
browser, url = setup_mock_browser()
# for info about example.com see: https://tools.ietf.org/html/rfc2606
browser.open_fake_page(submit_form_action,
url="http://example.com/invalid/")
form = browser.select_form('#choose-submit-form')
form['text1'] = 'newText1'
res = browser.submit_selected()
assert(res.status_code == 200 and browser.get_url() == url)
set_select_form = '''
<html>
<form method="post" action="mock://form.com/post">
<select name="entree">
<option value="tofu" selected="selected">Tofu Stir Fry</option>
<option value="curry">Red Curry</option>
<option value="tempeh">Tempeh Tacos</option>
</select>
<input type="submit" value="Select" />
</form>
</html>
'''
@pytest.mark.parametrize("option", [
pytest.param({'result': [('entree', 'tofu')], 'default': True},
id='default'),
pytest.param({'result': [('entree', 'curry')], 'default': False},
id='selected'),
])
def test_set_select(option):
'''Test the branch of Form.set that finds "select" elements.'''
browser, url = setup_mock_browser(expected_post=option['result'],
text=set_select_form)
browser.open(url)
browser.select_form('form')
if not option['default']:
browser[option['result'][0][0]] = option['result'][0][1]
res = browser.submit_selected()
assert(res.status_code == 200 and res.text == 'Success!')
set_select_multiple_form = '''
<form method="post" action="mock://form.com/post">
<select name="instrument" multiple>
<option value="piano">Piano</option>
<option value="bass">Bass</option>
<option value="violin">Violin</option>
</select>
<input type="submit" value="Select Multiple" />
</form>
'''
@pytest.mark.parametrize("options", [
pytest.param('bass', id='select one (str)'),
pytest.param(('bass',), id='select one (tuple)'),
pytest.param(('piano', 'violin'), id='select two'),
])
def test_set_select_multiple(options):
"""Test a <select multiple> element."""
# When a browser submits multiple selections, the qsl looks like:
# name=option1&name=option2
if not isinstance(options, list) and not isinstance(options, tuple):
expected = [('instrument', options)]
else:
expected = [('instrument', option) for option in options]
browser, url = setup_mock_browser(expected_post=expected,
text=set_select_multiple_form)
browser.open(url)
form = browser.select_form('form')
form.set_select({'instrument': options})
res = browser.submit_selected()
assert(res.status_code == 200 and res.text == 'Success!')
def test_form_not_found():
browser = mechanicalsoup.StatefulBrowser()
browser.open_fake_page(page_with_various_fields)
form = browser.select_form('form')
with pytest.raises(mechanicalsoup.utils.LinkNotFoundError):
form.input({'foo': 'bar', 'nosuchname': 'nosuchval'})
with pytest.raises(mechanicalsoup.utils.LinkNotFoundError):
form.check({'foo': 'bar', 'nosuchname': 'nosuchval'})
with pytest.raises(mechanicalsoup.utils.LinkNotFoundError):
form.check({'entree': 'cheese'})
with pytest.raises(mechanicalsoup.utils.LinkNotFoundError):
form.check({'topping': 'tofu'})
with pytest.raises(mechanicalsoup.utils.LinkNotFoundError):
form.textarea({'bar': 'value', 'foo': 'nosuchval'})
with pytest.raises(mechanicalsoup.utils.LinkNotFoundError):
form.set_radio({'size': 'tiny'})
with pytest.raises(mechanicalsoup.utils.LinkNotFoundError):
form.set_select({'entree': ('no_multiple', 'no_multiple')})
page_with_radio = '''
<html>
<form method="post">
<input type=checkbox name="foo" value="bacon"> This is a checkbox
</form>
</html>
'''
def test_form_check_uncheck():
browser = mechanicalsoup.StatefulBrowser()
browser.open_fake_page(page_with_radio, url="http://example.com/invalid/")
form = browser.select_form('form')
assert "checked" not in form.form.find("input", {"name": "foo"}).attrs
form["foo"] = True
assert form.form.find("input", {"name": "foo"}).attrs["checked"] == ""
# Test explicit unchecking (skipping the call to Form.uncheck_all)
form.set_checkbox({"foo": False}, uncheck_other_boxes=False)
assert "checked" not in form.form.find("input", {"name": "foo"}).attrs
page_with_various_fields = '''
<html>
<form method="post">
<input name="foo">
<textarea name="bar">
</textarea>
<select name="entree">
<option value="tofu" selected="selected"> Tofu Stir Fry </option>
<option value="curry"> Red Curry</option>
<option value="tempeh">Tempeh Tacos </option>
</select>
<fieldset>
<legend> Pizza Toppings </legend>
<p><label> <input type=checkbox name="topping"
value="bacon"> Bacon </label></p>
<p><label> <input type=checkbox name="topping"
value="cheese" checked>Extra Cheese </label></p>
<p><label> <input type=checkbox name="topping"
value="onion" checked> Onion </label></p>
<p><label> <input type=checkbox name="topping"
value="mushroom"> Mushroom </label></p>
</fieldset>
<p><input name="size" type=radio value="small">Small</p>
<p><input name="size" type=radio value="medium">Medium</p>
<p><input name="size" type=radio value="large">Large</p>
<button name="action" value="cancel">Cancel</button>
<input type="submit" value="Select" />
</form>
</html>
'''
def test_form_print_summary(capsys):
browser = mechanicalsoup.StatefulBrowser()
browser.open_fake_page(page_with_various_fields,
url="http://example.com/invalid/")
browser.select_form("form")
browser.get_current_form().print_summary()
out, err = capsys.readouterr()
# Different versions of bs4 show either <input></input> or
# <input/>. Normalize before comparing.
out = out.replace('></input>', '/>')
assert out == """<input name="foo"/>
<textarea name="bar"></textarea>
<select name="entree">
<option selected="selected" value="tofu">Tofu Stir Fry</option>
<option value="curry">Red Curry</option>
<option value="tempeh">Tempeh Tacos</option>
</select>
<input name="topping" type="checkbox" value="bacon"/>
<input checked="" name="topping" type="checkbox" value="cheese"/>
<input checked="" name="topping" type="checkbox" value="onion"/>
<input name="topping" type="checkbox" value="mushroom"/>
<input name="size" type="radio" value="small"/>
<input name="size" type="radio" value="medium"/>
<input name="size" type="radio" value="large"/>
<button name="action" value="cancel">Cancel</button>
<input type="submit" value="Select"/>
"""
assert err == ""
def test_issue180():
"""Test that a KeyError is not raised when Form.choose_submit is called
on a form where a submit element is missing its name-attribute."""
browser = mechanicalsoup.StatefulBrowser()
html = '''
<form>
<input type="submit" value="Invalid" />
<input type="submit" name="valid" value="Valid" />
</form>
'''
browser.open_fake_page(html)
form = browser.select_form()
with pytest.raises(mechanicalsoup.utils.LinkNotFoundError):
form.choose_submit('not_found')
def test_issue158():
    """Test that form elements are processed in their order on the page
    and that elements with duplicate name-attributes are not clobbered."""
    # Three inputs share name="box"; the post data must keep all three,
    # in document order, rather than collapsing them into one key.
    issue158_form = '''
    <form method="post" action="mock://form.com/post">
      <input name="box" type="hidden" value="1"/>
      <input checked="checked" name="box" type="checkbox" value="2"/>
      <input name="box" type="hidden" value="0"/>
      <input type="submit" value="Submit" />
    </form>
    '''
    expected_post = [('box', '1'), ('box', '2'), ('box', '0')]
    browser, url = setup_mock_browser(expected_post=expected_post,
                                      text=issue158_form)
    browser.open(url)
    browser.select_form()
    res = browser.submit_selected()
    assert(res.status_code == 200 and res.text == 'Success!')
    browser.close()
@pytest.mark.parametrize("expected_post", [
pytest.param([('sub2', 'val2')], id='submit button'),
pytest.param([('sub4', 'val4')], id='typeless button'),
pytest.param([('sub5', 'val5')], id='submit input'),
])
def test_choose_submit_buttons(expected_post):
"""Buttons of type reset and button are not valid submits"""
text = """
<form method="post" action="mock://form.com/post">
<button type="button" name="sub1" value="val1">Val1</button>
<button type="submit" name="sub2" value="val2">Val2</button>
<button type="reset" name="sub3" value="val3">Val3</button>
<button name="sub4" value="val4">Val4</button>
<input type="submit" name="sub5" value="val5">
</form>
"""
browser, url = setup_mock_browser(expected_post=expected_post, text=text)
browser.open(url)
browser.select_form()
res = browser.submit_selected(btnName=expected_post[0][0])
assert res.status_code == 200 and res.text == 'Success!'
if __name__ == '__main__':
pytest.main(sys.argv)
| 34.943844
| 79
| 0.648804
|
acfc44b6c272ace148c7ebdf7c16626d68afccb8
| 25,515
|
py
|
Python
|
nltk/parse/recursivedescent.py
|
PhanatosZou/nltk
|
750e488569b6f80c72ae6ca74eff90eae55e6c4e
|
[
"Apache-2.0"
] | null | null | null |
nltk/parse/recursivedescent.py
|
PhanatosZou/nltk
|
750e488569b6f80c72ae6ca74eff90eae55e6c4e
|
[
"Apache-2.0"
] | null | null | null |
nltk/parse/recursivedescent.py
|
PhanatosZou/nltk
|
750e488569b6f80c72ae6ca74eff90eae55e6c4e
|
[
"Apache-2.0"
] | null | null | null |
# Natural Language Toolkit: Recursive Descent Parser
#
# Copyright (C) 2001-2019 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from nltk.grammar import Nonterminal
from nltk.tree import Tree, ImmutableTree
from nltk.parse.api import ParserI
##//////////////////////////////////////////////////////
## Recursive Descent Parser
##//////////////////////////////////////////////////////
class RecursiveDescentParser(ParserI):
"""
A simple top-down CFG parser that parses texts by recursively
expanding the fringe of a Tree, and matching it against a
text.
``RecursiveDescentParser`` uses a list of tree locations called a
"frontier" to remember which subtrees have not yet been expanded
and which leaves have not yet been matched against the text. Each
tree location consists of a list of child indices specifying the
path from the root of the tree to a subtree or a leaf; see the
reference documentation for Tree for more information
about tree locations.
When the parser begins parsing a text, it constructs a tree
containing only the start symbol, and a frontier containing the
location of the tree's root node. It then extends the tree to
cover the text, using the following recursive procedure:
- If the frontier is empty, and the text is covered by the tree,
then return the tree as a possible parse.
- If the frontier is empty, and the text is not covered by the
tree, then return no parses.
- If the first element of the frontier is a subtree, then
use CFG productions to "expand" it. For each applicable
production, add the expanded subtree's children to the
frontier, and recursively find all parses that can be
generated by the new tree and frontier.
- If the first element of the frontier is a token, then "match"
it against the next token from the text. Remove the token
from the frontier, and recursively find all parses that can be
generated by the new tree and frontier.
:see: ``nltk.grammar``
"""
def __init__(self, grammar, trace=0):
"""
Create a new ``RecursiveDescentParser``, that uses ``grammar``
to parse texts.
:type grammar: CFG
:param grammar: The grammar used to parse texts.
:type trace: int
:param trace: The level of tracing that should be used when
parsing a text. ``0`` will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
"""
self._grammar = grammar
self._trace = trace
def grammar(self):
return self._grammar
def parse(self, tokens):
# Inherit docs from ParserI
tokens = list(tokens)
self._grammar.check_coverage(tokens)
# Start a recursive descent parse, with an initial tree
# containing just the start symbol.
start = self._grammar.start().symbol()
initial_tree = Tree(start, [])
frontier = [()]
if self._trace:
self._trace_start(initial_tree, frontier, tokens)
return self._parse(tokens, initial_tree, frontier)
def _parse(self, remaining_text, tree, frontier):
"""
Recursively expand and match each elements of ``tree``
specified by ``frontier``, to cover ``remaining_text``. Return
a list of all parses found.
:return: An iterator of all parses that can be generated by
matching and expanding the elements of ``tree``
specified by ``frontier``.
:rtype: iter(Tree)
:type tree: Tree
:param tree: A partial structure for the text that is
currently being parsed. The elements of ``tree``
that are specified by ``frontier`` have not yet been
expanded or matched.
:type remaining_text: list(str)
:param remaining_text: The portion of the text that is not yet
covered by ``tree``.
:type frontier: list(tuple(int))
:param frontier: A list of the locations within ``tree`` of
all subtrees that have not yet been expanded, and all
leaves that have not yet been matched. This list sorted
in left-to-right order of location within the tree.
"""
# If the tree covers the text, and there's nothing left to
# expand, then we've found a complete parse; return it.
if len(remaining_text) == 0 and len(frontier) == 0:
if self._trace:
self._trace_succeed(tree, frontier)
yield tree
# If there's still text, but nothing left to expand, we failed.
elif len(frontier) == 0:
if self._trace:
self._trace_backtrack(tree, frontier)
# If the next element on the frontier is a tree, expand it.
elif isinstance(tree[frontier[0]], Tree):
for result in self._expand(remaining_text, tree, frontier):
yield result
# If the next element on the frontier is a token, match it.
else:
for result in self._match(remaining_text, tree, frontier):
yield result
def _match(self, rtext, tree, frontier):
"""
:rtype: iter(Tree)
:return: an iterator of all parses that can be generated by
matching the first element of ``frontier`` against the
first token in ``rtext``. In particular, if the first
element of ``frontier`` has the same type as the first
token in ``rtext``, then substitute the token into
``tree``; and return all parses that can be generated by
matching and expanding the remaining elements of
``frontier``. If the first element of ``frontier`` does not
have the same type as the first token in ``rtext``, then
return empty list.
:type tree: Tree
:param tree: A partial structure for the text that is
currently being parsed. The elements of ``tree``
that are specified by ``frontier`` have not yet been
expanded or matched.
:type rtext: list(str)
:param rtext: The portion of the text that is not yet
covered by ``tree``.
:type frontier: list of tuple of int
:param frontier: A list of the locations within ``tree`` of
all subtrees that have not yet been expanded, and all
leaves that have not yet been matched.
"""
tree_leaf = tree[frontier[0]]
if len(rtext) > 0 and tree_leaf == rtext[0]:
# If it's a terminal that matches rtext[0], then substitute
# in the token, and continue parsing.
newtree = tree.copy(deep=True)
newtree[frontier[0]] = rtext[0]
if self._trace:
self._trace_match(newtree, frontier[1:], rtext[0])
for result in self._parse(rtext[1:], newtree, frontier[1:]):
yield result
else:
# If it's a non-matching terminal, fail.
if self._trace:
self._trace_backtrack(tree, frontier, rtext[:1])
def _expand(self, remaining_text, tree, frontier, production=None):
"""
:rtype: iter(Tree)
:return: An iterator of all parses that can be generated by
expanding the first element of ``frontier`` with
``production``. In particular, if the first element of
``frontier`` is a subtree whose node type is equal to
``production``'s left hand side, then add a child to that
subtree for each element of ``production``'s right hand
side; and return all parses that can be generated by
matching and expanding the remaining elements of
``frontier``. If the first element of ``frontier`` is not a
subtree whose node type is equal to ``production``'s left
hand side, then return an empty list. If ``production`` is
not specified, then return a list of all parses that can
be generated by expanding the first element of ``frontier``
with *any* CFG production.
:type tree: Tree
:param tree: A partial structure for the text that is
currently being parsed. The elements of ``tree``
that are specified by ``frontier`` have not yet been
expanded or matched.
:type remaining_text: list(str)
:param remaining_text: The portion of the text that is not yet
covered by ``tree``.
:type frontier: list(tuple(int))
:param frontier: A list of the locations within ``tree`` of
all subtrees that have not yet been expanded, and all
leaves that have not yet been matched.
"""
if production is None:
productions = self._grammar.productions()
else:
productions = [production]
for production in productions:
lhs = production.lhs().symbol()
if lhs == tree[frontier[0]].label():
subtree = self._production_to_tree(production)
if frontier[0] == ():
newtree = subtree
else:
newtree = tree.copy(deep=True)
newtree[frontier[0]] = subtree
new_frontier = [
frontier[0] + (i,) for i in range(len(production.rhs()))
]
if self._trace:
self._trace_expand(newtree, new_frontier, production)
for result in self._parse(
remaining_text, newtree, new_frontier + frontier[1:]
):
yield result
def _production_to_tree(self, production):
"""
:rtype: Tree
:return: The Tree that is licensed by ``production``.
In particular, given the production ``[lhs -> elt[1] ... elt[n]]``
return a tree that has a node ``lhs.symbol``, and
``n`` children. For each nonterminal element
``elt[i]`` in the production, the tree token has a
childless subtree with node value ``elt[i].symbol``; and
for each terminal element ``elt[j]``, the tree token has
a leaf token with type ``elt[j]``.
:param production: The CFG production that licenses the tree
token that should be returned.
:type production: Production
"""
children = []
for elt in production.rhs():
if isinstance(elt, Nonterminal):
children.append(Tree(elt.symbol(), []))
else:
# This will be matched.
children.append(elt)
return Tree(production.lhs().symbol(), children)
def trace(self, trace=2):
"""
Set the level of tracing output that should be generated when
parsing a text.
:type trace: int
:param trace: The trace level. A trace level of ``0`` will
generate no tracing output; and higher trace levels will
produce more verbose tracing output.
:rtype: None
"""
self._trace = trace
def _trace_fringe(self, tree, treeloc=None):
"""
Print trace output displaying the fringe of ``tree``. The
fringe of ``tree`` consists of all of its leaves and all of
its childless subtrees.
:rtype: None
"""
if treeloc == ():
print("*", end=" ")
if isinstance(tree, Tree):
if len(tree) == 0:
print(repr(Nonterminal(tree.label())), end=" ")
for i in range(len(tree)):
if treeloc is not None and i == treeloc[0]:
self._trace_fringe(tree[i], treeloc[1:])
else:
self._trace_fringe(tree[i])
else:
print(repr(tree), end=" ")
def _trace_tree(self, tree, frontier, operation):
"""
Print trace output displaying the parser's current state.
:param operation: A character identifying the operation that
generated the current state.
:rtype: None
"""
if self._trace == 2:
print(" %c [" % operation, end=" ")
else:
print(" [", end=" ")
if len(frontier) > 0:
self._trace_fringe(tree, frontier[0])
else:
self._trace_fringe(tree)
print("]")
def _trace_start(self, tree, frontier, text):
print("Parsing %r" % " ".join(text))
if self._trace > 2:
print("Start:")
if self._trace > 1:
self._trace_tree(tree, frontier, " ")
def _trace_expand(self, tree, frontier, production):
if self._trace > 2:
print("Expand: %s" % production)
if self._trace > 1:
self._trace_tree(tree, frontier, "E")
def _trace_match(self, tree, frontier, tok):
if self._trace > 2:
print("Match: %r" % tok)
if self._trace > 1:
self._trace_tree(tree, frontier, "M")
def _trace_succeed(self, tree, frontier):
if self._trace > 2:
print("GOOD PARSE:")
if self._trace == 1:
print("Found a parse:\n%s" % tree)
if self._trace > 1:
self._trace_tree(tree, frontier, "+")
def _trace_backtrack(self, tree, frontier, toks=None):
if self._trace > 2:
if toks:
print("Backtrack: %r match failed" % toks[0])
else:
print("Backtrack")
##//////////////////////////////////////////////////////
## Stepping Recursive Descent Parser
##//////////////////////////////////////////////////////
class SteppingRecursiveDescentParser(RecursiveDescentParser):
    """
    A ``RecursiveDescentParser`` that allows you to step through the
    parsing process, performing a single operation at a time.

    The ``initialize`` method is used to start parsing a text.
    ``expand`` expands the first element on the frontier using a single
    CFG production, and ``match`` matches the first element on the
    frontier against the next text token.  ``backtrack`` undoes the most
    recent expand or match operation.  ``step`` performs a single
    expand, match, or backtrack operation.  ``parses`` returns the set
    of parses that have been found by the parser.

    :ivar _history: A list of ``(rtext, tree, frontier)`` triples,
        containing the previous states of the parser.  This history is
        used to implement the ``backtrack`` operation.
    :ivar _tried_e: A record of all productions that have been tried
        for a given tree.  This record is used by ``expand`` to perform
        the next untried production.
    :ivar _tried_m: A record of what tokens have been matched for a
        given tree.  This record is used by ``step`` to decide whether
        or not to match a token.
    :see: ``nltk.grammar``
    """

    def __init__(self, grammar, trace=0):
        super().__init__(grammar, trace)
        self._rtext = None
        self._tree = None
        self._frontier = [()]
        self._tried_e = {}
        self._tried_m = {}
        self._history = []
        self._parses = []

    # [XX] TEMPORARY HACK WARNING!  This should be replaced with
    # something nicer when we get the chance.
    def _freeze(self, tree):
        # Return an immutable (hashable) copy of ``tree``; used as the
        # key into the _tried_e/_tried_m bookkeeping dictionaries.
        c = tree.copy()
        # for pos in c.treepositions('leaves'):
        #    c[pos] = c[pos].freeze()
        return ImmutableTree.convert(c)

    def parse(self, tokens):
        """Run the parser to completion on ``tokens`` and return an
        iterator of the parses that were found."""
        tokens = list(tokens)
        self.initialize(tokens)
        while self.step() is not None:
            pass
        return self.parses()

    def initialize(self, tokens):
        """
        Start parsing a given text.  This sets the parser's tree to
        the start symbol, its frontier to the root node, and its
        remaining text to ``tokens``.
        """
        self._rtext = tokens
        start = self._grammar.start().symbol()
        self._tree = Tree(start, [])
        self._frontier = [()]
        self._tried_e = {}
        self._tried_m = {}
        self._history = []
        self._parses = []
        if self._trace:
            self._trace_start(self._tree, self._frontier, self._rtext)

    def remaining_text(self):
        """
        :return: The portion of the text that is not yet covered by the
            tree.
        :rtype: list(str)
        """
        return self._rtext

    def frontier(self):
        """
        :return: A list of the tree locations of all subtrees that
            have not yet been expanded, and all leaves that have not
            yet been matched.
        :rtype: list(tuple(int))
        """
        return self._frontier

    def tree(self):
        """
        :return: A partial structure for the text that is
            currently being parsed.  The elements specified by the
            frontier have not yet been expanded or matched.
        :rtype: Tree
        """
        return self._tree

    def step(self):
        """
        Perform a single parsing operation.  If an untried match is
        possible, then perform the match, and return the matched
        token.  If an untried expansion is possible, then perform the
        expansion, and return the production that it is based on.  If
        backtracking is possible, then backtrack, and return True.
        Otherwise, return None.

        :return: None if no operation was performed; a token if a match
            was performed; a production if an expansion was performed;
            and True if a backtrack operation was performed.
        :rtype: Production or String or bool
        """
        # Try matching (if we haven't already).
        if self.untried_match():
            token = self.match()
            if token is not None:
                return token

        # Try expanding.
        production = self.expand()
        if production is not None:
            return production

        # Try backtracking.
        if self.backtrack():
            self._trace_backtrack(self._tree, self._frontier)
            return True

        # Nothing left to do.
        return None

    def expand(self, production=None):
        """
        Expand the first element of the frontier.  In particular, if
        the first element of the frontier is a subtree whose node type
        is equal to ``production``'s left hand side, then add a child
        to that subtree for each element of ``production``'s right hand
        side.  If ``production`` is not specified, then use the first
        untried expandable production.  If all expandable productions
        have been tried, do nothing.

        :return: The production used to expand the frontier, if an
            expansion was performed.  If no expansion was performed,
            return None.
        :rtype: Production or None
        """
        # Make sure we *can* expand.
        if len(self._frontier) == 0:
            return None
        if not isinstance(self._tree[self._frontier[0]], Tree):
            return None

        # If they didn't specify a production, check all untried ones.
        if production is None:
            productions = self.untried_expandable_productions()
        else:
            productions = [production]

        # (The original also initialized an unused local ``parses``
        # list here; it has been removed.)
        for prod in productions:
            # Record that we've tried this production now.
            self._tried_e.setdefault(self._freeze(self._tree), []).append(prod)

            # Try expanding.
            for _result in self._expand(self._rtext, self._tree, self._frontier, prod):
                return prod

        # We didn't expand anything.
        return None

    def match(self):
        """
        Match the first element of the frontier.  In particular, if
        the first element of the frontier has the same type as the
        next text token, then substitute the text token into the tree.

        :return: The token matched, if a match operation was
            performed.  If no match was performed, return None
        :rtype: str or None
        """
        # Bug fix: guard against exhausted text before indexing it.
        # The original read self._rtext[0] unconditionally, raising
        # IndexError when match() was called with no remaining text.
        if len(self._rtext) == 0:
            return None

        # Record that we've tried matching this token.
        tok = self._rtext[0]
        self._tried_m.setdefault(self._freeze(self._tree), []).append(tok)

        # Make sure we *can* match.
        if len(self._frontier) == 0:
            return None
        if isinstance(self._tree[self._frontier[0]], Tree):
            return None

        for _result in self._match(self._rtext, self._tree, self._frontier):
            # Return the token we just matched.
            return self._history[-1][0][0]
        return None

    def backtrack(self):
        """
        Return the parser to its state before the most recent
        match or expand operation.  Calling ``undo`` repeatedly return
        the parser to successively earlier states.  If no match or
        expand operations have been performed, ``undo`` will make no
        changes.

        :return: true if an operation was successfully undone.
        :rtype: bool
        """
        if len(self._history) == 0:
            return False
        (self._rtext, self._tree, self._frontier) = self._history.pop()
        return True

    def expandable_productions(self):
        """
        :return: A list of all the productions for which expansions
            are available for the current parser state.
        :rtype: list(Production)
        """
        # Make sure we *can* expand.
        if len(self._frontier) == 0:
            return []
        frontier_child = self._tree[self._frontier[0]]
        # (A redundant re-check of the frontier length was removed; it
        # could never be true once the guard above passed.)
        if not isinstance(frontier_child, Tree):
            return []
        return [
            p
            for p in self._grammar.productions()
            if p.lhs().symbol() == frontier_child.label()
        ]

    def untried_expandable_productions(self):
        """
        :return: A list of all the untried productions for which
            expansions are available for the current parser state.
        :rtype: list(Production)
        """
        tried_expansions = self._tried_e.get(self._freeze(self._tree), [])
        return [p for p in self.expandable_productions() if p not in tried_expansions]

    def untried_match(self):
        """
        :return: Whether the first element of the frontier is a token
            that has not yet been matched.
        :rtype: bool
        """
        if len(self._rtext) == 0:
            return False
        tried_matches = self._tried_m.get(self._freeze(self._tree), [])
        return self._rtext[0] not in tried_matches

    def currently_complete(self):
        """
        :return: Whether the parser's current state represents a
            complete parse.
        :rtype: bool
        """
        return len(self._frontier) == 0 and len(self._rtext) == 0

    def _parse(self, remaining_text, tree, frontier):
        """
        A stub version of ``_parse`` that sets the parsers current
        state to the given arguments.  In ``RecursiveDescentParser``,
        the ``_parse`` method is used to recursively continue parsing a
        text.  ``SteppingRecursiveDescentParser`` overrides it to
        capture these recursive calls.  It records the parser's old
        state in the history (to allow for backtracking), and updates
        the parser's new state using the given arguments.  Finally, it
        returns ``[1]``, which is used by ``match`` and ``expand`` to
        detect whether their operations were successful.

        :return: ``[1]``
        :rtype: list of int
        """
        self._history.append((self._rtext, self._tree, self._frontier))
        self._rtext = remaining_text
        self._tree = tree
        self._frontier = frontier

        # Is it a good parse?  If so, record it.
        if len(frontier) == 0 and len(remaining_text) == 0:
            self._parses.append(tree)
            self._trace_succeed(self._tree, self._frontier)

        return [1]

    def parses(self):
        """
        :return: An iterator of the parses that have been found by this
            parser so far.
        :rtype: iter(Tree)
        """
        return iter(self._parses)

    def set_grammar(self, grammar):
        """
        Change the grammar used to parse texts.

        :param grammar: The new grammar.
        :type grammar: CFG
        """
        self._grammar = grammar
##//////////////////////////////////////////////////////
## Demonstration Code
##//////////////////////////////////////////////////////
def demo():
    """
    A demonstration of the recursive descent parser.
    """
    from nltk import parse, CFG

    grammar = CFG.fromstring(
        """
        S -> NP VP
        NP -> Det N | Det N PP
        VP -> V NP | V NP PP
        PP -> P NP
        NP -> 'I'
        N -> 'man' | 'park' | 'telescope' | 'dog'
        Det -> 'the' | 'a'
        P -> 'in' | 'with'
        V -> 'saw'
        """
    )

    # Show the grammar, then parse a sample sentence with full tracing.
    for production in grammar.productions():
        print(production)

    sentence = "I saw a man in the park".split()
    rd_parser = parse.RecursiveDescentParser(grammar, trace=2)
    for tree in rd_parser.parse(sentence):
        print(tree)


if __name__ == "__main__":
    demo()
| 37.03193
| 87
| 0.583696
|
acfc451c356284618a933074ad4fb80a0515ec85
| 2,773
|
py
|
Python
|
tests/cpim_parser_test.py
|
alxgb/msrp-parser
|
6917715a5b5431d02b55780d702295581d6d1400
|
[
"MIT"
] | null | null | null |
tests/cpim_parser_test.py
|
alxgb/msrp-parser
|
6917715a5b5431d02b55780d702295581d6d1400
|
[
"MIT"
] | null | null | null |
tests/cpim_parser_test.py
|
alxgb/msrp-parser
|
6917715a5b5431d02b55780d702295581d6d1400
|
[
"MIT"
] | null | null | null |
import textwrap
from msrp_parser.cpim_message import CpimMessage, CpimParseError
import pytest
def clean_msg(msg):
    """Normalize a triple-quoted test message: remove the common leading
    indentation, then strip surrounding whitespace.

    Bug fix: dedent must run *before* strip().  The original called
    ``textwrap.dedent(msg.strip())`` — stripping first removes the
    leading indentation of only the first line, which zeroes out the
    common prefix that ``textwrap.dedent`` looks for and leaves every
    later line still indented.
    """
    return textwrap.dedent(msg).strip()
def test_example_cpim_message():
    # Example message in the style of RFC 3862 (CPIM): full message
    # header set plus an XML body with per-content MIME headers.
    # NOTE(review): blank separator lines between the header section and
    # the content section may have been lost in transcription — verify
    # this literal against the upstream repository / CPIM format.
    msg = """\
From: MR SANDERS <im:piglet@100akerwood.com>
To: Depressed Donkey <im:eeyore@100akerwood.com>
DateTime: 2000-12-13T13:40:00-08:00
Subject: the weather will be fine today
NS: MyFeatures <mid:MessageFeatures@id.foo.com>
Require: MyFeatures.VitalMessageOption
MyFeatures.VitalMessageOption: Confirmation-requested
MyFeatures.WackyMessageOption: Use-silly-font
Content-Type: text/xml;charset=utf-8
Content-ID: <1234567890@foo.com>
<body>
Here is the text of my message.
</body>
"""
    cpim_m = CpimMessage.from_string(clean_msg(msg))
    # Eight CPIM message headers; the Content-* headers belong to the
    # content part, not to the message headers.
    assert len(cpim_m.headers) == 8
    assert cpim_m.headers["DateTime"] == "2000-12-13T13:40:00-08:00"
    # The body is kept as a list of lines; content headers are a dict.
    assert cpim_m.content["body"] == ["<body>", "Here is the text of my message.", "</body>"]
    assert cpim_m.content["headers"]["Content-Type"] == "text/xml;charset=utf-8"
    assert cpim_m.content["headers"]["Content-ID"] == "<1234567890@foo.com>"
def test_sip_cpim_message():
    # A realistic SIP/IMDN-flavoured CPIM message: tel-URI sender,
    # long SIP URI recipient, and imdn.* namespaced headers.
    # NOTE(review): blank separator lines between headers and content
    # may have been lost in transcription — verify against upstream.
    msg = """\
From: <tel:+34666321123>
To: <sip:SA77qga-hwn11xxpqafswks79wc2wwuw8gbyynaalv2e2ebbvdug8t6a8av77wu6qyrooiqvq0twvhhc64gppj1m8bq6@127.0.0.1:12012;fid=vularcvim02512_1;transport=tcp>
DateTime: 2019-07-31T21:54:12.000Z
NS: imdn <urn:ietf:params:imdn>
imdn.Message-ID: 1816f9kChg
imdn.Disposition-Notification: positive-delivery
Content-Type: text/plain;charset=UTF-8
Content-Length: 43
Message sent on 2019-07-31
@ 21:54:12.000Z!
"""
    cpim_m = CpimMessage.from_string(clean_msg(msg))
    # Six CPIM message headers, including the namespaced imdn.* ones.
    assert len(cpim_m.headers) == 6
    assert cpim_m.headers["DateTime"] == "2019-07-31T21:54:12.000Z"
    assert cpim_m.headers["From"] == "<tel:+34666321123>"
    assert (
        cpim_m.headers["To"]
        == "<sip:SA77qga-hwn11xxpqafswks79wc2wwuw8gbyynaalv2e2ebbvdug8t6a8av77wu6qyrooiqvq0twvhhc64gppj1m8bq6@127.0.0.1:12012;fid=vularcvim02512_1;transport=tcp>"
    )
    assert cpim_m.headers["NS"] == "imdn <urn:ietf:params:imdn>"
    assert cpim_m.headers["imdn.Message-ID"] == "1816f9kChg"
    assert cpim_m.headers["imdn.Disposition-Notification"] == "positive-delivery"
    # Multi-line plain-text body is preserved line by line.
    assert cpim_m.content["body"] == ["Message sent on 2019-07-31", "@ 21:54:12.000Z!"]
    assert cpim_m.content["headers"]["Content-Type"] == "text/plain;charset=UTF-8"
    assert cpim_m.content["headers"]["Content-Length"] == "43"
def test_error_empty_message():
    """An empty input string must be rejected with CpimParseError."""
    empty = ""
    with pytest.raises(CpimParseError, match="Empty CPIM message"):
        CpimMessage.from_string(empty)
| 39.056338
| 162
| 0.68013
|
acfc466bc9027a8a30d9cb39ee5c861f551ddc85
| 1,611
|
py
|
Python
|
onlinejudge/__init__.py
|
aberent/api-client
|
845e5f1daa02cc7fee5a65234a24bb59a7b71083
|
[
"MIT"
] | 38
|
2020-05-12T07:49:40.000Z
|
2022-03-31T07:57:51.000Z
|
onlinejudge/__init__.py
|
aberent/api-client
|
845e5f1daa02cc7fee5a65234a24bb59a7b71083
|
[
"MIT"
] | 56
|
2020-05-02T17:35:50.000Z
|
2022-02-27T06:53:06.000Z
|
onlinejudge/__init__.py
|
aberent/api-client
|
845e5f1daa02cc7fee5a65234a24bb59a7b71083
|
[
"MIT"
] | 11
|
2020-05-05T09:57:48.000Z
|
2022-02-27T06:31:22.000Z
|
"""
isort: skip_file
"""
# This is a workaround for the issue https://github.com/online-judge-tools/oj/issues/771
# You can reproduce this issue with:
#     $ pip3 uninstall online-judge-tools online-judge-api-client
#     $ pip3 install online-judge-tools==9.2.2
#     $ pip3 install online-judge-api-client
# pylint: disable=unused-import,ungrouped-imports
try:
    # If this import succeeds, an old (< 10.0.0) online-judge-tools is
    # installed alongside this package: that combination is broken.
    import onlinejudge._implementation.main  # type: ignore
except ImportError:
    # Normal case: no incompatible old installation present.
    pass
else:
    import sys
    import textwrap

    # NOTE(review): the message below appears to have lost its internal
    # blank lines/indentation in transcription — verify the exact
    # wording against the upstream repository.
    print(textwrap.dedent("""\
You use an old version of online-judge-tools (< 10.0.0) with online-judge-api-client, they are not compatible.
Please execute:
1. Uninstall online-judge-tools and online-judge-api-client.
$ pip3 uninstall online-judge-tools online-judge-api-client
2. Check if they are completely uninstalled. It has successfully uninstalled when the following commands say something like "not found".
$ command oj
oj: command not found
$ python3 -c 'import pathlib, sys ; print(*[path for path in sys.path if (pathlib.Path(path) / "onlinejudge").exists()] or ["not installed"])'
not installed
$ pip3 show online-judge-tools online-judge-api-client
(no output)
3. Reinstall online-judge-tools.
$ pip3 install online-judge-tools"""), file=sys.stderr)
    # Refuse to run at all while the broken combination is installed.
    sys.exit(1)
# pylint: enable=unused-import,ungrouped-imports
import onlinejudge.dispatch
import onlinejudge.service
from onlinejudge.__about__ import __version__
| 37.465116
| 158
| 0.667908
|
acfc485dbcee4ee40384fcd0ac80459c7d35d34b
| 825
|
py
|
Python
|
app/ch16_mongodb/starter/pypi_org/data/db_session.py
|
tbensonwest/data-driven-web-apps-with-flask
|
be025c1c0190419019924f7516f49b3b8452cdf8
|
[
"MIT"
] | 496
|
2019-07-03T05:13:24.000Z
|
2022-03-27T01:15:10.000Z
|
app/ch16_mongodb/starter/pypi_org/data/db_session.py
|
tbensonwest/data-driven-web-apps-with-flask
|
be025c1c0190419019924f7516f49b3b8452cdf8
|
[
"MIT"
] | 20
|
2019-07-07T22:09:49.000Z
|
2021-12-28T03:03:09.000Z
|
app/ch16_mongodb/starter/pypi_org/data/db_session.py
|
tbensonwest/data-driven-web-apps-with-flask
|
be025c1c0190419019924f7516f49b3b8452cdf8
|
[
"MIT"
] | 562
|
2019-07-03T14:35:21.000Z
|
2022-03-31T06:23:58.000Z
|
import sqlalchemy as sa
import sqlalchemy.orm as orm
from sqlalchemy.orm import Session
from pypi_org.data.modelbase import SqlAlchemyBase
# Module-level SQLAlchemy session factory; created once by global_init()
# and consumed by create_session().
__factory = None


def global_init(db_file: str):
    """Initialize the global database engine and session factory.

    Safe to call more than once: subsequent calls are no-ops.

    :param db_file: Path to the SQLite database file.
    :raises ValueError: If ``db_file`` is empty or whitespace-only.
    """
    global __factory

    if __factory:
        return

    if not db_file or not db_file.strip():
        # ValueError is more precise than the original bare Exception,
        # and is still caught by any caller handling Exception.
        raise ValueError("You must specify a db file.")

    conn_str = 'sqlite:///' + db_file.strip()
    print("Connecting to DB with {}".format(conn_str))

    engine = sa.create_engine(conn_str, echo=False)
    __factory = orm.sessionmaker(bind=engine)

    # Import all model modules so their tables are registered on
    # SqlAlchemyBase.metadata before create_all() runs.
    # noinspection PyUnresolvedReferences
    import pypi_org.data.__all_models

    SqlAlchemyBase.metadata.create_all(engine)
def create_session() -> Session:
    """Create a new SQLAlchemy session from the global factory.

    ``global_init`` must have been called first; otherwise ``__factory``
    is still None and calling it will fail.
    """
    # No ``global`` statement needed here: __factory is only read,
    # never rebound (the original declared it redundantly).
    session: Session = __factory()

    # Keep ORM objects usable after commit — simplifies handlers that
    # return entities once the transaction is committed.
    session.expire_on_commit = False

    return session
| 21.153846
| 54
| 0.717576
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.