code stringlengths 281 23.7M |
|---|
class OptionPlotoptionsSankeySonificationTracksMappingHighpassResonance(Options):
    """Generated Highcharts option wrapper for
    plotOptions.sankey.sonification.tracks.mapping.highpass.resonance.

    NOTE(review): each getter/setter pair below shares one name and carries
    no @property/@x.setter decorators — as written, the second `def` (the
    setter form) shadows the first. This matches a generated wrapper whose
    decorators appear to have been stripped; confirm against the original.
    """

    def mapFunction(self):
        # Getter form: configured mapping function (no default).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter form: stores a plain (non-JS) value.
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter form: data property the mapping reads from.
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        # Getter form: upper bound of the mapped range.
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        # Getter form: lower bound of the mapped range.
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        # Getter form: what the min/max range is computed within.
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class Scope(frozenset):
    """An immutable set of scope strings.

    Members are coerced to ``str`` on construction.  ``+`` and ``-``
    perform set union / difference against another ``Scope``, a plain
    string, or a ``scope`` object (external type), always returning a
    new ``Scope``.
    """

    def __new__(cls, *members):
        # Normalize every member to str so the set stays homogeneous.
        return super().__new__(cls, (str(member) for member in members))

    def __repr__(self):
        quoted = "', '".join(sorted(self))
        return f"Scope('{quoted}')"

    def __str__(self):
        # Space-separated canonical form (sorted for determinism).
        return ' '.join(sorted(self))

    def __add__(self, other) -> 'Scope':
        if isinstance(other, (str, scope)):
            addend = {str(other)}
        elif isinstance(other, Scope):
            addend = other
        else:
            e = f'Addition not defined for {type(self)} and {type(other)}!'
            raise NotImplementedError(e)
        return type(self)(*self.union(addend))

    def __radd__(self, other) -> 'Scope':
        # Union is commutative, so delegate to __add__.
        return self + other

    def __sub__(self, other) -> 'Scope':
        if isinstance(other, (str, scope)):
            subtrahend = {str(other)}
        elif isinstance(other, Scope):
            subtrahend = other
        else:
            e = f'Difference not defined for {type(self)} and {type(other)}!'
            raise NotImplementedError(e)
        return type(self)(*self.difference(subtrahend))

    def __rsub__(self, other) -> 'Scope':
        if not isinstance(other, (str, scope)):
            e = f'Difference not defined for {type(other)} and {type(self)}!'
            raise NotImplementedError(e)
        return Scope(other) - self
class RealtimeEntryRecorded(ModelNormal):
    """Generated OpenAPI model (openapi-generator python template style).

    NOTE(review): the bare `_property` and `_js_args_to_python_args`
    statements below look like stripped decorators (`@cached_property` and
    `@convert_js_args_to_python_args` in the stock template), and
    `_from_openapi_data` would normally carry `@classmethod`. As written
    they are no-op name references — confirm against the generated source.
    """
    # No enum values or validation rules for this model.
    allowed_values = {}
    validations = {}
    # None => no typed additional properties beyond attribute_map.
    additional_properties_type = None
    _nullable = False

    _property
    def openapi_types():
        """Attribute name -> openapi type mapping (empty for this model)."""
        return {}

    _property
    def discriminator():
        """No polymorphic discriminator for this model."""
        return None

    attribute_map = {}
    read_only_vars = {}
    _composed_schemas = {}

    _js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Deserialize server data (kwargs) into a new instance."""
        # Internal bookkeeping options, popped before attribute assignment.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Bypass __init__: attributes are assigned from kwargs below.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Track visited classes to break composed-schema cycles.
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                # Silently drop unknown keys when configured to do so.
                continue
            setattr(self, var_name, var_value)
        return self

    # Attributes always present on an instance (exempt from type checks).
    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])

    _js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Construct an instance from client-side keyword arguments."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, client construction rejects
            # read-only attributes.
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
def cmd_playground():
    """Launch the LMQL playground: the live server plus the debugger UI.

    Installs JS dependencies with yarn (bootstrapping yarn via npm if it
    is missing), starts both processes, and waits until they exit or the
    user presses Ctrl+C. Child processes are always terminated on exit.
    """
    ensure_node_install()
    parser = argparse.ArgumentParser(description='Launches an instance of the LMQL playground.')
    parser.add_argument('--live-port', type=int, default=3004, help='port to use to host the LMQL live server')
    parser.add_argument('--ui-port', type=int, default=3000, help='port to use to host the LMQL debugger UI')
    args = parser.parse_args(sys.argv[2:])
    project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    print(f'[lmql playground {project_root}, liveserver=localhost:{args.live_port}, ui=localhost:{args.ui_port}]')
    # BUG FIX: subprocess raises FileNotFoundError when the executable does
    # not exist at all, so the original `call(...) != 0` check crashed on
    # systems without yarn instead of installing it.
    try:
        yarn_missing = subprocess.call(['yarn', '--version']) != 0
    except FileNotFoundError:
        yarn_missing = True
    if yarn_missing:
        subprocess.run(['npm', 'install', '-g', 'yarn'], check=True)
    # Build-commit label: git HEAD (plus dirty marker) when running from a
    # checkout, otherwise the packaged version info.
    if os.path.exists(os.path.join(project_root, '../.git')):
        commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=project_root).decode('utf-8').strip()
        commit = commit[:7]
        has_uncomitted_files = len(subprocess.check_output(['git', 'status', '--porcelain'], cwd=project_root).decode('utf-8').strip()) > 0
        if has_uncomitted_files:
            commit += ' (dirty)'
        commit = f'"{commit}"'
    else:
        commit = version_info.commit
    # Install JS dependencies for both sub-projects.
    yarn_cwd_live = os.path.join(project_root, 'lmql/ui/live')
    subprocess.run(['yarn'], cwd=yarn_cwd_live, check=True)
    yarn_cwd_playground = os.path.join(project_root, 'lmql/ui/playground')
    subprocess.run(['yarn'], cwd=yarn_cwd_playground, check=True)
    live_process = subprocess.Popen(['yarn', 'cross-env', 'node', 'live.js'], cwd=yarn_cwd_live, env=dict(os.environ, PORT=str(args.live_port)))
    ui_modern_process = subprocess.Popen(['yarn', 'cross-env', 'yarn', 'run', 'start'], cwd=yarn_cwd_playground, env=dict(os.environ, REACT_APP_BUILD_COMMIT=str(commit), REACT_APP_SOCKET_PORT=str(args.live_port)))
    try:
        live_process.wait()
        ui_modern_process.wait()
    except KeyboardInterrupt:
        print('[lmql playground] Ctrl+C pressed, exiting...')
    finally:
        # Always reap both children — also on unexpected exceptions, not
        # just Ctrl+C (terminate is a no-op for already-exited processes).
        live_process.terminate()
        ui_modern_process.terminate()
class GptsMessagesEntity(Model):
    """ORM model for one message exchanged in a multi-agent conversation.

    Column definitions are schema-sensitive and left untouched; note that
    'current_gogal' is a (mis-spelled) persisted column name — renaming it
    would require a migration.
    """
    __tablename__ = 'gpts_messages'
    id = Column(Integer, primary_key=True, comment='autoincrement id')
    conv_id = Column(String(255), nullable=False, comment='The unique id of the conversation record')
    sender = Column(String(255), nullable=False, comment='Who speaking in the current conversation turn')
    receiver = Column(String(255), nullable=False, comment='Who receive message in the current conversation turn')
    model_name = Column(String(255), nullable=True, comment='message generate model')
    rounds = Column(Integer, nullable=False, comment='dialogue turns')
    content = Column(Text, nullable=True, comment='Content of the speech')
    # NOTE(review): 'gogal' is presumably a typo for 'goal'; preserved
    # because it is the live column name.
    current_gogal = Column(Text, nullable=True, comment='The target corresponding to the current message')
    context = Column(Text, nullable=True, comment='Current conversation context')
    review_info = Column(Text, nullable=True, comment='Current conversation review info')
    action_report = Column(Text, nullable=True, comment='Current conversation action report')
    role = Column(String(255), nullable=True, comment='The role of the current message content')
    created_at = Column(DateTime, default=datetime.utcnow, comment='create time')
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, comment='last update time')
    # Composite index to speed up per-conversation message lookups.
    __table_args__ = (Index('idx_q_messages', 'conv_id', 'rounds', 'sender'),)
def get_geth_version_info_string(**geth_kwargs):
    """Run ``geth version`` and return its raw stdout.

    Raises:
        TypeError: if the caller tries to supply ``suffix_args`` — this
            helper reserves that keyword for the ``version`` subcommand.
    """
    if 'suffix_args' in geth_kwargs:
        raise TypeError('The `get_geth_version` function cannot be called with the `suffix_args` parameter')
    geth_kwargs['suffix_args'] = ['version']
    stdoutdata, _stderr, _command, _proc = geth_wrapper(**geth_kwargs)
    return stdoutdata
class OptionSeriesTreegraphDataDragdropGuideboxDefault(Options):
    """Generated Highcharts option wrapper for the default drag guide box of
    series.treegraph.data.dragDrop.

    NOTE(review): getter/setter pairs share one name with no
    @property/@x.setter decorators — the second `def` shadows the first as
    written; appears to be a generated wrapper with stripped decorators.
    """

    def className(self):
        # Getter form: CSS class applied to the guide box.
        return self._config_get('highcharts-drag-box-default')

    def className(self, text: str):
        self._config(text, js_type=False)

    def color(self):
        # Getter form: guide box fill color.
        return self._config_get('rgba(0, 0, 0, 0.1)')

    def color(self, text: str):
        self._config(text, js_type=False)

    def cursor(self):
        # Getter form: cursor shown while dragging.
        return self._config_get('move')

    def cursor(self, text: str):
        self._config(text, js_type=False)

    def lineColor(self):
        # Getter form: guide box border color.
        return self._config_get('#888')

    def lineColor(self, text: str):
        self._config(text, js_type=False)

    def lineWidth(self):
        # Getter form: guide box border width in pixels.
        return self._config_get(1)

    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    def zIndex(self):
        # Getter form: stacking order of the guide box.
        return self._config_get(900)

    def zIndex(self, num: float):
        self._config(num, js_type=False)
def main() -> None:
    """Entry point for the mypy wrapper linter.

    Parses CLI arguments, prefers a `.pyi` stub over its `.py`
    implementation when the stub exists, runs the checks, and prints one
    JSON lint message per line.
    """
    # NOTE(review): fromfile_prefix_chars='' disables @-file expansion —
    # presumably intentional, but '@' is the conventional value; confirm.
    parser = argparse.ArgumentParser(description='mypy wrapper linter.', fromfile_prefix_chars='')
    parser.add_argument('--retries', default=3, type=int, help='times to retry timed out mypy')
    parser.add_argument('--config', required=True, help='path to an mypy .ini config file')
    parser.add_argument('--code', default='MYPY', help='the code this lint should report as')
    parser.add_argument('--verbose', action='store_true', help='verbose logging')
    parser.add_argument('filenames', nargs='+', help='paths to lint')
    args = parser.parse_args()
    logging.basicConfig(format='<%(threadName)s:%(levelname)s> %(message)s', level=(logging.NOTSET if args.verbose else (logging.DEBUG if (len(args.filenames) < 1000) else logging.INFO)), stream=sys.stderr)
    # Dict used as an insertion-ordered set of files to check.
    filenames: Dict[str, bool] = {}
    for filename in args.filenames:
        if filename.endswith('.pyi'):
            filenames[filename] = True
            continue
        # BUG FIX: the original used filename.replace('.py', '.pyi'), which
        # also rewrites '.py' occurring earlier in the path (e.g. a
        # directory named 'foo.py.d'); only swap the trailing suffix.
        if filename.endswith('.py'):
            stub_filename = filename[:-3] + '.pyi'
        else:
            stub_filename = filename
        if stub_filename != filename and Path(stub_filename).exists():
            filenames[stub_filename] = True
        else:
            filenames[filename] = True
    lint_messages = (check_mypy_installed(args.code) + check_files(list(filenames), args.config, args.retries, args.code))
    for lint_message in lint_messages:
        print(json.dumps(lint_message._asdict()), flush=True)
class TestOptunaInvalidCastChoice:
    """Verify that a hyper-parameter cast mismatch surfaces as a TypeError."""

    def test_invalid_cast_choice(self, monkeypatch):
        with monkeypatch.context() as patched:
            # Point argv at the config fixture with the bad cast.
            patched.setattr(sys, 'argv', ['', '--config', './tests/conf/yaml/test_hp_cast.yaml'])
            tuner_config = OptunaTunerConfig(study_name='Basic Tests', direction='maximize')
            with pytest.raises(TypeError):
                ConfigArgBuilder(HPOne, HPTwo).tuner(tuner_config)
class OptionSeriesPolygonMarkerStates(Options):
    """Generated Highcharts accessors for series.polygon.marker.states
    sub-options; each method lazily creates/returns the nested option tree.
    """

    def hover(self) -> 'OptionSeriesPolygonMarkerStatesHover':
        """Options for the hovered state of the point marker."""
        return self._config_sub_data('hover', OptionSeriesPolygonMarkerStatesHover)

    def normal(self) -> 'OptionSeriesPolygonMarkerStatesNormal':
        """Options for the normal (inactive) state of the point marker."""
        return self._config_sub_data('normal', OptionSeriesPolygonMarkerStatesNormal)

    def select(self) -> 'OptionSeriesPolygonMarkerStatesSelect':
        """Options for the selected state of the point marker."""
        return self._config_sub_data('select', OptionSeriesPolygonMarkerStatesSelect)
class Solution(object):
def validPalindrome(self, s):
(st, en) = (0, (len(s) - 1))
while (st < en):
if (s[st] != s[en]):
s1 = (st + 1)
e1 = en
while (s1 < e1):
if (s[s1] != s[e1]):
break
s1 += 1
e1 -= 1
else:
return True
s1 = st
e1 = (en - 1)
while (s1 < e1):
if (s[s1] != s[e1]):
break
s1 += 1
e1 -= 1
else:
return True
return False
st += 1
en -= 1
return True |
def test_validate_mnt_size(monkeypatch, log_capture):
    """Monitor-size validation warns past the warning threshold and raises
    SetupError past the hard maximum."""
    # Shrink the warning threshold to ~1 byte so any monitor trips it.
    monkeypatch.setattr(simulation, 'WARN_MONITOR_DATA_SIZE_GB', 1 / 2 ** 30)
    sim = SIM.copy(update=dict(monitors=(td.FieldMonitor(name='f', freqs=[0.0], size=(1, 1, 1)),)))
    sim._validate_monitor_size()
    assert_log_level(log_capture, 'WARNING')
    # Shrink the hard limit as well: now validation must fail outright.
    monkeypatch.setattr(simulation, 'MAX_SIMULATION_DATA_SIZE_GB', 1 / 2 ** 30)
    with pytest.raises(SetupError):
        sim = SIM.copy(update=dict(monitors=(td.FieldMonitor(name='f', freqs=[0.0], size=(1, 1, 1)),)))
        sim._validate_monitor_size()
class OptionSeriesGaugeDatalabelsFilter(Options):
    """Generated Highcharts option wrapper for the data-label filter of a
    gauge series (show a label only when `property operator value` holds).

    NOTE(review): getter/setter pairs share one name with no
    @property/@x.setter decorators — the second `def` shadows the first as
    written; appears to be a generated wrapper with stripped decorators.
    Also note `property` shadows the builtin within this class body.
    """

    def operator(self):
        # Getter form: comparison operator (e.g. '>', '<').
        return self._config_get(None)

    def operator(self, value: Any):
        self._config(value, js_type=False)

    def property(self):
        # Getter form: point property the filter compares against.
        return self._config_get(None)

    def property(self, text: str):
        self._config(text, js_type=False)
def test_wf_with_catching_no_return():
    """Workflow-DSL test: wiring the (None) output of a void task into a
    downstream task must be rejected.

    NOTE(review): t1/t2/t3 and wf look like they were decorated (@task /
    @workflow in e.g. flytekit style); the decorators appear stripped in
    this copy — as plain functions the AssertionError would come from t2's
    assert instead. Confirm against the original.
    """
    def t1() -> typing.Dict:
        return {}

    def t2(d: typing.Dict):
        assert (d == {})

    def t3(s: str):
        pass

    with pytest.raises(AssertionError):
        def wf():
            d = t1()
            # t2 has no declared return, so x carries no usable output.
            x = t2(d=d)
            t3(s=x)
        wf()
def aim_at_target(sensitivity, va, angle):
    """One aim-assist step: move the mouse toward a target view angle.

    Args:
        sensitivity: in-game mouse sensitivity used to convert degrees to
            mouse counts (0.022 degrees per count).
        va: target view angles (x = pitch, y = yaw) — presumably degrees;
            TODO confirm convention.
        angle: current view angles, same convention as `va`.

    Relies on module globals (g_aimbot_fov, g_aimbot_smooth,
    g_horizontal_only, g_current_tick, g_previous_tick) and helpers
    (target_set, Player, mouse) defined elsewhere in the file.
    """
    global g_current_tick
    global g_previous_tick
    # Pitch/yaw deltas between target and current view angles.
    y = (va.x - angle.x)
    x = (va.y - angle.y)
    # Clamp pitch to the +/-89 degree engine limit.
    if (y > 89.0):
        y = 89.0
    elif (y < (- 89.0)):
        y = (- 89.0)
    # Wrap yaw into (-180, 180].
    if (x > 180.0):
        x -= 360.0
    elif (x < (- 180.0)):
        x += 360.0
    # Target outside the configured FOV fraction: drop the target and bail.
    if ((math.fabs(x) / 180.0) >= g_aimbot_fov):
        target_set(Player(0))
        return
    if ((math.fabs(y) / 89.0) >= g_aimbot_fov):
        target_set(Player(0))
        return
    # Degrees -> mouse counts (0.022 deg/count); vertical axis is inverted.
    x = ((x / sensitivity) / 0.022)
    y = ((y / sensitivity) / (- 0.022))
    if (g_aimbot_smooth > 1.0):
        # Smoothing: take only a fraction of the remaining distance this
        # tick (larger g_aimbot_smooth => smaller step).
        sx = 0.0
        sy = 0.0
        if (sx < x):
            sx += (1.0 + (x / g_aimbot_smooth))
        elif (sx > x):
            sx -= (1.0 - (x / g_aimbot_smooth))
        if (sy < y):
            sy += (1.0 + (y / g_aimbot_smooth))
        elif (sy > y):
            sy -= (1.0 - (y / g_aimbot_smooth))
    else:
        sx = x
        sy = y
    if g_horizontal_only:
        sy = 0
    # Rate-limit: only move when the tick counter has advanced.
    if ((g_current_tick - g_previous_tick) > 0):
        g_previous_tick = g_current_tick
        mouse.move(int(sx), int(sy))
class ExtraSettingsFormsTestCase(TestCase):
    """Form-level validation tests for SettingForm."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def _bound_form(self, **data):
        # Helper: bind a SettingForm to the given POST-style data.
        return SettingForm(data=data)

    def test_form_with_valid_data(self):
        form = self._bound_form(name='PACKAGE_NAME', value_type=Setting.TYPE_BOOL)
        self.assertTrue(form.is_valid())

    def test_form_with_incomplete_data(self):
        # value_type missing -> the form must not validate.
        form = self._bound_form(name='PACKAGE_NAME')
        self.assertFalse(form.is_valid())

    def test_form_with_invalid_setting_name(self):
        # Names colliding with real Django settings must be rejected.
        form = self._bound_form(name='INSTALLED_APPS', value_type=Setting.TYPE_BOOL)
        self.assertFalse(form.is_valid())

    def test_form_with_optional_data(self):
        form = self._bound_form(name='PACKAGE_NAME', value_type=Setting.TYPE_BOOL, description='Yes/No')
        self.assertTrue(form.is_valid())
# BUG FIX: the bare leading ".parametrize(...)" line was a syntax error —
# it looks like a stripped "@pytest.mark" prefix, restored here.
# NOTE(review): confirm against the upstream test file.
@pytest.mark.parametrize('attr_dict', ({}, {'SafeSendLib': 'abc'}, {'SafeSendLib': 123}, {'SafeSendLib': b'abc'}, {'safe-send-lib': '0x4F5B11c860b37b68DE6D14Fb7e7b5f18A9A1bdC0'}, {'SafeSendLib': '0x4F5B11c860b37b68DE6D14Fb7e7b5f18A9A1bdC0', 'Wallet': '0xa66A05D6AB5c1c955F4D2c3FCC166AE6300b452B'}))
def test_contract_factory_invalidates_incorrect_attr_dicts(get_factory, attr_dict):
    """link_bytecode must reject malformed or irrelevant link-reference maps
    (wrong value types, unknown keys, extra entries)."""
    safe_send = get_factory('escrow', 'SafeSendLib')
    # The factory under test has no unresolved link references ...
    assert (safe_send.needs_bytecode_linking is False)
    # ... so any attempt to link must fail.
    with pytest.raises(BytecodeLinkingError):
        safe_send.link_bytecode(attr_dict)
class OptionPlotoptionsWordcloudSonificationTracksMappingTime(Options):
    """Generated Highcharts option wrapper for
    plotOptions.wordcloud.sonification.tracks.mapping.time.

    NOTE(review): getter/setter pairs share one name with no
    @property/@x.setter decorators — the second `def` shadows the first as
    written; appears to be a generated wrapper with stripped decorators.
    """

    def mapFunction(self):
        # Getter form: configured mapping function (no default).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter form: stores a plain (non-JS) value.
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter form: data property the mapping reads from.
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        # Getter form: upper bound of the mapped range.
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        # Getter form: lower bound of the mapped range.
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        # Getter form: what the min/max range is computed within.
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
# NOTE(review): bare tuple expression — almost certainly a stripped skip
# decorator (e.g. @unittest.skipIf(no_gui_test_assistant,
# 'No GuiTestAssistant')) for the class below; confirm against upstream.
(no_gui_test_assistant, 'No GuiTestAssistant')
class TestConcreteWidget(unittest.TestCase, GuiTestAssistant):
    """Lifecycle, visibility, enabled-state and focus tests for a concrete
    toolkit widget, run inside a real GUI event loop.

    NOTE(review): the bare `(is_qt, ...)` / `(is_mac, ...)` tuples between
    methods look like stripped skip decorators (e.g. @unittest.skipUnless);
    as written they are no-op expressions — confirm against upstream.
    """

    def setUp(self):
        GuiTestAssistant.setUp(self)
        self.widget = ConcreteWidget()

    def tearDown(self):
        # Dispose of the toolkit control (when one was created) before
        # dropping the widget reference.
        if (self.widget.control is not None):
            with self.delete_widget(self.widget.control):
                self.widget.destroy()
        del self.widget
        GuiTestAssistant.tearDown(self)

    def test_lifecycle(self):
        # create/destroy round-trip must not raise.
        with self.event_loop():
            self.widget.create()
        with self.event_loop():
            self.widget.destroy()

    def test_initialize(self):
        # Trait values set before create() must apply to the new control.
        self.widget.visible = False
        self.widget.enabled = False
        with self.event_loop():
            self.widget.create()
        self.assertFalse(self.widget.control.isVisible())
        self.assertFalse(self.widget.control.isEnabled())

    def test_show(self):
        # show() updates both the control and the `visible` trait (once).
        with self.event_loop():
            self.widget.create()
        with self.assertTraitChanges(self.widget, 'visible', count=1):
            with self.event_loop():
                self.widget.show(False)
        self.assertFalse(self.widget.control.isVisible())

    def test_visible(self):
        # Setting the trait directly must hide the control.
        with self.event_loop():
            self.widget.create()
        with self.assertTraitChanges(self.widget, 'visible', count=1):
            with self.event_loop():
                self.widget.visible = False
        self.assertFalse(self.widget.control.isVisible())

    def test_contents_visible(self):
        # Hiding/showing the parent window must not touch the child's
        # `visible` trait.
        window = MainWindow()
        window.create()
        try:
            with self.event_loop():
                window.open()
            with self.assertTraitDoesNotChange(window.widget, 'visible'):
                with self.event_loop():
                    window.visible = False
            with self.assertTraitDoesNotChange(window.widget, 'visible'):
                with self.event_loop():
                    window.visible = True
        finally:
            window.destroy()

    def test_contents_hidden(self):
        # Same as above, but with the child explicitly hidden first.
        window = MainWindow()
        window.create()
        try:
            with self.event_loop():
                window.open()
                window.widget.visible = False
            with self.assertTraitDoesNotChange(window.widget, 'visible'):
                with self.event_loop():
                    window.visible = False
            with self.assertTraitDoesNotChange(window.widget, 'visible'):
                with self.event_loop():
                    window.visible = True
        finally:
            window.destroy()

    (is_qt, 'Qt-specific test of hidden state')
    def test_contents_hide_external_change(self):
        # Hiding the child control directly through Qt must sync the trait;
        # re-showing the parent must not resurrect the hidden child.
        window = MainWindow()
        window.create()
        try:
            with self.event_loop():
                window.open()
            with self.assertTraitDoesNotChange(window.widget, 'visible'):
                with self.event_loop():
                    window.visible = False
            self.assertFalse(window.widget.control.isVisible())
            self.assertFalse(window.widget.control.isHidden())
            with self.assertTraitChanges(window.widget, 'visible'):
                with self.event_loop():
                    window.widget.control.hide()
            self.assertFalse(window.widget.visible)
            self.assertFalse(window.widget.control.isVisible())
            self.assertTrue(window.widget.control.isHidden())
            with self.assertTraitDoesNotChange(window.widget, 'visible'):
                with self.event_loop():
                    window.visible = True
            self.assertFalse(window.widget.control.isVisible())
            self.assertTrue(window.widget.control.isHidden())
        finally:
            window.destroy()

    (is_qt, 'Qt-specific test of hidden state')
    def test_show_widget_with_parent_is_invisible_qt(self):
        # Showing a child while its parent is hidden: the trait flips but
        # Qt keeps the control not-visible (and not explicitly hidden).
        window = MainWindow()
        window.create()
        try:
            with self.event_loop():
                window.open()
                window.widget.visible = False
            with self.event_loop():
                window.visible = False
            with self.event_loop():
                window.widget.visible = True
            self.assertTrue(window.widget.visible)
            self.assertFalse(window.widget.control.isVisible())
            self.assertFalse(window.widget.control.isHidden())
        finally:
            window.destroy()

    (is_qt, 'Qt-specific test of hidden state')
    def test_show_widget_then_parent_is_invisible_qt(self):
        # Hiding the parent after the child is shown: child trait stays
        # True while the control reports not-visible.
        window = MainWindow()
        window.create()
        try:
            with self.event_loop():
                window.open()
                window.visible = True
            with self.event_loop():
                window.widget.visible = True
            with self.event_loop():
                window.visible = False
            self.assertTrue(window.widget.visible)
            self.assertFalse(window.widget.control.isVisible())
            self.assertFalse(window.widget.control.isHidden())
        finally:
            window.destroy()

    def test_enable(self):
        # enable() updates both the control and the `enabled` trait (once).
        with self.event_loop():
            self.widget.create()
        with self.assertTraitChanges(self.widget, 'enabled', count=1):
            with self.event_loop():
                self.widget.enable(False)
        self.assertFalse(self.widget.control.isEnabled())

    def test_enabled(self):
        # Setting the trait directly must disable the control.
        with self.event_loop():
            self.widget.create()
        with self.assertTraitChanges(self.widget, 'enabled', count=1):
            with self.event_loop():
                self.widget.enabled = False
        self.assertFalse(self.widget.control.isEnabled())

    (is_mac, 'Broken on Linux and Windows')
    def test_focus(self):
        # focus() must give the widget keyboard focus.
        with self.event_loop():
            self.widget.create()
        self.assertFalse(self.widget.has_focus())
        with self.event_loop():
            self.widget.focus()
        self.assertTrue(self.widget.has_focus())
class TestExternalPluginSourceSupplier():
    """Tests for supplier.ExternalPluginSourceSupplier.

    NOTE(review): the bare `('glob.glob', lambda ...)` tuples before the
    last two tests look like stripped `@mock.patch(...)` decorators —
    without them glob.glob is not actually patched; confirm upstream.
    """

    def setup_method(self, method):
        # Plugin built inside an elasticsearch-extra checkout (src.subdir).
        self.along_es = supplier.ExternalPluginSourceSupplier(plugin=team.PluginDescriptor('some-plugin', core_plugin=False), revision='abc', src_dir='/src', src_config={'plugin.some-plugin.src.subdir': 'elasticsearch-extra/some-plugin', 'plugin.some-plugin.build.artifact.subdir': 'plugin/build/distributions'}, builder=None)
        # Stand-alone plugin with its own absolute src.dir.
        self.standalone = supplier.ExternalPluginSourceSupplier(plugin=team.PluginDescriptor('some-plugin', core_plugin=False), revision='abc', src_dir=None, src_config={'plugin.some-plugin.src.dir': '/Projects/src/some-plugin', 'plugin.some-plugin.build.artifact.subdir': 'build/distributions'}, builder=None)

    def test_invalid_config_no_source(self):
        # Neither src.dir nor src.subdir configured -> setup error.
        with pytest.raises(exceptions.SystemSetupError, match='Neither plugin.some-plugin.src.dir nor plugin.some-plugin.src.subdir are set for plugin some-plugin.'):
            supplier.ExternalPluginSourceSupplier(plugin=team.PluginDescriptor('some-plugin', core_plugin=False), revision='abc', src_dir=None, src_config={'plugin.some-plugin.build.artifact.subdir': 'build/distributions'}, builder=None)

    def test_invalid_config_duplicate_source(self):
        # Both src.dir and src.subdir configured -> setup error.
        with pytest.raises(exceptions.SystemSetupError, match='Can only specify one of plugin.duplicate.src.dir and plugin.duplicate.src.subdir but both are set.'):
            supplier.ExternalPluginSourceSupplier(plugin=team.PluginDescriptor('duplicate', core_plugin=False), revision='abc', src_dir=None, src_config={'plugin.duplicate.src.subdir': 'elasticsearch-extra/some-plugin', 'plugin.duplicate.src.dir': '/Projects/src/some-plugin', 'plugin.duplicate.build.artifact.subdir': 'build/distributions'}, builder=None)

    def test_standalone_plugin_overrides_build_dir(self):
        assert (self.standalone.override_build_dir == '/Projects/src/some-plugin')

    def test_along_es_plugin_keeps_build_dir(self):
        assert (self.along_es.override_build_dir is None)

    ('glob.glob', (lambda p: ['/src/elasticsearch-extra/some-plugin/plugin/build/distributions/some-plugin.zip']))
    def test_add_binary_built_along_elasticsearch(self):
        binaries = {}
        self.along_es.add(binaries)
        assert (binaries == {'some-plugin': 'file:///src/elasticsearch-extra/some-plugin/plugin/build/distributions/some-plugin.zip'})

    ('glob.glob', (lambda p: ['/Projects/src/some-plugin/build/distributions/some-plugin.zip']))
    def test_resolve_plugin_binary_built_standalone(self):
        binaries = {}
        # NOTE(review): the test name says "standalone" but this exercises
        # self.along_es — possibly a copy-paste slip (the mocked glob result
        # matches the standalone layout); confirm intent.
        self.along_es.add(binaries)
        assert (binaries == {'some-plugin': 'file:///Projects/src/some-plugin/build/distributions/some-plugin.zip'})
def extractRosetranslatesWordpressCom(item):
    """Feed-item parser for rosetranslates.wordpress.com.

    Returns a release message for recognised tags, None for previews or
    items without a chapter/volume, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    title_lower = item['title'].lower()
    # Skip items with no chapter/volume info and preview posts.
    if not (chp or vol) or 'preview' in title_lower:
        return None
    tagmap = [
        ('my sweet physician wife who calls the shots', 'my sweet physician wife who calls the shots', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    # First matching tag wins.
    match = next(((name, tl_type) for tagname, name, tl_type in tagmap if tagname in item['tags']), None)
    if match is None:
        return False
    name, tl_type = match
    return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
class Cohere(REST):
    """Queries the Cohere generation API over REST.

    NOTE(review): ``credentials`` reads like it was a ``@property`` in the
    upstream source (``__call__`` consumes ``self._credentials``); the
    decorator may have been stripped — confirm before relying on call style.
    """

    def credentials(self) -> Dict[str, str]:
        """Build the Authorization header from the CO_API_KEY env variable.

        Only warns (rather than raising) when the key is missing, so the
        header then contains 'Bearer None' and the API call will fail with
        an authentication error.
        """
        api_key = os.getenv('CO_API_KEY')
        if (api_key is None):
            warnings.warn("Could not find the API key to access the Cohere API. Ensure you have an API key set up via then make it available as an environment variable 'CO_API_KEY'.")
        return {'Authorization': f'Bearer {api_key}'}

    def _verify_auth(self) -> None:
        """Fire a minimal test prompt to validate the configured credentials."""
        try:
            self(['test'])
        except ValueError as err:
            if ('invalid api token' in str(err)):
                warnings.warn('Authentication with provided API key failed. Please double-check you provided the correct credentials.')
            else:
                raise err

    def __call__(self, prompts: Iterable[str]) -> Iterable[str]:
        """Send one request per prompt and return the generated texts.

        Failed prompts yield a JSON-serialized error payload per prompt
        when the model is not strict; otherwise a ValueError is raised.
        """
        headers = {**self._credentials, 'Content-Type': 'application/json', 'Accept': 'application/json'}
        api_responses: List[str] = []
        prompts = list(prompts)

        def _request(json_data: Dict[str, Any]) -> Dict[str, Any]:
            r = self.retry(call_method=requests.post, url=self._endpoint, headers=headers, json={**json_data, **self._config, 'model': self._name}, timeout=self._max_request_time)
            try:
                r.raise_for_status()
            except HTTPError as ex:
                res_content = srsly.json_loads(r.content.decode('utf-8'))
                error_message = res_content.get('message', {})
                # BUG FIX: previously only 'blocked' errors were handled;
                # any other HTTP error fell out of the except block and the
                # code continued to r.json() on a failed response. Now every
                # HTTPError either raises or returns an error payload.
                if ('blocked' in error_message):
                    if self._strict:
                        raise ValueError(f"Cohere API returned a blocking error. {error_message}. If you wish to ignore and continue, you can pass 'False' to the 'strict' argument of this model. However, note that this will affect how spacy-llm parses the response.") from ex
                    # Non-strict: surface the block as an error payload so
                    # the caller still gets one entry per prompt.
                    assert isinstance(prompts, Sized)
                    return {'error': ([srsly.json_dumps(res_content)] * len(prompts))}
                raise ValueError(f'Request to Cohere API failed: {error_message}') from ex
            response = r.json()
            # A 'message' key in a 2xx body still signals an API error.
            if ('message' in response):
                if self._strict:
                    raise ValueError(f'API call failed: {response}.')
                else:
                    assert isinstance(prompts, Sized)
                    return {'error': ([srsly.json_dumps(response)] * len(prompts))}
            return response

        responses = [_request({'prompt': prompt}) for prompt in prompts]
        for response in responses:
            if ('generations' in response):
                # Take the first generation that actually carries text.
                for result in response['generations']:
                    if ('text' in result):
                        api_responses.append(result['text'])
                        break
                else:
                    api_responses.append(srsly.json_dumps(response))
            else:
                api_responses.append(srsly.json_dumps(response))
        return api_responses
def save_to_csv(generator_ad_archives, args, fields, is_verbose=False):
    """Write ad archives from a generator to a CSV-ish text file.

    Args:
        generator_ad_archives: iterable yielding batches (lists) of ad
            archive dicts.
        args: action arguments; exactly one is expected — the output path.
        fields: comma-separated header string; also selects the columns.
        is_verbose: print a running count of processed items.

    Raises:
        Exception: if ``args`` does not contain exactly one element.

    NOTE: quoting is hand-rolled for output compatibility — embedded
    newlines/quotes are stripped rather than escaped, and trailing empty
    columns collapse (trailing delimiters are rstripped per row). The
    ``csv`` module would be the lossless alternative.
    """
    if (len(args) != 1):
        raise Exception('save_to_csv action takes 1 argument: output_file')
    delimiter = ','
    total_count = 0
    output_file = args[0]
    field_names = fields.split(delimiter)
    # PERF FIX: accumulate chunks and join once at the end; the original
    # built the whole file via repeated `output += ...`, which is quadratic.
    chunks = [fields + '\n']
    for ad_archives in generator_ad_archives:
        total_count += len(ad_archives)
        if is_verbose:
            print(('Items processed: %d' % total_count))
        for ad_archive in ad_archives:
            cells = []
            for field in field_names:
                if (field in ad_archive):
                    value = ad_archive[field]
                    # Serialize nested structures as JSON; join plain lists.
                    # BUG FIX: guard value[0] so an empty list no longer
                    # raises IndexError (it now joins to '').
                    if ((type(value) == list and value and type(value[0]) == dict) or (type(value) == dict)):
                        value = json.dumps(value)
                    elif (type(value) == list):
                        value = delimiter.join(value)
                    cells.append((('"' + value.replace('\n', '').replace('"', '')) + '"') + delimiter)
                else:
                    cells.append(delimiter)
            # Strip trailing delimiters exactly as the original rstrip did.
            chunks.append(''.join(cells).rstrip(',') + '\n')
    output = ''.join(chunks)
    with open(output_file, 'w') as csvfile:
        csvfile.write(output)
    print(('Successfully wrote data to file: %s' % output_file))
def run_example():
    """Demo: privatize a uniform bucket sample with RAPPOR and print the
    estimated histogram next to the ground truth."""
    bucket_size = 5
    epsilon = 1
    print('>>>>> in RAPPOR')
    rappor = RAPPOR(bucket_size=bucket_size, epsilon=epsilon)
    bucket_list, true_hist = example.generate_bucket(n=10000, bucket_size=bucket_size, distribution_name='uniform')
    print('this is buckets: ', bucket_list)
    print('this is true hist: ', true_hist)
    # Locally privatize each user's bucket before aggregation.
    private_bucket_list = []
    for item in bucket_list:
        private_bucket_list.append(rappor.user_encode(item))
    estimate_hist = rappor.aggregate_histogram(private_bucket_list)
    print('this is estimate_hist', estimate_hist)
def test_ld(golden):
    # NOTE(review): this is an Exo-lang (exocompilation) scheduling test.
    # The bare parenthesized names such as `(_gemm_config_ld_i8)` appear to
    # be stripped `@instr(...)` decorators, and parameter annotations like
    # `i8[(n, m)] DRAM` are Exo DSL syntax, not plain Python — this block
    # is not runnable as-is; confirm against the upstream test file.
    ConfigLoad = new_config_ld()
    # C template: configure the Gemmini load unit (stride + scale).
    _gemm_config_ld_i8 = ('gemmini_extended3_config_ld({src_stride}, ' + '{scale}[0], 0, 0);\n')
    (_gemm_config_ld_i8)
    def config_ld_i8(scale: f32, src_stride: stride):
        ConfigLoad.scale = scale
        ConfigLoad.src_stride = src_stride
    # C template: move a tile from DRAM into the scratchpad.
    _gemm_do_ld_i8 = ('gemmini_extended_mvin( {src}.data, ' + '((uint64_t) {dst}.data), {m}, {n} );')
    (_gemm_do_ld_i8)
    def do_ld_i8(n: size, m: size, src: (i8[(n, m)] DRAM), dst: (i8[(n, 16)] GEMM_SCRATCH)):
        assert (n <= 16)
        assert (m <= 16)
        assert (stride(src, 1) == 1)
        assert (stride(dst, 0) == 16)
        assert (stride(dst, 1) == 1)
        assert (stride(src, 0) == ConfigLoad.src_stride)
        for i in seq(0, n):
            for j in seq(0, m):
                tmp: f32
                tmp = src[(i, j)]
                tmp = (tmp * ConfigLoad.scale)
                dst[(i, j)] = tmp
    # Combined template: config + mvin as one fused instruction.
    _gemm_ld_i8 = ((('gemmini_extended3_config_ld({stride(src, 0)}, ' + '{scale}[0], 0, 0);\n') + 'gemmini_extended_mvin( {src}.data, ') + '((uint64_t) {dst}.data), {m}, {n} );')
    (_gemm_ld_i8)
    def ld_i8(n: size, m: size, scale: f32, src: (i8[(n, m)] DRAM), dst: (i8[(n, 16)] GEMM_SCRATCH)):
        assert (n <= 16)
        assert (m <= 16)
        assert (stride(src, 1) == 1)
        assert (stride(dst, 0) == 16)
        assert (stride(dst, 1) == 1)
        for i in seq(0, n):
            for j in seq(0, m):
                tmp: f32
                tmp = src[(i, j)]
                tmp = (tmp * scale)
                dst[(i, j)] = tmp
    # Schedule: factor ld_i8 into config_ld_i8 + do_ld_i8 via rewrites.
    ld_i8 = bind_config(ld_i8, 'scale', ConfigLoad, 'scale')
    ld_i8 = reorder_stmts(ld_i8, 'tmp = src[_] ; ConfigLoad.scale = _')
    ld_i8 = reorder_stmts(ld_i8, 'tmp : _ ; ConfigLoad.scale = _')
    ld_i8 = autofission(ld_i8, ld_i8.find('ConfigLoad.scale = _').after(), n_lifts=3)
    ld_i8 = write_config(ld_i8, ld_i8.find('ConfigLoad.scale = _').after(), ConfigLoad, 'src_stride', 'stride(src, 0)')
    ld_i8 = replace(ld_i8, 'for i in _:_', do_ld_i8)
    ld_i8 = replace(ld_i8, 'ConfigLoad.scale = _ ; ConfigLoad.src_stride = _', config_ld_i8)
    # Compare the pretty-printed procedures against the golden file.
    assert (f'''{config_ld_i8}
{ld_i8}''' == golden)
def convert_to_richtext(apps, schema_editor):
    """Data migration: flatten StreamField rich_text blocks into raw text.

    NOTE(review): every post is saved, even when the body was untouched —
    presumably intentional (re-triggers save-side processing), but worth
    confirming.
    """
    BlogPost = apps.get_model('blog', 'BlogPost')
    for post in BlogPost.objects.all():
        # Only convert posts that have no raw text yet.
        if post.body.raw_text is None:
            pieces = [
                child.value.source
                for child in post.body
                if child.block_type == 'rich_text'
            ]
            post.body = ''.join(pieces)
        post.save()
class Command(BaseCommand):
    """Management command: report NCSO concession totals and list every
    concession not yet reconciled against a VMPP."""

    def handle(self, *args, **kwargs):
        today = date.today()
        first_of_month = date(today.year, today.month, 1)
        num_concessions = NCSOConcession.objects.count()
        num_concessions_in_this_month = NCSOConcession.objects.filter(date=first_of_month).count()
        # Unreconciled = no VMPP assigned yet.
        unmatched_concessions = NCSOConcession.objects.filter(vmpp_id__isnull=True)
        num_unmatched_concessions = unmatched_concessions.count()

        lines = [
            f'There are {num_concessions} concessions',
            f"There are {num_concessions_in_this_month} concessions for {today.strftime('%B %Y')}",
        ]
        # Grammar-aware summary of the unreconciled count.
        if num_unmatched_concessions == 0:
            lines.append('There are no unreconciled concessions')
        elif num_unmatched_concessions == 1:
            lines.append('There is 1 unreconciled concession')
        else:
            lines.append(f'There are {num_unmatched_concessions} unreconciled concessions')
        if num_unmatched_concessions > 0:
            lines.append('')
            lines.append('To reconcile, tell bennett_bot:')
            lines.append('`op ncso reconcile concession [ID] against vmpp [VMPP ID]`')
            for c in unmatched_concessions:
                lines.append('-' * 80)
                lines.append(f'ID: {c.id}')
                lines.append(f'Drug: {c.drug}')
                lines.append(f'Pack size: {c.pack_size}')
        print('\n'.join(lines))
class FieldFile(Field):
    """A labelled form field wrapping a file-input component."""
    # NOTE(review): 'Field Integer' looks like a copy-paste from a sibling
    # class — a file field would be expected to read 'Field File'. Left
    # unchanged since the value may be referenced elsewhere; confirm.
    name = 'Field Integer'

    def __init__(self, page: primitives.PageModel, value, label, placeholder, icon, width, height, html_code, helper, options, profile):
        """Build the underlying file input and delegate field assembly.

        The initial value comes from page.inputs (keyed by html_code),
        falling back to ``value``.
        """
        html_input = page.ui.inputs.file(page.inputs.get(html_code, value), width=(None, '%'), placeholder=placeholder, options=options)
        super(FieldFile, self).__init__(page, html_input, label, icon, width, height, html_code, helper, options, profile)
class flow_lightweight_stats_entry(loxi.OFObject):
    """Generated OpenFlow (loxigen-style) struct: one lightweight
    flow-stats entry.

    Wire layout handled by pack/unpack: u16 length, 2 pad bytes,
    u8 table_id, u8 reason, u16 priority, then a match and a stats TLV.

    NOTE(review): this reads as Python-2-era generated code — pack() joins
    str chunks (the '\\x00' padding literal) rather than bytes, and
    unpack() is written as a static factory but carries no @staticmethod
    decorator (the loxigen template normally adds one); confirm against
    the generated original.
    """

    def __init__(self, table_id=None, reason=None, priority=None, match=None, stats=None):
        # Each field defaults to its zero/empty value when not supplied.
        if (table_id != None):
            self.table_id = table_id
        else:
            self.table_id = 0
        if (reason != None):
            self.reason = reason
        else:
            self.reason = 0
        if (priority != None):
            self.priority = priority
        else:
            self.priority = 0
        if (match != None):
            self.match = match
        else:
            self.match = ofp.match()
        if (stats != None):
            self.stats = stats
        else:
            self.stats = ofp.stat()
        return

    def pack(self):
        """Serialize to the wire format; the length field is back-patched
        once the total size is known."""
        packed = []
        packed.append(struct.pack('!H', 0))  # length placeholder
        packed.append(('\x00' * 2))  # 2 pad bytes
        packed.append(struct.pack('!B', self.table_id))
        packed.append(struct.pack('!B', self.reason))
        packed.append(struct.pack('!H', self.priority))
        packed.append(self.match.pack())
        packed.append(self.stats.pack())
        length = sum([len(x) for x in packed])
        # Back-patch the real length into the first field.
        packed[0] = struct.pack('!H', length)
        return ''.join(packed)

    def unpack(reader):
        """Static factory: parse one entry from `reader` (length-bounded)."""
        obj = flow_lightweight_stats_entry()
        _length = reader.read('!H')[0]
        orig_reader = reader
        # Constrain further reads to this entry's declared length.
        reader = orig_reader.slice(_length, 2)
        reader.skip(2)  # pad bytes
        obj.table_id = reader.read('!B')[0]
        obj.reason = reader.read('!B')[0]
        obj.priority = reader.read('!H')[0]
        obj.match = ofp.match.unpack(reader)
        obj.stats = ofp.stat.unpack(reader)
        return obj

    def __eq__(self, other):
        # Field-by-field equality; different types are never equal.
        if (type(self) != type(other)):
            return False
        if (self.table_id != other.table_id):
            return False
        if (self.reason != other.reason):
            return False
        if (self.priority != other.priority):
            return False
        if (self.match != other.match):
            return False
        if (self.stats != other.stats):
            return False
        return True

    def pretty_print(self, q):
        """Render a human-readable dump via the pretty-printer `q`."""
        q.text('flow_lightweight_stats_entry {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('table_id = ')
                q.text(('%#x' % self.table_id))
                q.text(',')
                q.breakable()
                q.text('reason = ')
                # Show the symbolic enum name when the value is known.
                value_name_map = {0: 'OFPFSR_STATS_REQUEST', 1: 'OFPFSR_STAT_TRIGGER'}
                if (self.reason in value_name_map):
                    q.text(('%s(%d)' % (value_name_map[self.reason], self.reason)))
                else:
                    q.text(('%#x' % self.reason))
                q.text(',')
                q.breakable()
                q.text('priority = ')
                q.text(('%#x' % self.priority))
                q.text(',')
                q.breakable()
                q.text('match = ')
                q.pp(self.match)
                q.text(',')
                q.breakable()
                q.text('stats = ')
                q.pp(self.stats)
            q.breakable()
        q.text('}')
def run_stage(config: Dict[(str, Any)], instance_id: str, stage: PrivateComputationBaseStageFlow, logger: logging.Logger, server_ips: Optional[List[str]]=None, dry_run: bool=False) -> None:
    """Build the private-computation service, refresh the instance state, and run one stage.

    The updated instance returned by the service is logged for the operator.
    """
    svc = build_private_computation_service(config['private_computation'], config['mpc'], config['pid'], config.get('post_processing_handlers', {}), config.get('pid_post_processing_handlers', {}))
    # Sync the instance's state before kicking off the stage.
    svc.update_instance(instance_id)
    updated_instance = svc.run_stage(instance_id=instance_id, stage=stage, server_ips=server_ips, dry_run=dry_run)
    logger.info(updated_instance)
def downgrade():
    """Alembic downgrade: make feed_event.pour and feed_event.full NOT NULL again."""
    with op.batch_alter_table('feed_event', schema=None) as batch_op:
        # Both columns get the same constraint change, in the original order.
        for column_name in ('pour', 'full'):
            batch_op.alter_column(column_name, existing_type=sa.INTEGER(), nullable=False, existing_server_default=sa.text("'0'"))
class MsgServicer(object):
    """Placeholder gRPC servicer: every RPC reports UNIMPLEMENTED."""

    def _unimplemented(self, context):
        # Common stub behaviour shared by all RPC methods.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GrantAllowance(self, request, context):
        """Stub for the GrantAllowance RPC."""
        self._unimplemented(context)

    def RevokeAllowance(self, request, context):
        """Stub for the RevokeAllowance RPC."""
        self._unimplemented(context)
class Permute(Fixed, VolumePreserving):
    """Bijector that permutes the last dimension of its input.

    If no permutation is supplied, a random one is drawn lazily on first use
    (sized to the event dimension seen); the inverse permutation is derived
    on demand from it.
    """
    domain = constraints.real_vector
    codomain = constraints.real_vector
    def __init__(self, params_fn: Optional[flowtorch.Lazy]=None, *, shape: torch.Size, context_shape: Optional[torch.Size]=None, permutation: Optional[torch.Tensor]=None) -> None:
        super().__init__(params_fn, shape=shape, context_shape=context_shape)
        # May be None; a random permutation is drawn on first forward/inverse.
        self.permutation = permutation
    def _forward(self, x: torch.Tensor, params: Optional[Sequence[torch.Tensor]]) -> Tuple[(torch.Tensor, Optional[torch.Tensor])]:
        if (self.permutation is None):
            # Lazily fix a random permutation over the last dimension of x.
            self.permutation = torch.randperm(x.shape[(- 1)])
        y = torch.index_select(x, (- 1), self.permutation)
        ladj = self._log_abs_det_jacobian(x, y, params)
        return (y, ladj)
    def _inverse(self, y: torch.Tensor, params: Optional[Sequence[torch.Tensor]]) -> Tuple[(torch.Tensor, Optional[torch.Tensor])]:
        if (self.permutation is None):
            self.permutation = torch.randperm(y.shape[(- 1)])
        # Undo the forward shuffle via the inverse permutation.
        x = torch.index_select(y, (- 1), self.inv_permutation)
        ladj = self._log_abs_det_jacobian(x, y, params)
        return (x, ladj)
    # NOTE(review): the bare '_property' below looks like a decorator whose
    # '@' was lost in extraction (presumably '@property' or '@lazy_property')
    # — confirm against the original source.
    _property
    def inv_permutation(self) -> Optional[torch.Tensor]:
        """Return the inverse of self.permutation, or None if it is unset."""
        if (self.permutation is None):
            return None
        # Scatter positions so that result[permutation[i]] == i.
        result = torch.empty_like(self.permutation, dtype=torch.long)
        result[self.permutation] = torch.arange(self.permutation.size(0), dtype=torch.long, device=self.permutation.device)
        return result
def test_fit_knee():
    """Fit a knee-mode spectral model to a simulated spectrum and check parameter recovery."""
    aperiodic = [50, 10, 1]
    gaussians = [10, 0.3, 2, 20, 0.1, 4, 60, 0.3, 1]
    noise_level = 0.0025
    (freqs, powers) = sim_power_spectrum([1, 150], aperiodic, gaussians, noise_level)
    model = SpectralModel(aperiodic_mode='knee', verbose=False)
    model.fit(freqs, powers)
    # Aperiodic parameters should be recovered within loose tolerances.
    assert np.allclose(aperiodic, model.aperiodic_params_, [1, 2, 0.2])
    # Each simulated gaussian should match its fitted counterpart.
    for (idx, gauss) in enumerate(group_three(gaussians)):
        assert np.allclose(gauss, model.gaussian_params_[idx], [2.0, 0.5, 1.0])
class HighQualityWriter():
    """Collect frames as numbered PNGs in a temp directory, then encode them with ffmpeg."""

    def __init__(self, fps=30):
        self.fps = fps
        self.tmp_dir = None    # TemporaryDirectory holding the pending frames
        self.cur_frame = 0     # index used to name the next frame PNG

    def _initialize_video(self):
        # Start a fresh frame directory and reset the frame counter.
        self.tmp_dir = tempfile.TemporaryDirectory()
        self.cur_frame = 0

    def add_frame(self, frame):
        """Append one frame (image data accepted by imageio) to the pending video."""
        if self.tmp_dir is None:
            self._initialize_video()
        frame_file = os.path.join(self.tmp_dir.name, f'{self.cur_frame}.png')
        imageio.imwrite(frame_file, frame)
        self.cur_frame += 1

    def write(self, output_path):
        """Encode the collected frames to output_path via ffmpeg, then discard them."""
        abs_tmp_dir_path = pathlib.Path(self.tmp_dir.name).absolute()
        abs_output_path = pathlib.Path(output_path).absolute()
        os.makedirs(os.path.dirname(abs_output_path), exist_ok=True)
        # crf 1 keeps the encode near-lossless; -y overwrites existing output.
        subprocess.call([FFMPEG_PATH, '-framerate', f'{self.fps}', '-i', f'{abs_tmp_dir_path}/%d.png', '-vcodec', 'libx264', '-pix_fmt', 'yuv420p', '-crf', '1', '-y', abs_output_path])
        self.tmp_dir.cleanup()
        self.tmp_dir = None
        print(f'Video written to: {abs_output_path}')
def user_login(request, username=None):
    """Authenticate a user from POST data and redirect appropriately.

    Honours a 'next' target from GET or POST, finishes any booking saved in
    the session, and falls back to rendering the login page on failure.
    """
    logger.debug('in user_login')
    next_page = request.GET['next'] if ('next' in request.GET) else None
    password = ''
    if request.POST:
        if not username:
            username = request.POST['username']
        # The password may arrive under either field name.
        if 'password' in request.POST:
            password = request.POST['password']
        elif 'password2' in request.POST:
            password = request.POST['password2']
        if 'next' in request.POST:
            next_page = request.POST['next']
        user = authenticate(username=username, password=password)
        if (user is not None) and user.is_active:
            login(request, user)
            process_unsaved_booking(request)
            pending = request.session.get('new_booking_redirect')
            if pending:
                # A booking was submitted pre-login; send the user to it.
                booking_id = pending['booking_id']
                location_slug = pending['location_slug']
                request.session.pop('new_booking_redirect')
                messages.add_message(request, messages.INFO, 'Thank you! Your booking has been submitted. Please allow us up to 24 hours to respond.')
                return HttpResponseRedirect(reverse('booking_detail', args=(location_slug, booking_id)))
            # Never bounce a fresh login straight back to a logout URL.
            if (not next_page) or ('logout' in next_page):
                next_page = '/'
            return HttpResponseRedirect(next_page)
    return render(request, 'registration/login.html')
def get_arguments():
    """Parse and return the server's command-line options as a Namespace."""
    parser = argparse.ArgumentParser()
    # (flag, kwargs) pairs, registered in the original order.
    option_specs = (
        ('--ip', dict(type=str, default='::', help='IP address to bind to')),
        ('--port', dict(type=int, default=1969, help='port to bind to')),
        ('--retries', dict(type=int, default=5, help='number of per-packet retries')),
        ('--timeout_s', dict(type=int, default=2, help='timeout for packet retransmission')),
        ('--root', dict(type=str, default='', help='root of the static filesystem')),
    )
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
class OCRThread(QThread):
    """Background thread that OCRs a screenshot and reports the text to Emacs.

    Tries PaddleOCR first (via the bundled core/paddle_ocr.py script) and
    falls back to EasyOCR; the screenshot file is deleted afterwards in
    either case.
    """

    def __init__(self, image_path):
        QThread.__init__(self)
        self.image_path = image_path

    def adjust_ocr(self, ocr_string):
        """Apply the character substitutions from OCR_ADJUST_DICT to ocr_string."""
        for char in OCR_ADJUST_DICT:
            ocr_string = ocr_string.replace(char, OCR_ADJUST_DICT[char])
        return ocr_string

    def run(self):
        try:
            message_to_emacs("Use PaddleOCR analyze screenshot, it's need few seconds to analyze...")
            import os
            python_command = get_emacs_var('eaf-python-command')
            command_string = '{} paddle_ocr.py {}'.format(python_command, self.image_path)
            cwd = os.path.join(os.path.dirname(__file__), 'core')
            import subprocess
            process = subprocess.Popen(command_string, cwd=cwd, shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            process.wait()
            # The OCR result is expected on the last line of the script's stdout.
            string = process.stdout.readlines()[(- 1)]
            eval_in_emacs('eaf-ocr-buffer-record', [self.adjust_ocr(string)])
        except Exception:
            # PaddleOCR failed (missing package, script error, empty output):
            # log it and fall back to EasyOCR.  Catch Exception rather than a
            # bare except so KeyboardInterrupt/SystemExit still propagate.
            import traceback
            traceback.print_exc()
            message_to_emacs("Use EasyOCR analyze screenshot, it's need few seconds to analyze...")
            try:
                import easyocr
                reader = easyocr.Reader(['ch_sim', 'en'])
                result = reader.readtext(self.image_path)
                # readtext yields (bbox, text, confidence); keep the text parts.
                string = ''.join(list(map((lambda r: r[1]), result)))
                eval_in_emacs('eaf-ocr-buffer-record', [self.adjust_ocr(string)])
            except Exception:
                import traceback
                traceback.print_exc()
                message_to_emacs('Please use pip3 install PaddleOCR or EasyOCR first.')
        # Clean up the screenshot regardless of which engine (if any) succeeded.
        import os
        os.remove(self.image_path)
def test_lock(tmp_path, monkeypatch):
    """A second writer must time out while the storage lock is held; readers still work."""
    with open_storage(tmp_path, mode='w') as writer:
        exp_id = writer.create_experiment()
        writer.create_ensemble(exp_id, name='foo', ensemble_size=42)
        # Shorten the lock timeout so the expected failure is fast.
        monkeypatch.setattr(local.LocalStorageAccessor, 'LOCK_TIMEOUT', 0.1)
        # A concurrent writer cannot acquire the lock...
        with pytest.raises(TimeoutError):
            open_storage(tmp_path, mode='w')
        # ...but a reader can still see the same cases.
        with open_storage(tmp_path) as reader:
            assert (_cases(writer) == _cases(reader))
    # After the first writer releases the lock, writing works again.
    with open_storage(tmp_path, mode='w') as writer:
        assert (_cases(writer) == ['foo'])
class DirectOutputError(Exception):
    """Exception carrying a DirectOutput error code and a human-readable message."""

    # Known error codes mapped to their descriptions.
    Errors = {E_HANDLE: 'Invalid device handle specified.', E_INVALIDARG: "An argument is invalid, and I don't mean it has a poorly leg.", E_OUTOFMEMORY: 'Download more RAM.', E_PAGENOTACTIVE: 'Page not active, stupid page.', E_BUFFERTOOSMALL: 'Buffer used was too small. Use a bigger buffer. See also E_OUTOFMEMORY.', E_NOTIMPL: 'Feature not implemented, allegedly'}

    def __init__(self, error_code):
        self.error_code = error_code
        # Unknown codes fall back to a generic message with the code in hex.
        self.msg = self.Errors.get(error_code, ('Unspecified DirectOutput Error - ' + str(hex(error_code))))

    def __str__(self):
        return self.msg
class CustomEditor(BasicEditorFactory):
    """Editor factory that builds its widget from a user-supplied callable."""

    # Editor class, resolved lazily from the active toolkit.
    klass = Property()
    # Callable that constructs the custom widget.
    factory = Callable()
    # Extra positional arguments forwarded to the factory callable.
    args = Tuple()

    def __init__(self, *args, **traits):
        # First positional argument is the factory; the rest are its arguments.
        if args:
            self.factory = args[0]
            self.args = args[1:]
        super().__init__(**traits)

    def _get_klass(self):
        # Property getter: look up the toolkit-specific CustomEditor class.
        return toolkit_object('custom_editor:CustomEditor')
# NOTE(review): '_checkpoint(...)' looks like a decorator whose '@' was lost
# in extraction (presumably '@checkpoint(...)'); confirm against the original.
_checkpoint(dump_params=True, include=['dataset_id'], component=LOG_COMPONENT)
def _get_attribution_dataset_info(client: BoltGraphAPIClient[BoltPAGraphAPICreateInstanceArgs], dataset_id: str, logger: logging.Logger) -> Any:
    """Fetch the datasets-information and target-id fields for dataset_id and return the decoded JSON payload."""
    return json.loads(client.get_attribution_dataset_info(dataset_id, [DATASETS_INFORMATION, TARGET_ID]).text)
def example():
    """Build a column whose amber top container is resizable by dragging the divider."""
    async def move_divider(e: ft.DragUpdateEvent):
        # Grow or shrink the top container, clamped between 100 and 300 px.
        growing = ((e.delta_y > 0) and (c.height < 300))
        shrinking = ((e.delta_y < 0) and (c.height > 100))
        if growing or shrinking:
            c.height += e.delta_y
            (await c.update_async())

    async def show_draggable_cursor(e: ft.HoverEvent):
        # Hint that the divider can be dragged vertically.
        e.control.mouse_cursor = ft.MouseCursor.RESIZE_UP_DOWN
        (await e.control.update_async())

    c = ft.Container(bgcolor=ft.colors.AMBER, alignment=ft.alignment.center, height=100)
    divider = ft.GestureDetector(content=ft.Divider(), on_pan_update=move_divider, on_hover=show_draggable_cursor)
    filler = ft.Container(bgcolor=ft.colors.PINK, alignment=ft.alignment.center, expand=1)
    return ft.Column([c, divider, filler], spacing=0, width=400, height=400)
class ConnectionCall(invoke.Call):
    """Task call that remembers the keyword arguments needed to build its Connection."""

    def __init__(self, *args, **kwargs):
        # Pull out our extra keyword before delegating to invoke.Call.
        init_kwargs = kwargs.pop('init_kwargs')
        super().__init__(*args, **kwargs)
        self.init_kwargs = init_kwargs

    def clone_kwargs(self):
        """Propagate init_kwargs when the call is cloned."""
        kwargs = super().clone_kwargs()
        kwargs['init_kwargs'] = self.init_kwargs
        return kwargs

    def make_context(self, config):
        """Create the Connection this call will execute against."""
        kwargs = self.init_kwargs
        kwargs['config'] = config
        return Connection(**kwargs)

    def __repr__(self):
        text = super().__repr__()
        if self.init_kwargs:
            # Splice the target host in just before the closing '>'.
            text = (text[:(- 1)] + ", host='{}'>".format(self.init_kwargs['host']))
        return text
class Seq2Seq(nn.Module):
    """Encoder-decoder wrapper: encode the source sequence, then decode the target."""

    def __init__(self, encoder, decoder):
        super(Seq2Seq, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def init_weights(self):
        """Initialise every parameter uniformly in [-0.08, 0.08]."""
        for (name, param) in self.named_parameters():
            nn.init.uniform_(param.data, (- 0.08), 0.08)

    def forward(self, src, tgt, max_len=None, teacher_forcing_ratio=0.5):
        """Run the full pass: encode src, then decode tgt conditioned on the encoder state."""
        (hidden, cell, outputs) = self.encoder(src)
        return self.decoder(tgt, hidden, cell, max_len, teacher_forcing_ratio)
# NOTE(review): '.parametrize(...)' looks like a pytest.mark.parametrize
# decorator whose '@pytest.mark' prefix was lost in extraction; confirm.
.parametrize('testcase', [5, 6])
def test_scalar_convergence(testcase):
    """Project an exact polynomial onto an extruded 'S' space and check the L2 error."""
    mesh = UnitSquareMesh((2 ** 2), (2 ** 2), quadrilateral=True)
    mesh = ExtrudedMesh(mesh, (2 ** 2))
    fspace = FunctionSpace(mesh, 'S', testcase)
    u = TrialFunction(fspace)
    v = TestFunction(fspace)
    (x, y, z) = SpatialCoordinate(mesh)
    # The exact solution should be representable in the space, so the
    # projection reproduces it up to solver tolerance.
    uex = (((x ** testcase) + (y ** testcase)) + ((x ** 2) * (y ** 3)))
    f = uex
    # High quadrature degree to keep integration error negligible.
    a = (inner(u, v) * dx(degree=12))
    L = (inner(f, v) * dx(degree=12))
    sol = Function(fspace)
    solve((a == L), sol)
    l2err = sqrt(assemble((((sol - uex) * (sol - uex)) * dx)))
    assert (l2err < 1e-06)
def ip_prefix_convert(n, is_ipv6=False):
    """Convert a prefix length into a netmask in presentation format.

    Args:
        n: number of leading one-bits in the mask (0..32 for IPv4,
           0..128 for IPv6).
        is_ipv6: when True produce an IPv6 (colon) mask, otherwise an IPv4
           dotted-quad mask.

    Returns:
        The netmask string, e.g. 24 -> '255.255.255.0',
        64 (IPv6) -> 'ffff:ffff:ffff:ffff::'.
    """
    total_bytes = 16 if is_ipv6 else 4
    # Split the prefix into whole 0xFF bytes plus the bits of the boundary byte.
    full_bytes, remainder_bits = divmod(n, 8)
    mask = bytearray([0xFF] * full_bytes)
    if remainder_bits:
        # Set the top `remainder_bits` bits of the partially-filled byte.
        mask.append((0xFF << (8 - remainder_bits)) & 0xFF)
    # Zero-pad to the full address width.
    mask.extend([0] * (total_bytes - len(mask)))
    family = socket.AF_INET6 if is_ipv6 else socket.AF_INET
    return socket.inet_ntop(family, bytes(mask[:total_bytes]))
class ProjectorBase(object, metaclass=abc.ABCMeta):
    """Shared machinery for projecting `source` onto `target`'s function space.

    Assembles a mass matrix (or its direct Slate inverse for DG spaces) with
    sensible default solver parameters; subclasses provide the right-hand side.
    """
    def __init__(self, source, target, bcs=None, solver_parameters=None, form_compiler_parameters=None, constant_jacobian=True, use_slate_for_inverse=True):
        if (solver_parameters is None):
            solver_parameters = {}
        else:
            # Copy so the caller's dict is never mutated by the setdefaults.
            solver_parameters = solver_parameters.copy()
        solver_parameters.setdefault('ksp_type', 'cg')
        solver_parameters.setdefault('ksp_rtol', 1e-08)
        mat_type = solver_parameters.get('mat_type', firedrake.parameters['default_matrix_type'])
        # Default preconditioner depends on the matrix representation.
        if (mat_type == 'nest'):
            solver_parameters.setdefault('pc_type', 'fieldsplit')
            solver_parameters.setdefault('fieldsplit_pc_type', 'bjacobi')
            solver_parameters.setdefault('fieldsplit_sub_pc_type', 'icc')
        elif (mat_type == 'matfree'):
            solver_parameters.setdefault('pc_type', 'jacobi')
        else:
            solver_parameters.setdefault('pc_type', 'bjacobi')
            solver_parameters.setdefault('sub_pc_type', 'icc')
        self.source = source
        self.target = target
        self.solver_parameters = solver_parameters
        self.form_compiler_parameters = form_compiler_parameters
        self.bcs = bcs
        self.constant_jacobian = constant_jacobian
        try:
            element = self.target.function_space().finat_element
            # DG iff every entity's dofs already lie in its closure.
            is_dg = (element.entity_dofs() == element.entity_closure_dofs())
            is_variable_layers = self.target.function_space().mesh().variable_layers
        except AttributeError:
            # Mixed (or otherwise unsupported) spaces: fall back conservatively.
            is_dg = False
            is_variable_layers = True
        # The Slate direct inverse is only used for DG without variable layers
        # (and only when complex mode is supported by Slate).
        self.use_slate_for_inverse = (use_slate_for_inverse and is_dg and (not is_variable_layers) and ((not complex_mode) or SLATE_SUPPORTS_COMPLEX))
    # NOTE(review): the bare '_property' lines below look like decorators that
    # lost their '@' in extraction (presumably '@cached_property'); as written
    # they are plain expressions — confirm against the original source.
    _property
    def A(self):
        """Assemble the mass matrix (or its Slate inverse) for the target space."""
        u = firedrake.TrialFunction(self.target.function_space())
        v = firedrake.TestFunction(self.target.function_space())
        F = self.target.function_space()
        mixed = isinstance(F.ufl_element(), finat.ufl.MixedElement)
        if ((not mixed) and isinstance(F.finat_element, HDivTrace)):
            # Trace spaces carry dofs on facets only, so integrate over facets.
            if F.extruded:
                a = (((((firedrake.inner(u, v) * firedrake.ds_t) + (firedrake.inner(u, v) * firedrake.ds_v)) + (firedrake.inner(u, v) * firedrake.ds_b)) + (firedrake.inner(u('+'), v('+')) * firedrake.dS_h)) + (firedrake.inner(u('+'), v('+')) * firedrake.dS_v))
            else:
                a = ((firedrake.inner(u, v) * firedrake.ds) + (firedrake.inner(u('+'), v('+')) * firedrake.dS))
        else:
            a = (firedrake.inner(u, v) * firedrake.dx)
        if self.use_slate_for_inverse:
            a = firedrake.Tensor(a).inv
        A = firedrake.assemble(a, bcs=self.bcs, mat_type=self.solver_parameters.get('mat_type'), form_compiler_parameters=self.form_compiler_parameters)
        return A
    _property
    def solver(self):
        """Linear solver configured for the mass matrix A."""
        return firedrake.LinearSolver(self.A, solver_parameters=self.solver_parameters)
    # NOTE(review): project() calls this as self.apply_massinv(target, rhs),
    # which is only coherent if apply_massinv is a property returning the
    # solve callable — a lost '@cached_property' decorator would explain it.
    def apply_massinv(self):
        """Return a callable solve(x, b) that applies the inverse mass matrix."""
        if (not self.constant_jacobian):
            # The operator may have changed: reassemble A in place.
            firedrake.assemble(self.A.a, tensor=self.A, bcs=self.bcs, form_compiler_parameters=self.form_compiler_parameters)
        if self.use_slate_for_inverse:
            def solve(x, b):
                # A already holds the inverse, so applying it is a mat-vec.
                with x.dat.vec_wo as x_, b.dat.vec_ro as b_:
                    self.A.petscmat.mult(b_, x_)
            return solve
        else:
            return self.solver.solve
    _property
    def residual(self):
        """Cofunction holding the projection right-hand side."""
        return firedrake.Cofunction(self.target.function_space().dual())
    def rhs(self):
        # Subclasses supply the right-hand side of the projection.
        pass
    def project(self):
        """Perform the projection and return the updated target function."""
        self.apply_massinv(self.target, self.rhs)
        return self.target
def get_cut_text(window, key):
    """Return the characters of window.metadata[key] removed from the widget's current text.

    Walks the stored (old) text against the widget's new text in order; any
    old character that no longer lines up with the new text counts as cut.
    An empty widget means the entire stored text was cut.
    """
    new_text = window[key].get()
    if not new_text:
        return window.metadata[key]
    removed = ''
    pos = 0
    for old_char in window.metadata[key]:
        if (pos < len(new_text)) and (old_char == new_text[pos]):
            pos += 1
        else:
            removed += old_char
    return removed
# NOTE(review): "(scope='session')" looks like a pytest fixture decorator
# ('@pytest.fixture(scope=...)') whose prefix was lost in extraction; confirm.
(scope='session')
def redshift_test_engine() -> Generator:
    """Yield a SQLAlchemy engine for the integration-test Redshift instance.

    Connection settings come from integration_config, falling back to the
    REDSHIFT_TEST_* environment variables; the engine is disposed on teardown.
    """
    connection_config = ConnectionConfig(name='My Redshift Config', key='test_redshift_key', connection_type=ConnectionType.redshift)
    host = (integration_config.get('redshift', {}).get('host') or os.environ.get('REDSHIFT_TEST_HOST'))
    port = (integration_config.get('redshift', {}).get('port') or os.environ.get('REDSHIFT_TEST_PORT'))
    user = (integration_config.get('redshift', {}).get('user') or os.environ.get('REDSHIFT_TEST_USER'))
    password = (integration_config.get('redshift', {}).get('password') or os.environ.get('REDSHIFT_TEST_PASSWORD'))
    database = (integration_config.get('redshift', {}).get('database') or os.environ.get('REDSHIFT_TEST_DATABASE'))
    db_schema = (integration_config.get('redshift', {}).get('db_schema') or os.environ.get('REDSHIFT_TEST_DB_SCHEMA'))
    # Port only parses to int when it is a digit string; otherwise left unset.
    schema = RedshiftSchema(host=host, port=(int(port) if (port and port.isdigit()) else None), user=user, password=password, database=database, db_schema=db_schema)
    connection_config.secrets = schema.dict()
    connector: RedshiftConnector = get_connector(connection_config)
    engine = connector.client()
    (yield engine)
    engine.dispose()
class bad_request_error_msg(error_msg):
    """OFPET_BAD_REQUEST error message (generated loxi binding).

    NOTE(review): pack() joins struct.pack output with str data via ''.join —
    this generated code appears to target Python 2 byte/str semantics.
    """
    version = 3   # wire protocol version byte (OpenFlow 1.2)
    type = 1      # message type: error
    err_type = 1  # error type: bad request
    def __init__(self, xid=None, code=None, data=None):
        # Defaults: no transaction id, code 0, empty payload.
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        if (code != None):
            self.code = code
        else:
            self.code = 0
        if (data != None):
            self.data = data
        else:
            self.data = ''
        return
    def pack(self):
        """Serialise this error message into its wire-format representation."""
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))  # length placeholder, patched below
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!H', self.err_type))
        packed.append(struct.pack('!H', self.code))
        packed.append(self.data)
        length = sum([len(x) for x in packed])
        # Backfill the real total length into the placeholder slot.
        packed[2] = struct.pack('!H', length)
        return ''.join(packed)
    # NOTE(review): unpack takes no 'self' — in loxi-generated code this is
    # normally a @staticmethod; the decorator may have been lost in extraction.
    def unpack(reader):
        """Parse a message from reader, asserting the fixed header fields."""
        obj = bad_request_error_msg()
        _version = reader.read('!B')[0]
        assert (_version == 3)
        _type = reader.read('!B')[0]
        assert (_type == 1)
        _length = reader.read('!H')[0]
        orig_reader = reader
        # Restrict further reads to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _err_type = reader.read('!H')[0]
        assert (_err_type == 1)
        obj.code = reader.read('!H')[0]
        obj.data = str(reader.read_all())
        return obj
    def __eq__(self, other):
        # Field-by-field equality against another message of the same type.
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.code != other.code):
            return False
        if (self.data != other.data):
            return False
        return True
    def pretty_print(self, q):
        """Write a human-readable dump of this message to the pretty-printer q."""
        q.text('bad_request_error_msg {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('code = ')
                # Show the symbolic OFPBRC_* name when the code is known.
                value_name_map = {0: 'OFPBRC_BAD_VERSION', 1: 'OFPBRC_BAD_TYPE', 2: 'OFPBRC_BAD_STAT', 3: 'OFPBRC_BAD_EXPERIMENTER', 4: 'OFPBRC_BAD_EXPERIMENTER_TYPE', 5: 'OFPBRC_EPERM', 6: 'OFPBRC_BAD_LEN', 7: 'OFPBRC_BUFFER_EMPTY', 8: 'OFPBRC_BUFFER_UNKNOWN', 9: 'OFPBRC_BAD_TABLE_ID', 10: 'OFPBRC_IS_SLAVE', 11: 'OFPBRC_BAD_PORT', 12: 'OFPBRC_BAD_PACKET'}
                if (self.code in value_name_map):
                    q.text(('%s(%d)' % (value_name_map[self.code], self.code)))
                else:
                    q.text(('%#x' % self.code))
                q.text(',')
                q.breakable()
                q.text('data = ')
                q.pp(self.data)
                q.breakable()
        q.text('}')
# NOTE(review): "_for(Session, 'after_commit')" looks like an event-listener
# decorator (presumably '@event.listens_for(...)') whose prefix was lost in
# extraction; confirm against the original source.
_for(Session, 'after_commit')
def send_messages_after_commit(session):
    """Publish any messages queued on the session once its transaction commits.

    Publish failures are logged and swallowed so a messaging outage cannot
    affect the already-committed database work; the queue is cleared either way.
    """
    if ('messages' in session.info):
        for m in session.info['messages']:
            try:
                _publish_with_retry(m)
            except fml_exceptions.BaseException:
                _log.exception('An error occurred publishing %r after a database commit', m)
        session.info['messages'] = []
def test_peek_eof():
    """peek() near EOF returns at most the readable bytes without consuming them."""
    payload = b'Hello, world!\n'
    backing = io.BytesIO(payload)
    # The reader's window stops one byte short of the trailing newline.
    reader = BufferedReader(backing.read, (len(payload) - 1))
    expectations = [(0, b''), (1, b'H'), (2, b'He'), (16, b'Hello, world!'), (32, b'Hello, world!')]
    for (count, expected) in expectations:
        assert (reader.peek(count) == expected)
    # The byte beyond the window is left untouched in the underlying stream.
    assert (backing.read() == b'\n')
class TestSkillBehaviour(BaseSkillTestCase):
path_to_skill = Path(ROOT_DIR, 'packages', 'fetchai', 'skills', 'tac_negotiation')
def setup(cls):
tac_dm_context_kwargs = {'goal_pursuit_readiness': GoalPursuitReadiness(), 'ownership_state': OwnershipState(), 'preferences': Preferences()}
super().setup(dm_context_kwargs=tac_dm_context_kwargs)
cls.tac_negotiation = cast(GoodsRegisterAndSearchBehaviour, cls._skill.skill_context.behaviours.tac_negotiation)
cls.oef_search_dialogues = cast(OefSearchDialogues, cls._skill.skill_context.oef_search_dialogues)
cls.goal_pursuit_readiness = cls._skill.skill_context.decision_maker_handler_context.goal_pursuit_readiness
cls.strategy = cast(Strategy, cls._skill.skill_context.strategy)
cls.logger = cls._skill.skill_context.logger
cls.mocked_description = Description({'foo1': 1, 'bar1': 2})
cls.mocked_query = Query([Constraint('tac_service', ConstraintType('==', 'both'))])
cls.sender = str(cls._skill.skill_context.skill_id)
cls.registration_message = OefSearchMessage(dialogue_reference=('', ''), performative=OefSearchMessage.Performative.REGISTER_SERVICE, service_description='some_service_description')
cls.registration_message.sender = str(cls._skill.skill_context.skill_id)
cls.registration_message.to = cls._skill.skill_context.search_service_address
cls.tac_version_id = 'some_tac_version_id'
def test_init(self):
assert (self.tac_negotiation.is_registered is False)
assert (self.tac_negotiation.failed_registration_msg is None)
assert (self.tac_negotiation._nb_retries == 0)
def test_setup(self):
assert (self.tac_negotiation.setup() is None)
self.assert_quantity_in_outbox(0)
def test_act_i(self):
self.skill.skill_context._agent_context._shared_state = {'is_game_finished': True}
self.tac_negotiation.act()
self.assert_quantity_in_outbox(0)
assert (self.skill.skill_context.is_active is False)
def test_act_ii(self):
self.skill.skill_context._agent_context._shared_state = {'is_game_finished': False}
self.goal_pursuit_readiness._status = GoalPursuitReadiness.Status.NOT_READY
self.tac_negotiation.act()
self.assert_quantity_in_outbox(0)
def test_act_iii(self):
self.skill.skill_context._agent_context._shared_state = {'is_game_finished': False}
self.goal_pursuit_readiness._status = GoalPursuitReadiness.Status.READY
if ('tac_version_id' in self.skill.skill_context._agent_context._shared_state):
self.skill.skill_context._agent_context._shared_state.pop('tac_version_id')
with patch.object(self.logger, 'log') as mock_logger:
self.tac_negotiation.act()
self.assert_quantity_in_outbox(0)
mock_logger.assert_any_call(logging.ERROR, 'Cannot get the tac_version_id. Stopping!')
def test_act_iv(self):
self.skill.skill_context._agent_context._shared_state = {'is_game_finished': False, 'tac_version_id': self.tac_version_id}
self.goal_pursuit_readiness._status = GoalPursuitReadiness.Status.READY
searching_for_types = [(True, 'sellers'), (False, 'buyers')]
no_searches = len(searching_for_types)
self.tac_negotiation.failed_registration_msg = None
with patch.object(self.strategy, 'get_location_description', return_value=self.mocked_description):
with patch.object(self.strategy, 'get_register_service_description', return_value=self.mocked_description):
with patch.object(self.strategy, 'get_location_and_service_query', return_value=self.mocked_query):
with patch.object(type(self.strategy), 'searching_for_types', new_callable=PropertyMock, return_value=searching_for_types):
with patch.object(self.logger, 'log') as mock_logger:
self.tac_negotiation.act()
self.assert_quantity_in_outbox((no_searches + 1))
(has_attributes, error_str) = self.message_has_attributes(actual_message=self.get_message_from_outbox(), message_type=OefSearchMessage, performative=OefSearchMessage.Performative.REGISTER_SERVICE, to=self.skill.skill_context.search_service_address, sender=self.sender, service_description=self.mocked_description)
assert has_attributes, error_str
mock_logger.assert_any_call(logging.INFO, 'registering agent on SOEF.')
for search in searching_for_types:
message = self.get_message_from_outbox()
(has_attributes, error_str) = self.message_has_attributes(actual_message=message, message_type=OefSearchMessage, performative=OefSearchMessage.Performative.SEARCH_SERVICES, to=self.skill.skill_context.search_service_address, sender=self.sender, query=self.mocked_query)
assert has_attributes, error_str
assert (cast(OefSearchDialogue, self.oef_search_dialogues.get_dialogue(message)).is_seller_search == search[0])
mock_logger.assert_any_call(logging.INFO, f'searching for {search[1]}, search_id={message.dialogue_reference}.')
def test_act_v(self):
self.skill.skill_context._agent_context._shared_state = {'is_game_finished': False, 'tac_version_id': self.tac_version_id}
self.goal_pursuit_readiness._status = GoalPursuitReadiness.Status.READY
searching_for_types = [(True, 'sellers'), (False, 'buyers')]
no_searches = len(searching_for_types)
self.tac_negotiation.failed_registration_msg = self.registration_message
with patch.object(self.strategy, 'get_location_description', return_value=self.mocked_description):
with patch.object(self.strategy, 'get_register_service_description', return_value=self.mocked_description):
with patch.object(self.strategy, 'get_location_and_service_query', return_value=self.mocked_query):
with patch.object(type(self.strategy), 'searching_for_types', new_callable=PropertyMock, return_value=searching_for_types):
with patch.object(self.logger, 'log') as mock_logger:
self.tac_negotiation.act()
self.assert_quantity_in_outbox((no_searches + 2))
(has_attributes, error_str) = self.message_has_attributes(actual_message=self.get_message_from_outbox(), message_type=type(self.registration_message), performative=self.registration_message.performative, to=self.registration_message.to, sender=str(self.skill.skill_context.skill_id), service_description=self.registration_message.service_description)
assert has_attributes, error_str
mock_logger.assert_any_call(logging.INFO, f'Retrying registration on SOEF. Retry {self.tac_negotiation._nb_retries} out of {self.tac_negotiation._max_soef_registration_retries}.')
assert (self.tac_negotiation.failed_registration_msg is None)
(has_attributes, error_str) = self.message_has_attributes(actual_message=self.get_message_from_outbox(), message_type=OefSearchMessage, performative=OefSearchMessage.Performative.REGISTER_SERVICE, to=self.skill.skill_context.search_service_address, sender=self.sender, service_description=self.mocked_description)
assert has_attributes, error_str
mock_logger.assert_any_call(logging.INFO, 'registering agent on SOEF.')
for search in searching_for_types:
message = self.get_message_from_outbox()
(has_attributes, error_str) = self.message_has_attributes(actual_message=message, message_type=OefSearchMessage, performative=OefSearchMessage.Performative.SEARCH_SERVICES, to=self.skill.skill_context.search_service_address, sender=self.sender, query=self.mocked_query)
assert has_attributes, error_str
assert (cast(OefSearchDialogue, self.oef_search_dialogues.get_dialogue(message)).is_seller_search == search[0])
mock_logger.assert_any_call(logging.INFO, f'searching for {search[1]}, search_id={message.dialogue_reference}.')
def test_act_vi(self):
self.skill.skill_context._agent_context._shared_state = {'is_game_finished': False, 'tac_version_id': self.tac_version_id}
self.goal_pursuit_readiness._status = GoalPursuitReadiness.Status.READY
searching_for_types = [(True, 'sellers'), (False, 'buyers')]
no_searches = len(searching_for_types)
self.tac_negotiation.failed_registration_msg = self.registration_message
self.tac_negotiation._max_soef_registration_retries = 2
self.tac_negotiation._nb_retries = 2
with patch.object(self.strategy, 'get_location_description', return_value=self.mocked_description):
with patch.object(self.strategy, 'get_register_service_description', return_value=self.mocked_description):
with patch.object(self.strategy, 'get_location_and_service_query', return_value=self.mocked_query):
with patch.object(type(self.strategy), 'searching_for_types', new_callable=PropertyMock, return_value=searching_for_types):
with patch.object(self.logger, 'log') as mock_logger:
self.tac_negotiation.act()
self.assert_quantity_in_outbox((no_searches + 1))
assert (self.skill.skill_context.is_active is False)
(has_attributes, error_str) = self.message_has_attributes(actual_message=self.get_message_from_outbox(), message_type=OefSearchMessage, performative=OefSearchMessage.Performative.REGISTER_SERVICE, to=self.skill.skill_context.search_service_address, sender=self.sender, service_description=self.mocked_description)
assert has_attributes, error_str
mock_logger.assert_any_call(logging.INFO, 'registering agent on SOEF.')
for search in searching_for_types:
message = self.get_message_from_outbox()
(has_attributes, error_str) = self.message_has_attributes(actual_message=message, message_type=OefSearchMessage, performative=OefSearchMessage.Performative.SEARCH_SERVICES, to=self.skill.skill_context.search_service_address, sender=self.sender, query=self.mocked_query)
assert has_attributes, error_str
assert (cast(OefSearchDialogue, self.oef_search_dialogues.get_dialogue(message)).is_seller_search == search[0])
mock_logger.assert_any_call(logging.INFO, f'searching for {search[1]}, search_id={message.dialogue_reference}.')
def test_act_vii(self):
self.skill.skill_context._agent_context._shared_state = {'is_game_finished': False, 'tac_version_id': self.tac_version_id}
self.goal_pursuit_readiness._status = GoalPursuitReadiness.Status.READY
self.tac_negotiation.is_registered = True
searching_for_types = [(True, 'sellers'), (False, 'buyers')]
no_searches = len(searching_for_types)
with patch.object(self.strategy, 'get_location_and_service_query', return_value=self.mocked_query):
with patch.object(type(self.strategy), 'searching_for_types', new_callable=PropertyMock, return_value=searching_for_types):
with patch.object(self.logger, 'log') as mock_logger:
self.tac_negotiation.act()
self.assert_quantity_in_outbox(no_searches)
for search in searching_for_types:
message = self.get_message_from_outbox()
(has_attributes, error_str) = self.message_has_attributes(actual_message=message, message_type=OefSearchMessage, performative=OefSearchMessage.Performative.SEARCH_SERVICES, to=self.skill.skill_context.search_service_address, sender=self.sender, query=self.mocked_query)
assert has_attributes, error_str
assert (cast(OefSearchDialogue, self.oef_search_dialogues.get_dialogue(message)).is_seller_search == search[0])
mock_logger.assert_any_call(logging.INFO, f'searching for {search[1]}, search_id={message.dialogue_reference}.')
def test_register_service(self):
with patch.object(self.strategy, 'get_register_service_description', return_value=self.mocked_description):
with patch.object(self.logger, 'log') as mock_logger:
self.tac_negotiation.register_service()
self.assert_quantity_in_outbox(1)
message = self.get_message_from_outbox()
(has_attributes, error_str) = self.message_has_attributes(actual_message=message, message_type=OefSearchMessage, performative=OefSearchMessage.Performative.REGISTER_SERVICE, to=self.skill.skill_context.search_service_address, sender=str(self.skill.skill_context.skill_id), service_description=self.mocked_description)
assert has_attributes, error_str
mock_logger.assert_any_call(logging.INFO, f'updating service directory as {self.strategy.registering_as}.')
def test_register_genus(self):
with patch.object(self.strategy, 'get_register_personality_description', return_value=self.mocked_description):
with patch.object(self.logger, 'log') as mock_logger:
self.tac_negotiation.register_genus()
self.assert_quantity_in_outbox(1)
message = self.get_message_from_outbox()
(has_attributes, error_str) = self.message_has_attributes(actual_message=message, message_type=OefSearchMessage, performative=OefSearchMessage.Performative.REGISTER_SERVICE, to=self.skill.skill_context.search_service_address, sender=str(self.skill.skill_context.skill_id), service_description=self.mocked_description)
assert has_attributes, error_str
mock_logger.assert_any_call(logging.INFO, "registering agent's personality genus on the SOEF.")
def test_register_classification(self):
with patch.object(self.strategy, 'get_register_classification_description', return_value=self.mocked_description):
with patch.object(self.logger, 'log') as mock_logger:
self.tac_negotiation.register_classification()
self.assert_quantity_in_outbox(1)
message = self.get_message_from_outbox()
(has_attributes, error_str) = self.message_has_attributes(actual_message=message, message_type=OefSearchMessage, performative=OefSearchMessage.Performative.REGISTER_SERVICE, to=self.skill.skill_context.search_service_address, sender=str(self.skill.skill_context.skill_id), service_description=self.mocked_description)
assert has_attributes, error_str
mock_logger.assert_any_call(logging.INFO, "registering agent's personality classification on the SOEF.")
def test_teardown_i(self):
    """teardown() while registered must send two UNREGISTER_SERVICE messages and clear the flag."""
    self.tac_negotiation.is_registered = True
    with patch.object(self.strategy, 'get_unregister_service_description', return_value=self.mocked_description), \
            patch.object(self.strategy, 'get_location_description', return_value=self.mocked_description), \
            patch.object(self.logger, 'log') as mock_logger:
        self.tac_negotiation.teardown()
    self.assert_quantity_in_outbox(2)
    # Both outgoing messages are UNREGISTER_SERVICE requests to the search
    # service; each is paired with its expected log call.
    expected_logs = (
        (logging.DEBUG, f'unregistering from service directory as {self.strategy.registering_as}.'),
        (logging.INFO, 'unregistering agent from SOEF.'),
    )
    for log_level, log_message in expected_logs:
        has_attributes, error_str = self.message_has_attributes(
            actual_message=self.get_message_from_outbox(),
            message_type=OefSearchMessage,
            performative=OefSearchMessage.Performative.UNREGISTER_SERVICE,
            to=self.skill.skill_context.search_service_address,
            sender=self.sender,
            service_description=self.mocked_description,
        )
        assert has_attributes, error_str
        mock_logger.assert_any_call(log_level, log_message)
    assert self.tac_negotiation.is_registered is False
def test_teardown_ii(self):
    """teardown() when not registered is a no-op: returns None and sends nothing."""
    self.tac_negotiation.is_registered = False
    result = self.tac_negotiation.teardown()
    assert result is None
    self.assert_quantity_in_outbox(0)
# NOTE(review): the decorator below had lost its leading `@pytest.mark` in
# transit (the bare `.compilertest` line was a syntax error); restored.
@pytest.mark.compilertest
def test_valid_grpc_stats_services():
    """A grpc_stats Module config with a service allowlist must compile cleanly
    (with and without an IR cache) and yield exactly one grpc_stats filter
    carrying the expected Envoy config."""
    yaml = '\n---\napiVersion: getambassador.io/v3alpha1\nkind: Module\nmetadata:\n name: ambassador\n namespace: default\nspec:\n config:\n grpc_stats:\n services:\n - name: echo.EchoService\n method_names: [Echo]\n'
    cache = Cache(logger)
    # Compile twice — once uncached, once cached — to catch cache regressions.
    r1 = Compile(logger, yaml, k8s=True)
    r2 = Compile(logger, yaml, k8s=True, cache=cache)
    require_no_errors(r1['ir'])
    require_no_errors(r2['ir'])
    ir = r1['ir'].as_dict()
    stats_filters = [f for f in ir['filters'] if f['name'] == 'grpc_stats']
    assert len(stats_filters) == 1
    assert stats_filters[0]['config'] == {'enable_upstream_stats': False, 'individual_method_stats_allowlist': {'services': [{'name': 'echo.EchoService', 'method_names': ['Echo']}]}}
def test_liveness_of_one_basicblock_a(construct_graph_one_basicblock, variable_x, variable_v, variable_u):
    """Liveness on a single basic block: all three variables live on entry, none on exit."""
    nodes, cfg = construct_graph_one_basicblock
    analysis = LivenessAnalysis(cfg)
    entry = nodes[0]
    expected_live_in = {variable_x[0], variable_v[0], variable_u[0]}
    assert analysis.live_in_of(entry) == expected_live_in
    assert analysis.live_out_of(entry) == set()
def test___setitem__checks_the_value_ranges_2():
    """Assigning a minute value above 1440 must raise a descriptive ValueError."""
    wh = WorkingHours()
    with pytest.raises(ValueError) as exc_info:
        wh['sat'] = [[0, 1800]]
    expected_message = 'WorkingHours.working_hours value should be a list of lists of two integers between and the range of integers should be 0-1440, not [[0, 1800]]'
    assert str(exc_info.value) == expected_message
def test_extruded_periodic_boundary_nodes():
    """Boundary nodes on a periodically extruded interval mesh: side markers and
    'bottom' yield the expected node sets; 'top' is rejected (periodic extrusion)."""
    base = UnitIntervalMesh(2)
    extm = ExtrudedMesh(base, layers=2, extrusion_type='uniform', periodic=True)
    V = FunctionSpace(extm, 'CG', 2)
    expected = {
        1: [4, 5, 6, 7],
        2: [16, 17, 18, 19],
        'bottom': [0, 4, 8, 12, 16],
    }
    for marker, nodes in expected.items():
        assert (V.boundary_nodes(marker) == np.array(nodes)).all()
    with pytest.raises(ValueError):
        V.boundary_nodes('top')
def test_arg_botcdm_returns_errors_as_chat(dummy_backend):
    """An unrecognized command-line flag must be reported back to the chat as a parse error."""
    incoming = makemessage(dummy_backend, '!returns_first_name_last_name --invalid-parameter')
    dummy_backend.callback_message(incoming)
    reply_body = dummy_backend.pop_message().body
    assert "I couldn't parse the arguments; unrecognized arguments: --invalid-parameter" in reply_body
def exposed_fetch(url, debug=True, rss_debug=False, special_case_enabled=True):
    """Fetch a single URL synchronously, bypassing the page cache.

    If the URL matches a configured special-case site (and special-case
    handling is enabled), the job is pushed onto the special-case queue
    instead of being fetched directly.
    """
    specialcase_data = WebMirror.rules.load_special_case_sites()
    print('Debug: %s, rss_debug: %s' % (debug, rss_debug))
    if rss_debug:
        print('Debugging RSS')
        flags.RSS_DEBUG = True
    parsed = urllib.parse.urlparse(url)
    if WebMirror.SpecialCase.haveSpecialCase(specialcase_data, url, parsed.netloc) and special_case_enabled:
        # Hand off to the special-case machinery; -1 marks an ad-hoc job.
        WebMirror.SpecialCase.pushSpecialCase(specialcase_data, -1, url, parsed.netloc, None)
        return
    try:
        with db.session_context() as sess:
            archiver = SiteArchiver(cookie_lock=None, db_interface=sess, new_job_queue=None)
            # NOTE(review): debug is hard-coded to True rather than forwarding
            # the `debug` parameter — presumably intentional for this debug
            # entry point; confirm before changing.
            archiver.synchronousJobRequest(url, ignore_cache=True, debug=True)
    except Exception:
        # Best-effort entry point: report the failure, don't propagate.
        traceback.print_exc()
def make_git(*prefix_args) -> Optional[Callable]:
    """Build a `git` helper bound to *prefix_args*.

    Returns a callable that runs git with the given prefix arguments, or None
    (after exiting the click context with code 1, if one exists) when no git
    executable can be located.
    """
    git_exe = shutil.which('git')
    prefix_args = [str(arg) for arg in prefix_args]
    if not git_exe:
        click.secho('Unable to find git', err=True, fg='red')
        ctx = click.get_current_context(silent=True)
        if ctx is not None:
            ctx.exit(1)
        return None

    def git(*args, print_output=False):
        nonlocal prefix_args
        # Default to operating on the project path unless a -C was supplied.
        if '-C' not in prefix_args:
            prefix_args = ['-C', get_path()] + prefix_args
        full_args = [git_exe, *prefix_args, *(str(arg) for arg in args)]
        if print_output:
            # Stream output straight to the terminal.
            return subprocess.check_call(full_args)
        # Capture and return trimmed stdout.
        return subprocess.check_output(full_args, encoding='utf-8').rstrip()

    return git
def test_static_memory_check(compiler):
    """Static memory may only be allocated in leaf procedures: compiling a
    caller that declares a StaticMemory buffer and then calls another proc
    must raise MemGenError.

    NOTE(review): the `@` operator in the DSL memory annotation below appears
    to have been stripped in transit (`(R StaticMemory)` is not valid syntax);
    restored as `R @ StaticMemory`. Any decorators on the inner procs (e.g.
    `@proc`) may have been lost the same way — confirm against upstream.
    """
    def callee():
        pass

    def caller():
        x: R
        if 1 < 2:
            y: R @ StaticMemory
        for i in seq(0, 8):
            callee()

    with pytest.raises(MemGenError, match='Cannot generate static memory in non-leaf procs'):
        compiler.compile(caller)
class LiteSATAPHY(Module, AutoCSR):
    """Device-agnostic SATA PHY.

    Selects a transceiver-specific PHY and clock/reset generator (CRG) pair
    based on the FPGA part number, then layers the shared control (OOB/init)
    and datapath logic on top. Exposes a stream sink/source pair and,
    optionally, enable/status CSRs.
    """
    def __init__(self, device, pads, gen, clk_freq, refclk=None, data_width=16, qpll=None, gt_type='GTY', use_gtgrefclk=True, with_csr=True):
        self.pads = pads
        self.gen = gen          # SATA generation selector passed through to the PHY.
        self.refclk = refclk
        self.enable = Signal()  # PHY enable (driven from the CSR when with_csr).
        self.ready = Signal()   # High when transceiver and control layer are both ready.

        # Transceiver/CRG selection, keyed on the FPGA part-number prefix. ----
        if re.match('^xc7k', device):
            # Xilinx 7-series Kintex.
            from litesata.phy.k7sataphy import K7LiteSATAPHYCRG, K7LiteSATAPHY
            self.submodules.phy = K7LiteSATAPHY(pads, gen, clk_freq, data_width)
            self.submodules.crg = K7LiteSATAPHYCRG(refclk, pads, self.phy, gen)
        elif re.match('^xc7a', device):
            # Xilinx 7-series Artix: TX buffer enabled for this family.
            from litesata.phy.a7sataphy import A7LiteSATAPHYCRG, A7LiteSATAPHY
            self.submodules.phy = A7LiteSATAPHY(pads, gen, clk_freq, data_width, tx_buffer_enable=True, qpll=qpll)
            self.submodules.crg = A7LiteSATAPHYCRG(refclk, pads, self.phy, gen, tx_buffer_enable=True)
        elif re.match('^xc[kv]u[0-9]+-', device):
            # Xilinx Ultrascale Kintex/Virtex.
            from litesata.phy.ussataphy import USLiteSATAPHYCRG, USLiteSATAPHY
            self.submodules.phy = USLiteSATAPHY(pads, gen, clk_freq, data_width)
            self.submodules.crg = USLiteSATAPHYCRG(refclk, pads, self.phy, gen)
        elif re.match('^xc([kv]u[0-9]+p-|zu[0-9])', device):
            # Xilinx Ultrascale+ (incl. Zynq US+): GTY or GTH transceivers.
            if (gt_type == 'GTY'):
                from litesata.phy.uspsataphy import USPLiteSATAPHYCRG, USPLiteSATAPHY
                self.submodules.phy = USPLiteSATAPHY(pads, gen, clk_freq, data_width, use_gtgrefclk=use_gtgrefclk)
                self.submodules.crg = USPLiteSATAPHYCRG(refclk, pads, self.phy, gen)
            elif (gt_type == 'GTH'):
                from litesata.phy.gthe4sataphy import GTHE4LiteSATAPHYCRG, GTHE4LiteSATAPHY
                self.submodules.phy = GTHE4LiteSATAPHY(pads, gen, clk_freq, data_width, use_gtgrefclk=use_gtgrefclk)
                self.submodules.crg = GTHE4LiteSATAPHYCRG(refclk, pads, self.phy, gen)
            else:
                raise NotImplementedError(f'Unsupported GT type. : {gt_type}')
        elif re.match('^LFE5UM5G-', device):
            # Lattice ECP5-5G: note the different PHY/CRG constructor signatures.
            from litesata.phy.ecp5sataphy import ECP5LiteSATAPHYCRG, ECP5LiteSATAPHY
            self.submodules.phy = ECP5LiteSATAPHY(refclk, pads, gen, clk_freq, data_width)
            self.submodules.crg = ECP5LiteSATAPHYCRG(self.phy)
        else:
            raise NotImplementedError(f'Unsupported {device} Device.')

        # Shared control (OOB sequencing) and datapath layers. ----------------
        self.submodules.ctrl = LiteSATAPHYCtrl(self.phy, self.crg, clk_freq)
        self.submodules.datapath = LiteSATAPHYDatapath(self.phy, self.ctrl)
        self.comb += [self.ctrl.rx_idle.eq(self.datapath.rx_idle), self.ctrl.misalign.eq(self.datapath.misalign)]
        (self.sink, self.source) = (self.datapath.sink, self.datapath.source)

        # Hold the transceiver init FSMs in restart while disabled; RX init is
        # also restarted on control-layer request (PHYs exposing tx/rx_init only).
        if (hasattr(self.phy, 'tx_init') and hasattr(self.phy, 'rx_init')):
            self.comb += self.phy.tx_init.restart.eq((~ self.enable))
            self.comb += self.phy.rx_init.restart.eq(((~ self.enable) | self.ctrl.rx_reset))
        self.comb += self.ready.eq((self.phy.ready & self.ctrl.ready))
        if with_csr:
            self.add_csr()

    def add_csr(self):
        """Add the enable CSR (resets to enabled) and per-layer ready status fields."""
        self._enable = CSRStorage(reset=1)
        self._status = CSRStatus(fields=[CSRField('ready', size=1, values=[('``0b0``', 'PHY not initialized.'), ('``0b1``', 'PHY initialized and ready.')]), CSRField('tx_ready', size=1, values=[('``0b0``', 'TX not initialized.'), ('``0b1``', 'TX initialized and ready.')]), CSRField('rx_ready', size=1, values=[('``0b0``', 'RX not initialized.'), ('``0b1``', 'RX initialized and ready.')]), CSRField('ctrl_ready', size=1, values=[('``0b0``', 'Ctrl/OOB not initialized.'), ('``0b1``', 'Ctrl/OOB initialized and ready.')])])
        self.comb += self.enable.eq(self._enable.storage)
        self.comb += self._status.fields.ready.eq((self.phy.ready & self.ctrl.ready))
        # PHYs without separate init FSMs report the global ready on both lanes.
        if (hasattr(self.phy, 'tx_init') and hasattr(self.phy, 'rx_init')):
            self.comb += self._status.fields.tx_ready.eq(self.phy.tx_init.done)
            self.comb += self._status.fields.rx_ready.eq(self.phy.rx_init.done)
        else:
            self.comb += self._status.fields.tx_ready.eq(self.phy.ready)
            self.comb += self._status.fields.rx_ready.eq(self.phy.ready)
        self.comb += self._status.fields.ctrl_ready.eq(self.ctrl.ready)
class NumpyBackend(Backend):
    """Numpy implementation of the array-backend interface.

    Delegates a uniform set of array operations to numpy. Plain numpy callables
    are wrapped in `staticmethod` so instance access does not bind `self`;
    `_replace_float` wraps constructors so they default to the backend float
    type.

    NOTE(review): the `def`-style methods below take no `self` — their
    `@staticmethod` decorators appear to have been stripped in transit and are
    restored here; without them, instance access would mis-bind the first
    argument. A duplicate `exp = staticmethod(numpy.exp)` assignment was also
    removed (it rebound the same value).
    """
    # Scalar dtypes for this backend.
    int = numpy.int64
    float = numpy.float64
    complex = numpy.complex128

    asarray = _replace_float(numpy.asarray)
    exp = staticmethod(numpy.exp)
    sin = staticmethod(numpy.sin)
    cos = staticmethod(numpy.cos)
    sum = staticmethod(numpy.sum)
    max = staticmethod(numpy.max)
    stack = staticmethod(numpy.stack)
    transpose = staticmethod(numpy.transpose)
    reshape = staticmethod(numpy.reshape)
    squeeze = staticmethod(numpy.squeeze)
    broadcast_arrays = staticmethod(numpy.broadcast_arrays)
    broadcast_to = staticmethod(numpy.broadcast_to)

    @staticmethod
    def bmm(arr1, arr2):
        """Batched matrix multiply: (i, j, k) x (i, k, l) -> (i, j, l)."""
        return numpy.einsum('ijk,ikl->ijl', arr1, arr2)

    @staticmethod
    def is_array(arr):
        """Return True if *arr* is a numpy ndarray."""
        return isinstance(arr, numpy.ndarray)

    array = _replace_float(numpy.array)
    ones = _replace_float(numpy.ones)
    zeros = _replace_float(numpy.zeros)
    zeros_like = staticmethod(numpy.zeros_like)
    linspace = _replace_float(numpy.linspace)
    arange = _replace_float(numpy.arange)
    pad = staticmethod(numpy.pad)
    fftfreq = staticmethod(numpy.fft.fftfreq)
    fft = staticmethod(numpy.fft.fft)
    divide = staticmethod(numpy.divide)
    # Deliberately shadows the module name at class level so that
    # `backend.numpy(...)` behaves like `numpy.asarray(...)`.
    numpy = _replace_float(numpy.asarray)

    @staticmethod
    def is_complex(x):
        """Return True for python complex scalars and complex numpy arrays."""
        return isinstance(x, complex) or (isinstance(x, numpy.ndarray) and x.dtype in (numpy.complex64, numpy.complex128))
def choose_middlewares(data: DataModel):
    """Interactive step 3: let the user add/reorder middlewares.

    Runs a reorderable bullet list in a loop; the 'add' action opens a
    secondary picker of installed middlewares, optionally suffixed with an
    instance name encoded as 'id#instance'. On any other action the loop
    ends and the chosen ids are written to data.config['middlewares'].
    """
    (chosen_middlewares_names, chosen_middlewares_ids) = data.get_selected_middleware_lists()
    widget = ReorderBullet(_('3. Choose middlewares (optional).'), choices=chosen_middlewares_names, choices_id=chosen_middlewares_ids)
    (middlewares_names, middlewares_ids) = data.get_middleware_lists()
    list_widget: Optional[KeyValueBullet]
    if middlewares_ids:
        list_widget = KeyValueBullet(prompt=_('Choose a middleware to add.'), choices=middlewares_names, choices_id=middlewares_ids)
    else:
        # No middleware installed: the 'add' action will just show a notice.
        list_widget = None
    while True:
        print()
        (chosen_middlewares_names, chosen_middlewares_ids, action) = widget.launch()
        if (action == 'add'):
            print()
            if (not list_widget):
                print_wrapped(_('No installed middleware is detected, press ENTER to go back.'))
                input()
            else:
                (add_middleware_name, add_middleware_id) = list_widget.launch()
                add_middleware_instance = input((_('Instance name to use with {middleware_name}: [{default_instance}]').format(middleware_name=add_middleware_name, default_instance=_('default instance')) + ' ')).strip()
                if add_middleware_instance:
                    # Non-default instance: encode it into the id as 'id#instance'.
                    add_middleware_id += ('#' + add_middleware_instance)
                display_name = data.get_instance_display_name(add_middleware_id)
                if (add_middleware_id in widget.choices_id):
                    print_wrapped(_('{instance_name} ({instance_id}) is already enabled. Please try another one.').format(instance_name=display_name, instance_id=add_middleware_id))
                else:
                    # Insert before the last two entries — presumably the
                    # widget's trailing action rows; confirm against ReorderBullet.
                    widget.choices.insert((- 2), display_name)
                    widget.choices_id.insert((- 2), add_middleware_id)
        else:
            # Any non-'add' action finalizes the current selection/ordering.
            break
    data.config['middlewares'] = chosen_middlewares_ids
class FESpace():
    """Helper bundling the finite-element space pieces used by the tests."""

    def __init__(self):
        pass

    def getFESpace(self):
        """Return the basis and quadrature rules for a C0 affine-linear simplex space."""
        space = {
            'basis': ft.C0_AffineLinearOnSimplexWithNodalBasis,
            'elementQuadrature': ft.SimplexGaussQuadrature(2, 3),
            'elementBoundaryQuadrature': ft.SimplexGaussQuadrature(1, 3),
        }
        return space
class Player():
    """A named participant in the game.

    NOTE(review): the bare `name: str` annotation plus the `cls(name)` call
    below imply this class was declared with a constructor-generating
    decorator (e.g. `@dataclass`) that appears to have been lost in transit —
    confirm against upstream. The `@classmethod` on `create_players` was lost
    the same way and is restored here (the method takes `cls`).
    """
    # The player's display name.
    name: str

    @classmethod
    def create_players(cls, names: Optional[List[str]] = None) -> List[Player]:
        """Create one Player per name; defaults to 'Player 1' and 'Player 2'.

        Raises AssertionError if the names are not unique.
        """
        if names is None:
            names = ['Player 1', 'Player 2']
        assert len(names) == len(set(names)), 'Player names must be unique'
        return [cls(name) for name in names]
class TestZillizVectorDB():
    """Unit tests for ZillizVectorDB construction (client + connection wiring).

    NOTE(review): the decorators below had lost their leading `@patch` token in
    transit (the bare `.dict(...)` / `(...)` lines were syntax errors); they are
    restored to standard unittest.mock usage. `@pytest.fixture` on mock_config
    is reconstructed from its use as a test argument — confirm upstream.
    """

    @pytest.fixture
    @patch.dict(os.environ, {'ZILLIZ_CLOUD_URI': 'mocked_uri', 'ZILLIZ_CLOUD_TOKEN': 'mocked_token'})
    def mock_config(self, mocker):
        """A ZillizDBConfig mock, built while the cloud env vars are patched in."""
        return mocker.Mock(spec=ZillizDBConfig())

    @patch('embedchain.vectordb.zilliz.MilvusClient', autospec=True)
    @patch('embedchain.vectordb.zilliz.connections.connect', autospec=True)
    def test_zilliz_vector_db_setup(self, mock_connect, mock_client, mock_config):
        """Constructing ZillizVectorDB must create a MilvusClient and open a
        connection using the config's uri/token."""
        ZillizVectorDB(config=mock_config)
        mock_client.assert_called_once_with(uri=mock_config.uri, token=mock_config.token)
        mock_connect.assert_called_once_with(uri=mock_config.uri, token=mock_config.token)
def measure_for_all_england(request, measure):
    """Render a single measure aggregated across all of England for the
    requested organisation type (default: CCG)."""
    measure = get_object_or_404(Measure, pk=measure)
    entity_type = request.GET.get('entity_type', 'ccg')
    org_type = entity_type.lower()
    measure_options = {
        'aggregate': True,
        'chartTitleUrlTemplate': _url_template('measure_for_all_ccgs'),
        'globalMeasuresUrl': _build_global_measures_url(measure_id=measure.id),
        'orgName': 'All {}s in England'.format(entity_type),
        'orgType': org_type,
        'orgTypeHuman': _entity_type_human(org_type),
        'panelMeasuresUrl': _build_panel_measures_url(org_type, measure_id=measure.id, aggregate=True),
        'rollUpBy': 'measure_id',
        'tagsFocusUrlTemplate': reverse('all_england'),
    }
    _add_measure_details(measure_options, measure)
    _add_measure_url(measure_options, entity_type)
    numerator_breakdown_url = _build_api_url(
        'measure_numerators_by_org',
        {'org': '', 'org_type': entity_type, 'measure': measure.id},
    )
    context = {
        'entity_type': entity_type,
        'measures_url_name': 'measures_for_one_{}'.format(entity_type),
        'measure': measure,
        'measure_options': measure_options,
        'current_at': parse_date(latest_prescribing_date()),
        'numerator_breakdown_url': numerator_breakdown_url,
    }
    return render(request, 'measure_for_one_entity.html', context)
def _expand_ellipsis(obj, fields):
    """Expand `...` (ellipsis) entries in a projection's field list.

    Yields the given NamedFields, replacing each ellipsis with synthesized
    NamedFields for every attribute of *obj* that is neither explicitly
    projected nor listed in the ellipsis' exclude clause. Raises Signal
    errors on misuse and AutocompleteSuggestions when an autocomplete
    Marker is encountered in the exclude list.
    """
    # Names that are already projected explicitly (by a direct identifier).
    direct_names = {f.value.name for f in fields if isinstance(f.value, ast.Name)}
    for f in fields:
        assert isinstance(f, ast.NamedField)
        if isinstance(f.value, ast.Ellipsis):
            if f.name:
                msg = "Cannot use a name for ellipsis (inlining operation doesn't accept a name)"
                raise Signal.make(T.SyntaxError, f, msg)
            t = obj.type
            assert ((t <= T.table) or (t <= T.struct))
            for n in f.value.exclude:
                if isinstance(n, ast.Marker):
                    # Autocomplete probe in the exclude list: suggest the
                    # attributes that can still be excluded.
                    raise AutocompleteSuggestions({k: (0, v) for (k, v) in t.elems.items() if ((k not in direct_names) and (k not in f.value.exclude))})
                if (n in direct_names):
                    raise Signal.make(T.NameError, n, f"Field to exclude '{n}' is explicitely included in projection")
            if f.value.from_struct:
                # `...struct_expr` form: evaluate the struct in obj's attribute
                # scope and inline its attributes instead of obj's.
                with use_scope(obj.all_attrs()):
                    s = evaluate(f.value.from_struct)
                if (not (s.type <= T.struct)):
                    raise Signal.make(T.TypeError, s, f'Cannot inline objects of type {s.type}')
                items = s.attrs
            else:
                # Bare `...`: inline all of obj's attributes.
                items = obj.all_attrs()
            try:
                remaining_items = list(_exclude_items(items, set(f.value.exclude), direct_names))
            except ValueError as e:
                # _exclude_items reports unknown exclude names via ValueError args.
                fte = set(e.args[0])
                raise Signal.make(T.NameError, obj, f"Fields to exclude '{fte}' not found")
            exclude = (direct_names | set(f.value.exclude))
            for (name, value) in remaining_items:
                assert isinstance(name, str)
                assert (name not in exclude)
                # Synthesized fields are marked user_defined=False and inherit
                # the ellipsis' source location.
                (yield ast.NamedField(name, value, user_defined=False).set_text_ref(f.text_ref))
        else:
            (yield f)
def decodezerostr(barr):
    """Decode a (possibly NUL-terminated) byte buffer to a stripped string.

    Bytes up to the first 0x00 (or the whole buffer if none) are decoded as
    UTF-8; if decoding fails, the `str()` repr of the raw bytes is returned
    instead. Whitespace is stripped from the result.
    """
    nul_index = barr.find(b'\x00')
    chunk = barr[:nul_index] if nul_index != -1 else barr
    try:
        result = chunk.decode('utf-8')
    # Narrowed from a bare `except:`; AttributeError keeps the str() fallback
    # for non-bytes slices, UnicodeDecodeError covers invalid UTF-8.
    except (UnicodeDecodeError, AttributeError):
        result = str(chunk)
    return result.strip()
def to_jfed(topology, path, testbed='wall1.ilabt.iminds.be', encoding='utf-8', prettyprint=True):
    """Serialize an FNSS topology to a jFed RSpec XML file.

    Each node becomes a <node> element (with a canvas location derived from a
    random layout); each edge becomes a <link> carrying two directed
    <property> entries with latency (ms) and capacity (Kbps) when available.

    NOTE(review): several attribute-value string literals below are truncated
    to a lone quote — the schemaLocation/xmlns URL constants appear to have
    been stripped from this copy and must be restored from the original
    source before this function can run.
    """
    if topology.is_directed():
        topology = topology.to_undirected()
    topology = nx.convert_node_labels_to_integers(topology)
    # Normalization factors to jFed's units (Kbps for capacity, ms for delay).
    # NOTE(review): each *_norm is bound only when its unit is present in the
    # graph attributes; presumably FNSS guarantees the unit exists whenever
    # delays/capacities do — confirm, otherwise this can raise NameError below.
    if ('capacity_unit' in topology.graph):
        capacity_norm = (units.capacity_units[topology.graph['capacity_unit']] / units.capacity_units['Kbps'])
    if ('delay_unit' in topology.graph):
        delay_norm = (units.time_units[topology.graph['delay_unit']] / units.time_units['ms'])
    delays = get_delays(topology)
    capacities = get_capacities(topology)
    pos = nx.random_layout(topology)
    # Per-node mapping: neighbor -> local interface index (sorted for stability).
    if_names = {}
    for v in topology.adj:
        next_hops = sorted(topology.adj[v].keys())
        if_names[v] = {next_hop: i for (i, next_hop) in enumerate(next_hops)}
    head = ET.Element('rspec')
    head.attrib['generated_by'] = 'FNSS'
    head.attrib['xsi:schemaLocation'] = '
    head.attrib['xmlns'] = '
    head.attrib['xmlns:jFed'] = '
    head.attrib['xmlns:jFedBonfire'] = '
    head.attrib['xmlns:delay'] = '
    head.attrib['xmlns:xsi'] = '
    for v in topology.nodes():
        node = ET.SubElement(head, 'node')
        node.attrib['client_id'] = ('node%s' % str(v))
        node.attrib['component_manager_id'] = ('urn:publicid:IDN+%s+authority+cm' % testbed)
        node.attrib['exclusive'] = 'true'
        sliver_type = ET.SubElement(node, 'sliver_type')
        # NOTE(review): topology.node[] is the pre-2.4 networkx API; newer
        # releases expose this mapping as topology.nodes[].
        sliver_type.attrib['name'] = (topology.node[v]['sliver_type'] if ('sliver_type' in topology.node[v]) else 'raw-pc')
        location = ET.SubElement(node, 'jFed:location')
        (x, y) = pos[v]
        # Scale the unit-square layout coordinates to the jFed canvas.
        location.attrib['x'] = str((1000 * x))
        location.attrib['y'] = str((500 * y))
        for if_name in if_names[v].values():
            interface = ET.SubElement(node, 'interface')
            interface.attrib['client_id'] = ('node%s:if%s' % (str(v), str(if_name)))
    # Link ids start right after the highest node index.
    link_id = (topology.number_of_nodes() - 1)
    for (u, v) in topology.edges():
        link_id += 1
        link = ET.SubElement(head, 'link')
        link.attrib['client_id'] = ('link%s' % str(link_id))
        component_manager = ET.SubElement(link, 'component_manager')
        component_manager.attrib['name'] = ('urn:publicid:IDN+%s+authority+cm' % testbed)
        u_if = ('node%s:if%s' % (str(u), str(if_names[u][v])))
        v_if = ('node%s:if%s' % (str(v), str(if_names[v][u])))
        # Emit both directions of the undirected link.
        for (source, dest) in ((u_if, v_if), (v_if, u_if)):
            prop = ET.SubElement(link, 'property')
            prop.attrib['source_id'] = source
            prop.attrib['dest_id'] = dest
            if ((u, v) in delays):
                prop.attrib['latency'] = str((delay_norm * delays[(u, v)]))
            if ((u, v) in capacities):
                prop.attrib['capacity'] = str((capacity_norm * capacities[(u, v)]))
            interface_ref = ET.SubElement(link, 'interface_ref')
            interface_ref.attrib['client_id'] = source
    if prettyprint:
        util.xml_indent(head)
    ET.ElementTree(head).write(path, encoding=encoding)
def main():
    """Ansible module entry point for firewall_vipgrp46.

    Builds the argument spec from the versioned schema, talks to FortiOS over
    the httpapi connection, and reports changed/failed state back to Ansible.
    """
    module_spec = schema_to_module_spec(versioned_schema)
    mkeyname = 'name'
    fields = {
        'access_token': {'required': False, 'type': 'str', 'no_log': True},
        'enable_log': {'required': False, 'type': 'bool', 'default': False},
        'vdom': {'required': False, 'type': 'str', 'default': 'root'},
        'member_path': {'required': False, 'type': 'str'},
        'member_state': {'type': 'str', 'required': False, 'choices': ['present', 'absent']},
        'state': {'required': True, 'type': 'str', 'choices': ['present', 'absent']},
        'firewall_vipgrp46': {'required': False, 'type': 'dict', 'default': None, 'options': {}},
    }
    # Mirror the versioned schema into the nested option spec; the mkey
    # attribute becomes mandatory.
    for attribute_name in module_spec['options']:
        fields['firewall_vipgrp46']['options'][attribute_name] = module_spec['options'][attribute_name]
        if mkeyname and mkeyname == attribute_name:
            fields['firewall_vipgrp46']['options'][attribute_name]['required'] = True

    module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
    check_legacy_fortiosapi(module)

    is_error = False
    has_changed = False
    result = None
    diff = None
    versions_check_result = None
    if not module._socket_path:
        # No httpapi connection available; fail_json() terminates the module.
        module.fail_json(**FAIL_SOCKET_MSG)
    connection = Connection(module._socket_path)
    if 'access_token' in module.params:
        connection.set_option('access_token', module.params['access_token'])
    # Propagate the logging flag (default off).
    connection.set_option('enable_log', module.params.get('enable_log', False))
    fos = FortiOSHandler(connection, module, mkeyname)
    versions_check_result = check_schema_versioning(fos, versioned_schema, 'firewall_vipgrp46')
    is_error, has_changed, result, diff = fortios_firewall(module.params, fos, module.check_mode)

    version_mismatch = bool(versions_check_result and versions_check_result['matched'] is False)
    if version_mismatch:
        module.warn('Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv')
    if is_error:
        if version_mismatch:
            module.fail_json(msg='Error in repo', version_check_warning=versions_check_result, meta=result)
        else:
            module.fail_json(msg='Error in repo', meta=result)
    elif version_mismatch:
        module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result, diff=diff)
    else:
        module.exit_json(changed=has_changed, meta=result, diff=diff)
def _set_logger():
    """Return the 'TfPoseEstimator' logger configured with a DEBUG stream handler.

    The handler is attached only once: the original version appended a new
    StreamHandler on every call, duplicating each log line on repeated use.
    """
    logger = logging.getLogger('TfPoseEstimator')
    logger.setLevel(logging.DEBUG)
    if not logger.handlers:
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)
    return logger
def extractDragomirCM(item):
    """Map a DragomirCM feed item to a release message.

    Returns None for previews or unparseable titles, a release message for
    known series tags, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    if not postfix and ':' in item['title']:
        postfix = item['title'].split(':')[-1]
    # Tag -> canonical series title (checked in declaration order).
    series_by_tag = {
        'Magic Academy': 'I was reincarnated as a Magic Academy!',
        '100 Luck': '100 Luck and the Dragon Tamer Skill!',
    }
    for tag, series_title in series_by_tag.items():
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series_title, vol, chp, frag=frag, postfix=postfix, tl_type='oel')
    return False
def is_comment(extension, string):
    """Return True if *string* looks like a comment for files of *extension*.

    Checks, in order: a block-comment opener prefix, a block-comment closer
    suffix, and a line-comment prefix, using the module-level marker tables.
    """
    # Hoist the strip: the original recomputed string.strip() per check.
    stripped = string.strip()
    begin_marker = __comment_begining__.get(extension)
    if begin_marker is not None and stripped.startswith(begin_marker):
        return True
    end_marker = __comment_end__.get(extension)
    if end_marker is not None and stripped.endswith(end_marker):
        return True
    line_marker = __comment__.get(extension)
    if line_marker is not None and stripped.startswith(line_marker):
        return True
    return False
def get_db_engine(*, config: (FidesConfig | None)=None, database_uri: ((str | URL) | None)=None, pool_size: int=50, max_overflow: int=50) -> Engine:
    """Create a SQLAlchemy engine from an explicit URI or from the app config.

    An explicit `database_uri` wins; otherwise the config supplies the test or
    real database URI depending on `config.test_mode`.

    Raises:
        ValueError: if neither a config nor a database_uri is provided.
    """
    if not config and not database_uri:
        raise ValueError('Either a config or database_uri is required')
    if not database_uri and config:
        database_uri = (
            config.database.sqlalchemy_test_database_uri
            if config.test_mode
            else config.database.sqlalchemy_database_uri
        )
    return create_engine(database_uri, pool_pre_ping=True, pool_size=pool_size, max_overflow=max_overflow)
def test_gc_closes_socket(unused_tcp_port):
    """The socket returned by find_available_port keeps the port reserved
    until the last reference is dropped.

    NOTE: relies on CPython's immediate refcount-based collection closing the
    socket as soon as `orig_sock` is rebound — statement order matters here.
    """
    # One-port range so the port below is the only candidate.
    custom_range = range(unused_tcp_port, (unused_tcp_port + 1))
    (_, port, orig_sock) = port_handler.find_available_port(custom_range=custom_range, custom_host='127.0.0.1')
    assert (port == unused_tcp_port)
    assert (orig_sock is not None)
    # fileno() == -1 would mean the socket had already been closed.
    assert (orig_sock.fileno() != (- 1))
    # While orig_sock is alive, the single port cannot be re-acquired...
    with pytest.raises(port_handler.NoPortsInRangeException):
        port_handler.find_available_port(custom_range=custom_range, custom_host='127.0.0.1')
    # ...even when the helper is allowed to close/reopen its probe socket.
    with pytest.raises(port_handler.NoPortsInRangeException):
        port_handler.find_available_port(custom_range=custom_range, will_close_then_reopen_socket=True, custom_host='127.0.0.1')
    # Drop the last reference: the socket is closed, freeing the port.
    orig_sock = None
    (_, port, orig_sock) = port_handler.find_available_port(custom_range=custom_range, custom_host='127.0.0.1')
    assert (port == unused_tcp_port)
    assert (orig_sock is not None)
    assert (orig_sock.fileno() != (- 1))
class TestCliGenerateCommand():
    """CLI `generate` command: exit-code behaviour for help, valid and invalid generators."""

    def test_should_exit_zero_when_invoked_with_help(self, monkeypatch, fake_project_with_generators, cli_runner):
        monkeypatch.chdir(fake_project_with_generators['root'])
        result = cli_runner.invoke(get_generate_cmd(), ['--help'])
        assert result.exit_code == 0

    def test_should_exit_error_when_invoked_with_invalid_option(self, cli_runner):
        result = cli_runner.invoke(get_generate_cmd(), ['--not_exists'])
        assert result.exit_code == 2

    # NOTE(review): this decorator had lost its leading `@pytest.mark` in
    # transit (the bare `.parametrize(...)` line was a syntax error); restored.
    @pytest.mark.parametrize('name', ['controller', 'ctl', 'generator', 'gen', 'foobar', 'foo', 'my-controller', 'my-ctl'])
    def test_should_exit_zero_when_invoked_existing_generator(self, monkeypatch, fake_project_with_generators, cli_runner, name):
        monkeypatch.chdir(fake_project_with_generators['root'])
        result = cli_runner.invoke(get_generate_cmd(), [name, '--help'])
        assert result.exit_code == 0

    def test_should_exit_error_when_invoked_not_existing_generator(self, monkeypatch, fake_project, cli_runner):
        monkeypatch.chdir(fake_project['root'])
        result = cli_runner.invoke(get_generate_cmd(), ['notexist', '--help'])
        assert result.exit_code == 2
def main():
    """Ansible module entry point for dlp_settings.

    Builds the argument spec from the versioned schema, talks to FortiOS over
    the httpapi connection, and reports changed/failed state back to Ansible.
    """
    module_spec = schema_to_module_spec(versioned_schema)
    mkeyname = None
    fields = {
        'access_token': {'required': False, 'type': 'str', 'no_log': True},
        'enable_log': {'required': False, 'type': 'bool', 'default': False},
        'vdom': {'required': False, 'type': 'str', 'default': 'root'},
        'member_path': {'required': False, 'type': 'str'},
        'member_state': {'type': 'str', 'required': False, 'choices': ['present', 'absent']},
        'dlp_settings': {'required': False, 'type': 'dict', 'default': None, 'options': {}},
    }
    # Mirror the versioned schema into the nested option spec; the mkey
    # attribute (none for this endpoint) would become mandatory.
    for attribute_name in module_spec['options']:
        fields['dlp_settings']['options'][attribute_name] = module_spec['options'][attribute_name]
        if mkeyname and mkeyname == attribute_name:
            fields['dlp_settings']['options'][attribute_name]['required'] = True

    module = AnsibleModule(argument_spec=fields, supports_check_mode=False)
    check_legacy_fortiosapi(module)

    is_error = False
    has_changed = False
    result = None
    diff = None
    versions_check_result = None
    if not module._socket_path:
        # No httpapi connection available; fail_json() terminates the module.
        module.fail_json(**FAIL_SOCKET_MSG)
    connection = Connection(module._socket_path)
    if 'access_token' in module.params:
        connection.set_option('access_token', module.params['access_token'])
    # Propagate the logging flag (default off).
    connection.set_option('enable_log', module.params.get('enable_log', False))
    fos = FortiOSHandler(connection, module, mkeyname)
    versions_check_result = check_schema_versioning(fos, versioned_schema, 'dlp_settings')
    is_error, has_changed, result, diff = fortios_dlp(module.params, fos)

    version_mismatch = bool(versions_check_result and versions_check_result['matched'] is False)
    if version_mismatch:
        module.warn('Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv')
    if is_error:
        if version_mismatch:
            module.fail_json(msg='Error in repo', version_check_warning=versions_check_result, meta=result)
        else:
            module.fail_json(msg='Error in repo', meta=result)
    elif version_mismatch:
        module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result, diff=diff)
    else:
        module.exit_json(changed=has_changed, meta=result, diff=diff)
class DirectorBackendAllOf(ModelNormal):
    """Generated OpenAPI model for the director/backend association.

    NOTE(review): the `@cached_property`, `@classmethod` and
    `@convert_js_args_to_python_args` decorators below had lost their `@...`
    prefixes in transit (the bare `_property` / `_js_args_to_python_args`
    lines were no-op expression statements); restored to match the
    generator's conventions — confirm against upstream.
    """
    allowed_values = {}
    validations = {}

    @cached_property
    def additional_properties_type():
        """Types accepted for properties not listed in attribute_map."""
        return (bool, date, datetime, dict, float, int, list, str, none_type)

    _nullable = False

    @cached_property
    def openapi_types():
        """Declared property name -> tuple of accepted types."""
        return {'backend_name': (str,), 'director': (str,)}

    @cached_property
    def discriminator():
        return None

    attribute_map = {'backend_name': 'backend_name', 'director': 'director'}
    read_only_vars = {}
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Instantiate from deserialized API data (read-only attrs permitted)."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            # Optionally discard unknown keys when the configuration says so
            # and the model declares no additional properties.
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Instantiate from user-supplied kwargs; read-only attrs are rejected."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
class FaceCollection():
    """Facade over the CompreFace face-collection endpoints
    (list / add / delete / delete_all / verify)."""

    def __init__(self, api_key: str, domain: str, port: str, options: AllOptionsDict = {}):
        # NOTE(review): the mutable default `{}` is kept for interface
        # compatibility; it is never mutated here.
        self.available_services = []
        self.api_key = api_key
        # Collection-wide default request options, used when a call passes none.
        self.options = options
        self.add_example: AddExampleOfSubject = AddExampleOfSubject(domain=domain, port=port, api_key=api_key)
        self.list_of_all_saved_subjects: ListOfAllSavedSubjects = ListOfAllSavedSubjects(domain=domain, port=port, api_key=api_key)
        self.delete_all_examples_of_subject_by_name: DeleteAllExamplesOfSubjectByName = DeleteAllExamplesOfSubjectByName(domain=domain, port=port, api_key=api_key)
        self.delete_all_examples_by_id: DeleteExampleById = DeleteExampleById(domain=domain, port=port, api_key=api_key)
        self.verify_face_from_image: VerificationFaceFromImage = VerificationFaceFromImage(domain=domain, port=port, api_key=api_key)

    def list(self) -> dict:
        """Return all subjects saved in the collection."""
        return self.list_of_all_saved_subjects.execute()

    def add(self, image_path: str, subject: str, options: DetProbOptionsDict = {}) -> dict:
        """Add an example image for *subject*.

        When no per-call options are given, fall back to the collection-wide
        options (passed through pass_dict, presumably to narrow them to this
        endpoint's fields).
        """
        request = AddExampleOfSubject.Request(api_key=self.api_key, image_path=image_path, subject=subject)
        # BUGFIX: previously the empty per-call dict was passed through
        # pass_dict, making the fallback a no-op; self.options (stored in
        # __init__ and otherwise unused) is the intended fallback.
        return self.add_example.execute(request, pass_dict(self.options, DetProbOptionsDict) if options == {} else options)

    def delete(self, image_id: str) -> dict:
        """Delete a single example by its image id."""
        request = DeleteExampleById.Request(api_key=self.api_key, image_id=image_id)
        return self.delete_all_examples_by_id.execute(request)

    def delete_all(self, subject: str) -> dict:
        """Delete every example belonging to *subject*."""
        request = DeleteAllExamplesOfSubjectByName.Request(api_key=self.api_key, subject=subject)
        return self.delete_all_examples_of_subject_by_name.execute(request)

    def verify(self, image_path: str, image_id: str, options: ExpandedOptionsDict = {}) -> dict:
        """Verify the face on *image_path* against the stored example *image_id*.

        Same options fallback as add() (see BUGFIX note there).
        """
        request = VerificationFaceFromImage.Request(api_key=self.api_key, image_path=image_path, image_id=image_id)
        return self.verify_face_from_image.execute(request, pass_dict(self.options, ExpandedOptionsDict) if options == {} else options)
def read_version_from_file(filename: str) -> Optional[str]:
    """Extract a semantic version ("X.Y.Z") from a ``version=`` line in *filename*.

    :param filename: path of the file to scan.
    :returns: the matched version string, or ``None`` when no match is found.
    """
    with open(filename, 'r') as fh:
        match = re.search(r'version=(\d+\.\d+\.\d+)', fh.read())
    return match.group(1) if match else None
class EmbeddingEngingOperator(MapOperator[(ChatContext, ChatContext)]):
    """AWEL map operator that enriches a ChatContext with knowledge-space context.

    Runs a vector-store similarity search for the current user input and stores
    the retrieved documents (plus the question and source-file "relations")
    into ``input_value.input_values`` for downstream prompt rendering.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    async def map(self, input_value: ChatContext) -> ChatContext:
        """Retrieve docs for the selected knowledge space and return the updated context."""
        # Imported lazily so the heavy rag/embedding stack is only loaded when used.
        from dbgpt.configs.model_config import EMBEDDING_MODEL_CONFIG
        from dbgpt.rag.embedding_engine.embedding_engine import EmbeddingEngine
        from dbgpt.rag.embedding_engine.embedding_factory import EmbeddingFactory
        knowledge_space = input_value.select_param
        vector_store_config = {'vector_store_name': knowledge_space, 'vector_store_type': CFG.VECTOR_STORE_TYPE}
        embedding_factory = self.system_app.get_component('embedding_factory', EmbeddingFactory)
        knowledge_embedding_client = EmbeddingEngine(model_name=EMBEDDING_MODEL_CONFIG[CFG.EMBEDDING_MODEL], vector_store_config=vector_store_config, embedding_factory=embedding_factory)
        # Per-space settings (if any) override the global CFG defaults.
        space_context = (await self._get_space_context(knowledge_space))
        top_k = (CFG.KNOWLEDGE_SEARCH_TOP_SIZE if (space_context is None) else int(space_context['embedding']['topk']))
        max_token = (CFG.KNOWLEDGE_SEARCH_MAX_TOKEN if ((space_context is None) or (space_context.get('prompt') is None)) else int(space_context['prompt']['max_token']))
        input_value.prompt_template.template_is_strict = False
        if (space_context and space_context.get('prompt')):
            input_value.prompt_template.template_define = space_context['prompt']['scene']
            input_value.prompt_template.template = space_context['prompt']['template']
        # The similarity search is blocking, so run it off the event loop.
        docs = (await self.blocking_func_to_async(knowledge_embedding_client.similar_search, input_value.current_user_input, top_k))
        if ((not docs) or (len(docs) == 0)):
            print('no relevant docs to retrieve')
            context = 'no relevant docs to retrieve'
        else:
            context = [d.page_content for d in docs]
            # NOTE(review): this slices the *list of documents* by max_token,
            # i.e. it caps the document count, not the token count — confirm
            # this is intended.
            context = context[:max_token]
        relations = list(set([os.path.basename(str(d.metadata.get('source', ''))) for d in docs]))
        input_value.input_values = {'context': context, 'question': input_value.current_user_input, 'relations': relations}
        return input_value
    async def _get_space_context(self, space_name):
        """Fetch the per-space configuration dict via the knowledge service (blocking call offloaded)."""
        from dbgpt.app.knowledge.service import KnowledgeService
        service = KnowledgeService()
        return (await self.blocking_func_to_async(service.get_space_context, space_name))
class HTTPProxyGETTestCase(HTTPProxyTestCase):
    """DoH proxy tests exercising the HTTP GET endpoint.

    NOTE(review): the bare ``.object( 'resolve')`` and ``_run_loop`` lines
    below look like decorators whose ``@`` prefix and patch target were lost
    in extraction (presumably ``@...patch.object(<resolver>, 'resolve')`` and
    an event-loop-runner decorator) — confirm against upstream; as written
    these lines are not valid statements.
    """
    def setUp(self):
        super().setUp()
        self.method = 'GET'
    .object( 'resolve')
    _run_loop
    async def test_get_valid_request(self, resolve):
        """A well-formed GET query returns 200 and echoes the DNS message."""
        resolve.return_value = echo_dns_q(self.dnsq)
        params = utils.build_query_params(self.dnsq.to_wire())
        request = (await self.client.request(self.method, self.endpoint, params=params))
        self.assertEqual(request.status, 200)
        content = (await request.read())
        self.assertEqual(self.dnsq, dns.message.from_wire(content))
    .object( 'resolve')
    _run_loop
    async def test_get_request_bad_content_type(self, resolve):
        """An unrecognized 'ct' parameter is ignored; the query still succeeds."""
        resolve.return_value = echo_dns_q(self.dnsq)
        params = utils.build_query_params(self.dnsq.to_wire())
        params['ct'] = 'bad/type'
        request = (await self.client.request(self.method, self.endpoint, params=params))
        self.assertEqual(request.status, 200)
        content = (await request.read())
        self.assertEqual(self.dnsq, dns.message.from_wire(content))
    .object( 'resolve')
    _run_loop
    async def test_get_request_no_content_type(self, resolve):
        """A GET query without a content-type parameter succeeds."""
        resolve.return_value = echo_dns_q(self.dnsq)
        params = utils.build_query_params(self.dnsq.to_wire())
        request = (await self.client.request(self.method, self.endpoint, params=params))
        self.assertEqual(request.status, 200)
        content = (await request.read())
        self.assertEqual(self.dnsq, dns.message.from_wire(content))
    _run_loop
    async def test_get_request_empty_body(self):
        """An empty DNS parameter yields 400 with a 'Missing Body' message."""
        params = utils.build_query_params(self.dnsq.to_wire())
        params[constants.DOH_DNS_PARAM] = ''
        request = (await self.client.request(self.method, self.endpoint, params=params))
        self.assertEqual(request.status, 400)
        content = (await request.read())
        self.assertEqual(content, b'Missing Body')
    _run_loop
    async def test_get_request_bad_dns_request(self):
        """A non-decodable DNS parameter yields 400 with an invalid-parameter message."""
        params = utils.build_query_params(self.dnsq.to_wire())
        params[constants.DOH_DNS_PARAM] = 'dummy'
        request = (await self.client.request(self.method, self.endpoint, params=params))
        self.assertEqual(request.status, 400)
        content = (await request.read())
        self.assertEqual(content, b'Invalid Body Parameter')
class testNetAsciiReader(unittest.TestCase):
    """Tests for NetasciiReader's newline translation and size reporting."""

    def testNetAsciiReader(self):
        """Each input is translated to the expected netascii byte sequence."""
        cases = [
            ('foo\nbar\nand another\none', bytearray(b'foo\r\nbar\r\nand another\r\none')),
            ('foo\r\nbar\r\nand another\r\none', bytearray(b'foo\r\x00\r\nbar\r\x00\r\nand another\r\x00\r\none')),
        ]
        for (raw, translated) in cases:
            with self.subTest(content=raw):
                reader = NetasciiReader(StringResponseData(raw))
                # The translated stream is strictly longer than the raw text.
                self.assertGreater(reader.size(), len(raw))
                self.assertEqual(reader.read(512), translated)
                reader.close()

    def testNetAsciiReaderBig(self):
        """Reading a large input in blocks preserves every newline translation."""
        # The original content doubled five times (2**5 = 32 repetitions).
        content = 'I\nlike\ncrunchy\nbacon\n' * 32
        reader = NetasciiReader(StringResponseData(content))
        self.assertGreater(reader.size(), 0)
        self.assertGreater(reader.size(), len(content))
        chunk_size = 512
        collected = bytearray()
        while True:
            chunk = reader.read(chunk_size)
            collected += chunk
            if len(chunk) < chunk_size:
                break
        self.assertEqual(content.count('\n'), collected.count(b'\r\n'))
        reader.close()
class DictMeta(Meta):
    """Metaclass machinery giving ``Dict[K, V]`` subscription support.

    Subscripting builds a fresh ``Dict`` subclass carrying the key/value
    types; string arguments are resolved through ``str2type`` first.
    """

    def __getitem__(self, types):
        (keys_t, values_t) = types
        if isinstance(keys_t, str):
            keys_t = str2type(keys_t)
        if isinstance(values_t, str):
            values_t = str2type(values_t)
        return type('DictBis', (Dict,), {'type_keys': keys_t, 'type_values': values_t})

    def get_template_parameters(self):
        """Collect template parameters from the key type, then the value type."""
        params = []
        for part in (self.type_keys, self.type_values):
            if hasattr(part, 'get_template_parameters'):
                params.extend(part.get_template_parameters())
        return params

    def __repr__(self):
        # An unsubscripted Dict has no type_keys; fall back to the base repr.
        if not hasattr(self, 'type_keys'):
            return super().__repr__()
        key = self.type_keys.__name__ if isinstance(self.type_keys, type) else repr(self.type_keys)
        value = self.type_values.__name__ if isinstance(self.type_values, type) else repr(self.type_values)
        return f'Dict[{key}, {value}]'

    def format_as_backend_type(self, backend_type_formatter, **kwargs):
        """Delegate code generation for this dict type to the backend formatter."""
        return backend_type_formatter.make_dict_code(self.type_keys, self.type_values, **kwargs)
class LTImage(LTComponent):
    """Layout object for an embedded image extracted from a PDF content stream."""

    def __init__(self, name, stream, bbox):
        LTComponent.__init__(self, bbox)
        self.name = name
        self.stream = stream
        # Image attributes come in both abbreviated and long PDF key forms.
        self.srcsize = (stream.get_any(('W', 'Width')), stream.get_any(('H', 'Height')))
        self.imagemask = stream.get_any(('IM', 'ImageMask'))
        self.bits = stream.get_any(('BPC', 'BitsPerComponent'), 1)
        colorspace = stream.get_any(('CS', 'ColorSpace'))
        # Normalize to a list so consumers can iterate colorspaces uniformly.
        self.colorspace = colorspace if isinstance(colorspace, list) else [colorspace]

    def __repr__(self):
        return '<%s(%s) %s %r>' % (self.__class__.__name__, self.name, bbox2str(self.bbox), self.srcsize)
# NOTE(review): the two `_blueprint.route(...)` calls below look like Flask
# route decorators whose leading `@` was lost in extraction — confirm upstream.
_blueprint.route('/project/<project_name>')
_blueprint.route('/project/<project_name>/')
def project_name(project_name):
    """Search projects by name pattern and render the results page.

    When exactly one project matches, renders that project's page directly
    instead of the search results.
    """
    # `page` defaults to 1 and falls back to 1 on non-integer input.
    page = flask.request.args.get('page', 1)
    try:
        page = int(page)
    except ValueError:
        page = 1
    projects = models.Project.search(Session, pattern=project_name, page=page)
    projects_count = models.Project.search(Session, pattern=project_name, count=True)
    if (projects_count == 1):
        return project(projects[0].id)
    # 50 results per page — presumably matching the search() page size; confirm.
    total_page = int(ceil((projects_count / float(50))))
    return flask.render_template('search.html', current='projects', pattern=project_name, projects=projects, total_page=total_page, projects_count=projects_count, page=page)
class OptionPlotoptionsWaterfallSonificationDefaultinstrumentoptionsMappingNoteduration(Options):
    """Generated Highcharts option wrapper for sonification note-duration mapping.

    NOTE(review): each getter/setter pair below shares one name with no
    ``@property`` / ``@<name>.setter`` decorators visible, so each setter
    definition shadows the getter above it. The same pattern appears in the
    file's other generated Options classes — presumably decorators were
    stripped in extraction; confirm against the generator's output.
    """
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
def hass_announce_sensor_input(sens_conf: ConfigType, mqtt_conf: ConfigType, mqtt_options: MQTTClientOptions) -> MQTTMessageSend:
    """Build the retained Home Assistant MQTT-discovery message for a sensor input."""
    disco_conf: ConfigType = mqtt_conf['ha_discovery']
    name: str = sens_conf['name']
    prefix: str = mqtt_conf['topic_prefix']
    disco_prefix: str = disco_conf['prefix']
    sensor_config = get_common_config(sens_conf, mqtt_conf, mqtt_options)
    sensor_config['unique_id'] = f"{mqtt_options.client_id}_{sens_conf['module']}_sensor_{name}"
    sensor_config['state_topic'] = '/'.join((prefix, SENSOR_TOPIC, name))
    if 'expire_after' not in sensor_config:
        # Two missed publish intervals plus a small grace period.
        sensor_config['expire_after'] = ((sens_conf['interval'] * 2) + 5)
    component = sensor_config.pop('component', 'sensor')
    config_topic = '/'.join((disco_prefix, component, mqtt_options.client_id, name, 'config'))
    return MQTTMessageSend(config_topic, json.dumps(sensor_config).encode('utf8'), retain=True)
class ScrollBar():
    """Draws a scrollbar reflecting the viewport's position within all lines."""

    def __init__(self, printer: ColorPrinter, lines: Dict[(int, LineBase)], screen_control: 'Controller'):
        self.printer = printer
        self.screen_control = screen_control
        self.num_lines = len(lines)
        self.box_start_fraction = 0.0
        self.box_stop_fraction = 0.0
        self.calc_box_fractions()
        # Only show the bar when the content overflows the screen height.
        self.activated = True
        (max_y, _max_x) = self.screen_control.get_screen_dimensions()
        if self.num_lines < max_y:
            self.activated = False
            logger.add_event('no_scrollbar')
        else:
            logger.add_event('needed_scrollbar')

    def get_is_activated(self) -> bool:
        return self.activated

    def calc_box_fractions(self) -> None:
        """Recompute the thumb's start/stop positions as fractions of total lines."""
        (screen_height, _screen_width) = self.screen_control.get_screen_dimensions()
        visible_fraction = min(1.0, screen_height / float(self.num_lines))
        # Scroll offset is negative-or-zero, so negate it for the start fraction.
        self.box_start_fraction = -self.screen_control.get_scroll_offset() / float(self.num_lines)
        self.box_stop_fraction = self.box_start_fraction + visible_fraction

    def output(self) -> None:
        """Draw all scrollbar pieces, swallowing curses edge-drawing errors."""
        if not self.activated:
            return
        for draw in (self.output_caps, self.output_base, self.output_box, self.output_border):
            try:
                draw()
            except curses.error:
                pass

    def get_min_y(self) -> int:
        """First drawable row: one below the top chrome boundary."""
        return self.screen_control.get_chrome_boundaries()[1] + 1

    def get_x(self) -> int:
        """The scrollbar's column (leftmost)."""
        return 0

    def output_border(self) -> None:
        """Paint the blank column separating the bar from the content."""
        border_x = self.get_x() + 4
        (screen_height, _screen_width) = self.screen_control.get_screen_dimensions()
        for row in range(0, screen_height):
            self.printer.addstr(row, border_x, ' ')

    def output_box(self) -> None:
        """Draw the thumb between its start and stop rows."""
        (screen_height, _screen_width) = self.screen_control.get_screen_dimensions()
        bottom = screen_height - 2
        top = self.get_min_y()
        span = bottom - top
        col = self.get_x()
        start_row = int(span * self.box_start_fraction) + top
        stop_row = int(span * self.box_stop_fraction) + top
        self.printer.addstr(start_row, col, '/-\\')
        for row in range(start_row + 1, stop_row):
            self.printer.addstr(row, col, '|-|')
        self.printer.addstr(stop_row, col, '\\-/')

    def output_caps(self) -> None:
        """Draw the fixed end caps at the top and bottom of the bar."""
        col = self.get_x()
        (screen_height, _screen_width) = self.screen_control.get_screen_dimensions()
        for row in [self.get_min_y() - 1, screen_height - 1]:
            self.printer.addstr(row, col, '===')

    def output_base(self) -> None:
        """Draw the dotted track the thumb slides along."""
        col = self.get_x()
        (screen_height, _screen_width) = self.screen_control.get_screen_dimensions()
        for row in range(self.get_min_y(), screen_height - 1):
            self.printer.addstr(row, col, ' . ')
class ReqStatus(ReqTagGeneric):
    """Requirement tag handler for the mandatory 'Status' tag."""

    def __init__(self, config):
        ReqTagGeneric.__init__(self, config, 'Status', set([InputModuleTypes.reqtag]))

    def rewrite(self, rid, req):
        """Replace the raw Status tag content with a parsed requirement-status value."""
        self.check_mandatory_tag(rid, req, 16)
        raw_status = req[self.get_tag()].get_content()
        status = create_requirement_status(self.get_config(), rid, raw_status)
        # The raw tag is consumed; only the parsed value is returned.
        del req[self.get_tag()]
        return (self.get_tag(), status)
def convert_to_data_url(view: sublime.View, edit: sublime.Edit, region: sublime.Region):
    """Replace the image src in *region* with a base64 ``data:`` URL.

    Respects the ``max_data_url`` setting both for the on-disk file size and
    the read payload size; silently does nothing when the file cannot be
    located, read, or has no known MIME type.
    """
    max_size = emmet.get_settings('max_data_url', 0)
    src = view.substr(region)
    if utils.is_url(src):
        abs_file = src
    elif view.file_name():
        abs_file = utils.locate_file(view.file_name(), src)
    else:
        abs_file = None
    if abs_file and max_size and os.path.getsize(abs_file) > max_size:
        print('Size of %s file is too large. Check "emmet_max_data_url" setting to increase this limit' % abs_file)
        return
    if not abs_file:
        return
    data = utils.read_file(abs_file)
    if not data or (max_size and len(data) > max_size):
        return
    ext = os.path.splitext(abs_file)[1]
    if ext not in mime_types:
        return
    new_src = 'data:%s;base64,%s' % (mime_types[ext], base64.urlsafe_b64encode(data).decode('utf8'))
    view.replace(edit, region, new_src)
class SQLConnector(BaseConnector[Engine]):
    """Base connector for SQL databases, implementing access/erasure via SQLAlchemy.

    Subclasses provide ``secrets_schema`` and implement ``build_uri`` /
    ``set_schema`` as needed.
    """
    # Schema class used to validate connection secrets; must be set by subclasses.
    secrets_schema: Type[ConnectionConfigSecretsSchema]
    def __init__(self, configuration: ConnectionConfig):
        super().__init__(configuration)
        if (not self.secrets_schema):
            raise NotImplementedError('SQL Connectors must define their secrets schema class')
        # Populated by create_ssh_tunnel(); stopped in close().
        self.ssh_server: sshtunnel._ForwardServer = None
    # NOTE(review): no `self` parameter — presumably a @staticmethod decorator
    # was lost in extraction; confirm against upstream.
    def cursor_result_to_rows(results: CursorResult) -> List[Row]:
        """Convert a SQLAlchemy cursor result into a list of name->value dicts."""
        columns: List[Column] = results.cursor.description
        rows = []
        for row_tuple in results:
            rows.append({col.name: row_tuple[count] for (count, col) in enumerate(columns)})
        return rows
    # NOTE(review): no `self` parameter here either — presumably @staticmethod;
    # this variant indexes column descriptions positionally (col[0] is the name).
    def default_cursor_result_to_rows(results: LegacyCursorResult) -> List[Row]:
        """Convert a legacy cursor result into a list of name->value dicts."""
        columns: List[Column] = results.cursor.description
        rows = []
        for row_tuple in results:
            rows.append({col[0]: row_tuple[count] for (count, col) in enumerate(columns)})
        return rows
    def build_uri(self) -> str:
        """Build the database URI from configuration secrets.

        NOTE(review): the body is empty in this extract — presumably an
        abstract stub whose ``...``/docstring was stripped. ``create_client``
        falls back to this when no 'url' secret is present.
        """
    def query_config(self, node: TraversalNode) -> SQLQueryConfig:
        """Return the query-config object used to generate SQL for *node*."""
        return SQLQueryConfig(node)
    def test_connection(self) -> Optional[ConnectionTestStatus]:
        """Open a connection and run ``select 1``; raise ConnectionException on failure."""
        logger.info('Starting test connection to {}', self.configuration.key)
        try:
            engine = self.client()
            with engine.connect() as connection:
                connection.execute('select 1')
        except OperationalError:
            raise ConnectionException(f'Operational Error connecting to {self.configuration.connection_type.value} db.')
        except InternalError:
            raise ConnectionException(f'Internal Error connecting to {self.configuration.connection_type.value} db.')
        except Exception:
            raise ConnectionException('Connection error.')
        return ConnectionTestStatus.succeeded
    def retrieve_data(self, node: TraversalNode, policy: Policy, privacy_request: PrivacyRequest, input_data: Dict[(str, List[Any])]) -> List[Row]:
        """Execute the generated access query for *node* and return result rows."""
        query_config = self.query_config(node)
        client = self.client()
        stmt: Optional[TextClause] = query_config.generate_query(input_data, policy)
        if (stmt is None):
            return []
        logger.info('Starting data retrieval for {}', node.address)
        with client.connect() as connection:
            self.set_schema(connection)
            results = connection.execute(stmt)
            return self.cursor_result_to_rows(results)
    def mask_data(self, node: TraversalNode, policy: Policy, privacy_request: PrivacyRequest, rows: List[Row], input_data: Dict[(str, List[Any])]) -> int:
        """Execute update statements to mask *rows*; return the total updated count."""
        query_config = self.query_config(node)
        update_ct = 0
        client = self.client()
        for row in rows:
            update_stmt: Optional[TextClause] = query_config.generate_update_stmt(row, policy, privacy_request)
            if (update_stmt is not None):
                # A fresh connection per statement; presumably intentional for
                # isolation — confirm before batching.
                with client.connect() as connection:
                    self.set_schema(connection)
                    results: LegacyCursorResult = connection.execute(update_stmt)
                    update_ct = (update_ct + results.rowcount)
        return update_ct
    def close(self) -> None:
        """Dispose of the engine's connection pool and stop any SSH tunnel."""
        if self.db_client:
            logger.debug(' disposing of {}', self.__class__)
            self.db_client.dispose()
        if self.ssh_server:
            self.ssh_server.stop()
    def create_client(self) -> Engine:
        """Create the SQLAlchemy engine from the 'url' secret or build_uri()."""
        uri = ((self.configuration.secrets or {}).get('url') or self.build_uri())
        # Echo SQL only when parameters are not hidden (i.e. in debug setups).
        return create_engine(uri, hide_parameters=self.hide_parameters, echo=(not self.hide_parameters))
    def set_schema(self, connection: Connection) -> None:
        """Optionally switch the active schema on *connection*.

        NOTE(review): empty body in this extract — presumably a no-op default
        overridden by subclasses; confirm upstream.
        """
    def create_ssh_tunnel(self, host: Optional[str], port: Optional[int]) -> None:
        """Create (but do not start) an SSH tunnel forwarder to the bastion host."""
        if (not CONFIG.security.bastion_server_ssh_private_key):
            raise SSHTunnelConfigNotFoundException('Fides is configured to use an SSH tunnel without config provided.')
        with io.BytesIO(CONFIG.security.bastion_server_ssh_private_key.encode('utf8')) as binary_file:
            with io.TextIOWrapper(binary_file, encoding='utf8') as file_obj:
                private_key = paramiko.RSAKey.from_private_key(file_obj=file_obj)
        self.ssh_server = sshtunnel.SSHTunnelForwarder(CONFIG.security.bastion_server_host, ssh_username=CONFIG.security.bastion_server_ssh_username, ssh_pkey=private_key, remote_bind_address=(host, port))
def call_multi_core(pInteractionFilesList, pArgs, pViewpointObj, pBackground, pFilePath, pResolution):
    """Process interaction files across pArgs.threads worker processes and merge results.

    Splits pInteractionFilesList into per-thread slices, runs
    ``compute_interaction_file`` in each worker, polls the result queues, then
    flattens and de-duplicates the collected significant/target data.

    Returns a 6-tuple: (significant_data_list, significant_key_list,
    target_data_list, target_key_list, reference_points_list_target,
    reference_points_list_significant).
    """
    # Per-thread result slots, filled as each worker's queue delivers.
    significant_data_list = ([None] * pArgs.threads)
    significant_key_list = ([None] * pArgs.threads)
    target_data_list = ([None] * pArgs.threads)
    target_key_list = ([None] * pArgs.threads)
    reference_points_list_target = ([None] * pArgs.threads)
    reference_points_list_significant = ([None] * pArgs.threads)
    interactionFilesPerThread = (len(pInteractionFilesList) // pArgs.threads)
    all_data_collected = False
    queue = ([None] * pArgs.threads)
    process = ([None] * pArgs.threads)
    thread_done = ([False] * pArgs.threads)
    fail_flag = False
    fail_message = ''
    for i in range(pArgs.threads):
        # The last worker takes the remainder of the list.
        if (i < (pArgs.threads - 1)):
            interactionFileListThread = pInteractionFilesList[(i * interactionFilesPerThread):((i + 1) * interactionFilesPerThread)]
        else:
            interactionFileListThread = pInteractionFilesList[(i * interactionFilesPerThread):]
        queue[i] = Queue()
        process[i] = Process(target=compute_interaction_file, kwargs=dict(pInteractionFilesList=interactionFileListThread, pArgs=pArgs, pViewpointObj=pViewpointObj, pBackground=pBackground, pFilePath=pFilePath, pResolution=pResolution, pQueue=queue[i]))
        process[i].start()
    # Poll each worker's queue until every thread has delivered its payload.
    while (not all_data_collected):
        for i in range(pArgs.threads):
            if ((queue[i] is not None) and (not queue[i].empty())):
                background_data_thread = queue[i].get()
                # NOTE(review): `in` here relies on the worker sending a plain
                # string starting with 'Fail:' on error (substring test); for
                # the normal tuple payload this is an element-membership test
                # that is False — confirm the worker's error contract.
                if ('Fail:' in background_data_thread):
                    fail_flag = True
                    fail_message = background_data_thread[6:]
                else:
                    (significant_data_list[i], significant_key_list[i], target_data_list[i], target_key_list[i], reference_points_list_target[i], reference_points_list_significant[i]) = background_data_thread
                queue[i] = None
                process[i].join()
                process[i].terminate()
                process[i] = None
                thread_done[i] = True
        all_data_collected = True
        for thread in thread_done:
            if (not thread):
                all_data_collected = False
        time.sleep(1)
    if fail_flag:
        log.error(fail_message)
        exit(1)
    # Flatten the per-thread result lists into single lists/arrays.
    significant_data_list = [item for sublist in significant_data_list for item in sublist]
    significant_key_list = np.array([item for sublist in significant_key_list for item in sublist])
    reference_points_list_target = np.array([item for sublist in reference_points_list_target for item in sublist])
    reference_points_list_significant = np.array([item for sublist in reference_points_list_significant for item in sublist])
    # De-duplicate keys and keep only the data entries for the surviving keys.
    (significant_key_list, indices) = np.unique(significant_key_list, axis=0, return_index=True)
    significant_data_list_new = []
    for x in indices:
        significant_data_list_new.append(significant_data_list[x])
    significant_data_list = significant_data_list_new
    target_data_list = [item for sublist in target_data_list for item in sublist]
    target_key_list = [item for sublist in target_key_list for item in sublist]
    return (significant_data_list, significant_key_list, target_data_list, target_key_list, reference_points_list_target, reference_points_list_significant)
class OptionPlotoptionsVariwideSonificationTracksMappingTremoloSpeed(Options):
    """Generated Highcharts option wrapper for sonification tremolo-speed mapping.

    NOTE(review): each getter/setter pair below shares one name with no
    ``@property`` / ``@<name>.setter`` decorators visible, so each setter
    definition shadows the getter above it — presumably decorators were
    stripped in extraction; confirm against the generator's output.
    """
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class TransactionFPDSSerializer(LimitableSerializer):
    """Serializer exposing all TransactionFPDS model fields, with a default subset."""
    # Presumably disables related-object prefetching in LimitableSerializer — confirm.
    prefetchable = False
    class Meta():
        model = TransactionFPDS
        fields = '__all__'
        # Fields returned when the caller does not request a specific field list.
        default_fields = ['piid', 'parent_award_piid', 'type', 'type_description', 'cost_or_pricing_data', 'type_of_contract_pricing', 'type_of_contract_pricing_description', 'naics', 'naics_description', 'product_or_service_code']
def filter_string(invs: (str | None), domains: (str | None), otype: (str | None), target: (str | None), *, delimiter: str=':') -> str:
str_items = []
for item in (invs, domains, otype, target):
if (item is None):
str_items.append('*')
elif (delimiter in item):
str_items.append(f'"{item}"')
else:
str_items.append(f'{item}')
return delimiter.join(str_items) |
def upgrade():
    """Migrate permissions from per-user/service rows to role/service rows.

    Creates the ``service`` table, replaces the free-form ``modes`` column
    with boolean CRUD flags, rewires foreign keys from users to roles, and
    tightens constraints on ``role``.
    """
    op.create_table(
        'service',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name'),
    )
    # Boolean CRUD flags replace the old free-form `modes` column.
    for flag in ('can_create', 'can_delete', 'can_read', 'can_update'):
        op.add_column(u'permissions', sa.Column(flag, sa.Boolean(), nullable=False))
    op.add_column(u'permissions', sa.Column('role_id', sa.Integer(), nullable=True))
    op.alter_column(u'permissions', 'service_id', existing_type=sa.INTEGER(), nullable=True)
    op.create_unique_constraint('role_service_uc', 'permissions', ['role_id', 'service_id'])
    op.drop_constraint(u'user_service_uc', 'permissions', type_='unique')
    op.drop_constraint(u'permissions_user_id_fkey', 'permissions', type_='foreignkey')
    op.create_foreign_key(None, 'permissions', 'role', ['role_id'], ['id'])
    op.create_foreign_key(None, 'permissions', 'service', ['service_id'], ['id'])
    op.drop_column(u'permissions', 'user_id')
    op.drop_column(u'permissions', 'modes')
    op.drop_column(u'permissions', 'service')
    op.alter_column(u'role', 'name', existing_type=sa.VARCHAR(length=128), nullable=False)
    op.create_unique_constraint(None, 'role', ['name'])
    op.drop_column(u'user', 'role')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.