code stringlengths 281 23.7M |
|---|
_meta(equipment.YinYangOrb)  # NOTE(review): looks like a stripped decorator (originally `@_meta(...)`) — confirm against upstream source
class YinYangOrb():
    # UI-text provider for the Yin-Yang Orb equipment card.
    # NOTE(review): N.char / N.card appear to be name-formatting helpers — verify against their definitions.

    def effect_string(self, act):
        """Return the display string for an activation: target's name followed by the card's name."""
        return f'{N.char(act.target)}{N.card(act.card)}'

    def detach_cards_tip(self, trans: actions.MigrateCardsTransaction, cards: List[Card]) -> str:
        """Tooltip for detaching cards: names the source character of the transaction's action."""
        return f'{N.char(trans.action.source)}'

    def drop_cards_tip(self, trans: actions.MigrateCardsTransaction) -> str:
        """No tooltip is shown when cards are dropped."""
        return ''
def test_definitions_as_mapping():
    """Definitions should behave as a mutable mapping from schema name to schema."""
    artist_schema = typesystem.Schema(fields={'name': typesystem.String(max_length=100)})
    definitions = typesystem.Definitions()
    definitions['Artist'] = artist_schema

    album_schema = typesystem.Schema(
        fields={
            'title': typesystem.String(max_length=100),
            'release_date': typesystem.Date(),
            'artist': typesystem.Reference(to='Artist', definitions=definitions),
        }
    )
    definitions['Album'] = album_schema

    # Mapping reads: item access, dict conversion, length.
    assert definitions['Album'] == album_schema
    assert definitions['Artist'] == artist_schema
    assert dict(definitions) == {'Album': album_schema, 'Artist': artist_schema}
    assert len(definitions) == 2

    # Mapping deletion is supported as well.
    del definitions['Artist']
def get_prompt_selenium(instruction, code=False):
    """Build the Selenium automation prompt for the given instruction.

    Args:
        instruction: The task text inserted near the end of the prompt.
        code: When True, the whole prompt is wrapped in triple double-quotes
            (e.g. for embedding the prompt inside generated source code).

    Returns:
        The prompt string, optionally triple-quote wrapped.
    """
    prompt = f'''
You have an instance `env` with the following methods:
- `env.driver.find_elements(by='id', value=None)` which finds and returns list of WebElement. The arguement `by` is a string that specifies the locator strategy. The arguement `value` is a string that specifies the locator value. `by` is usually `xpath` and `value` is the xpath of the element.
- `env.find_nearest(e, xpath)` can only be used to locate an element that matches the xpath near element e.
- `env.send_keys(text)` is only used to type in string `text`. string ENTER is Keys.ENTER
- `env.get(url)` goes to url.
- `env.get_openai_response(text)` that ask AI about a string `text`.
- `env.click(element)` clicks the element.
WebElement has functions:
1. `element.text` returns the text of the element
2. `element.get_attribute(attr)` returns the value of the attribute of the element. If the attribute does not exist, it returns ''.
3. `element.find_elements(by='id', value=None)` it's the same as `env.driver.find_elements()` except that it only searches the children of the element.
4. `element.is_displayed()` returns if the element is visible
The xpath of a textbox is usually "//div[ = 'textarea']|//div[ = 'textbox']|//input".
The xpath of text is usually "//*[string-length(text()) > 0]".
The xpath for a button is usually "//div[ = 'button']|//button".
The xpath for an element whose text is "text" is "//*[text() = 'text']".
The xpath for the tweet is "//span[contains(text(), '')]".
The xpath for the like button is "//div[ != '' and -testid='like']|//button".
The xpath for the unlike button is "//div[ != '' and -testid='unlike']|//button".
Your code must obey the following constraints:
1. respect the lowercase and uppercase letters in the instruction.
2. Does not call any functions besides those given above and those defined by the base language spec.
3. has correct indentation.
4. only write code
5. only do what I instructed you to do.
{instruction}
```python'''
    if not code:
        return prompt
    # Wrap in triple double-quotes so the prompt can live inside a source string.
    quote = '"' * 3
    return quote + prompt + quote
class BrightnessControl(base._Widget, ExtendedPopupMixin, ProgressBarMixin):
    """Qtile bar widget that displays and adjusts screen backlight brightness.

    Brightness is read from and written to sysfs files under ``device``
    (default ``/sys/class/backlight/intel_backlight``). The widget can render
    either an inline progress bar (``mode='bar'``) or a popup
    (``mode='popup'``), and can optionally react to mains/battery switches
    reported by UPower over D-Bus (``enable_power_saving``).
    """

    orientations = base.ORIENTATION_HORIZONTAL

    # (option name, default value, description) tuples consumed by qtile's config machinery.
    defaults: list[tuple[(str, Any, str)]] = [('font', 'sans', 'Default font'), ('fontsize', None, 'Font size'), ('foreground', 'ffffff', 'Colour of text.'), ('text_format', '{percentage}%', 'Text to display.'), ('bar_colour', '008888', 'Colour of bar displaying brightness level.'), ('error_colour', '880000', 'Colour of bar when displaying an error'), ('timeout_interval', 5, 'Time before widet is hidden.'), ('enable_power_saving', False, 'Automatically set brightness depending on status. Note: this is not checked when the widget is first started.'), ('brightness_on_mains', '100%', 'Brightness level on mains power (accepts integer valueor percentage as string)'), ('brightness_on_battery', '50%', 'Brightness level on battery power (accepts integer value or percentage as string)'), ('device', '/sys/class/backlight/intel_backlight', 'Path to backlight device'), ('step', '5%', 'Amount to change brightness (accepts int or percentage as string)'), ('brightness_path', 'brightness', 'Name of file holding brightness value'), ('max_brightness_path', 'max_brightness', 'Name of file holding max brightness value'), ('min_brightness', 100, 'Minimum brightness. Do not set to 0!'), ('max_brightness', None, 'Set value or leave as None to allow device maximum'), ('mode', 'bar', "Display mode: 'bar' shows bar in widget, 'popup' to display a popup window"), ('popup_layout', BRIGHTNESS_NOTIFICATION, 'Layout for popup mode'), ('popup_hide_timeout', 5, 'Time before popup hides'), ('popup_show_args', {'relative_to': 2, 'relative_to_bar': True, 'y': 50}, 'Control position of popup')]

    _screenshots = [('brightnesscontrol-demo.gif', '')]

    def __init__(self, **config):
        """Initialise mixins, merge option defaults, and read device limits."""
        base._Widget.__init__(self, bar.CALCULATED, **config)
        ExtendedPopupMixin.__init__(self, **config)
        ProgressBarMixin.__init__(self, **config)
        self.add_defaults(ExtendedPopupMixin.defaults)
        self.add_defaults(ProgressBarMixin.defaults)
        self.add_defaults(BrightnessControl.defaults)
        # Backwards-compatibility shims for renamed config options.
        if ('font_colour' in config):
            self.foreground = config['font_colour']
            logger.warning('The use of `font_colour` is deprecated. Please update your config to use `foreground` instead.')
        if ('widget_width' in config):
            self.bar_width = config['widget_width']
            logger.warning('The use of `widget_width` is deprecated. Please update your config to use `bar_width` instead.')
        # Scroll wheel over the widget adjusts brightness.
        self.add_callbacks({'Button4': self.brightness_up, 'Button5': self.brightness_down})
        self.update_timer = None
        # -1 is the sentinel for "no reading yet / error"; draw() renders the error bar for it.
        self.percentage = (- 1)
        self.onbattery = False
        # The bar stays hidden (zero length) until a brightness change happens.
        self.hidden = True
        self.show_bar = (self.mode == 'bar')
        self.bright_path = os.path.join(self.device, self.brightness_path)
        self.min = self.min_brightness
        # Resolve the maximum brightness: prefer the device-reported value
        # (optionally capped by `max_brightness`), then a user value, then 500.
        if self.max_brightness_path:
            self.max_path = os.path.join(self.device, self.max_brightness_path)
            self.max = self.get_max()
            if self.max_brightness:
                self.max = min(self.max, self.max_brightness)
        elif self.max_brightness:
            self.max = self.max_brightness
        else:
            logger.warning('No maximum brightness defined. Setting to default value of 500. The script may behave unexpectedly.')
            self.max = 500
        # A percentage step string (e.g. '5%') is converted to an absolute step.
        if isinstance(self.step, str):
            if self.step.endswith('%'):
                self.step = self.step[:(- 1)]
            val = int(self.step)
            self.step = int(((self.max * val) / 100))
        self.current = self.get_current()
        self.old = 0

    def _configure(self, qtile, bar):
        """Standard qtile configure hook; pre-computes the widest text layout."""
        base._Widget._configure(self, qtile, bar)
        self.text_width = self.max_text_width()

    async def _config_async(self):
        """Subscribe to UPower PropertiesChanged signals when power saving is enabled."""
        if (not self.enable_power_saving):
            return
        subscribe = (await add_signal_receiver(self.message, session_bus=False, signal_name='PropertiesChanged', path='/org/freedesktop/UPower', dbus_interface='org.freedesktop.DBus.Properties'))
        if (not subscribe):
            msg = 'Unable to add signal receiver for UPower events.'
            logger.warning(msg)

    def message(self, message):
        """D-Bus signal callback; unpacks the message body into update()."""
        self.update(*message.body)

    def update(self, _interface_name, changed_properties, _invalidated_properties):
        """React to an UPower 'OnBattery' change by applying the configured brightness."""
        if ('OnBattery' not in changed_properties):
            return
        onbattery = changed_properties['OnBattery'].value
        # Only act on actual transitions between mains and battery.
        if (onbattery != self.onbattery):
            if onbattery:
                value = self.brightness_on_battery
            else:
                value = self.brightness_on_mains
            # Accept either an absolute int or a percentage string like '50%'.
            if isinstance(value, int):
                self.set_brightness_value(value)
            elif (isinstance(value, str) and value.endswith('%')):
                try:
                    percent = int(value[:(- 1)])
                    self.set_brightness_percent((percent / 100))
                except ValueError:
                    logger.error('Incorrectly formatted brightness: %s', value)
            else:
                logger.warning('Unrecognised value for brightness: %s', value)
            self.onbattery = onbattery

    def max_text_width(self):
        """Return the pixel width needed to render the text at 100%."""
        (width, _) = self.drawer.max_layout_size([self.text_format.format(percentage=100)], self.font, self.fontsize)
        return width

    def status_change(self, percentage):
        """Record a new brightness percentage and refresh the bar and/or popup."""
        if self.show_bar:
            self.hidden = False
        self.percentage = percentage
        self.bar.draw()
        if self.show_bar:
            # Re-arm the auto-hide timer on every change.
            self.set_timer()
        if (self.mode == 'popup'):
            self.update_or_show_popup()

    def _update_popup(self):
        """Push the current brightness into the popup's controls."""
        brightness = self.percentage
        label = f'Brightness {brightness:.0%}'
        self.extended_popup.update_controls(brightness=brightness, text=label)

    def draw(self):
        """Render the brightness bar, or an error bar when percentage < 0."""
        self.drawer.clear((self.background or self.bar.background))
        if (self.percentage >= 0):
            bar_colour = self.bar_colour
            percentage = int((self.percentage * 100))
            bar_text = self.text_format.format(percentage=percentage)
            value = self.percentage
        else:
            bar_colour = self.error_colour
            bar_text = '!'
            # NOTE(review): falls back to 1 only when percentage is 0 (falsy);
            # a -1 sentinel passes through unchanged — confirm draw_bar handles it.
            value = (self.percentage or 1)
        self.draw_bar(bar_colour=bar_colour, bar_text=bar_text, bar_value=value)
        self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.length)

    def set_timer(self):
        """(Re)start the timer that hides the widget after timeout_interval."""
        if self.update_timer:
            self.update_timer.cancel()
        self.update_timer = self.timeout_add(self.timeout_interval, self.hide)

    def hide(self):
        """Hide the widget and redraw the bar so it collapses to zero width."""
        self.hidden = True
        self.bar.draw()

    def calculate_length(self):
        """Widget length: zero while hidden, otherwise wide enough for text/bar."""
        if self.hidden:
            return 0
        else:
            return max(self.text_width, self.bar_width)

    def change_brightness(self, step):
        """Adjust brightness by `step` (may be negative); signal an error if reads failed."""
        self.current = self.get_current()
        if (self.current and self.max):
            newval = (self.current + step)
            self._set_brightness(newval)
        else:
            self._set_brightness(ERROR_VALUE)

    def _set_brightness(self, value):
        """Clamp `value` to [min, max], write it to the device, and update status."""
        if (value != ERROR_VALUE):
            newval = max(min(value, self.max), self.min)
            # Skip the sysfs write when the clamped value hasn't changed.
            if (newval != self.old):
                success = self._set_current(newval)
                percentage = ((newval / self.max) if success else ERROR_VALUE)
                self.status_change(percentage)
                self.old = newval
                self.current = newval
        else:
            self.status_change(ERROR_VALUE)

    def _read(self, path):
        """Read an integer from a sysfs file; return False (not raise) on any failure."""
        try:
            with open(path, 'r') as b:
                value = int(b.read())
        except PermissionError:
            logger.error('Unable to read %s.', path)
            value = False
        except ValueError:
            logger.error('Unexpected value when reading %s.', path)
            value = False
        except Exception as e:
            logger.error('Unexpected error when reading %s: %s.', path, e)
            value = False
        return value

    def get_max(self):
        """Read the device's maximum brightness; warn when the read fails."""
        maxval = self._read(self.max_path)
        if (not maxval):
            logger.warning('Max value was not read. Module may behave unexpectedly.')
        return maxval

    def get_current(self):
        """Read the device's current brightness; warn when the read fails."""
        current = self._read(self.bright_path)
        if (not current):
            logger.warning('Current value was not read. Module may behave unexpectedly.')
        return current

    def _set_current(self, newval):
        """Write `newval` to the brightness file; return True on success."""
        try:
            with open(self.bright_path, 'w') as b:
                b.write(str(newval))
            success = True
        except PermissionError:
            logger.error('No write access to %s.', self.bright_path)
            success = False
        except Exception as e:
            logger.error('Unexpected error when writing brightness value: %s.', e)
            success = False
        return success

    # NOTE(review): the bare `_command()` calls below look like stripped
    # decorators (qtile's `@expose_command()`) — confirm against upstream.
    _command()
    def brightness_up(self):
        """Increase brightness by one step (bound to scroll up)."""
        self.change_brightness(self.step)

    _command()
    def brightness_down(self):
        """Decrease brightness by one step (bound to scroll down)."""
        self.change_brightness((self.step * (- 1)))

    _command()
    def set_brightness_value(self, value):
        """Set an absolute brightness value (clamped to configured limits)."""
        self._set_brightness(value)

    _command()
    def set_brightness_percent(self, percent):
        """Set brightness as a fraction of the maximum (e.g. 0.5 for 50%)."""
        value = int((self.max * percent))
        self._set_brightness(value)

    _command()
    def info(self):
        """Expose current/min/max brightness via the standard widget info dict."""
        info = base._Widget.info(self)
        info['brightness'] = self.current
        info['max_brightness'] = self.max
        info['min_brightness'] = self.min
        return info
class ClassVisTimeline(GrpCls.ClassHtml):
    """CSS class catalogue for a vis.js Timeline HTML component.

    Registers lazily-built item and item-overflow style entries on the
    component's 'other' class list.
    """

    def __init__(self, component: primitives.HtmlModel):
        super(ClassVisTimeline, self).__init__(component)
        # Lazy caches for the two catalogue entries built below.
        (self._css_vis_items, self._css_vis_items_overflow) = (None, None)
        # NOTE(review): these names are referenced without being called, which
        # only makes sense if css_items / css_items_overflow were originally
        # @property accessors (decorators appear stripped) — confirm upstream.
        self.classList['other'].add(self.css_items)
        self.classList['other'].add(self.css_items_overflow)

    def css_items(self) -> Classes.CatalogChart.CatalogChart:
        """Build (once) and return the vis items CSS catalogue entry."""
        if (self._css_vis_items is None):
            self._css_vis_items = Classes.CatalogChart.CatalogChart(self.component.page, self.classList['other'], html_id=self.component.htmlCode, component=self.component).vis_items()
        return self._css_vis_items

    def css_items_overflow(self) -> Classes.CatalogChart.CatalogChart:
        """Build (once) and return the vis items-overflow CSS catalogue entry."""
        if (self._css_vis_items_overflow is None):
            self._css_vis_items_overflow = Classes.CatalogChart.CatalogChart(self.component.page, self.classList['other'], html_id=self.component.htmlCode, component=self.component).vis_items_overflow()
        return self._css_vis_items_overflow
class ServerResponseAllOf(ModelNormal):
    """Generated OpenAPI model (allOf component) for a server response.

    NOTE(review): the bare `_property` and `_js_args_to_python_args`
    statements below look like stripped decorators (likely `@cached_property`
    and `@convert_js_args_to_python_args` from the generator template), and
    `_from_openapi_data` was presumably a `@classmethod` — confirm against the
    generated source before relying on this block.
    """

    # No enum-restricted or validated fields on this model.
    allowed_values = {}
    validations = {}

    _property
    def additional_properties_type():
        # Any JSON-compatible type is accepted for undeclared properties.
        return (bool, date, datetime, dict, float, int, list, str, none_type)

    _nullable = False

    _property
    def openapi_types():
        # Declared property name -> accepted type tuple.
        return {'service_id': (str,), 'id': (str,), 'pool_id': (str,)}

    _property
    def discriminator():
        # This model has no discriminator field.
        return None

    # Python attribute name -> JSON key (identical here).
    attribute_map = {'service_id': 'service_id', 'id': 'id', 'pool_id': 'pool_id'}
    # All declared properties are server-assigned and read-only.
    read_only_vars = {'service_id', 'id', 'pool_id'}
    _composed_schemas = {}

    _js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Construct an instance from server data, allowing read-only fields to be set."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            # Optionally discard unknown keys when the configuration asks for it.
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal attributes that must always exist on an instance.
    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])

    _js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Construct an instance from user data; rejects read-only fields."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, user construction may not set read-only vars.
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
def test_get_stations(db, client, jwt):
    """GET /v1/stations/<id> returns the station with its name and type attributes."""
    event = EventFactoryBasic()
    location = MicrolocationSubFactory(event=event)
    station = StationFactory(event=event, microlocation=location)
    db.session.commit()

    url = f'/v1/stations/{station.id}'
    response = client.get(url, content_type='application/vnd.api+json', headers=jwt)
    assert response.status_code == 200

    payload = json.loads(response.data)['data']
    assert payload['id'] == station.id
    attrs = payload['attributes']
    assert attrs['station-name'] == station.station_name
    assert attrs['station-type'] == station.station_type
_figures_equal(extensions=['png'])  # NOTE(review): looks like a stripped decorator (likely `@check_figures_equal(extensions=["png"])`) — confirm
def test_plot_filtered_hist(fig_test, fig_ref):
    """Histograms of identically-filtered eland and pandas flight data should match."""
    test_data = TestData()
    # Select the same four numeric columns from both backends.
    pd_flights = test_data.pd_flights()[['DistanceKilometers', 'DistanceMiles', 'FlightDelayMin', 'FlightTimeHour']]
    ed_flights = test_data.ed_flights()[['DistanceKilometers', 'DistanceMiles', 'FlightDelayMin', 'FlightTimeHour']]
    # Apply the identical filter to both frames before plotting.
    pd_flights = pd_flights[(pd_flights.FlightDelayMin > 0)]
    ed_flights = ed_flights[(ed_flights.FlightDelayMin > 0)]
    # Reference figure: pandas. hist() is expected to emit a UserWarning here.
    with pytest.warns(UserWarning):
        pd_ax = fig_ref.subplots()
        pd_flights.hist(ax=pd_ax)
    # Test figure: eland, same expectation.
    with pytest.warns(UserWarning):
        ed_ax = fig_test.subplots()
        ed_flights.hist(ax=ed_ax)
class OptionPlotoptionsTimelineDatalabelsTextpath(Options):
    """Options wrapper for a timeline data-label textPath configuration.

    NOTE(review): each option is defined twice (getter/setter pairs). The
    original almost certainly used `@property` / `@<name>.setter` decorators
    that have been stripped — as written, the second `def` silently replaces
    the first. Confirm against the original source.
    """

    def attributes(self):
        # Getter: current `attributes` value (no default configured -> None).
        return self._config_get(None)

    def attributes(self, value: Any):
        # Setter: store `attributes` as a plain (non-JS) value.
        self._config(value, js_type=False)

    def enabled(self):
        # Getter: whether the text path is enabled (defaults to False).
        return self._config_get(False)

    def enabled(self, flag: bool):
        # Setter: store the enabled flag as a plain (non-JS) value.
        self._config(flag, js_type=False)
class JsBreadCrumb():
    """Generator of client-side JavaScript for a breadcrumb parameters object.

    Emits JS that keeps page parameters in a global object (named by
    ``self._selector``) so they can be carried between pages via the URL.
    The Python methods return JS fragments, not values.
    """

    def __init__(self, src: primitives.PageModel=None):
        self.page = src
        # Name of the global JS object holding the breadcrumb state.
        self._selector = 'breadcrumb'
        self._anchor = None
        # Seed the JS object with the page's current inputs at build time.
        self.page.properties.js.add_builders(('%s = {pmts: %s}' % (self._selector, json.dumps(self.page.inputs))))

    def add(self, key: str, data: Union[(str, primitives.JsDataModel)], js_conv_func: Optional[Union[(str, list)]]=None):
        """Return the JS statement that stores ``data`` under ``key`` in the breadcrumb pmts."""
        return JsFncs.JsFunction(('%s["pmts"]["%s"] = %s' % (self._selector, key, JsUtils.jsConvertData(data, js_conv_func))))

    def get(self, key: Optional[str]=None):
        """Return a JS expression for one stored parameter, or the whole object when key is None."""
        if (key is None):
            return JsObject.JsObject(('%s' % self._selector))
        return JsObject.JsObject(('%s["pmts"]["%s"]' % (self._selector, key)))

    def hash(self, data: Union[(str, primitives.JsDataModel)], js_conv_func: Optional[Union[(str, list)]]=None):
        """Return the JS statement that stores ``data`` as the breadcrumb anchor."""
        return JsObject.JsObject('{}["anchor"] = {}'.format(self._selector, JsUtils.jsConvertData(data, js_conv_func)))

    def url(self):
        """Return a JS string expression building origin + pathname + '?' + serialized params."""
        js_location = JsLocation.JsLocation(self.page)
        origin = js_location.origin
        pathname = js_location.pathname
        return JsString.JsString((((origin + pathname) + '?') + JsObject.JsObject(self.toStr(), page=self.page)), js_code='window')

    def toStr(self):
        """Return the JS call serialising the breadcrumb object to URL parameters."""
        return ('%s(%s)' % (JsFncs.FncOnRecords(None, self.page.properties.js).url(), self._selector))
def sequence_builder(start_number: T, max_length: int, skip: int, reverse: bool) -> Tuple[(T, ...)]:
    """Build an arithmetic sequence of up to ``max_length`` numbers.

    Starting at ``start_number``, each element advances by ``skip + 1``
    (descending when ``reverse``). Values outside [0, UINT_256_MAX] are
    filtered out. Raises OversizeObject when the requested length would
    exceed the configured memory budget.
    """
    if max_length > MAXIMUM_OBJECT_MEMORY_BYTES // 8:
        raise OversizeObject(f'Sequence is too big to fit in memory: {max_length}')

    step = -(skip + 1) if reverse else skip + 1
    cutoff_number = start_number + step * max_length
    candidates = range(start_number, cutoff_number, step)

    # Keep only values representable as an unsigned 256-bit integer.
    in_bounds = (number for number in candidates if 0 <= number <= UINT_256_MAX)
    return cast(Tuple[(T, ...)], tuple(in_bounds))
class RequestProcessor():
    """Bookkeeping for a persistent-connection JSON-RPC provider.

    Maintains three stores:
      * ``_request_information_cache`` — per-request metadata (method, params,
        response formatters) keyed by a cache key derived from the request id;
      * ``_request_response_cache`` — raw responses awaiting processing,
        keyed the same way;
      * ``_subscription_response_deque`` — bounded FIFO of ``eth_subscription``
        push messages (oldest entries dropped when full).
    """

    _request_information_cache: SimpleCache
    _request_response_cache: SimpleCache
    _subscription_response_deque: Deque[RPCResponse]

    def __init__(self, provider: 'PersistentConnectionProvider', subscription_response_deque_size: int=500) -> None:
        self._provider = provider
        self._request_information_cache = SimpleCache(500)
        self._request_response_cache = SimpleCache(500)
        self._subscription_response_deque = deque(maxlen=subscription_response_deque_size)

    def active_subscriptions(self) -> Dict[(str, Any)]:
        """Return {subscription_id: {'params': ...}} for cached eth_subscribe requests."""
        return {value.subscription_id: {'params': value.params} for (key, value) in self._request_information_cache.items() if (value.method == 'eth_subscribe')}

    def cache_request_information(self, method: RPCEndpoint, params: Any, response_formatters: Tuple[(Callable[(..., Any)], ...)]) -> str:
        """Cache metadata for an outgoing request and return its cache key.

        If the key already exists (an internal request reused the id), the
        existing entry is bumped to the next id first.
        """
        request_id = next(copy(self._provider.request_counter))
        cache_key = generate_cache_key(request_id)
        self._bump_cache_if_key_present(cache_key, request_id)
        request_info = RequestInformation(method, params, response_formatters)
        self._provider.logger.debug(f'''Caching request info:
request_id={request_id},
cache_key={cache_key},
request_info={request_info.__dict__}''')
        self._request_information_cache.cache(cache_key, request_info)
        return cache_key

    def _bump_cache_if_key_present(self, cache_key: str, request_id: int) -> None:
        """Recursively shift an existing cache entry to ``request_id + 1``.

        Recurses first so a whole run of occupied consecutive ids moves up by
        one before the displaced entry is re-cached.
        """
        if (cache_key in self._request_information_cache):
            original_request_info = self._request_information_cache.get_cache_entry(cache_key)
            bump = generate_cache_key((request_id + 1))
            self._bump_cache_if_key_present(bump, (request_id + 1))
            self._provider.logger.debug(f'''Caching internal request. Bumping original request in cache:
request_id=[{request_id}] -> [{(request_id + 1)}],
cache_key=[{cache_key}] -> [{bump}],
request_info={original_request_info.__dict__}''')
            self._request_information_cache.cache(bump, original_request_info)

    def pop_cached_request_information(self, cache_key: str) -> Optional[RequestInformation]:
        """Remove and return cached request info for ``cache_key`` (None if absent)."""
        request_info = self._request_information_cache.pop(cache_key)
        if (request_info is not None):
            self._provider.logger.debug(f'''Request info popped from cache:
cache_key={cache_key},
request_info={request_info.__dict__}''')
        return request_info

    def get_request_information_for_response(self, response: RPCResponse) -> RequestInformation:
        """Look up the request info matching ``response``.

        Subscription push messages are keyed by their subscription id and the
        info stays cached; regular responses are keyed by request id and the
        info is popped. A successful eth_unsubscribe also evicts the original
        eth_subscribe entry and purges queued push messages for that
        subscription.
        """
        if (('method' in response) and (response['method'] == 'eth_subscription')):
            if ('params' not in response):
                raise ValueError('Subscription response must have params field')
            if ('subscription' not in response['params']):
                raise ValueError('Subscription response params must have subscription field')
            cache_key = generate_cache_key(response['params']['subscription'])
            request_info = self._request_information_cache.get_cache_entry(cache_key)
        else:
            cache_key = generate_cache_key(response['id'])
            request_info = self.pop_cached_request_information(cache_key)
            if ((request_info is not None) and (request_info.method == 'eth_unsubscribe') and (response.get('result') is True)):
                subscription_id = request_info.params[0]
                subscribe_cache_key = generate_cache_key(subscription_id)
                self.pop_cached_request_information(subscribe_cache_key)
                # Drop any queued push messages for the now-cancelled subscription.
                self._subscription_response_deque = deque(filter((lambda sub_response: (sub_response['params']['subscription'] != subscription_id)), self._subscription_response_deque), maxlen=self._subscription_response_deque.maxlen)
        return request_info

    def append_middleware_response_processor(self, response: RPCResponse, middleware_response_processor: Callable[(..., Any)]) -> None:
        """Attach a middleware processor to the cached request info for ``response``."""
        response_id = response.get('id', None)
        if (response_id is not None):
            cache_key = generate_cache_key(response_id)
            cached_request_info_for_id: RequestInformation = self._request_information_cache.get_cache_entry(cache_key)
            if (cached_request_info_for_id is not None):
                cached_request_info_for_id.middleware_response_processors.append(middleware_response_processor)
            else:
                self._provider.logger.debug(f'No cached request info for response id `{response_id}`. Cannot append middleware response processor for response: {response}')
        else:
            self._provider.logger.debug(f'No response `id` in response. Cannot append middleware response processor for response: {response}')

    def cache_raw_response(self, raw_response: Any, subscription: bool=False) -> None:
        """Store a raw response: subscriptions go to the FIFO deque, others to the keyed cache."""
        if subscription:
            self._provider.logger.debug(f'''Caching subscription response:
response={raw_response}''')
            self._subscription_response_deque.append(raw_response)
        else:
            response_id = raw_response.get('id')
            cache_key = generate_cache_key(response_id)
            self._provider.logger.debug(f'''Caching response:
response_id={response_id},
cache_key={cache_key},
response={raw_response}''')
            self._request_response_cache.cache(cache_key, raw_response)

    def pop_raw_response(self, cache_key: Optional[str]=None, subscription: bool=False) -> Any:
        """Remove and return a raw response.

        Subscription responses come from the FIFO deque (None when empty);
        regular responses require ``cache_key`` and come from the keyed cache.
        """
        if subscription:
            deque_length = len(self._subscription_response_deque)
            if (deque_length == 0):
                return None
            raw_response = self._subscription_response_deque.popleft()
            self._provider.logger.debug(f'Subscription response deque is not empty. Processing {deque_length} subscription(s) as FIFO before receiving new response.')
            self._provider.logger.debug(f'''Cached subscription response popped from deque to be processed:
raw_response={raw_response}''')
        else:
            if (not cache_key):
                raise ValueError('Must provide cache key when popping a non-subscription response.')
            raw_response = self._request_response_cache.pop(cache_key)
            if (raw_response is not None):
                self._provider.logger.debug(f'''Cached response popped from cache to be processed:
cache_key={cache_key},
raw_response={raw_response}''')
        return raw_response

    def clear_caches(self) -> None:
        """Reset all three stores (request info, responses, subscription deque)."""
        self._request_information_cache.clear()
        self._request_response_cache.clear()
        self._subscription_response_deque.clear()
class DockerDeployment():
    """Builds, tags, and pushes a Docker image to a registry."""

    def __init__(self, dockerfile: str, context: str, image: str, tag: str, registry: str):
        """Record the build inputs: Dockerfile path, build context, image name/tag, registry host."""
        self.dockerfile = dockerfile
        self.context = context
        self.image = image
        self.tag = tag
        self.registry = registry

    def build_and_publish(self) -> bool:
        """Run `docker build`, `docker tag`, `docker push` in order.

        Returns:
            True when every command succeeds; False at the first failure.
        """
        local_ref = '{}:{}'.format(self.image, self.tag)
        remote_ref = '{}/{}:{}'.format(self.registry, self.image, self.tag)
        pipeline: List[List[str]] = [
            ['docker', 'build', '-t', local_ref, '-f', self.dockerfile, self.context],
            ['docker', 'tag', local_ref, remote_ref],
            ['docker', 'push', remote_ref],
        ]
        for cmd in pipeline:
            _, ok = _execute_cmd(cmd)
            if not ok:
                return False
        return True
def get_all_server_systems(url: AnyHttpUrl, headers: Dict[(str, str)], exclude_systems: List[System]) -> List[System]:
    """List every System resource on the server, skipping the excluded ones.

    Args:
        url: Server base URL.
        headers: Request headers (e.g. auth).
        exclude_systems: Systems whose fides_key should be skipped.

    Returns:
        Validated System objects for all remaining keys.
    """
    listing = handle_cli_response(api.ls(url=url, resource_type='system', headers=headers), verbose=False)
    excluded_keys = {system.fides_key for system in exclude_systems}
    wanted_keys = [entry['fides_key'] for entry in listing.json() if entry['fides_key'] not in excluded_keys]
    resources = get_server_resources(url=url, resource_type='system', headers=headers, existing_keys=wanted_keys)
    return [System.validate(resource) for resource in resources]
def test_localaccount(accounts):
    """A freshly added local account starts empty, can receive and send ether."""
    new_account = accounts.add()
    assert new_account.balance() == 0

    # Fund the local account from the first default account.
    accounts[0].transfer(new_account, '10 ether')
    assert new_account.balance() == '10 ether'

    # Spend from the local account; its nonce increments after sending.
    new_account.transfer(accounts[1], '1 ether')
    assert accounts[1].balance() == '1001 ether'
    assert new_account.nonce == 1
def test_lobatto_tri0():
    """A 0th-order polynomial integrates to the same value at quadrature orders 1-3."""
    print('0th Order Polynomial')
    print('Triangle')

    integrals = []
    for order in (1, 2, 3):
        lobattoTriangle.setOrder(order)
        integral = dot(f0(lobattoTriangle.points), lobattoTriangle.weights)
        print(integral)
        integrals.append(integral)

    # All three orders must agree (pairwise chains cover the whole set).
    npt.assert_almost_equal(integrals[0], integrals[1])
    npt.assert_almost_equal(integrals[1], integrals[2])
def _deserialize(data, start, typ):
if (typ in ('hash32', 'address')):
length = (20 if (typ == 'address') else 32)
assert ((len(data) + start) >= length)
return (data[start:(start + length)], (start + length))
elif (isinstance(typ, str) and (typ[:3] == 'int')):
length = int(typ[3:])
assert ((length % 8) == 0)
assert ((len(data) + start) >= (length // 8))
return (int.from_bytes(data[start:(start + (length // 8))], 'big'), (start + (length // 8)))
elif (typ == 'bytes'):
length = int.from_bytes(data[start:(start + 4)], 'big')
assert ((len(data) + start) >= (4 + length))
return (data[(start + 4):((start + 4) + length)], ((start + 4) + length))
elif isinstance(typ, list):
assert (len(typ) == 1)
length = int.from_bytes(data[start:(start + 4)], 'big')
(pos, o) = ((start + 4), [])
while (pos < ((start + 4) + length)):
(result, pos) = _deserialize(data, pos, typ[0])
o.append(result)
assert (pos == ((start + 4) + length))
return (o, pos)
elif isinstance(typ, type):
length = int.from_bytes(data[start:(start + 4)], 'big')
values = {}
pos = (start + 4)
for k in sorted(typ.fields.keys()):
(values[k], pos) = _deserialize(data, pos, typ.fields[k])
assert (pos == ((start + 4) + length))
return (typ(**values), pos)
raise Exception('Cannot deserialize', typ) |
class TestApprovePrivacyRequest():
(scope='function')
def url(self, db, privacy_request):
return (V1_URL_PREFIX + PRIVACY_REQUEST_APPROVE)
(scope='function')
def privacy_request_review_notification_enabled(self, db):
original_value = CONFIG.notifications.send_request_review_notification
CONFIG.notifications.send_request_review_notification = True
ApplicationConfig.update_config_set(db, CONFIG)
(yield)
CONFIG.notifications.send_request_review_notification = original_value
ApplicationConfig.update_config_set(db, CONFIG)
def test_approve_privacy_request_not_authenticated(self, url, api_client):
response = api_client.patch(url)
assert (response.status_code == 401)
def test_approve_privacy_request_bad_scopes(self, url, api_client, generate_auth_header):
auth_header = generate_auth_header(scopes=[PRIVACY_REQUEST_READ])
response = api_client.patch(url, headers=auth_header)
assert (response.status_code == 403)
def test_approve_privacy_request_viewer_role(self, url, api_client, generate_role_header):
auth_header = generate_role_header(roles=[VIEWER])
response = api_client.patch(url, headers=auth_header)
assert (response.status_code == 403)
('fides.api.service.privacy_request.request_runner_service.run_privacy_request.delay')
def test_approve_privacy_request_approver_role(self, _, url, api_client, generate_role_header, privacy_request, db):
privacy_request.status = PrivacyRequestStatus.pending
privacy_request.save(db=db)
auth_header = generate_role_header(roles=[APPROVER])
body = {'request_ids': [privacy_request.id]}
response = api_client.patch(url, headers=auth_header, json=body)
assert (response.status_code == 200)
('fides.api.service.privacy_request.request_runner_service.run_privacy_request.delay')
def test_approve_privacy_request_does_not_exist(self, submit_mock, db, url, api_client, generate_auth_header, privacy_request):
auth_header = generate_auth_header(scopes=[PRIVACY_REQUEST_REVIEW])
body = {'request_ids': ['does_not_exist']}
response = api_client.patch(url, headers=auth_header, json=body)
assert (response.status_code == 200)
response_body = response.json()
assert (response_body['succeeded'] == [])
assert (len(response_body['failed']) == 1)
assert (response_body['failed'][0]['message'] == "No privacy request found with id 'does_not_exist'")
assert (not submit_mock.called)
.parametrize('privacy_request_status', [PrivacyRequestStatus.complete, PrivacyRequestStatus.canceled])
('fides.api.service.privacy_request.request_runner_service.run_privacy_request.delay')
def test_approve_privacy_request_in_non_pending_state(self, submit_mock, db, url, api_client, generate_auth_header, privacy_request, privacy_request_status):
privacy_request.status = privacy_request_status
privacy_request.save(db=db)
auth_header = generate_auth_header(scopes=[PRIVACY_REQUEST_REVIEW])
body = {'request_ids': [privacy_request.id]}
response = api_client.patch(url, headers=auth_header, json=body)
assert (response.status_code == 200)
response_body = response.json()
assert (response_body['succeeded'] == [])
assert (len(response_body['failed']) == 1)
assert (response_body['failed'][0]['message'] == 'Cannot transition status')
assert (response_body['failed'][0]['data']['status'] == privacy_request_status.value)
assert (not submit_mock.called)
('fides.api.service.privacy_request.request_runner_service.run_privacy_request.delay')
def test_approve_privacy_request_no_user_on_client(self, submit_mock, db, url, api_client, generate_auth_header, privacy_request, user):
privacy_request.status = PrivacyRequestStatus.pending
privacy_request.save(db=db)
auth_header = generate_auth_header(scopes=[PRIVACY_REQUEST_REVIEW])
body = {'request_ids': [privacy_request.id]}
response = api_client.patch(url, headers=auth_header, json=body)
assert (response.status_code == 200)
response_body = response.json()
assert (len(response_body['succeeded']) == 1)
assert (len(response_body['failed']) == 0)
assert (response_body['succeeded'][0]['status'] == 'approved')
assert (response_body['succeeded'][0]['id'] == privacy_request.id)
assert (response_body['succeeded'][0]['reviewed_at'] is not None)
assert (response_body['succeeded'][0]['reviewed_by'] is None)
assert submit_mock.called
# FIX: restored the two stripped `@mock.patch` decorators; the bottom-most patch
# maps to the first mock parameter (mock_dispatch_message), matching the signature.
@mock.patch('fides.api.service.privacy_request.request_runner_service.run_privacy_request.delay')
@mock.patch('fides.api.api.v1.endpoints.privacy_request_endpoints.dispatch_message_task.apply_async')
def test_approve_privacy_request(self, mock_dispatch_message, submit_mock, db, url, api_client, generate_auth_header, user, privacy_request):
    """Approval by an authenticated user records the reviewer and queues the runner."""
    privacy_request.status = PrivacyRequestStatus.pending
    privacy_request.save(db=db)
    # build a JWT for a real user so reviewed_by is populated
    payload = {JWE_PAYLOAD_ROLES: user.client.roles, JWE_PAYLOAD_CLIENT_ID: user.client.id, JWE_ISSUED_AT: datetime.now().isoformat()}
    auth_header = {'Authorization': 'Bearer ' + generate_jwe(json.dumps(payload), CONFIG.security.app_encryption_key)}
    body = {'request_ids': [privacy_request.id]}
    response = api_client.patch(url, headers=auth_header, json=body)
    assert response.status_code == 200
    response_body = response.json()
    assert len(response_body['succeeded']) == 1
    assert len(response_body['failed']) == 0
    assert response_body['succeeded'][0]['status'] == 'approved'
    assert response_body['succeeded'][0]['id'] == privacy_request.id
    assert response_body['succeeded'][0]['reviewed_at'] is not None
    assert response_body['succeeded'][0]['reviewed_by'] == user.id
    # no custom fields on this request -> custom-field approval metadata unset
    assert response_body['succeeded'][0]['custom_privacy_request_fields_approved_at'] is None
    assert response_body['succeeded'][0]['custom_privacy_request_fields_approved_by'] is None
    assert submit_mock.called
    # review notifications disabled in this fixture set -> no message dispatched
    assert not mock_dispatch_message.called
    privacy_request.delete(db)
# FIX: restored the two stripped `@mock.patch` decorators (see sibling tests).
@mock.patch('fides.api.service.privacy_request.request_runner_service.run_privacy_request.delay')
@mock.patch('fides.api.api.v1.endpoints.privacy_request_endpoints.dispatch_message_task.apply_async')
def test_approve_privacy_request_with_custom_fields(self, mock_dispatch_message, submit_mock, db, url, api_client, generate_auth_header, user, privacy_request_with_custom_fields, allow_custom_privacy_request_field_collection_enabled):
    """Approving a request with custom fields also stamps the custom-field approval metadata."""
    privacy_request = privacy_request_with_custom_fields
    privacy_request.status = PrivacyRequestStatus.pending
    privacy_request.save(db=db)
    payload = {JWE_PAYLOAD_ROLES: user.client.roles, JWE_PAYLOAD_CLIENT_ID: user.client.id, JWE_ISSUED_AT: datetime.now().isoformat()}
    auth_header = {'Authorization': 'Bearer ' + generate_jwe(json.dumps(payload), CONFIG.security.app_encryption_key)}
    body = {'request_ids': [privacy_request.id]}
    response = api_client.patch(url, headers=auth_header, json=body)
    assert response.status_code == 200
    response_body = response.json()
    assert len(response_body['succeeded']) == 1
    assert len(response_body['failed']) == 0
    assert response_body['succeeded'][0]['status'] == 'approved'
    assert response_body['succeeded'][0]['id'] == privacy_request.id
    assert response_body['succeeded'][0]['reviewed_at'] is not None
    assert response_body['succeeded'][0]['reviewed_by'] == user.id
    # custom fields present -> their approval metadata is populated too
    assert response_body['succeeded'][0]['custom_privacy_request_fields_approved_at'] is not None
    assert response_body['succeeded'][0]['custom_privacy_request_fields_approved_by'] == user.id
    assert submit_mock.called
    assert not mock_dispatch_message.called
    privacy_request.delete(db)
# FIX: restored the two stripped `@mock.patch` decorators (see sibling tests).
@mock.patch('fides.api.service.privacy_request.request_runner_service.run_privacy_request.delay')
@mock.patch('fides.api.api.v1.endpoints.privacy_request_endpoints.dispatch_message_task.apply_async')
def test_approve_privacy_request_creates_audit_log_and_sends_email(self, mock_dispatch_message, submit_mock, db, url, api_client, generate_auth_header, user, privacy_request_status_pending, privacy_request_review_notification_enabled):
    """Approval writes an 'approved' audit log entry and dispatches the review email task."""
    payload = {JWE_PAYLOAD_ROLES: user.client.roles, JWE_PAYLOAD_CLIENT_ID: user.client.id, JWE_ISSUED_AT: datetime.now().isoformat()}
    auth_header = {'Authorization': 'Bearer ' + generate_jwe(json.dumps(payload), CONFIG.security.app_encryption_key)}
    body = {'request_ids': [privacy_request_status_pending.id]}
    api_client.patch(url, headers=auth_header, json=body)
    # the approval must be recorded against this request, this user, with action=approved
    approval_audit_log: AuditLog = AuditLog.filter(db=db, conditions=(((AuditLog.privacy_request_id == privacy_request_status_pending.id) & (AuditLog.user_id == user.id)) & (AuditLog.action == AuditLogAction.approved))).first()
    assert approval_audit_log is not None
    assert approval_audit_log.message == ''
    approval_audit_log.delete(db)
    # inspect the kwargs the messaging task was dispatched with
    call_args = mock_dispatch_message.call_args[1]
    task_kwargs = call_args['kwargs']
    assert task_kwargs['to_identity'] == Identity(email='')
    assert task_kwargs['service_type'] == MessagingServiceType.mailgun.value
    message_meta = task_kwargs['message_meta']
    assert message_meta['action_type'] == MessagingActionType.PRIVACY_REQUEST_REVIEW_APPROVE
    assert message_meta['body_params'] is None
    queue = call_args['queue']
    assert queue == MESSAGING_QUEUE_NAME
def main(args):
    """Read named feature vectors, optionally clip/normalize them, cluster, and write ranked clusters.

    Input file format: one record per line, ``<name>:<tab-separated floats>``.
    Output file format: ``<cluster>,<rank>,<score>:<name>`` per member.
    """
    features = []
    names = []
    with open(args.features) as f:
        for line in f:
            line = line.strip()
            names.append(line.split(':')[0])
            features.append([float(x) for x in line.split(':')[-1].split('\t')])
    if 0.0 < args.clip_features < 100.0:
        # BUG FIX: np.percentile() returns the percentile values — the original
        # call discarded the result, so no clipping ever happened.  Clip each
        # feature dimension at its args.clip_features-th percentile.
        # (Assumes an upper-tail clip was intended — TODO confirm.)
        upper = np.percentile(features, args.clip_features, axis=0)
        features = np.minimum(features, upper).tolist()
    if args.normalize_features:
        features = normalize_features(features)
    if args.type == 'kmeans':
        clusters = run_kmeans_clustering(features, names, args)
    elif args.type == 'hierarchical':
        clusters = run_hierarchical_clustering(features, names, args)
    elif args.type == 'optics':
        clusters = run_optics_clustering(features, names, args)
    elif args.type == 'dbscan':
        clusters = run_dbscan_clustering(features, names, args)
    else:
        # previously fell through silently and crashed later with NameError on `clusters`
        raise ValueError(f'Unknown clustering type: {args.type}')
    ranked_clusters = get_ranked_clusters(clusters)
    with open(args.output_file, 'w') as f:
        for cluster in ranked_clusters:
            for name, rank, score in ranked_clusters[cluster]:
                f.write(','.join([str(cluster), str(rank), str(score)]) + ':' + str(name) + '\n')
def que_monitor_loop(server, worker):
    """Run the dedicated que event monitor forever, restarting it whenever it dies.

    Operational DB errors trigger a fast restart (1 s) after recycling stale
    connections; any other exception backs off for 5 s.
    """
    log = worker.log
    while True:
        try:
            que_monitor(cq, _info=log.info, _debug=log.debug)
        except OPERATIONAL_ERRORS as ex:
            log.exception(ex)
            log.critical('Dedicated que event monitor terminated. Closing DB connection and restarting in 1 second...')
            from django import db
            db.close_old_connections()
            # BUG FIX: the logged message promised a 1-second restart, but this
            # branch fell through to the generic 5-second sleep instead.
            sleep(1)
        except Exception as ex:
            log.exception(ex)
            log.critical('Dedicated que event monitor terminated. Restarting in 5 seconds...')
            sleep(5)
def predict(model: ChatModel):
    """Run inference on the configured input file and write one prediction per line.

    Predictions that cannot be written (e.g. non-string outputs) are replaced
    with a sentinel line instead of aborting the whole run.
    """
    args = model.data_args
    predict_data = prepare_dataset(args.predicted_input_filename)
    result = inference(model, predict_data)
    with open(args.predicted_out_filename, 'w') as f:
        for p in result:
            try:
                # newlines inside a prediction would break the one-per-line format
                f.write(p.replace('\n', ' ') + '\n')
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; keep the best-effort fallback.
                f.write('Invalid Output!\n')
def run_as_admin(cmd, cwd=None, check=True, wait=True):
    """Run `cmd` elevated on Windows via the UAC "runas" verb.

    cmd: argv-style list; cmd[0] is the executable, the rest are joined as parameters.
    cwd: optional working directory for the elevated process.
    check: raise CalledProcessError when the exit code is negative.
    wait: block until the process exits.  NOTE: check=True implies waiting even
    when wait=False (see the `check or wait` condition below).
    Returns the exit code, or None when neither check nor wait was requested.
    """
    # imported lazily so the module stays importable on non-Windows hosts
    import win32con
    import win32event
    import win32process
    from win32com.shell import shellcon
    from win32com.shell.shell import ShellExecuteEx
    # SEE_MASK_NOCLOSEPROCESS keeps the process handle open so we can wait on it
    kwargs = dict(nShow=win32con.SW_SHOWNORMAL, fMask=shellcon.SEE_MASK_NOCLOSEPROCESS, lpVerb='runas', lpFile=cmd[0], lpParameters=' '.join(cmd[1:]))
    if (cwd is not None):
        kwargs['lpDirectory'] = cwd
    logger.info('Running command: %s', ' '.join(cmd))
    procInfo = ShellExecuteEx(**kwargs)
    if (check or wait):
        procHandle = procInfo['hProcess']
        _ = win32event.WaitForSingleObject(procHandle, win32event.INFINITE)
        rc = win32process.GetExitCodeProcess(procHandle)
        logger.info('Process handle %s returned code %d', procHandle, rc)
        # NOTE(review): only *negative* codes raise here, so ordinary nonzero
        # failure codes pass silently — confirm whether `rc != 0` was intended.
        if (check and (rc < 0)):
            raise subprocess.CalledProcessError(rc, cmd)
    else:
        rc = None
    return rc
def mock_audit_events_that_modify_mandatory_access_controls_are_collected_pass(self, cmd):
    """Fake a passing auditctl query: both MAC-policy watch rules present, rc 0."""
    rules = [
        '-w /etc/selinux -p wa -k MAC-policy',
        '-w /usr/share/selinux -p wa -k MAC-policy',
    ]
    return SimpleNamespace(returncode=0, stderr=[''], stdout=rules)
def print_results(output_format: str, parameters: Dict, result_fn: Callable[..., List[Tuple[str, Any, Any, Any]]]) -> Any:
    """Echo benchmark parameters and per-metric mean/stdev/variance to the terminal.

    result_fn must yield rows of (message, mean, stdev, variance).
    Raises ValueError for any output_format other than 'text'.
    """
    if (output_format != 'text'):
        raise ValueError(f'Bad output format {output_format}')
    click.echo('Start test with options:')
    for (name, value) in parameters.items():
        click.echo(f'* {name}: {value}')
    click.echo('\nResults:')
    # each row unpacks as (msg, mean, stdev, variance); values rounded for display
    for (msg, *values_set) in result_fn():
        (mean_, stdev_, variance_) = map((lambda x: round(x, 6)), values_set)
        click.echo(f' * {msg}: mean: {mean_} stdev: {stdev_} variance: {variance_} ')
    click.echo('Test finished.')
class AlienInvasion():
    """Overall game object: owns the window, the ship, bullets, and the alien fleet."""

    def __init__(self):
        """Initialize pygame, create game resources, and build the first fleet."""
        pygame.init()
        self.clock = pygame.time.Clock()
        self.settings = Settings()
        self.screen = pygame.display.set_mode((self.settings.screen_width, self.settings.screen_height))
        pygame.display.set_caption('Alien Invasion')
        self.ship = Ship(self)
        self.bullets = pygame.sprite.Group()
        self.aliens = pygame.sprite.Group()
        self._create_fleet()

    def run_game(self):
        """Main loop: handle input, update state, redraw, capped at 60 FPS."""
        while True:
            self._check_events()
            self.ship.update()
            self._update_bullets()
            self._update_screen()
            self.clock.tick(60)

    def _check_events(self):
        """Dispatch pygame events to the keydown/keyup handlers."""
        for event in pygame.event.get():
            if (event.type == pygame.QUIT):
                sys.exit()
            elif (event.type == pygame.KEYDOWN):
                self._check_keydown_events(event)
            elif (event.type == pygame.KEYUP):
                self._check_keyup_events(event)

    def _check_keydown_events(self, event):
        """Start movement, quit on 'q', or fire on space."""
        if (event.key == pygame.K_RIGHT):
            self.ship.moving_right = True
        elif (event.key == pygame.K_LEFT):
            self.ship.moving_left = True
        elif (event.key == pygame.K_q):
            sys.exit()
        elif (event.key == pygame.K_SPACE):
            self._fire_bullet()

    def _check_keyup_events(self, event):
        """Stop movement when an arrow key is released."""
        if (event.key == pygame.K_RIGHT):
            self.ship.moving_right = False
        elif (event.key == pygame.K_LEFT):
            self.ship.moving_left = False

    def _fire_bullet(self):
        """Spawn a bullet unless the on-screen limit has been reached."""
        if (len(self.bullets) < self.settings.bullets_allowed):
            new_bullet = Bullet(self)
            self.bullets.add(new_bullet)

    def _update_bullets(self):
        """Advance bullets and drop the ones that left the top of the screen."""
        self.bullets.update()
        # iterate over a copy: removing from a group while iterating it is unsafe
        for bullet in self.bullets.copy():
            if (bullet.rect.bottom <= 0):
                self.bullets.remove(bullet)

    def _create_fleet(self):
        """Fill the upper screen with aliens spaced one alien-width/height apart."""
        alien = Alien(self)
        (alien_width, alien_height) = alien.rect.size
        (current_x, current_y) = (alien_width, alien_height)
        # leave 3 alien heights free at the bottom and 2 widths at the right edge
        while (current_y < (self.settings.screen_height - (3 * alien_height))):
            while (current_x < (self.settings.screen_width - (2 * alien_width))):
                self._create_alien(current_x, current_y)
                current_x += (2 * alien_width)
            current_x = alien_width
            current_y += (2 * alien_height)

    def _create_alien(self, x_position, y_position):
        """Place one alien at the given pixel coordinates."""
        new_alien = Alien(self)
        new_alien.x = x_position
        new_alien.rect.x = x_position
        new_alien.rect.y = y_position
        self.aliens.add(new_alien)

    def _update_screen(self):
        """Redraw background, bullets, ship, and aliens, then flip the display."""
        self.screen.fill(self.settings.bg_color)
        for bullet in self.bullets.sprites():
            bullet.draw_bullet()
        self.ship.blitme()
        self.aliens.draw(self.screen)
        pygame.display.flip()
class FacetedResponse(Response):
    """Search response that also exposes the originating query and per-facet values.

    NOTE(review): `query_string` and `facets` read like accessors and are
    likely `@property`-decorated in the original source (decorators appear to
    have been stripped from this listing) — confirm before calling them.
    """

    def query_string(self):
        # the query the FacetedSearch was built from
        return self._faceted_search._query

    def facets(self):
        # lazily build and cache (name -> values) from the filtered aggregations;
        # AttrDict.__setattr__ is bypassed via super() to store the cache itself
        if (not hasattr(self, '_facets')):
            super(AttrDict, self).__setattr__('_facets', AttrDict({}))
            for (name, facet) in self._faceted_search.facets.items():
                self._facets[name] = facet.get_values(getattr(getattr(self.aggregations, ('_filter_' + name)), name), self._faceted_search.filter_values.get(name, ()))
        return self._facets
class OptionPlotoptionsColumnpyramidSonificationContexttracksMappingLowpassResonance(Options):
    """Getter/setter pairs for the lowpass-resonance mapping options.

    FIX: each option name was defined twice with identical names, which in
    plain Python would make the second definition shadow the first — clear
    evidence the `@property` / `@<name>.setter` decorators were stripped.
    They are restored here; behavior for callers (attribute-style access)
    is what the duplicated definitions implied.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
# FIX: `.django_db` was a stripped decorator — restored as @pytest.mark.django_db.
@pytest.mark.django_db
def test_non_existing_action(admin_site, article):
    """Rendering inline actions with an unknown action name must raise RuntimeError."""
    from test_proj.blog.admin import ArticleAdmin
    ArticleAdmin.inline_actions = ['non_existing']
    fake_request = {}
    admin = ArticleAdmin(article, admin_site)
    admin._request = fake_request
    try:
        with pytest.raises(RuntimeError):
            admin.render_inline_actions(article)
    finally:
        # FIX: restore the class attribute even if the assertion fails, so this
        # test cannot leak state into other tests
        ArticleAdmin.inline_actions = []
class MQ135():
    """MQ-135 gas sensor read out through a Multimeter voltage channel.

    FIX: `_voltage`, `_correction` and `_sensor_resistance` are used as plain
    values throughout the class (e.g. ``self.vcc / self._voltage``), which only
    works if they are properties — the ``@property`` decorators were evidently
    stripped from this listing and are restored here.
    """

    # per-gas log-log calibration [a, b]: concentration = a * (Rs/R0) ** b
    _PARAMS = {'CO2': [109, (- 2.88)], 'CO': [583, (- 3.93)], 'EtOH': [76.4, (- 3.18)], 'NH3': [102, (- 2.49)], 'Tol': [44.6, (- 3.45)], 'Ace': [33.9, (- 3.42)]}
    # quadratic temperature correction coefficients [a, b, c]
    _TEMPERATURE_CORRECTION = [0.000328, (- 0.0255), 1.38]
    # linear humidity correction slope, relative to 65 % RH
    _HUMIDITY_CORRECTION = (- 0.224)

    def __init__(self, gas: str, r_load: float, device: SerialHandler=None, channel: str='CH1', r0: float=None, temperature: Union[(float, Callable)]=20, humidity: Union[(float, Callable)]=0.65):
        """Configure the sensor.

        gas: key into _PARAMS selecting the calibration curve.
        r_load: load resistor value of the voltage divider.
        r0: baseline resistance; may stay None until measure_r0() is used.
        temperature / humidity: constants or zero-arg callables returning the
        current ambient values.
        """
        self._multimeter = Multimeter(device)
        self._params = self._PARAMS[gas]
        self.channel = channel
        self.r_load = r_load
        self.r0 = r0
        self.vcc = 5
        # wrap plain numbers into callables so the rest of the class can
        # always call self._temperature() / self._humidity()
        if isinstance(temperature, Callable):
            self._temperature = temperature
        else:
            def _temperature():
                return temperature
            self._temperature = _temperature
        if isinstance(humidity, Callable):
            self._humidity = humidity
        else:
            def _humidity():
                return humidity
            self._humidity = _humidity

    @property
    def _voltage(self):
        """Sensor output voltage measured on the configured channel."""
        return self._multimeter.measure_voltage(self.channel)

    @property
    def _correction(self):
        """Temperature/humidity correction factor applied to the resistance."""
        t = self._temperature()
        h = self._humidity()
        (a, b, c, d) = (*self._TEMPERATURE_CORRECTION, self._HUMIDITY_CORRECTION)
        return ((((a * (t ** 2)) + (b * t)) + c) + (d * (h - 0.65)))

    @property
    def _sensor_resistance(self):
        """Corrected sensor resistance derived from the voltage divider."""
        return ((((self.vcc / self._voltage) - 1) * self.r_load) / self._correction)

    def measure_concentration(self):
        """Return the gas concentration from the calibration curve.

        Raises TypeError when r0 has not been set (division by None).
        """
        try:
            return (self._params[0] * ((self._sensor_resistance / self.r0) ** self._params[1]))
        except TypeError:
            raise TypeError('r0 is not set.')

    def measure_r0(self, gas_concentration: float):
        """Back-compute the baseline resistance R0 from a known concentration."""
        return (self._sensor_resistance * ((gas_concentration / self._params[0]) ** (1 / (- self._params[1]))))
# FIX: `.dependency(...)` was a stripped decorator — restored as @pytest.mark.dependency.
@pytest.mark.dependency(depends=['test_request_message_nanp'])
def test_request_message_anp():
    """USM request generation with auth (md5) but no privacy matches the expected wire bytes."""
    message = make_msg()
    instance = usm.UserSecurityModel()
    # pre-seed the engine's boots/time so the security parameters are deterministic
    instance.local_config[b'engine-id'] = {'authoritative_engine_boots': 1, 'authoritative_engine_time': 12}
    result = instance.generate_request_message(message, b'engine-id', V3('username', Auth(b'authkey', 'md5'), None))
    expected = PlainMessage(version=3, header=HeaderData(message_id=123, message_max_size=234, flags=V3Flags(auth=True, priv=True, reportable=True), security_model=3), security_parameters=b'0+\x04\tengine-id\x02\x01\x01\x02\x01\x0c\x04\x08username\x04\x0c>\xb8\xff\x7fA<\x00\xfa\x066r\xed\x04\x00', scoped_pdu=GetRequest(PDUContent(123, [])))
    assert (result == expected)
def hosts_to_node_configs(hosts: _TYPE_HOSTS) -> List[NodeConfig]:
    """Normalize user-supplied hosts (string, URLs, mappings, NodeConfigs) into NodeConfigs."""
    if isinstance(hosts, str):
        # a single URL string is shorthand for a one-element list
        hosts = [hosts]

    def _to_node_config(entry) -> NodeConfig:
        # order matters: str before Mapping, since both checks could overlap for exotic types
        if isinstance(entry, NodeConfig):
            return entry
        if isinstance(entry, str):
            return url_to_node_config(entry)
        if isinstance(entry, Mapping):
            return host_mapping_to_node_config(entry)
        raise ValueError("'hosts' must be a list of URLs, NodeConfigs, or dictionaries")

    return [_to_node_config(entry) for entry in hosts]
def create_patch_filenames(fromfile, tofile, patchfile, compression='lzma', patch_type='sequential', algorithm='bsdiff', suffix_array_algorithm='divsufsort', memory_size=None, segment_size=None, minimum_shift_size=None, data_format=None, from_data_offset_begin=0, from_data_offset_end=0, from_data_begin=0, from_data_end=0, from_code_begin=0, from_code_end=0, to_data_offset_begin=0, to_data_offset_end=0, to_data_begin=0, to_data_end=0, to_code_begin=0, to_code_end=0, match_score=6, match_block_size=64, use_mmap=True, heatshrink_window_sz2=8, heatshrink_lookahead_sz2=7):
    """Convenience wrapper around create_patch() taking file paths instead of file objects."""
    # open all three files in one `with` so every handle is closed on any error
    with open(fromfile, 'rb') as ffrom, open(tofile, 'rb') as fto, open(patchfile, 'wb') as fpatch:
        create_patch(ffrom, fto, fpatch, compression, patch_type, algorithm, suffix_array_algorithm, memory_size, segment_size, minimum_shift_size, data_format, from_data_offset_begin, from_data_offset_end, from_data_begin, from_data_end, from_code_begin, from_code_end, to_data_offset_begin, to_data_offset_end, to_data_begin, to_data_end, to_code_begin, to_code_end, match_score, match_block_size, use_mmap, heatshrink_window_sz2, heatshrink_lookahead_sz2)
# FIX: `.network` was a stripped decorator — restored as @pytest.mark.network.
@pytest.mark.network
def test_retrieve():
    """retrieve() downloads once (logging the hash) and serves cached copies silently."""
    with TemporaryDirectory() as local_store:
        data_file = 'tiny-data.txt'
        url = (BASEURL + data_file)
        # first call: actual download, hash reported in the log
        with capture_log() as log_file:
            fname = retrieve(url, known_hash=None, path=local_store)
            logs = log_file.getvalue()
            assert (logs.split()[0] == 'Downloading')
            assert ('SHA256 hash of downloaded file:' in logs)
            assert (REGISTRY[data_file] in logs)
        assert (data_file == fname[(- len(data_file)):])
        check_tiny_data(fname)
        assert (file_hash(fname) == REGISTRY[data_file])
        # second call: cached, so nothing is logged
        with capture_log() as log_file:
            fname = retrieve(url, known_hash=None, path=local_store)
            assert (log_file.getvalue() == '')
        # cached + matching known hash: still silent
        with capture_log() as log_file:
            fname = retrieve(url, known_hash=REGISTRY[data_file], path=local_store)
            assert (log_file.getvalue() == '')
def downgrade():
    """Rename the *_url media columns back to their pre-upgrade names on both tables."""
    renames = (('video_url', 'video'), ('audio_url', 'audio'), ('slides_url', 'slides'))
    for table in ('sessions', 'sessions_version'):
        for old_name, new_name in renames:
            op.alter_column(table, old_name, new_column_name=new_name)
def test_get_fastas_complete(o_dir, e_dir, request):
    """End-to-end: get_fastas output for complete loci matches the expected FASTA."""
    out_fasta = os.path.join(o_dir, 'taxon-set.complete.fasta')
    cmd = get_fastas_cmd(o_dir, e_dir, out_fasta, request, incomplete=False)
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    # surface the tool's stderr when the subprocess fails
    assert proc.returncode == 0, print('{}'.format(stderr.decode('utf-8')))
    observed = SeqIO.to_dict(SeqIO.parse(out_fasta, 'fasta'))
    expected = SeqIO.to_dict(SeqIO.parse(os.path.join(e_dir, 'taxon-set.complete.fasta'), 'fasta'))
    for name, record in observed.items():
        assert record.seq == expected[name].seq
class AssetTester(UnitTestDBBase):
    """Unit tests for the stalker Asset class (equality, mixins, task hierarchy)."""

    def setUp(self):
        """Build users, a project with repository, a sequence with shots, and one asset with tasks."""
        super(AssetTester, self).setUp()
        # users
        from stalker import User
        from stalker.db.session import DBSession
        self.test_user1 = User(name='User1', login='user1', password='12345', email='.com')
        DBSession.add(self.test_user1)
        self.test_user2 = User(name='User2', login='user2', password='12345', email='.com')
        DBSession.add(self.test_user2)
        DBSession.commit()
        # statuses come pre-seeded by the DB fixture
        from stalker import Status, Project
        self.status_wip = Status.query.filter_by(code='WIP').first()
        self.status_cmpl = Status.query.filter_by(code='CMPL').first()
        # types for projects, assets and repositories
        from stalker import Type
        self.commercial_project_type = Type(name='Commercial Project', code='commproj', target_entity_type='Project')
        DBSession.add(self.commercial_project_type)
        self.asset_type1 = Type(name='Character', code='char', target_entity_type='Asset')
        DBSession.add(self.asset_type1)
        self.asset_type2 = Type(name='Environment', code='env', target_entity_type='Asset')
        DBSession.add(self.asset_type2)
        self.repository_type = Type(name='Test Repository Type', code='testrepo', target_entity_type='Repository')
        DBSession.add(self.repository_type)
        from stalker import Repository
        self.repository = Repository(name='Test Repository', code='TR', type=self.repository_type)
        DBSession.add(self.repository)
        self.project1 = Project(name='Test Project1', code='tp1', type=self.commercial_project_type, repositories=[self.repository])
        DBSession.add(self.project1)
        DBSession.commit()
        # one sequence with four shots
        from stalker import Sequence
        self.seq1 = Sequence(name='Test Sequence', code='tseq', project=self.project1, responsible=[self.test_user1])
        DBSession.add(self.seq1)
        from stalker import Shot
        self.shot1 = Shot(code='TestSH001', project=self.project1, sequences=[self.seq1], responsible=[self.test_user1])
        DBSession.add(self.shot1)
        self.shot2 = Shot(code='TestSH002', project=self.project1, sequences=[self.seq1], responsible=[self.test_user1])
        DBSession.add(self.shot2)
        self.shot3 = Shot(code='TestSH003', project=self.project1, sequences=[self.seq1], responsible=[self.test_user1])
        DBSession.add(self.shot3)
        self.shot4 = Shot(code='TestSH004', project=self.project1, sequences=[self.seq1], responsible=[self.test_user1])
        DBSession.add(self.shot4)
        # baseline kwargs reused (and mutated) by individual tests
        self.kwargs = {'name': 'Test Asset', 'code': 'ta', 'description': 'This is a test Asset object', 'project': self.project1, 'type': self.asset_type1, 'status': 0, 'responsible': [self.test_user1]}
        from stalker import Asset, Task
        self.asset1 = Asset(**self.kwargs)
        DBSession.add(self.asset1)
        self.task1 = Task(name='Task1', parent=self.asset1)
        DBSession.add(self.task1)
        self.task2 = Task(name='Task2', parent=self.asset1)
        DBSession.add(self.task2)
        self.task3 = Task(name='Task3', parent=self.asset1)
        DBSession.add(self.task3)
        DBSession.commit()

    def test___auto_name__class_attribute_is_set_to_False(self):
        """Assets require explicit names."""
        from stalker import Asset
        assert (Asset.__auto_name__ is False)

    def test_equality(self):
        """Equality depends on name, type, and concrete class."""
        from stalker import Asset, Entity
        new_asset1 = Asset(**self.kwargs)
        new_asset2 = Asset(**self.kwargs)
        new_entity1 = Entity(**self.kwargs)
        self.kwargs['type'] = self.asset_type2
        new_asset3 = Asset(**self.kwargs)
        self.kwargs['name'] = 'another name'
        new_asset4 = Asset(**self.kwargs)
        assert (new_asset1 == new_asset2)
        assert (not (new_asset1 == new_asset3))
        assert (not (new_asset1 == new_asset4))
        assert (not (new_asset3 == new_asset4))
        assert (not (new_asset1 == new_entity1))

    def test_inequality(self):
        """Inequality mirrors the equality rules."""
        from stalker import Asset, Entity
        new_asset1 = Asset(**self.kwargs)
        new_asset2 = Asset(**self.kwargs)
        new_entity1 = Entity(**self.kwargs)
        self.kwargs['type'] = self.asset_type2
        new_asset3 = Asset(**self.kwargs)
        self.kwargs['name'] = 'another name'
        new_asset4 = Asset(**self.kwargs)
        assert (not (new_asset1 != new_asset2))
        assert (new_asset1 != new_asset3)
        assert (new_asset1 != new_asset4)
        assert (new_asset3 != new_asset4)
        assert (new_asset1 != new_entity1)

    def test_ReferenceMixin_initialization(self):
        """Links passed as `references` are stored on the asset."""
        from stalker import Link, Type
        link_type_1 = Type(name='Image', code='image', target_entity_type='Link')
        link1 = Link(name='Artwork 1', full_path='/mnt/M/JOBs/TEST_PROJECT', filename='a.jpg', type=link_type_1)
        link2 = Link(name='Artwork 2', full_path='/mnt/M/JOBs/TEST_PROJECT', filename='b.jbg', type=link_type_1)
        references = [link1, link2]
        self.kwargs['code'] = 'SH12314'
        self.kwargs['references'] = references
        from stalker import Asset
        new_asset = Asset(**self.kwargs)
        assert (new_asset.references == references)

    def test_StatusMixin_initialization(self):
        """An explicitly supplied status_list is honored."""
        from stalker import StatusList, Asset
        status_list = StatusList.query.filter_by(target_entity_type='Asset').first()
        self.kwargs['code'] = 'SH12314'
        self.kwargs['status'] = 0
        self.kwargs['status_list'] = status_list
        new_asset = Asset(**self.kwargs)
        assert (new_asset.status_list == status_list)

    def test_TaskMixin_initialization(self):
        """Tasks parented to an asset appear in its `tasks` collection."""
        from stalker import Type, Project, Asset, Task
        commercial_project_type = Type(name='Commercial', code='comm', target_entity_type='Project')
        new_project = Project(name='Commercial', code='COM', type=commercial_project_type, repository=self.repository)
        character_asset_type = Type(name='Character', code='char', target_entity_type='Asset')
        new_asset = Asset(name='test asset', type=character_asset_type, code='tstasset', project=new_project, responsible=[self.test_user1])
        task1 = Task(name='Modeling', parent=new_asset)
        task2 = Task(name='Lighting', parent=new_asset)
        tasks = [task1, task2]
        assert (sorted(new_asset.tasks, key=(lambda x: x.name)) == sorted(tasks, key=(lambda x: x.name)))

    def test_plural_class_name(self):
        """Plural class name is derived as 'Assets'."""
        assert (self.asset1.plural_class_name == 'Assets')

    def test___strictly_typed___is_True(self):
        """Assets must always be created with a Type."""
        from stalker import Asset
        assert (Asset.__strictly_typed__ is True)
def test_rename_member_type_ptr_null():
    """Renaming 'bp_rename' at (17, 25) must edit both the main file and the submodule use."""
    request = write_rpc_request(1, 'initialize', {'rootPath': str(test_dir)})
    main_file = test_dir / 'test_prog.f08'
    request += rename_request('bp_rename', main_file, 17, 25)
    errcode, results = run_request(request)
    assert errcode == 0
    free_file = (test_dir / 'subdir') / 'test_free.f90'
    expected = {
        path_to_uri(str(main_file)): [create('bp_rename', 17, 16, 17, 28)],
        path_to_uri(str(free_file)): [create('bp_rename', 11, 43, 11, 55)],
    }
    check_rename_response(results[1]['changes'], expected)
class DefaultUpdateModelMixin(UpdateModelMixin):
    """Update mixin that routes the saved instance through get_response()."""

    def update(self: BaseGenericViewSet, request: Request, *args: Any, **kwargs: Any) -> Response:
        """Validate, save, and return the updated object with HTTP 200."""
        partial = kwargs.pop('partial', False)
        obj = self.get_object()
        ser = self.get_serializer(obj, data=request.data, partial=partial)
        ser.is_valid(raise_exception=True)
        obj = self.perform_update(ser)
        if getattr(obj, '_prefetched_objects_cache', None):
            # the update may have invalidated prefetched relations — drop the cache
            obj._prefetched_objects_cache = {}
        return self.get_response(obj, status.HTTP_200_OK)

    def perform_update(self: BaseGenericViewSet, serializer: Any) -> Any:
        """Persist the validated data; hook point for subclasses."""
        return serializer.save()
class UseCard(GenericAction):
    """Action of a player using a card on themselves."""

    def __init__(self, target, card):
        # a self-targeted action: the user is both source and target
        self.source = self.target = target
        self.card = card

    def apply_action(self):
        """Run the card's use_action if it has one; otherwise discard the card."""
        card = self.card
        use_action = getattr(card, 'use_action', None)
        if not use_action:
            # no dedicated behavior — the card just goes to the discard pile
            migrate_cards([card], self.game.deck.droppedcards, unwrap=True)
            return True
        return self.game.process_action(use_action(self.target, card))

    def can_fire(self):
        """A card with no use_action can always be used; otherwise ask its action."""
        use_action = getattr(self.card, 'use_action', None)
        if not use_action:
            return True
        return use_action(self.target, self.card).can_fire()
class MultiRotorModel(DynamicsModel):
    """Rigid-body multirotor dynamics model fitted from logged flight data."""

    def __init__(self, config_file, normalization=True, model_name='multirotor_model'):
        """Load the model config and cache mass, inertia, and rotor configuration."""
        self.config = ModelConfig(config_file)
        super(MultiRotorModel, self).__init__(config_dict=self.config.dynamics_model_config, normalization=normalization)
        self.mass = self.config.model_config['mass']
        # diagonal inertia tensor built from the configured principal moments
        self.moment_of_inertia = np.diag([self.config.model_config['moment_of_inertia']['Ixx'], self.config.model_config['moment_of_inertia']['Iyy'], self.config.model_config['moment_of_inertia']['Izz']])
        self.rotor_config_dict = self.config.model_config['actuators']['rotors']
        self.model_name = model_name

    def prepare_force_regression_matrices(self):
        """Add measured-force targets (F = m*a) and fuselage-drag features to data_df."""
        accel_mat = self.data_df[['acc_b_x', 'acc_b_y', 'acc_b_z']].to_numpy()
        # Newton's second law: body-frame force from measured body acceleration
        force_mat = (accel_mat * self.mass)
        self.data_df[['measured_force_x', 'measured_force_y', 'measured_force_z']] = force_mat
        airspeed_mat = self.data_df[['V_air_body_x', 'V_air_body_y', 'V_air_body_z']].to_numpy()
        aero_model = FuselageDragModel()
        (X_aero, coef_dict_aero, col_names_aero) = aero_model.compute_fuselage_features(airspeed_mat)
        self.data_df[col_names_aero] = X_aero
        self.coef_dict.update(coef_dict_aero)
        self.y_dict.update({'lin': {'x': 'measured_force_x', 'y': 'measured_force_y', 'z': 'measured_force_z'}})

    def prepare_moment_regression_matrices(self):
        """Add measured-moment targets (M = I*alpha) to data_df."""
        moment_mat = np.matmul(self.data_df[['ang_acc_b_x', 'ang_acc_b_y', 'ang_acc_b_z']].to_numpy(), self.moment_of_inertia)
        self.data_df[['measured_moment_x', 'measured_moment_y', 'measured_moment_z']] = moment_mat
        self.y_dict.update({'rot': {'x': 'measured_moment_x', 'y': 'measured_moment_y', 'z': 'measured_moment_z'}})
def test():
    """Exercise checker: verify the right pipeline is loaded and its components printed.

    `__solution__` (the learner's submitted code as a string) and `__msg__`
    (the feedback channel) are globals injected by the exercise framework.
    """
    assert (nlp.meta['name'] == 'core_web_sm'), 'Are you loading the correct pipeline?'
    assert (nlp.meta['lang'] == 'en'), 'Are you loading the correct pipeline?'
    assert ('print(nlp.pipe_names)' in __solution__), 'Are you printing the pipe names?'
    assert ('print(nlp.pipeline)' in __solution__), 'Are you printing the pipeline?'
    __msg__.good("Well done! Whenever you're unsure about the current pipeline, you can inspect it by printing nlp.pipe_names or nlp.pipeline.")
# NOTE(review): the four lines below look like stripped decorators (flask route
# registrations plus login/copr guards); their original names are not
# recoverable from this listing, so they are kept verbatim — confirm upstream.
_ns.route('/<username>/<coprname>/update_permissions/', methods=['POST'])
_ns.route('/g/<group_name>/<coprname>/update_permissions/', methods=['POST'])
_required
_with_copr
def copr_update_permissions(copr):
    """Apply the submitted builder/admin permission changes for a copr project."""
    permissions = copr.copr_permissions
    permissions_form = forms.PermissionsFormFactory.create_form_cls(permissions)()
    if permissions_form.validate_on_submit():
        try:
            # process the submitting user's own row first so a self-demotion
            # cannot block updating the remaining rows
            permissions.sort(key=(lambda x: ((- 1) if (x.user_id == flask.g.user.id) else 1)))
            for perm in permissions:
                old_builder = perm.copr_builder
                old_admin = perm.copr_admin
                new_builder = permissions_form['copr_builder_{0}'.format(perm.user_id)].data
                new_admin = permissions_form['copr_admin_{0}'.format(perm.user_id)].data
                coprs_logic.CoprPermissionsLogic.update_permissions(flask.g.user, copr, perm, new_builder, new_admin)
                # BUG FIX: was `old_builder is not new_builder` — identity
                # comparison only works by accident for bool singletons; use
                # value inequality instead.
                if (flask.current_app.config.get('SEND_EMAILS', False) and ((old_builder != new_builder) or (old_admin != new_admin))):
                    permission_dict = {'old_builder': old_builder, 'old_admin': old_admin, 'new_builder': new_builder, 'new_admin': new_admin}
                    msg = PermissionChangeMessage(copr, permission_dict)
                    send_mail([perm.user.mail], msg)
        except exceptions.InsufficientRightsException as e:
            db.session.rollback()
            flask.flash(str(e), 'error')
        else:
            db.session.commit()
            flask.flash('Project permissions were updated successfully.', 'success')
    return flask.redirect(url_for_copr_details(copr))
# NOTE(review): the line below looks like a stripped flask route decorator
# (e.g. `@app.route('/url/<engine>/<injection>')`) — confirm upstream.
('/url/<engine>/<injection>')
def url_reflect(engine, injection):
    """Deliberately vulnerable endpoint: renders `injection` through the chosen template engine.

    This is an SSTI test target — the eval/template rendering of request data
    is intentional here and must never be exposed outside a test harness.
    Returns None (HTTP error) for unknown engines.
    """
    template = request.values.get('tpl')
    if (not template):
        template = '%s'
    # the payload is interpolated into the template, then rendered — by design
    if (engine == 'mako'):
        return ((randomword() + MakoTemplates((template % injection), lookup=mylookup).render()) + randomword())
    elif (engine == 'jinja2'):
        return ((randomword() + Jinja2Env.from_string((template % injection)).render()) + randomword())
    elif (engine == 'eval'):
        # SECURITY: eval of request-controlled input — intentional for this test app
        return ((randomword() + str(eval((template % injection)))) + randomword())
    elif (engine == 'tornado'):
        return ((randomword() + tornado.template.Template((template % injection)).generate().decode()) + randomword())
def transpose_to_ascii(buf, offset_array, wide_char_key):
    """Map each value in `buf` through the (offset_array -> wide_char_key) table.

    Values are matched against `offset_array`; the corresponding code point in
    `wide_char_key` (an int) is converted with chr().  Unmatched values are
    skipped.  Returns a list of one-character strings.

    BUG FIX: the original returned '' (a str) for empty input but a list
    otherwise; it now consistently returns a list ([] when empty).
    Also replaces the O(len(buf) * len(key)) inner scan with a dict lookup.
    """
    # first match wins, mirroring the original `break` on the first hit
    mapping = {}
    for offset, key in zip(offset_array, wide_char_key):
        mapping.setdefault(offset, key)
    return [chr(mapping[value]) for value in buf if value in mapping]
def extractVerylazymtlBlogspotCom(item):
    """Parse a release post; return a release message, None to skip, or False if unmatched."""
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # skip previews and posts with no volume/chapter information
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = {
        'overgod': ('Overgod Ascension', 'translated'),
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tagname, (name, tl_type) in tagmap.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def get_layout_document_with_text_and_graphics_replaced_by_graphics(layout_document: LayoutDocument, semantic_graphics: Iterable[SemanticGraphic]) -> LayoutDocument:
    """Replace both text and graphics overlapping the semantic graphics.

    Thin wrapper over the text-or-graphic variant with overlapping-text
    replacement switched on.
    """
    return get_layout_document_with_text_or_graphic_replaced_by_graphics(layout_document, semantic_graphics=semantic_graphics, is_replace_overlapping_text=True)
def validate_bls_withdrawal_credentials_list(input_bls_withdrawal_credentials_list: str) -> Sequence[bytes]:
    """Split the raw input string and validate every BLS withdrawal credential in it."""
    credentials = normalize_input_list(input_bls_withdrawal_credentials_list)
    return list(map(validate_bls_withdrawal_credentials, credentials))
def test_integer_range_argument():
    """IntegerArgument accepts values inside [from, to] and rejects those outside with a message."""
    low, high = 10, 20
    integer = IntegerArgument(from_value=low, to_value=high)
    # boundaries and an interior value all validate
    for ok in (high, low, 15):
        assert integer.validate(f'{ok}')
    # one below and one above the range both fail with the range message
    for bad in (9, 21):
        validation_status = integer.validate(f'{bad}')
        assert not validation_status
        assert validation_status.message() == (IntegerArgument.NOT_IN_RANGE % f'{low} <= {bad} <= {high}')
def get_encoded_array_by_index(content, index):
    """Walk an encoded array and return (byte position, value) of element `index`.

    The array starts with a uleb128 element count; elements are decoded
    sequentially with get_encoded_value().

    NOTE(review): when `index` is out of range, the bare int `offset` is
    returned instead of a tuple — callers must handle both shapes; confirm
    whether this asymmetry is intentional.
    """
    (offset, size) = get_uleb128(content)
    # running byte position including the uleb128 header length
    userbyte = offset
    for i in range(0, size):
        (off, value) = get_encoded_value(content[offset:])
        offset += off
        userbyte += off
        if (index == i):
            return (userbyte, value)
    return offset
class RepeatedField(Packet):
    """A length-prefixed sequence: a count field followed by that many subfields."""

    def __init__(self, name, subfield_cls, length_field_cls=UIntByte):
        super().__init__()
        self.name = name
        self.subfield_cls = subfield_cls
        self.length_field = length_field_cls('count of ' + name)
        self.payload = []

    def decode(self, data):
        """Consume the count, then that many subfields; return the remaining bytes."""
        self.payload = []
        remaining = self.length_field.decode(data)
        for _ in range(self.length_field.val):
            element = self.subfield_cls()
            remaining = element.decode(remaining)
            self.payload.append(element)
        return remaining

    def show(self, depth=0):
        """Print the count, then each element one indentation level deeper."""
        print('{}{}: {}'.format((PRINT_INDENT * depth), self.name, self.length_field.val))
        for element in self.payload:
            element.show(depth + 1)
def write_sample_to_lsl() -> None:
    """Publish ~300 samples of a 10-channel mock signal to an LSL stream outlet."""
    sample_rate = 100
    name = 'Mock_Signal'
    stream_type = 'mock_type'  # FIX: renamed from `type`, which shadowed the builtin
    n_channels = 10
    info = StreamInfo(name, stream_type, n_channels, sample_rate, 'float32', 'myuid34234')
    outlet = StreamOutlet(info)
    logger.log(1, 'now sending data...')
    # NOTE(review): `samples` is not defined in this function — it must be a
    # module-level global with n_channels entries; confirm it exists.
    max_iter = 300
    # FIX: for-loop replaces the manual counter, whose name `iter` shadowed the builtin
    for _ in range(max_iter):
        outlet.push_sample(samples)
        time.sleep(0.01)
# NOTE(review): the line below is the residue of a stripped compiler
# directive/decorator (likely Cython's boundscheck/wraparound flags);
# left byte-identical.
(boundscheck=False, wraparound=False)
def row_sum_loops(arr: const(V2d), columns: const(V1d_i)):
    # Sum the selected `columns` of each row of `arr`.
    #
    # Returns a 1-D array `res` with res[i] = sum(arr[i, c] for c in columns),
    # using the dtype of arr's first element.
    i: T_index
    j: T_index
    sum_: T
    dtype = type(arr[(0, 0)])
    res: V1d = np.empty(arr.shape[0], dtype=dtype)
    for i in range(arr.shape[0]):
        # Accumulate in the array's own dtype to avoid implicit upcasting.
        sum_ = dtype(0)
        for j in range(columns.shape[0]):
            sum_ += arr[(i, columns[j])]
        res[i] = sum_
    return res
_renderer(wrap_type=MyTest)
class MyTestRenderer(TestRenderer):
    """Renderer for MyTest: JSON test parameters plus an HTML bar chart."""
    def render_json(self, obj: MyTest) -> dict:
        """Extend the base JSON payload with the condition and both sum values."""
        payload = super().render_json(obj)
        metric = obj._metric.get_result()
        params = payload['parameters']
        params['condition'] = obj.get_condition().as_dict()
        params['reference_sum_value'] = metric.reference_sum_value
        params['current_sum_value'] = metric.current_sum_value
        return payload
    def render_html(self, obj: MyTest) -> List[BaseWidgetInfo]:
        """Attach a histogram bar chart below the base HTML widgets."""
        widgets = super().render_html(obj)
        metric = obj._metric.get_result()
        bar_chart = go.Figure(go.Bar(x=metric.x_values_for_hist, y=metric.y_values_for_hist))
        widgets.with_details('', plotly_figure(title='Example plot', figure=bar_chart))
        return widgets
def test_error_if_denominator_probability_is_zero_1_var():
    """WoEEncoder.fit must raise and name the variable whose category has a
    zero numerator or denominator in the WoE calculation."""
    target = [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0]
    msg_var_a = "During the WoE calculation, some of the categories in the following features contained 0 in the denominator or numerator, and hence the WoE can't be calculated: var_A."
    msg_var_b = "During the WoE calculation, some of the categories in the following features contained 0 in the denominator or numerator, and hence the WoE can't be calculated: var_B."

    def check(col_a, col_b, expected_msg):
        # Build the frame and assert fit() raises with the exact message.
        df = pd.DataFrame({'var_A': col_a, 'var_B': col_b, 'target': target})
        encoder = WoEEncoder(variables=None)
        with pytest.raises(ValueError) as record:
            encoder.fit(df[['var_A', 'var_B']], df['target'])
        assert str(record.value) == expected_msg

    # var_A has the degenerate category split.
    check(['A'] * 6 + ['B'] * 10 + ['C'] * 4,
          ['A'] * 10 + ['B'] * 6 + ['C'] * 4,
          msg_var_a)
    # Swapping the columns makes var_B the offender.
    check(['A'] * 10 + ['B'] * 6 + ['C'] * 4,
          ['A'] * 6 + ['B'] * 10 + ['C'] * 4,
          msg_var_b)
def setup_to_pass_ipv4():
    """Fixture-style generator: open iptables for established (and new
    outbound) tcp/udp/icmp traffic, yield to the test, then flush all rules."""
    protocols = ('tcp', 'udp', 'icmp')
    # Inbound: only already-established flows.
    for proto in protocols:
        shellexec(f'iptables -A INPUT -p {proto} -m state --state ESTABLISHED -j ACCEPT')
    # Outbound: new and established flows.
    for proto in protocols:
        shellexec(f'iptables -A OUTPUT -p {proto} -m state --state NEW,ESTABLISHED -j ACCEPT')
    yield None
    # Teardown: drop every rule added above.
    shellexec('iptables -F')
class BlobStagingArea(BaseStagingArea, BlobArea):
    """Azure Blob staging area for GitHub push-event commits, repositories
    and configuration files.

    Path helpers build date/company-partitioned parquet blob paths; the
    save/get helpers read and write pandas DataFrames at those paths.
    """
    AREA_CONTAINER = 'staging'

    def get_push_events_commits_file_path(self, date: datetime, company: str):
        """Blob path of the per-company push-event commits parquet for `date`."""
        return f'{self.get_push_events_commits_parent_dir(date)}/{self.get_push_events_commits_filename(date, company)}'

    @staticmethod
    def _get_file_name(path: str) -> str:
        """Return the last component of a '/'-separated blob path.

        NOTE(review): originally defined without `self` (a decorator was
        probably stripped); restored as an explicit staticmethod.
        """
        return path.split('/').pop()

    @staticmethod
    def get_private_push_events_commits_file_path(date: datetime, company: str):
        """Path of the private per-company commits parquet (company upper-cased).

        NOTE(review): originally lacked `self`, so the keyword call at
        save_private_push_events_commits would have raised TypeError;
        restored as a staticmethod, which keeps all call sites working.
        """
        return date.strftime(f'{normalize_company(name=company).upper()}/github/events/push/%Y/%m/%Y-%m-%d.parquet')

    def save_private_push_events_commits(self, push_event_commits: pd.DataFrame, company_name: str, date: datetime):
        """Persist private push-event commits as parquet, logging the target path."""
        file_path = self.get_private_push_events_commits_file_path(date=date, company=company_name)
        log.info(f'''Save private push events commits for {date} into file {file_path}
        DF INFO: {get_pandas_data_frame_info(push_event_commits)}''')
        self.write_pandas_dataframe_to_parquet(push_event_commits, file_path, index=False)

    def save_push_events_commits(self, push_event_commits: pd.DataFrame, company_name: str, date: datetime):
        """Persist push-event commits as parquet, logging the target path."""
        file_path = self.get_push_events_commits_file_path(date=date, company=company_name)
        log.info(f'''Save push events commits for {date} into file {file_path}
        DF INFO: {get_pandas_data_frame_info(push_event_commits)}''')
        self.write_pandas_dataframe_to_parquet(push_event_commits, file_path, index=False)

    def get_push_events_commits_spark_paths(self, to_date: datetime, date_period_type: str=DatePeriodType.YTD, from_date: datetime=None, company=None) -> List[str]:
        """Spark-compatible URIs for the commit parquet paths in the period."""
        return self.get_spark_paths(self.get_push_events_commits_paths(to_date=to_date, date_period_type=date_period_type, from_date=from_date, company=company))

    def get_push_events_commits(self, to_date: datetime, date_period_type: str=DatePeriodType.YTD, from_date: datetime=None, company=None) -> pd.DataFrame:
        """Concatenate every commit parquet in the period; empty frame when none exist."""
        paths = self.get_push_events_commits_paths(to_date=to_date, date_period_type=date_period_type, from_date=from_date, company=company)
        return (pd.concat([self.read_pandas_dataframe_from_parquet(path) for path in paths]) if paths else pd.DataFrame())

    def get_push_events_commits_parent_dir(self, date: datetime) -> str:
        """Date-partitioned parent directory: <base>/YYYY/MM/DD."""
        return date.strftime(f'{self._github_events_commits_base}/%Y/%m/%d')

    @staticmethod
    def get_push_events_commits_filename(date: datetime, company: str) -> str:
        """File name `<normalized-company>-YYYY-MM-DD.parquet`.

        NOTE(review): originally lacked `self`, so the positional call in
        get_push_events_commits_file_path would have raised TypeError;
        restored as a staticmethod.
        """
        return date.strftime(f'{normalize_company(name=company)}-%Y-%m-%d.parquet')

    def _get_date_partitioned_paths(self, dir_path: str, year: Union[(str, int)]=None, month: Union[(str, int)]=None, day: Union[(str, int)]=None, company: str=None) -> Iterable[str]:
        """List blob names under `dir_path`, optionally narrowed by
        year/month/day partitions and a company file-name prefix."""
        dir_path += '/'
        if (year is not None):
            dir_path += f'{year}/'
        if (month is not None):
            dir_path += f'{str(month).zfill(2)}/'
        if (day is not None):
            dir_path += f'{str(day).zfill(2)}/'
        if (company is not None):
            dir_path += f'{normalize_company(name=company)}-'
        return (blob['name'] for blob in self.container_client.list_blobs(name_starts_with=dir_path))

    def _get_configuration_file_path(self, file: str) -> str:
        """Path of a configuration file inside the configuration area dir."""
        return f'{self.CONF_AREA_DIR}/{file}'

    def load_projects_filter(self):
        """Load and parse the projects_filter.yaml configuration file."""
        return self.read_yaml_file(path=self._get_configuration_file_path(file='projects_filter.yaml'))

    def get_repositories_path(self, date: datetime) -> str:
        """Month-partitioned path of the repositories parquet for `date`."""
        return f'{self._github_repositories_base}/{date:%Y/%m}/{self._get_repositories_file_name(date)}'

    def get_repositories(self, date: datetime) -> pd.DataFrame:
        """Read the repositories parquet for `date`."""
        return self.read_pandas_dataframe_from_parquet(path=self.get_repositories_path(date=date))

    def save_repositories(self, df: pd.DataFrame, date: datetime):
        """Write the repositories parquet for `date`."""
        self.write_pandas_dataframe_to_parquet(df=df, path=self.get_repositories_path(date=date))

    def get_raw_push_events_commits_parent_dir_path(self, date: datetime):
        """Day-partitioned parent directory of raw push-event commits."""
        return f'{self._github_raw_events_commits_base}/{date:%Y/%m/%d}'

    def get_raw_push_events_commits_path(self, date: datetime, company: str) -> str:
        """Full blob path of a company's raw push-event commits for `date`."""
        return f'{self.get_raw_push_events_commits_parent_dir_path(date)}/{self._get_raw_push_events_commits_file_name(date, company)}'

    def get_raw_push_events_commits(self, path: str) -> pd.DataFrame:
        """Read raw commits parquet at `path`; empty frame when missing."""
        df = self.read_pandas_dataframe_from_parquet(path)
        return (df if (df is not None) else pd.DataFrame())

    def save_raw_push_events_commits(self, push_event_commits: pd.DataFrame, company_name: str, date: datetime):
        """Write a company's raw push-event commits parquet for `date`."""
        self.write_pandas_dataframe_to_parquet(df=push_event_commits, path=self.get_raw_push_events_commits_path(date=date, company=company_name))

    def get_daily_raw_push_events_commits_paths(self, date: datetime) -> Iterator[Union[str]]:
        """Yield the blob names of all raw commit files stored for `date`."""
        prefix = self.get_raw_push_events_commits_parent_dir_path(date)
        return (blob['name'] for blob in self.container_client.list_blobs(name_starts_with=prefix))
def test_automatically_created_forms(client, msend):
    # End-to-end flow: forms are auto-created per (referer host, email),
    # submissions are only stored after confirmation, and the archive keeps
    # only ARCHIVED_SUBMISSIONS_LIMIT (2) most-recent submissions per form.
    #
    # NOTE(review): the referer URLs (and target emails) in this test were
    # redacted during source processing, leaving unbalanced string literals
    # like `{'referer': ' data={...`.  Restore the original URLs before use.
    # First POST auto-creates an unconfirmed form; nothing is stored yet.
    client.post('/', headers={'referer': ' data={'name': 'john'})
    query = Form.query.filter_by(host='somewhere.com', email='')
    assert (query.count() == 1)
    form = query.first()
    assert (form.submissions.count() == 0)
    # Confirm the form so subsequent submissions are archived.
    form.confirmed = True
    DB.session.add(form)
    DB.session.commit()
    client.post('/', headers={'referer': ' data={'_replyto': '', 'name': 'johann'})
    form = query.first()
    assert (form.submissions.count() == 1)
    client.post('/', headers={'referer': ' data={'_replyto': '', '_next': ' 'name': 'johannes', 'message': 'salve!'})
    form = query.first()
    assert (form.submissions.count() == 2)
    # Submissions are returned newest-first; control fields like _next are
    # stored only when present in that submission.
    submissions = form.submissions.all()
    assert (2 == len(submissions))
    assert ('message' not in submissions[1].data)
    assert ('_next' not in submissions[1].data)
    assert ('_next' in submissions[0].data)
    assert ('' == submissions[1].data['_replyto'])
    assert ('' == submissions[0].data['_replyto'])
    assert ('johann' == submissions[1].data['name'])
    assert ('johannes' == submissions[0].data['name'])
    assert ('salve!' == submissions[0].data['message'])
    # The archive is capped: a third submission evicts the oldest.
    assert (settings.ARCHIVED_SUBMISSIONS_LIMIT == 2)
    client.post('/', headers={'referer': ' data={'which-submission-is-this': 'the third!'})
    assert (2 == form.submissions.count())
    newest = form.submissions.first()
    assert (newest.data['which-submission-is-this'] == 'the third!')
    client.post('/', headers={'referer': ' data={'which-submission-is-this': 'the fourth!'})
    assert (2 == form.submissions.count())
    (newest, last) = form.submissions.all()
    assert (newest.data['which-submission-is-this'] == 'the fourth!')
    assert (last.data['which-submission-is-this'] == 'the third!')
    # A different referer host creates a second, independent form.
    client.post('/', headers={'referer': ' data={'name': 'send me the confirmation!'})
    query = Form.query.filter_by(host='here.com', email='')
    assert (query.count() == 1)
    secondform = query.first()
    assert (secondform.submissions.count() == 0)
    secondform.confirmed = True
    DB.session.add(form)
    DB.session.commit()
    client.post('/', headers={'referer': ' data={'name': 'leibniz'})
    assert (1 == secondform.submissions.count())
    assert (secondform.submissions.first().data['name'] == 'leibniz')
    client.post('/', headers={'referer': ' data={'name': 'schelling'})
    assert (2 == secondform.submissions.count())
    (newest, last) = secondform.submissions.all()
    assert (newest.data['name'] == 'schelling')
    assert (last.data['name'] == 'leibniz')
    client.post('/', headers={'referer': ' data={'name': 'husserl'})
    assert (2 == secondform.submissions.count())
    (newest, last) = secondform.submissions.all()
    assert (newest.data['name'] == 'husserl')
    assert (last.data['name'] == 'schelling')
    # The two forms' archives do not interfere with each other.
    (newest, last) = form.submissions.all()
    assert (newest.data['which-submission-is-this'] == 'the fourth!')
    assert (last.data['which-submission-is-this'] == 'the third!')
    client.post('/', headers={'referer': ' data={'which-submission-is-this': 'the fifth!'})
    assert (2 == form.submissions.count())
    (newest, last) = form.submissions.all()
    assert (newest.data['which-submission-is-this'] == 'the fifth!')
    assert (last.data['which-submission-is-this'] == 'the fourth!')
    assert (2 == secondform.submissions.count())
    (newest, last) = secondform.submissions.all()
    assert (newest.data['name'] == 'husserl')
    assert (last.data['name'] == 'schelling')
class StatusSerializationCodec(SerializationCodecAPI[StatusPayload]):
    # RLP codec for Status handshake payloads.  `item_sedes` maps each
    # payload key to its RLP sedes, or to None for keys carried without
    # a serialized value.
    #
    # NOTE(review): the bare `_tuple` lines below are residue of stripped
    # decorators (likely eth-utils' `@to_tuple`, which collects the
    # generator's yields into a tuple); left byte-identical.
    item_sedes: ClassVar[Dict[(str, Any)]]
    _tuple
    def _encode_items(self, *items: Tuple[(str, Any)]) -> Iterable[Tuple[(str, bytes)]]:
        # Serialize each (key, value) pair; unknown keys are rejected,
        # sedes-less keys are emitted with an empty byte value.
        for (key, value) in items:
            if (key not in self.item_sedes):
                raise ValidationError(f'Unknown key: {key}')
            item_sedes = self.item_sedes[key]
            if (item_sedes is None):
                (yield (key, b''))
            else:
                (yield (key, item_sedes.serialize(value)))
    def encode(self, payload: StatusPayload) -> bytes:
        # Payload pairs -> per-item serialization -> outer RLP structure.
        items = self._encode_items(*payload.to_pairs())
        return rlp.encode(items, sedes=STATUS_STRUCTURE)
    _tuple
    def _decode_items(self, *items: Tuple[(str, bytes)]) -> Iterable[Tuple[(str, Any)]]:
        # Inverse of _encode_items.  Unknown keys pass through raw (forward
        # compatibility); sedes-less keys decode to None.
        for (key, raw_value) in items:
            if (key not in self.item_sedes):
                (yield (key, raw_value))
                continue
            item_sedes = self.item_sedes[key]
            if (item_sedes is None):
                (yield (key, None))
            else:
                value = item_sedes.deserialize(raw_value)
                (yield (key, value))
    def decode(self, data: bytes) -> StatusPayload:
        # Outer RLP structure -> per-item deserialization -> payload object.
        raw_items = rlp.decode(data, sedes=STATUS_STRUCTURE, recursive_cache=True)
        items = self._decode_items(*raw_items)
        return StatusPayload.from_pairs(*items)
def build_table_violations(table, rule_name):
    """Wrap a single RETENTION_VIOLATION for `table` in a one-element list."""
    violation = rre.RuleViolation(
        resource_name='bigquery_tables/' + table.id,
        resource_id=table.id,
        resource_type=table.type,
        full_name=table.full_name,
        rule_index=0,
        rule_name=rule_name,
        violation_type='RETENTION_VIOLATION',
        violation_data=table.data,
        resource_data=table.data,
    )
    return [violation]
class OptionSeriesParetoSonificationDefaultinstrumentoptionsMappingTremoloSpeed(Options):
    # Config wrapper for the tremolo-speed mapping options of a sonification
    # instrument.
    #
    # NOTE(review): each name is defined twice -- these were property
    # getter/setter pairs whose `@property`/`@x.setter` decorators were
    # stripped during source processing.  As written, the second (setter)
    # definition simply overrides the first; restore the decorators before
    # use.
    def mapFunction(self):
        # Getter: mapping function (default None).
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter: store the mapping function verbatim (not JS-typed).
        self._config(value, js_type=False)
    def mapTo(self):
        # Getter: property the mapping targets.
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        # Getter: upper bound of the mapped range.
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        # Getter: lower bound of the mapped range.
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        # Getter: range constraint for the mapping.
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
def test_read_top_level_from_top_level_txt() -> None:
    """Top-level module names are parsed from the distribution's metadata text."""
    class FakeDistribution():
        def __init__(self) -> None:
            pass
        def read_text(self, file_name: str) -> str:
            # Two top-level modules, newline-separated (top_level.txt format).
            return 'foo\nbar'
    with patch('deptry.dependency.metadata.distribution') as mocked:
        mocked.return_value = FakeDistribution()
        dependency = Dependency('Foo-bar', Path('pyproject.toml'))
        assert dependency.name == 'Foo-bar'
        assert dependency.definition_file == Path('pyproject.toml')
        assert dependency.top_levels == {'foo', 'bar'}
class GithubCliSetupError(ErsiliaError):
    """Raised when the GitHub CLI (gh) is not available on the system."""
    def __init__(self):
        self.message = self._get_message()
        self.hints = self._get_hints()
        ErsiliaError.__init__(self, self.message, self.hints)
    def _get_message(self):
        # One-sentence explanation shown to the user.
        return 'GitHub CLI is not installed! GitHub CLI is a fantastic tool to interact with GitHub. Ersilia uses it in the backend.'
    def _get_hints(self):
        # Suggest the conda-forge install route.
        return ('An easy way to install the GitHub CLI is the following Conda command:\n'
                '$ conda install -c conda-forge gh')
class _AdviseConnection(object):
    # Manages a COM connection-point advisory link: connects `receiver` to
    # the `interface` events of `source` and unadvises on disconnect or GC.
    def __init__(self, source, interface, receiver):
        # Initialize attributes first so __del__ is safe even if _connect raises.
        self.cp = None       # IConnectionPoint for `interface`
        self.cookie = None   # advise cookie returned by Advise()
        self.receiver = None # event sink kept alive for the connection's lifetime
        self._connect(source, interface, receiver)
    def _connect(self, source, interface, receiver):
        # Find the connection point for the interface IID and start advising.
        cpc = source.QueryInterface(comtypes.connectionpoints.IConnectionPointContainer)
        self.cp = cpc.FindConnectionPoint(ctypes.byref(interface._iid_))
        logger.debug('Start advise %s', interface)
        self.cookie = self.cp.Advise(receiver)
        self.receiver = receiver
    def disconnect(self):
        # Explicit teardown: unadvise and drop the sink reference.
        if self.cookie:
            self.cp.Unadvise(self.cookie)
            logger.debug('Unadvised %s', self.cp)
            self.cp = None
            self.cookie = None
            del self.receiver
    def __del__(self):
        # Best-effort finalizer: COM/OS errors during interpreter shutdown
        # are deliberately swallowed.
        try:
            if (self.cookie is not None):
                self.cp.Unadvise(self.cookie)
        except (comtypes.COMError, WindowsError):
            pass
class CoinChooserRandom(CoinChooserBase):
    # Coin selection by randomized search: tries single buckets, then random
    # permutations, and picks the candidate with the lowest penalty.
    def bucket_candidates(self, buckets, sufficient_funds):
        """Returns a list of bucket sets (as lists) that each satisfy
        sufficient_funds, with unneeded buckets stripped."""
        candidates = set()
        # Any single bucket that suffices is a candidate on its own.
        for (n, bucket) in enumerate(buckets):
            if sufficient_funds([bucket]):
                candidates.add((n,))
        # Randomized greedy passes: shuffle, then take a growing prefix until
        # funds suffice.  Candidates are stored as sorted index tuples to
        # dedupe prefixes that differ only in order.
        attempts = min(100, (((len(buckets) - 1) * 10) + 1))
        permutation = list(range(len(buckets)))
        for _i in range(attempts):
            self.p.shuffle(permutation)
            bkts = []
            for (count, index) in enumerate(permutation):
                bkts.append(buckets[index])
                if sufficient_funds(bkts):
                    candidates.add(tuple(sorted(permutation[:(count + 1)])))
                    break
            else:
                # A full permutation never sufficed -> no selection can.
                raise NotEnoughFunds()
        candidates = [[buckets[n] for n in c] for c in candidates]
        return [strip_unneeded(c, sufficient_funds) for c in candidates]
    def choose_buckets(self, buckets, sufficient_funds, penalty_func):
        """Returns the candidate bucket set with the minimum penalty."""
        candidates = self.bucket_candidates(buckets, sufficient_funds)
        penalties = [penalty_func(cand) for cand in candidates]
        winner = candidates[penalties.index(min(penalties))]
        logger.debug('Bucket sets: %d', len(buckets))
        logger.debug('Winning penalty: %d', min(penalties))
        return winner
class TestOnedockerRunner(unittest.TestCase):
    # Tests for the onedocker-runner CLI: docopt argument parsing, local and
    # S3-backed execution paths, exit codes, and OPAWDL integration.
    #
    # NOTE(review): the bare tuple/string expressions between methods (e.g.
    # `('onedocker.script.runner.onedocker_runner.run_cmd')`) are stripped
    # `@patch` / `@patch.object` decorators -- the mock parameters of the
    # following methods depend on them being restored.
    def setUp(self):
        # Build a reference CertificateRequest and cache its cert params
        # string for the bad-cert test.
        expected = CertificateRequest(key_algorithm=KeyAlgorithm.RSA, key_size=4096, passphrase='test', cert_folder=None, private_key_name=None, certificate_name=None, days_valid=5, country_name='US', state_or_province_name=None, locality_name=None, organization_name='Test Company', common_name=None, dns_name=None)
        self.test_cert_params = expected.convert_to_cert_params()
    def test_simple_args(self):
        # Minimal invocation: only package name and version; options default to None.
        doc = __onedocker_runner_doc__
        args = docopt(doc, ['test_package', '--version=1.0'])
        self.assertEqual(args['<package_name>'], 'test_package')
        self.assertEqual(args['--version'], '1.0')
        self.assertEqual(args['--repository_path'], None)
        self.assertEqual(args['--exe_path'], None)
        self.assertEqual(args['--exe_args'], None)
        self.assertEqual(args['--timeout'], None)
        self.assertEqual(args['--log_path'], None)
        self.assertEqual(args['--cert_params'], None)
    def test_complex_args(self):
        # Several options set; unset ones stay None, quoted exe_args keep quotes.
        doc = __onedocker_runner_doc__
        args = docopt(doc, ['test_package_3', '--version=4.20', '--repository_path=/foo/bar/path', '--timeout=23', "--exe_args='-h'"])
        self.assertEqual(args['<package_name>'], 'test_package_3')
        self.assertEqual(args['--version'], '4.20')
        self.assertEqual(args['--repository_path'], '/foo/bar/path')
        self.assertEqual(args['--exe_path'], None)
        self.assertEqual(args['--exe_args'], "'-h'")
        self.assertEqual(args['--timeout'], '23')
        self.assertEqual(args['--log_path'], None)
        self.assertEqual(args['--cert_params'], None)
    def test_main_no_args(self):
        # Missing required args -> docopt exits with the usage string.
        with self.assertRaises(SystemExit) as cm:
            main()
        self.assertEqual(str(cm.exception), 'Usage:\n    onedocker-runner <package_name> --version=<version> [options]')
    def test_main_local(self):
        # Local repository + existing executable -> SUCCESS exit code.
        with patch.object(sys, 'argv', ['onedocker-runner', 'echo', '--version=latest', '--repository_path=local', '--exe_path=/usr/bin/', '--exe_args=test_message']):
            with self.assertRaises(SystemExit) as cm:
                main()
            self.assertEqual(cm.exception.code, ExitCode.SUCCESS)
    def test_main_local_timeout(self):
        # sleep 2 with a 1s timeout -> TIMEOUT exit code.
        with patch.object(sys, 'argv', ['onedocker-runner', 'sleep', '--version=latest', '--repository_path=local', '--exe_path=/usr/bin/', '--exe_args=2', '--timeout=1']):
            with self.assertRaises(SystemExit) as cm:
                main()
            self.assertEqual(cm.exception.code, ExitCode.TIMEOUT)
    ('onedocker.script.runner.onedocker_runner.run_cmd')
    def test_main_local_run_command_error(self, mock_run_cmd):
        # run_cmd raising -> generic ERROR exit code.
        mock_run_cmd.side_effect = Exception('Failed to run command')
        with patch.object(sys, 'argv', ['onedocker-runner', 'echo', '--version=latest', '--repository_path=local', '--exe_path=/usr/bin/', '--exe_args=test_message']):
            with self.assertRaises(SystemExit) as cm:
                main()
            self.assertEqual(cm.exception.code, ExitCode.ERROR)
    def test_main_local_executable_unavailable(self):
        # Nonexistent executable -> SERVICE_UNAVAILABLE exit code.
        with patch.object(sys, 'argv', ['onedocker-runner', 'foo', '--version=latest', '--repository_path=local', '--exe_path=/usr/bin/']):
            with self.assertRaises(SystemExit) as cm:
                main()
            self.assertEqual(cm.exception.code, ExitCode.SERVICE_UNAVAILABLE)
    ('onedocker.script.runner.onedocker_runner.run_cmd')
    def test_main_local_executable_failed(self, mock_run_cmd):
        # Non-zero return from run_cmd -> EXE_ERROR exit code.
        mock_run_cmd.return_value = 1
        with patch.object(sys, 'argv', ['onedocker-runner', 'echo', '--version=latest', '--repository_path=local', '--exe_path=/usr/bin/', '--exe_args=test_message']):
            with self.assertRaises(SystemExit) as cm:
                main()
            mock_run_cmd.assert_called_once_with('/usr/bin/echo test_message', None)
            self.assertEqual(cm.exception.code, ExitCode.EXE_ERROR)
    (OneDockerRepositoryService, 'download')
    ('onedocker.script.runner.onedocker_runner.S3Path')
    ('onedocker.script.runner.onedocker_runner.S3StorageService')
    def test_main(self, MockS3StorageService, MockS3Path, mockOneDockerRepositoryServiceDownload):
        # S3 repository path -> package is downloaded once, then run succeeds.
        MockS3Path.region = MagicMock(return_value='us_west_1')
        with patch.object(sys, 'argv', ['onedocker-runner', 'echo', '--version=latest', '--repository_path=test_repo_path', '--timeout=1200', '--exe_path=/usr/bin/', '--exe_args=test_message']):
            with self.assertRaises(SystemExit) as cm:
                main()
            self.assertEqual(cm.exception.code, ExitCode.SUCCESS)
            mockOneDockerRepositoryServiceDownload.assert_called_once_with('echo', 'latest', '/usr/bin/echo')
    def test_main_bad_cert(self):
        # Incomplete cert params string -> InvalidParameterError before running.
        wrong_cert_params = str({'key_algorithm': KeyAlgorithm.RSA.value, 'key_size': 4096, 'organization_name': 'Test Organization'})
        with patch.object(sys, 'argv', ['onedocker-runner', 'echo', '--version=latest', '--exe_path=/usr/bin/', '--exe_args=measurement/private_measurement/pcp/oss/onedocker/tests/script/runner', f'--cert_params={wrong_cert_params}']):
            with patch('os.getenv', side_effect=(lambda x: getenv(x))):
                with self.assertRaises(InvalidParameterError):
                    main()
    def test_main_env(self):
        # Unset environment variables do not break a local run.
        with patch.object(sys, 'argv', ['onedocker-runner', 'echo', '--version=latest', '--exe_args="Test Message"', '--exe_path=/usr/bin/', '--repository_path=local']):
            with patch('os.getenv', return_value=None):
                with self.assertRaises(SystemExit) as cm:
                    main()
                self.assertEqual(cm.exception.code, ExitCode.SUCCESS)
    (OneDockerRepositoryService, 'download')
    ('onedocker.script.runner.onedocker_runner.S3Path')
    ('onedocker.script.runner.onedocker_runner.S3StorageService')
    ('onedocker.script.runner.onedocker_runner._run_opawdl')
    def test_main_with_opa_enabled(self, mockOneDockerRunOPAWDL, MockS3StorageService, MockS3Path, mockOneDockerRepositoryServiceDownload):
        # Passing --opa_workflow_path triggers exactly one _run_opawdl call.
        test_opa_workflow_path = '/home/xyz.json'
        MockS3Path.region = MagicMock(return_value='us_west_1')
        with patch.object(sys, 'argv', ['onedocker-runner', 'echo', '--version=latest', '--repository_path=test_repo_path', '--timeout=1200', '--exe_path=/usr/bin/', '--exe_args=test_message', f'--opa_workflow_path={test_opa_workflow_path}']):
            with self.assertRaises(SystemExit) as cm:
                main()
            self.assertEqual(cm.exception.code, 0)
            mockOneDockerRunOPAWDL.assert_called_once_with(test_opa_workflow_path)
    ('onedocker.script.runner.onedocker_runner.uuid.uuid4', side_effect=[123, 456])
    ('onedocker.script.runner.onedocker_runner.LocalOPAWDLWorkflowInstanceRepository.exist', side_effect=[True, False])
    def test_gen_opawdl_instance_id(self, mock_exist, mock_uuid4):
        # First uuid collides (exist=True), so the second uuid is used.
        test_repo = LocalOPAWDLWorkflowInstanceRepository('')
        res_instance_id = _gen_opawdl_instance_id(test_repo)
        self.assertEqual(res_instance_id, '456')
def getgw(iface):
    """Return the IPv4 default gateway parsed from `route print` output.

    Scans each output line for one whose second column is '0.0.0.0' and
    whose fourth column is not, returning that fourth column.  Returns ''
    when no such line exists or the command/parse fails.

    NOTE(review): `iface` is accepted (presumably for signature parity with
    other platform variants) but is not used -- confirm with callers.
    """
    try:
        # `with` closes the popen pipe even on errors (the original leaked it).
        with os.popen('route print') as output:
            for line in output:
                fields = line.split()
                # Guard short lines before indexing columns 1 and 3 (the
                # original's bare except silently absorbed IndexError here).
                if (len(fields) > 3
                        and fields[1].strip() == '0.0.0.0'
                        and fields[3].strip() != '0.0.0.0'):
                    return fields[3]
    except Exception:
        # Preserve best-effort behavior: any failure yields ''.
        pass
    return ''
class AdCampaignDeliveryEstimate(AbstractObject):
    # Generated Marketing-API model for campaign delivery estimates.
    # Field/enum constants mirror the Graph API schema; do not edit by hand.
    def __init__(self, api=None):
        super(AdCampaignDeliveryEstimate, self).__init__()
        self._isAdCampaignDeliveryEstimate = True
        self._api = api
    class Field(AbstractObject.Field):
        # API field names as string constants.
        daily_outcomes_curve = 'daily_outcomes_curve'
        estimate_dau = 'estimate_dau'
        estimate_mau_lower_bound = 'estimate_mau_lower_bound'
        estimate_mau_upper_bound = 'estimate_mau_upper_bound'
        estimate_ready = 'estimate_ready'
        targeting_optimization_types = 'targeting_optimization_types'
    class OptimizationGoal():
        # Allowed values for the optimization_goal enum parameter.
        ad_recall_lift = 'AD_RECALL_LIFT'
        app_installs = 'APP_INSTALLS'
        app_installs_and_offsite_conversions = 'APP_INSTALLS_AND_OFFSITE_CONVERSIONS'
        conversations = 'CONVERSATIONS'
        derived_events = 'DERIVED_EVENTS'
        engaged_users = 'ENGAGED_USERS'
        event_responses = 'EVENT_RESPONSES'
        impressions = 'IMPRESSIONS'
        in_app_value = 'IN_APP_VALUE'
        landing_page_views = 'LANDING_PAGE_VIEWS'
        lead_generation = 'LEAD_GENERATION'
        link_clicks = 'LINK_CLICKS'
        messaging_appointment_conversion = 'MESSAGING_APPOINTMENT_CONVERSION'
        messaging_purchase_conversion = 'MESSAGING_PURCHASE_CONVERSION'
        none = 'NONE'
        offsite_conversions = 'OFFSITE_CONVERSIONS'
        page_likes = 'PAGE_LIKES'
        post_engagement = 'POST_ENGAGEMENT'
        quality_call = 'QUALITY_CALL'
        quality_lead = 'QUALITY_LEAD'
        reach = 'REACH'
        reminders_set = 'REMINDERS_SET'
        subscribers = 'SUBSCRIBERS'
        thruplay = 'THRUPLAY'
        value = 'VALUE'
        visit_instagram_profile = 'VISIT_INSTAGRAM_PROFILE'
    # Field name -> API type string, used by the SDK's (de)serialization.
    _field_types = {'daily_outcomes_curve': 'list<OutcomePredictionPoint>', 'estimate_dau': 'int', 'estimate_mau_lower_bound': 'int', 'estimate_mau_upper_bound': 'int', 'estimate_ready': 'bool', 'targeting_optimization_types': 'list<map<string, int>>'}
    # NOTE(review): takes `cls` but has no decorator -- likely a stripped
    # @classmethod; restore before calling on the class.
    def _get_field_enum_info(cls):
        field_enum_info = {}
        field_enum_info['OptimizationGoal'] = AdCampaignDeliveryEstimate.OptimizationGoal.__dict__.values()
        return field_enum_info
class AnonymizationEntity(BaseModel):
    # Pydantic model for one detected entity span in a text to anonymize.
    #
    # NOTE(review): the bare `_validator(...)` / `_serializer(...)` calls are
    # residue of stripped pydantic decorators (`@field_validator`,
    # `@model_validator`, `@field_serializer`); restore them for the
    # validation/serialization hooks below to take effect.
    offset: int = Field(..., ge=0)               # start index of the span, >= 0
    length: int = Field(..., gt=0)               # span length, strictly positive
    category: CategoryType
    subcategory: SubCategoryType
    original_label: str = Field(..., min_length=1)
    content: str = Field(..., min_length=1)      # the matched text itself
    confidence_score: Optional[float] = Field(..., ge=0, le=1)
    model_config = ConfigDict(use_enum_values=True)
    _validator('content', mode='before')
    def content_must_be_str(cls, v):
        # Reject non-string content before coercion.
        if (not isinstance(v, str)):
            raise TypeError('entity must be a string')
        return v
    _validator('original_label', mode='before')
    def original_label_must_be_str(cls, v):
        # Reject non-string labels before coercion.
        if (not isinstance(v, str)):
            raise TypeError('original_label must be a string')
        return v
    _validator(mode='after')
    def content_length_must_be_equal_to_length(self):
        # Cross-field invariant: `length` must match the actual content length.
        if (len(self.content) != self.length):
            raise ValueError('content length must be equal to length')
        return self
    _validator('confidence_score')
    def round_confidence_score(cls, v):
        # Normalize scores to 3 decimal places; None passes through.
        if (v is not None):
            return round(v, 3)
        return v
    _serializer('subcategory', mode='plain', when_used='always')
    def serialize_subcategory(self, value: SubCategoryType, _: FieldSerializationInfo):
        # Serialize the enum member to its raw value.
        return value.value
class Paginator(DjangoPaginator):
    """Django paginator that builds absolute next/previous links from a request
    and assembles a DRF-style paginated response payload."""
    page_query_param = 'page'
    invalid_page_message = 'Invalid page.'
    def __init__(self, request, object_list, per_page, **kwargs):
        super(Paginator, self).__init__(object_list, per_page, **kwargs)
        self._page = None
        self.request = request
    def get_page(self, page):
        """Resolve `page`, translating InvalidPage into an API-level NotFound."""
        try:
            self._page = self.page(page)
        except InvalidPage:
            raise NotFound(self.invalid_page_message)
        return self._page
    def get_next_link(self):
        """Absolute URL of the next page, or None on the last page."""
        if not self._page.has_next():
            return None
        current_url = self.request.build_absolute_uri()
        return replace_query_param(current_url, self.page_query_param,
                                   self._page.next_page_number())
    def get_previous_link(self):
        """Absolute URL of the previous page; None when there is no previous
        page or when the previous page is page 1."""
        if not self._page.has_previous():
            return None
        previous_number = self._page.previous_page_number()
        if previous_number == 1:
            return None
        current_url = self.request.build_absolute_uri()
        return replace_query_param(current_url, self.page_query_param, previous_number)
    def get_response_results(self, results):
        """Assemble the standard count/next/previous/results payload."""
        return OrderedDict([
            ('count', self._page.paginator.count),
            ('next', self.get_next_link()),
            ('previous', self.get_previous_link()),
            ('results', results),
        ])
class TlsSubscriptionResponseAttributes(ModelComposed):
    # Generated OpenAPI composed model (allOf: Timestamps +
    # TlsSubscriptionResponseAttributesAllOf).  Do not edit by hand.
    #
    # NOTE(review): the bare `_property` / `_js_args_to_python_args` lines are
    # residue of stripped decorators (`@cached_property`,
    # `@convert_js_args_to_python_args`, and a classmethod wrapper on
    # `_from_openapi_data`); restore them before use.
    allowed_values = {('state',): {'PENDING': 'pending', 'PROCESSING': 'processing', 'ISSUED': 'issued', 'RENEWING': 'renewing', 'FAILED': 'failed'}}
    validations = {}
    _property
    def additional_properties_type():
        # Types accepted for properties not declared in the spec.
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type)
    _nullable = False
    _property
    def openapi_types():
        # Declared attribute name -> accepted (tuple of) types.
        lazy_import()
        return {'created_at': (datetime, none_type), 'deleted_at': (datetime, none_type), 'updated_at': (datetime, none_type), 'state': (str,)}
    _property
    def discriminator():
        return None
    attribute_map = {'created_at': 'created_at', 'deleted_at': 'deleted_at', 'updated_at': 'updated_at', 'state': 'state'}
    read_only_vars = {'created_at', 'deleted_at', 'updated_at'}
    _js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        # Deserialization constructor: unlike __init__, allows read-only
        # attributes to be populated from server data.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        constant_args = {'_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes}
        composed_info = validate_get_composed_info(constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for (var_name, var_value) in kwargs.items():
            # Optionally drop unknown keys when the configuration says so.
            if ((var_name in discarded_args) and (self._configuration is not None) and self._configuration.discard_unknown_keys and self._additional_properties_model_instances):
                continue
            setattr(self, var_name, var_value)
        return self
    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', '_composed_instances', '_var_name_to_model_instances', '_additional_properties_model_instances'])
    _js_args_to_python_args
    def __init__(self, *args, **kwargs):
        # Client-side constructor: same composition handling as
        # _from_openapi_data, but rejects read-only attributes.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        constant_args = {'_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes}
        composed_info = validate_get_composed_info(constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for (var_name, var_value) in kwargs.items():
            if ((var_name in discarded_args) and (self._configuration is not None) and self._configuration.discard_unknown_keys and self._additional_properties_model_instances):
                continue
            setattr(self, var_name, var_value)
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
    _property
    def _composed_schemas():
        # allOf composition of the two component schemas.
        lazy_import()
        return {'anyOf': [], 'allOf': [Timestamps, TlsSubscriptionResponseAttributesAllOf], 'oneOf': []}
class TargetingSearch(AbstractObject):
    # Wrapper around the Marketing API targeting search endpoint.
    class DemographicSearchClasses(object):
        # Valid `class` values for demographic searches.
        demographics = 'demographics'
        ethnic_affinity = 'ethnic_affinity'
        family_statuses = 'family_statuses'
        generation = 'generation'
        home_ownership = 'home_ownership'
        home_type = 'home_type'
        home_value = 'home_value'
        household_composition = 'household_composition'
        income = 'income'
        industries = 'industries'
        life_events = 'life_events'
        markets = 'markets'
        moms = 'moms'
        net_worth = 'net_worth'
        office_type = 'office_type'
        politics = 'politics'
    class TargetingSearchTypes(object):
        # Valid `type` values for the search endpoint.
        country = 'adcountry'
        education = 'adeducationschool'
        employer = 'adworkemployer'
        geolocation = 'adgeolocation'
        geometadata = 'adgeolocationmeta'
        interest = 'adinterest'
        interest_suggestion = 'adinterestsuggestion'
        interest_validate = 'adinterestvalid'
        keyword = 'adkeyword'
        locale = 'adlocale'
        major = 'adeducationmajor'
        position = 'adworkposition'
        radius_suggestion = 'adradiussuggestion'
        targeting_category = 'adtargetingcategory'
        zipcode = 'adzipcode'
    # NOTE(review): takes `cls` but has no decorator -- likely a stripped
    # @classmethod; restore before calling TargetingSearch.search(...).
    def search(cls, params=None, api=None):
        # Issue GET /search with `params`, wrapping each result in a
        # TargetingSearch object.  Returns a (possibly empty) list.
        api = (api or FacebookAdsApi.get_default_api())
        if (not api):
            raise FacebookBadObjectError('An Api instance must be provided as an argument or set as the default Api in FacebookAdsApi.')
        params = ({} if (not params) else params.copy())
        response = api.call(FacebookAdsApi.HTTP_METHOD_GET, '/'.join((FacebookSession.GRAPH, FacebookAdsApi.API_VERSION, 'search')), params).json()
        ret_val = []
        if response:
            keys = response['data']
            # The endpoint may return either a list of items or a dict of
            # key -> item; handle both shapes.
            if isinstance(keys, list):
                for item in keys:
                    search_obj = TargetingSearch()
                    search_obj.update(item)
                    ret_val.append(search_obj)
            elif isinstance(keys, dict):
                for item in keys:
                    search_obj = TargetingSearch()
                    search_obj.update(keys[item])
                    if keys[item]:
                        ret_val.append(search_obj)
        return ret_val
def get_coords(obj: 'Color', fit: ((bool | str) | dict[(str, Any)]), none: bool, legacy: bool) -> Vector:
    """Return the color's coordinate list, optionally gamut-fitting it first.

    fit: False for no fitting, True for the default fit, a string naming
    the fit method, or a dict of keyword arguments for Color.fit().
    NaN channel values are kept only when `none` is requested and legacy
    mode is off.
    """
    if not fit:
        fitted = obj
    elif fit is True:
        fitted = obj.fit()
    elif isinstance(fit, str):
        fitted = obj.fit(method=fit)
    else:
        fitted = obj.fit(**fit)
    # Equivalent to the original `False if (legacy or not none) else True`.
    keep_nans = none and not legacy
    return fitted.coords(nans=keep_nans)
class LookalikeSpec(AbstractObject):
    """Ad-targeting lookalike audience specification (generated API object).

    NOTE(review): `_get_field_enum_info` takes `cls` and looks like a
    stripped classmethod — confirm against the generated upstream source.
    """
    def __init__(self, api=None):
        super(LookalikeSpec, self).__init__()
        # Marker attribute used by the SDK to recognize this object type.
        self._isLookalikeSpec = True
        self._api = api
    class Field(AbstractObject.Field):
        # API field names exposed by the LookalikeSpec object.
        country = 'country'
        is_financial_service = 'is_financial_service'
        origin = 'origin'
        origin_event_name = 'origin_event_name'
        origin_event_source_name = 'origin_event_source_name'
        origin_event_source_type = 'origin_event_source_type'
        product_set_name = 'product_set_name'
        ratio = 'ratio'
        starting_ratio = 'starting_ratio'
        target_countries = 'target_countries'
        target_country_names = 'target_country_names'
        type = 'type'
    # Field name -> wire type, used by the SDK for (de)serialization.
    _field_types = {'country': 'string', 'is_financial_service': 'bool', 'origin': 'list<Object>', 'origin_event_name': 'string', 'origin_event_source_name': 'string', 'origin_event_source_type': 'string', 'product_set_name': 'string', 'ratio': 'float', 'starting_ratio': 'float', 'target_countries': 'list<string>', 'target_country_names': 'list', 'type': 'string'}
    def _get_field_enum_info(cls):
        """Return enum metadata for fields; this object defines none."""
        field_enum_info = {}
        return field_enum_info
def get_advisory_opinions(from_ao_no):
    """Yield advisory-opinion (AO) documents ready for search indexing.

    from_ao_no: 'YYYY-N' string; only AOs at/after this number are loaded,
    or None to load all from the beginning.
    Each yielded dict carries AO metadata, citation cross-references,
    attached documents from the S3 bucket, and related entities.
    """
    bucket = get_bucket()
    ao_names = get_ao_names()
    # Map 'YYYY-N' -> (year, serial) ints, used for the sort keys below.
    ao_no_to_component_map = {a: tuple(map(int, a.split('-'))) for a in ao_names}
    citations = get_citations(ao_names)
    if (from_ao_no is None):
        # No starting point: include everything.
        (start_ao_year, start_ao_serial) = (0, 0)
    else:
        (start_ao_year, start_ao_serial) = tuple(map(int, from_ao_no.split('-')))
    with db.engine.connect() as conn:
        # ALL_AOS is parameterized on (year, serial, year) — see its definition.
        rs = conn.execute(ALL_AOS, (start_ao_year, start_ao_serial, start_ao_year))
        for row in rs:
            ao_id = row['ao_id']
            (year, serial) = ao_no_to_component_map[row['ao_no']]
            # Negative sort keys yield newest-first ordering downstream.
            ao = {'type': AO_DOC_TYPE, 'no': row['ao_no'], 'ao_no': row['ao_no'], 'ao_year': row['ao_year'], 'ao_serial': row['ao_serial'], 'doc_id': '{0}_{1}'.format(AO_DOC_TYPE, row['ao_no']), 'name': row['name'], 'summary': row['summary'], 'request_date': row['req_date'], 'issue_date': row['issue_date'], 'is_pending': ao_stage_to_pending(row['stage']), 'status': ao_stage_to_status(row['ao_no'], row['stage']), 'ao_citations': citations[row['ao_no']]['ao'], 'aos_cited_by': citations[row['ao_no']]['aos_cited_by'], 'statutory_citations': citations[row['ao_no']]['statutes'], 'regulatory_citations': citations[row['ao_no']]['regulations'], 'sort1': (- year), 'sort2': (- serial)}
            ao['documents'] = get_documents(ao_id, bucket)
            (ao['requestor_names'], ao['requestor_types'], ao['commenter_names'], ao['representative_names'], ao['entities']) = get_entities(ao_id)
            (yield ao)
def create_project():
    """Create a new project on Platform.sh under the caller's organization.

    Looks up the organization id via the Platform.sh CLI, then issues the
    `platform create` command. Raises RuntimeError when no organization id
    can be found in the CLI output (instead of crashing with AttributeError).
    """
    print('\n\nCreating a project on Platform.sh...')
    org_output = make_sp_call('platform org:info', capture_output=True).stdout.decode()
    # Organization ids are 26-character upper-case base32-like strings.
    match = re.search('([A-Z0-9]{26})', org_output)
    if match is None:
        # Bug fix: `re.search(...).group(1)` raised AttributeError when the
        # CLI output contained no id; fail with a clear message instead.
        raise RuntimeError('Could not find a Platform.sh organization id in `platform org:info` output.')
    org_id = match.group(1)
    print(f'  Found Platform.sh organization id: {org_id}')
    make_sp_call(f'platform create --title my_blog_project --org {org_id} --region us-3.platform.sh --yes')
def create_tcf_experiences_on_startup(db: Session) -> List[PrivacyExperience]:
    """Ensure every EEA country has a TCF overlay privacy experience.

    Creates a default TCF overlay experience for each EEA region that does
    not already have one, and returns only the newly created experiences.
    """
    created: List[PrivacyExperience] = []
    for eea_country in EEA_COUNTRIES:
        existing = PrivacyExperience.get_experience_by_region_and_component(db, eea_country.value, ComponentType.tcf_overlay)
        if existing:
            continue
        created.append(PrivacyExperience.create_default_experience_for_region(db, eea_country, ComponentType.tcf_overlay))
    return created
class ZClientDumper(ZClient):
    """ZClient subclass that logs every Zebra event it receives.

    NOTE(review): the bare `_ev_cls(...)` lines register the handlers below
    for specific event classes and look like stripped decorators — confirm
    against upstream ryu sources.
    """
    _ev_cls(zclient_event.EventZServConnected)
    def _zserv_connected_handler(self, ev):
        # Fired once the TCP connection to the Zebra server is established.
        self.logger.info('Zebra server connected to %s: %s', ev.zserv.sock.getpeername(), ev.zserv.sock)
    _ev_cls(event.EventZebraRouterIDUpdate)
    def _router_id_update_handler(self, ev):
        self.logger.info('ZEBRA_ROUTER_ID_UPDATE received: %s', ev.__dict__)
    _ev_cls(event.EventZebraInterfaceAdd)
    def _interface_add_handler(self, ev):
        self.logger.info('ZEBRA_INTERFACE_ADD received: %s', ev.__dict__)
    _ev_cls(event.EventZebraInterfaceAddressAdd)
    def _interface_address_add_handler(self, ev):
        self.logger.info('ZEBRA_INTERFACE_ADDRESS_ADD received: %s', ev.__dict__)
    _ev_cls(zclient_event.EventZServDisconnected)
    def _zserv_disconnected_handler(self, ev):
        # Fired when the connection to the Zebra server is lost.
        self.logger.info('Zebra server disconnected: %s', ev.zserv.sock)
def extractThecrankWordpressCom(item):
    """Build a release message for a thecrank.wordpress.com feed item.

    Returns None for preview posts or items with no volume/chapter info,
    a release message when a known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Ignore previews and items that carry neither a chapter nor a volume.
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class OptionSeriesBarEvents(Options):
    """Event-hook options for a bar series (Highcharts wrapper).

    Each event name appears twice: a no-argument getter returning the
    configured value and a one-argument setter storing it.
    NOTE(review): this is the @property/@<name>.setter pattern with the
    decorators stripped in this copy — confirm against upstream epyk.
    """
    def afterAnimate(self):
        # Getter: returns the configured handler (None when unset).
        return self._config_get(None)
    def afterAnimate(self, value: Any):
        # Setter: stores a plain (non-JS) value for the event hook.
        self._config(value, js_type=False)
    def checkboxClick(self):
        return self._config_get(None)
    def checkboxClick(self, value: Any):
        self._config(value, js_type=False)
    def click(self):
        return self._config_get(None)
    def click(self, value: Any):
        self._config(value, js_type=False)
    def hide(self):
        return self._config_get(None)
    def hide(self, value: Any):
        self._config(value, js_type=False)
    def legendItemClick(self):
        return self._config_get(None)
    def legendItemClick(self, value: Any):
        self._config(value, js_type=False)
    def mouseOut(self):
        return self._config_get(None)
    def mouseOut(self, value: Any):
        self._config(value, js_type=False)
    def mouseOver(self):
        return self._config_get(None)
    def mouseOver(self, value: Any):
        self._config(value, js_type=False)
    def show(self):
        return self._config_get(None)
    def show(self, value: Any):
        self._config(value, js_type=False)
class MyScaffoldMessage(Message):
    """Scaffold protocol message — a template to be filled in by the developer."""
    protocol_id = PublicId.from_str('fetchai/scaffold:0.1.0')
    serializer = MyScaffoldSerializer
    class Performative(Enum):
        # Enum of message performatives; the scaffold leaves it empty.
        def __str__(self) -> str:
            return str(self.value)
    def __init__(self, performative: Performative, **kwargs: Any) -> None:
        """Build the message and enforce its consistency invariant."""
        super().__init__(performative=performative, **kwargs)
        enforce(self._is_consistent(), 'MyScaffoldMessage initialization inconsistent.')
    def _is_consistent(self) -> bool:
        """Consistency check stub to be implemented by the developer.

        NOTE(review): NotImplementedError is not caught by the except clause
        below, so it propagates to the caller; the final `return True` is
        unreachable until real checks replace the `raise`.
        """
        try:
            raise NotImplementedError
        except (AssertionError, ValueError):
            return False
        return True
class GethBenchmarkFixture():
    """Manage a throwaway geth node for benchmarks.

    Picks a free TCP port, unzips a pre-built chain datadir fixture, and
    yields a running geth process serving HTTP-RPC on that port.
    """

    def __init__(self) -> None:
        self.rpc_port = self._rpc_port()
        self.endpoint_uri = self._endpoint_uri()
        self.geth_binary = self._geth_binary()

    def build(self) -> Generator[(Any, None, None)]:
        """Extract the chain fixture into a temp datadir and yield the geth process."""
        with TemporaryDirectory() as base_dir:
            zipfile_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../tests/integration/', GETH_FIXTURE_ZIP))
            tmp_datadir = os.path.join(str(base_dir), 'datadir')
            with zipfile.ZipFile(zipfile_path, 'r') as zip_ref:
                zip_ref.extractall(tmp_datadir)
            self.datadir = tmp_datadir
            genesis_file = os.path.join(self.datadir, 'genesis.json')
            (yield self._geth_process(self.datadir, genesis_file, self.rpc_port))

    def _rpc_port(self) -> str:
        """Ask the OS for a free TCP port and return it as a string."""
        sock = socket.socket()
        sock.bind(('127.0.0.1', 0))
        port = sock.getsockname()[1]
        sock.close()
        return str(port)

    def _endpoint_uri(self) -> str:
        """Return the HTTP-RPC endpoint URL for the chosen port."""
        # Bug fix: the return statement was truncated in this copy;
        # reconstructed the endpoint URL from the chosen port.
        return f'http://localhost:{self.rpc_port}'

    def _geth_binary(self) -> str:
        """Resolve the geth executable: explicit binary, versioned install, or PATH."""
        if ('GETH_BINARY' in os.environ):
            return os.environ['GETH_BINARY']
        elif ('GETH_VERSION' in os.environ):
            geth_version = os.environ['GETH_VERSION']
            _geth_binary = get_executable_path(geth_version)
            if (not os.path.exists(_geth_binary)):
                install_geth(geth_version)
            assert os.path.exists(_geth_binary)
            return _geth_binary
        else:
            return 'geth'

    def _geth_command_arguments(self, datadir: str) -> Sequence[str]:
        """Return the full geth command line for the benchmark node."""
        # Bug fix: the HTTP flags were garbled in this copy; reconstructed
        # the standard geth HTTP-RPC options (--http/--http.port/--http.api).
        return (
            self.geth_binary,
            '--datadir', str(datadir),
            '--nodiscover',
            '--fakepow',
            '--http',
            '--http.port', self.rpc_port,
            '--http.api', 'admin,eth,net,web3,personal,miner',
            '--ipcdisable',
            '--allow-insecure-unlock',
            '--miner.etherbase', COINBASE[2:],
            '--rpc.enabledeprecatedpersonal',
        )

    def _geth_process(self, datadir: str, genesis_file: str, rpc_port: str) -> Generator[(Any, None, None)]:
        """Init the datadir from the genesis file, then run geth until done."""
        init_datadir_command = (self.geth_binary, '--datadir', str(datadir), 'init', str(genesis_file))
        check_output(init_datadir_command, stdin=PIPE, stderr=PIPE)
        proc = Popen(self._geth_command_arguments(datadir), stdin=PIPE, stdout=PIPE, stderr=PIPE)
        try:
            (yield proc)
        finally:
            # Always shut geth down, even when the benchmark raised.
            kill_proc_gracefully(proc)
class CommunityData(object):
    """Community-based SNMP security parameters.

    mpModel: message-processing model selector; defaults to 1
    (presumably SNMPv2c per pysnmp convention — confirm against callers).
    """
    def __init__(self, communityName, mpModel=1):
        self.mpModel = mpModel
        self.communityName = communityName
    def __hash__(self):
        # Hash both fields so equal-valued instances collide in dicts/sets.
        return hash((self.communityName, self.mpModel))
    def __repr__(self):
        # Bug fix: the format string has two placeholders but three values
        # were supplied, raising TypeError whenever repr() was called.
        # The community name is deliberately redacted as <COMMUNITY>.
        return ('%s(communityName=<COMMUNITY>, mpModel=%r)' % (self.__class__.__name__, self.mpModel))
class OsherFunc(ObjectiveFunction_base):
    """Osher-solution objective for a scalar conservation law.

    Depending on the ordering of the left/right states, the residual is
    the argmin or argmax form of f(s) - xi*s, where xi = x/t is the
    similarity variable.
    """
    def __init__(self, LHS_s, RHS_s, fFunc, t, x):
        ObjectiveFunction_base.__init__(self, LHS_s, RHS_s)
        self.fFunc = fFunc
        self.xi = old_div(x, t)
        # Pick the objective form from the ordering of the two states.
        self.getResidual = self.Argmin if LHS_s < RHS_s else self.Argmax
    def Argmin(self, s):
        """Residual f(s) - xi*s (minimization form)."""
        flux = self.fFunc.getFlux(s)
        return flux - self.xi * s
    def Argmax(self, s):
        """Residual xi*s - f(s) (maximization form)."""
        flux = self.fFunc.getFlux(s)
        return self.xi * s - flux
def _loop_once():
    """Read CV and gate values from the patch, rescale them, and push them
    to the hardware outputs (4 CV channels, 4 gate channels)."""
    global patch, name, path, monitor
    global duration_scale, duration_offset, serialdevice, s, lock, trigger, chanindx, chanstr, redischannel, thread
    # Continuous-voltage (CV) outputs.
    for chanindx in range(1, 5):
        chanstr = ('cv%d' % chanindx)
        chanval = patch.getfloat('input', chanstr)
        # Bug fix: use `is None` rather than `== None` for the None test.
        if (chanval is None):
            monitor.trace((chanstr + ' is not available'))
            continue
        scale = patch.getfloat('scale', chanstr, default=4095)
        offset = patch.getfloat('offset', chanstr, default=0)
        chanval = EEGsynth.rescale(chanval, slope=scale, offset=offset)
        # Clamp to the 12-bit DAC range.
        chanval = EEGsynth.limit(chanval, lo=0, hi=4095)
        chanval = int(chanval)
        SetControl(chanindx, chanval)
        monitor.update(chanstr, chanval)
    # Binary gate outputs.
    for chanindx in range(1, 5):
        chanstr = ('gate%d' % chanindx)
        chanval = patch.getfloat('input', chanstr)
        if (chanval is None):
            # Bug fix: the message lacked the leading space present in the
            # cv branch ('gateNis not available').
            monitor.trace((chanstr + ' is not available'))
            continue
        scale = patch.getfloat('scale', chanstr, default=4095)
        offset = patch.getfloat('offset', chanstr, default=0)
        chanval = EEGsynth.rescale(chanval, slope=scale, offset=offset)
        # Gates are boolean: any positive value switches the gate on.
        chanval = int((chanval > 0))
        SetGate(chanindx, chanval)
        monitor.update(chanstr, chanval)
class TestRunFailsWhenConnectionClassNotPresent(AEATestCaseEmpty):
    """`aea run` must fail with a clear error when a connection module
    exists but does not define the expected connection class."""

    def setup_class(cls):
        """Add the http_client connection, then blank out its connection.py."""
        super().setup_class()
        cls.connection_id = str(HTTP_ClIENT_PUBLIC_ID)
        # Bug fix: this assignment was truncated in this copy; restored the
        # connection package name, which must match the vendor path below.
        cls.connection_name = 'http_client'
        cls.generate_private_key(FetchAICrypto.identifier)
        cls.add_private_key(FetchAICrypto.identifier)
        cls.add_item('connection', cls.connection_id)
        cls.set_config('agent.default_connection', cls.connection_id)
        # Empty the module so the connection class cannot be found at load time.
        Path(cls.t, cls.agent_name, 'vendor', 'fetchai', 'connections', cls.connection_name, 'connection.py').write_text('')

    def test_run(self):
        """Running the agent should raise a package-loading ClickException."""
        expected_message = "Package loading error: An error occurred while loading connection {}: Connection class '{}' not found.".format(self.connection_id, 'HTTPClientConnection')
        with pytest.raises(ClickException, match=expected_message):
            self.run_cli_command('--skip-consistency-check', 'run', '--connections', self.connection_id, cwd=self._get_cwd())
def create_git_index(client, index):
    """Create the demo *git* index.

    Configures a path-hierarchy analyzer for file names, a repo/commit
    join field, and shared author/committer/owner user mappings.
    """
    # One shared mapping object for author, committer, and owner.
    user_mapping = {'properties': {'name': {'type': 'text', 'fields': {'raw': {'type': 'keyword'}}}}}
    index_settings = {'number_of_shards': 1, 'number_of_replicas': 0, 'analysis': {'analyzer': {'file_path': {'type': 'custom', 'tokenizer': 'path_hierarchy', 'filter': ['lowercase']}}}}
    index_mappings = {'properties': {'description': {'type': 'text', 'analyzer': 'snowball'}, 'commit_repo': {'type': 'join', 'relations': {'repo': 'commit'}}, 'author': user_mapping, 'authored_date': {'type': 'date'}, 'committer': user_mapping, 'committed_date': {'type': 'date'}, 'parent_shas': {'type': 'keyword'}, 'files': {'type': 'text', 'analyzer': 'file_path', 'fielddata': True}, 'is_public': {'type': 'boolean'}, 'owner': user_mapping, 'created_at': {'type': 'date'}, 'tags': {'type': 'keyword'}}}
    client.indices.create(index=index, body={'settings': index_settings, 'mappings': index_mappings})
def schedule_verbose(sched: str) -> str:
    """Translate a pipe-delimited schedule string into a readable sentence.

    Format: '<created>|<due>|<type>' where <type> is a two-letter code
    ('wd' weekdays, 'id' every-N-days, 'td' in-N-days, 'gd' growing
    interval) followed by one separator character and a value.
    Returns None for unrecognized type codes (original behavior kept).
    """
    parts = sched.split('|')
    created = parts[0]
    stype = parts[2][0:2]
    stype_val = parts[2][3:]
    if stype == 'wd':
        # Weekday schedule: the value is a string of weekday digits.
        days = ', '.join([weekday_name_abbr(int(c)) for c in stype_val])
        return f'Scheduled for every {days}.'
    if stype == 'id':
        # Fixed interval in days.
        if stype_val == '2':
            return 'Scheduled for every second day.'
        if stype_val == '1':
            return 'Scheduled to appear everyday.'
        return f'Scheduled to appear every {stype_val} days.'
    if stype == 'td':
        # One-shot "appear in N days from creation" schedule.
        delta_days = (datetime.now().date() - dt_from_stamp(created).date()).days
        if delta_days == 0:
            if stype_val == '1':
                return 'Scheduled today to appear tomorrow.'
            return f'Scheduled today to appear in {stype_val} day(s).'
        if delta_days == 1:
            if stype_val == '1':
                return 'Scheduled yesterday to appear today.'
            # Bug fix: the value is a string, so compare against '2';
            # the original compared against int 2, which never matched.
            if stype_val == '2':
                return 'Scheduled yesterday to appear tomorrow.'
            return f'Scheduled yesterday to appear in {stype_val} day(s).'
        return f'Scheduled {delta_days} days ago to appear in {stype_val} day(s).'
    if stype == 'gd':
        # Growing-interval schedule: value is 'factor;...'.
        factor = stype_val.split(';')[0]
        return f'Scheduled with growing interval (factor {round(float(factor), 1)})'
class RunBenchUnitTest(unittest.TestCase):
    """Unit tests for RunBench's parsing of unknown CLI arguments."""
    def setUp(self):
        self.app = RunBench()
    def test_getUnknownArgs(self):
        """_getUnknownArgs pairs each flag with its single following value.

        Flags with no following value map to None; only the first value
        after a flag is kept (extra positionals are dropped).
        """
        self.app.unknowns = '--remote -b /home/mobilenet_v3.json --platform android --framework pytorch --devices SM-G981U1-11-30 --buck_target'.split()
        expected = {'--remote': None, '-b': '/home/mobilenet_v3.json', '--platform': 'android', '--framework': 'pytorch', '--devices': 'SM-G981U1-11-30', '--buck_target': None}
        self.assertEqual(self.app._getUnknownArgs(), expected)
        # A value can follow any flag; a flag directly followed by another
        # flag keeps None.
        self.app.unknowns = '--remote True -b --platform android --framework pytorch --devices SM-G981U1-11-30 Pixel_5 --buck_target'.split()
        expected = {'--remote': 'True', '-b': None, '--platform': 'android', '--framework': 'pytorch', '--devices': 'SM-G981U1-11-30', '--buck_target': None}
        self.assertEqual(self.app._getUnknownArgs(), expected)
        # All-flags input: every entry maps to None.
        self.app.unknowns = '--remote -b --platform --framework -d --buck_target -c -e'.split()
        expected = {'--remote': None, '-b': None, '--platform': None, '--framework': None, '-d': None, '--buck_target': None, '-c': None, '-e': None}
        self.assertEqual(self.app._getUnknownArgs(), expected)
        # Trailing positional without a preceding flag is discarded.
        self.app.unknowns = '--remote -b --platform android --framework pytorch --devices SM-G981U1-11-30,Pixel_5 SM-A530W-9-28'.split()
        expected = {'--remote': None, '-b': None, '--platform': 'android', '--framework': 'pytorch', '--devices': 'SM-G981U1-11-30,Pixel_5'}
        self.assertEqual(self.app._getUnknownArgs(), expected)
def main():
    """Ansible module entry point for fortios firewall_internet_service.

    Builds the argument spec from the versioned schema, connects over the
    httpapi socket, verifies schema/version compatibility, applies the
    requested state, and exits with changed/diff info or a failure message.
    """
    module_spec = schema_to_module_spec(versioned_schema)
    mkeyname = 'id'
    fields = {'access_token': {'required': False, 'type': 'str', 'no_log': True}, 'enable_log': {'required': False, 'type': 'bool', 'default': False}, 'vdom': {'required': False, 'type': 'str', 'default': 'root'}, 'member_path': {'required': False, 'type': 'str'}, 'member_state': {'type': 'str', 'required': False, 'choices': ['present', 'absent']}, 'state': {'required': True, 'type': 'str', 'choices': ['present', 'absent']}, 'firewall_internet_service': {'required': False, 'type': 'dict', 'default': None, 'options': {}}}
    # Mirror the generated schema options into the module's nested spec;
    # the mkey attribute ('id') becomes required.
    for attribute_name in module_spec['options']:
        fields['firewall_internet_service']['options'][attribute_name] = module_spec['options'][attribute_name]
        if (mkeyname and (mkeyname == attribute_name)):
            fields['firewall_internet_service']['options'][attribute_name]['required'] = True
    module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
    check_legacy_fortiosapi(module)
    is_error = False
    has_changed = False
    result = None
    diff = None
    versions_check_result = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        if ('access_token' in module.params):
            connection.set_option('access_token', module.params['access_token'])
        if ('enable_log' in module.params):
            connection.set_option('enable_log', module.params['enable_log'])
        else:
            connection.set_option('enable_log', False)
        fos = FortiOSHandler(connection, module, mkeyname)
        versions_check_result = check_schema_versioning(fos, versioned_schema, 'firewall_internet_service')
        (is_error, has_changed, result, diff) = fortios_firewall(module.params, fos, module.check_mode)
    else:
        # No httpapi connection available — cannot talk to the device.
        module.fail_json(**FAIL_SOCKET_MSG)
    # A version mismatch is surfaced as a warning on success and attached
    # to the result on both success and failure.
    if (versions_check_result and (versions_check_result['matched'] is False)):
        module.warn('Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv')
    if (not is_error):
        if (versions_check_result and (versions_check_result['matched'] is False)):
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result, diff=diff)
        else:
            module.exit_json(changed=has_changed, meta=result, diff=diff)
    elif (versions_check_result and (versions_check_result['matched'] is False)):
        module.fail_json(msg='Error in repo', version_check_warning=versions_check_result, meta=result)
    else:
        module.fail_json(msg='Error in repo', meta=result)
class ComponentParser(LoggingConfigurable):
    """Abstract base for platform-specific pipeline component parsers.

    Concrete subclasses (Airflow/KFP) set their platform and file types and
    implement parse().  NOTE(review): `create_instance` takes `cls` and
    `file_types` is accessor-shaped; their @classmethod/@property decorators
    appear stripped in this copy — confirm against upstream elyra.
    """
    # Runtime platform this parser targets; set by subclasses.
    component_platform: RuntimeProcessorType = None
    # File extensions the parser accepts; set by subclasses.
    _file_types: List[str] = None
    # Platform name -> 'module:Class' of the concrete parser implementation.
    _parser_class_map: Dict[(str, str)] = {'APACHE_AIRFLOW': 'elyra.pipeline.airflow.component_parser_airflow:AirflowComponentParser', 'KUBEFLOW_PIPELINES': 'elyra.pipeline.kfp.component_parser_kfp:KfpComponentParser'}
    def create_instance(cls, platform: RuntimeProcessorType) -> ComponentParser:
        """Instantiate the parser registered for `platform`.

        Raises RuntimeError when the platform is unknown or the parser
        class cannot be imported.
        """
        try:
            (module_name, class_name) = cls._parser_class_map[platform.name].split(':')
            module = import_module(module_name)
            return getattr(module, class_name)()
        except Exception as e:
            raise RuntimeError(f'Could not get appropriate ComponentParser class: {e}')
    def file_types(self) -> List[str]:
        """Return the file extensions this parser accepts."""
        return self._file_types
    def parse(self, catalog_entry: catalog_connector.CatalogEntry) -> Optional[List[Component]]:
        """Produce Components from a catalog entry; implemented by subclasses."""
        raise NotImplementedError()
    def _format_description(self, description: str, data_type: str) -> str:
        """Append the data type to a (possibly empty) property description."""
        if description:
            return f'{description} (type: {data_type})'
        return f'(type: {data_type})'
    def determine_type_information(self, parsed_type: str) -> 'PropertyTypeInfo':
        """Map a raw type string to JSON-schema type information.

        Container words map to object/array (with empty default values),
        scalar words map to string/number/boolean, and anything else is
        marked undetermined.
        """
        parsed_type_lowered = parsed_type.lower()
        container_types = ['dictionary', 'dict', 'set', 'list', 'array', 'arr']
        for option in container_types:
            if (option in parsed_type_lowered):
                data_type = option
                if (data_type in ['dict', 'dictionary']):
                    data_type = 'object'
                    default_value = {}
                else:
                    # Sets, lists, and arrays all serialize as JSON arrays.
                    data_type = 'array'
                    default_value = []
                data_type_info = PropertyTypeInfo(parsed_data=parsed_type_lowered, json_data_type=data_type, default_value=default_value)
                break
        else:
            # Not a container: try the scalar types in turn.
            if any(((word in parsed_type_lowered) for word in ['str', 'string'])):
                data_type_info = PropertyTypeInfo(parsed_data=parsed_type_lowered, json_data_type='string')
            elif any(((word in parsed_type_lowered) for word in ['int', 'integer', 'number'])):
                data_type_info = PropertyTypeInfo(parsed_data=parsed_type_lowered, json_data_type='number', default_value=0)
            elif any(((word in parsed_type_lowered) for word in ['float'])):
                data_type_info = PropertyTypeInfo(parsed_data=parsed_type_lowered, json_data_type='number', default_value=0.0)
            elif any(((word in parsed_type_lowered) for word in ['bool', 'boolean'])):
                data_type_info = PropertyTypeInfo(parsed_data=parsed_type_lowered, json_data_type='boolean', default_value=False)
            else:
                data_type_info = PropertyTypeInfo(parsed_data=parsed_type_lowered, undetermined=True)
        # Imported here to avoid a circular import with the processor module.
        from elyra.pipeline.processor import PipelineProcessorManager
        if PipelineProcessorManager.instance().supports_pipeline_params(runtime_type=self.component_platform):
            data_type_info.allowed_input_types.append('parameter')
        return data_type_info
def test_unique_cache_keys_created_per_thread_with_same_uri():
    """Two threads hitting the same URI must each get their own cached session."""
    with ThreadPoolExecutor(max_workers=2) as exc:
        test_sessions = [exc.submit(_simulate_call, TEST_URI) for _ in range(2)]
        # One cache entry per thread, even though the URI is identical.
        assert (len(request._session_cache._data) == 2)
        # Close the sessions so connections do not leak between tests.
        [session.result().close() for session in test_sessions]
def run_debug(computer: Computer):
    """Print every detected field of *computer* for debugging.

    NOTE(review): indentation was lost in this copy; the nesting of the
    host/battery/resolution section was reconstructed — confirm upstream.
    """
    print('----out.py----\n')
    print('----DE/WM----')
    # Desktop-environment/window-manager info is only meaningful off Windows.
    if (not computer.neofetchwin):
        print(('deid: %s' % computer.deid))
        print(('wmid: %s' % computer.wmid))
        try:
            print(('wmline item 0: %s' % computer.wm))
        except IndexError:
            pass
    print('\n----TERMINAL----\n')
    print(('fontline: %s' % computer.font))
    print(('termid: %s' % computer.terminalid))
    print(('termline item 0: %s' % computer.terminal))
    print(('themeline: %s' % computer.theme))
    if (computer.host != 'Host: N/A'):
        print('\n----HOST INFO----\n')
        print(('hostline: %s' % computer.host))
        # Battery is printed only when it differs from the host fallback.
        if (computer.battery != computer.host):
            print(('batteryline: %s' % computer.battery))
        print(('resline: %s' % computer.resolution))
    print('\n----GPU INFO----\n')
    print(('gpuinfo: %s' % computer.gpu))
    print(('gpuvendor: %s' % computer.gpuid))
    print('\n----CPU INFO----\n')
    cpu: List[Cpu_interface] = computer.get_component('CPU:')
    if cpu:
        # Only the first detected CPU is described in detail.
        print(('cpuvendor: %s' % cpu[0].vendor))
        print(('cpumodel: %s' % cpu[0].model))
        print(('cpuinfo: %s' % cpu[0].info))
    print(('cpuline item 0: %s' % computer.cpu))
    print(('memline: %s' % computer.memory))
    print('\n----OS INFO----\n')
    print(('sysosline: %s' % computer.osinfo))
    print(('sysosid: %s' % computer.osinfoid))
    print(('diskline: %s' % computer.disks))
    # Package counts are not collected on Windows.
    if (computer.os != 'windows'):
        print(('packagesline item 0: %s' % computer.packages))
class SelectionSort(object):
    """Selection sort in three flavors: a direct in-place loop, a loop
    using the shared helpers, and a recursive version. All sort ascending
    in place and return the list; None input raises TypeError.
    """

    def sort(self, data):
        """Sort `data` in place with an inline selection-sort loop."""
        if (data is None):
            raise TypeError('data cannot be None')
        if (len(data) < 2):
            return data
        for i in range((len(data) - 1)):
            min_index = i
            for j in range((i + 1), len(data)):
                if (data[j] < data[min_index]):
                    min_index = j
            # Swap only when a strictly smaller element was found.
            if (data[min_index] < data[i]):
                (data[i], data[min_index]) = (data[min_index], data[i])
        return data

    def sort_iterative_alt(self, data):
        """Same algorithm expressed via _find_min_index/_swap helpers."""
        if (data is None):
            raise TypeError('data cannot be None')
        if (len(data) < 2):
            return data
        for i in range((len(data) - 1)):
            self._swap(data, i, self._find_min_index(data, i))
        return data

    def sort_recursive(self, data):
        """Recursive selection sort; validates input then delegates."""
        if (data is None):
            raise TypeError('data cannot be None')
        if (len(data) < 2):
            return data
        return self._sort_recursive(data, start=0)

    def _sort_recursive(self, data, start):
        # Select the minimum of data[start:] into position start, then recurse.
        if (data is None):
            return
        if (start < (len(data) - 1)):
            # Bug fix: called the undefined global `swap`; use the
            # `self._swap` helper defined below.
            self._swap(data, start, self._find_min_index(data, start))
            self._sort_recursive(data, (start + 1))
        return data

    def _find_min_index(self, data, start):
        """Return the index of the smallest element in data[start:]."""
        min_index = start
        for i in range((start + 1), len(data)):
            if (data[i] < data[min_index]):
                min_index = i
        return min_index

    def _swap(self, data, i, j):
        """Swap data[i] and data[j] in place (no-op when i == j)."""
        if (i != j):
            (data[i], data[j]) = (data[j], data[i])
        return data
_converter(acc_ops.slice_tensor)
def acc_ops_slice(target: Target, args: Tuple[(Argument, ...)], kwargs: Dict[(str, Argument)], name: str) -> ConverterOutput:
    """Convert acc_ops.slice_tensor to an AIT dynamic_slice.

    Handles Python-style indexing: ints (squeezed after slicing), slices
    with step 1, Ellipsis (expanded to full slices), and None (unsqueezed
    as a new axis). Plain tuples/lists are indexed directly.
    NOTE(review): `_converter(...)` looks like a stripped registration
    decorator — confirm against upstream fx2ait.
    """
    input_val = kwargs['input']
    idx = kwargs['idx']
    # Plain Python containers can be indexed directly; no AIT op needed.
    if isinstance(input_val, (tuple, list)):
        return operator.getitem(input_val, idx)
    if (not isinstance(input_val, AITTensor)):
        raise ValueError(f'Unexpected input for {name}: {input_val}')
    rank = input_val._rank()
    if (not isinstance(idx, Sequence)):
        idx = [idx]
    op = dynamic_slice()
    def num_slice_types(slices):
        # Count entries that actually consume an input dimension.
        return sum((1 for s in slices if (isinstance(s, slice) or isinstance(s, int))))
    # An Ellipsis stands for this many implicit full slices.
    num_ellipsis = (rank - num_slice_types(idx))
    expand_idx = []
    for i in idx:
        if (i == Ellipsis):
            for _ in range(0, num_ellipsis):
                expand_idx.append(slice(None, None, None))
        else:
            expand_idx.append(i)
    idx = expand_idx
    # (dim, func) pairs to apply after slicing: squeeze for int indices,
    # unsqueeze for None entries.
    squeezable_indices = []
    num_none_indices = 0
    (start, end) = ([], [])
    for (index, i) in enumerate(idx):
        if (i is None):
            # None adds a new axis; it does not consume an input dim.
            squeezable_indices.append((index, unsqueeze))
            num_none_indices += 1
            continue
        if isinstance(i, int):
            if isinstance(input_val.shape()[index], IntImm):
                # Normalize negative indices against the static dim size.
                i = get_positive_dim(i, input_val.shape()[index].value())
            # Offset by preceding None entries, which shift later dims.
            squeezable_indices.append(((index - num_none_indices), squeeze))
        if (isinstance(i, slice) and (i.step not in (1, None))):
            raise ValueError(f'Slice tensor only support step=1 case, get step={i.step}.')
        start.append((i.start if isinstance(i, slice) else i))
        end.append((i.stop if isinstance(i, slice) else ((i + 1) if (i is not None) else i)))
    # Pad remaining dims with full-range slices.
    while (len(start) < rank):
        start.append(0)
        end.append(None)
    output = op(input_val, start, end)
    # Apply squeezes/unsqueezes from the back so earlier dims stay valid.
    for (dim, squeeze_func) in reversed(squeezable_indices):
        if ((dim > rank) and (squeeze_func == unsqueeze)):
            dim = (- 1)
        output = squeeze_func(dim)(output)
    return output
.django_db
def test_date_range_search_with_two_ranges(client, monkeypatch, elasticsearch_award_index, awards_over_different_date_ranges):
    """spending_by_award must OR together multiple time_period ranges.

    NOTE(review): the bare `.django_db` line looks like a stripped
    `@pytest.mark.django_db` decorator — confirm upstream.
    """
    setup_elasticsearch_test(monkeypatch, elasticsearch_award_index)
    contract_type_list = all_award_types_mappings['contracts']
    grants_type_list = all_award_types_mappings['grants']
    # Two wide ranges: 13 contract awards fall inside them.
    request_with_contracts = {'subawards': False, 'fields': ['Award ID'], 'sort': 'Award ID', 'limit': 50, 'page': 1, 'filters': {'time_period': [{'start_date': '2015-01-01', 'end_date': '2015-12-31'}, {'start_date': '2017-02-01', 'end_date': '2017-11-30'}], 'award_type_codes': contract_type_list}}
    resp = client.post('/api/v2/search/spending_by_award/', content_type='application/json', data=json.dumps(request_with_contracts))
    assert (resp.status_code == status.HTTP_200_OK)
    assert (len(resp.data['results']) == 13)
    # Same ranges, grant award types: also 13 results.
    request_with_grants = {'subawards': False, 'fields': ['Award ID'], 'sort': 'Award ID', 'limit': 50, 'page': 1, 'filters': {'time_period': [{'start_date': '2015-01-01', 'end_date': '2015-12-31'}, {'start_date': '2017-02-01', 'end_date': '2017-11-30'}], 'award_type_codes': grants_type_list}}
    resp = client.post('/api/v2/search/spending_by_award/', content_type='application/json', data=json.dumps(request_with_grants))
    assert (resp.status_code == status.HTTP_200_OK)
    assert (len(resp.data['results']) == 13)
    # Two narrow ranges that each catch exactly one award.
    request_for_two_awards = {'subawards': False, 'fields': ['Award ID'], 'sort': 'Award ID', 'limit': 50, 'page': 1, 'filters': {'time_period': [{'start_date': '2014-01-03', 'end_date': '2014-01-08'}, {'start_date': '2018-06-01', 'end_date': '2018-06-23'}], 'award_type_codes': grants_type_list}}
    resp = client.post('/api/v2/search/spending_by_award/', content_type='application/json', data=json.dumps(request_for_two_awards))
    assert (resp.status_code == status.HTTP_200_OK)
    assert (len(resp.data['results']) == 2)
    assert (resp.data['results'] == [{'Award ID': 'xyz44', 'internal_id': 44, 'generated_internal_id': 'AWARD_44'}, {'Award ID': 'xyz33', 'internal_id': 33, 'generated_internal_id': 'AWARD_33'}])
    # Ranges that match nothing must still return 200 (with empty results).
    request_for_no_awards = {'subawards': False, 'fields': ['Award ID'], 'sort': 'Award ID', 'limit': 50, 'page': 1, 'filters': {'time_period': [{'start_date': '2013-01-03', 'end_date': '2013-01-08'}, {'start_date': '2019-06-01', 'end_date': '2019-06-23'}], 'award_type_codes': grants_type_list}}
    resp = client.post('/api/v2/search/spending_by_award/', content_type='application/json', data=json.dumps(request_for_no_awards))
    assert (resp.status_code == status.HTTP_200_OK)
def same_dicts(dict1, dict2):
    """Deep-compare two dictionaries, treating T1CharString values as
    equal when their compiled bytecode matches.

    Nested dicts recurse; lists are compared element-wise (charstring-aware
    at the top level of each list only); everything else uses ==.
    """
    if dict1.keys() != dict2.keys():
        return False

    def _charstring_eq(a, b):
        # Charstrings must be compiled before bytecode can be compared.
        a.compile()
        b.compile()
        return a.bytecode == b.bytecode

    for key in dict1:
        value, other = dict1[key], dict2[key]
        if isinstance(value, dict):
            if not same_dicts(value, other):
                return False
        elif isinstance(value, list):
            if len(value) != len(other):
                return False
            for left, right in zip(value, other):
                if isinstance(left, T1CharString):
                    if not _charstring_eq(left, right):
                        return False
                elif left != right:
                    return False
        elif isinstance(value, T1CharString):
            if not _charstring_eq(value, other):
                return False
        elif value != other:
            return False
    return True
class Status(Enum):
    """Server status flags carried in the protocol's status field.

    Values are distinct powers of two, i.e. bit positions of a bitmask.
    NOTE(review): as a plain Enum (not IntFlag) members cannot be OR-ed
    directly; confirm whether IntFlag was intended for mask handling.
    """
    STATUS_IN_TRANS = 1
    STATUS_AUTOCOMMIT = 2
    MORE_RESULTS_EXISTS = 8
    STATUS_NO_GOOD_INDEX_USED = 16
    STATUS_NO_INDEX_USED = 32
    STATUS_CURSOR_EXISTS = 64
    STATUS_LAST_ROW_SENT = 128
    STATUS_DB_DROPPED = 256
    STATUS_NO_BACKSLASH_ESCAPES = 512
    STATUS_METADATA_CHANGED = 1024
    QUERY_WAS_SLOW = 2048
    PS_OUT_PARAMS = 4096
    STATUS_IN_TRANS_READONLY = 8192
    SESSION_STATE_CHANGED = 16384
class BudgetaryResources(AgencyBase):
    """API view returning an agency's budgetary resources by fiscal year."""
    endpoint_doc = 'usaspending_api/api_contracts/contracts/v2/agency/toptier_code/budgetary_resources.md'
    _response()
    def get(self, request, *args, **kwargs):
        """Return the per-year budgetary resources payload for the agency."""
        return Response({'toptier_code': self.toptier_agency.toptier_code, 'agency_data_by_year': self.get_agency_budgetary_resources(), 'messages': self.standard_response_messages})
    def get_total_federal_budgetary_resources(self):
        """Sum government-wide budgetary resources per fiscal year.

        Only counts GTAS balances for the latest revealed fiscal period of
        each submission-window year.
        """
        submission_windows = DABSSubmissionWindowSchedule.objects.filter(submission_reveal_date__lte=now()).values('submission_fiscal_year').annotate(fiscal_year=F('submission_fiscal_year'), fiscal_period=Max('submission_fiscal_month'))
        # OR together one (year, period) pair per revealed window.
        q = Q()
        for sub in submission_windows:
            q |= (Q(fiscal_year=sub['fiscal_year']) & Q(fiscal_period=sub['fiscal_period']))
        results = GTASSF133Balances.objects.filter(q).values('fiscal_year').annotate(total_budgetary_resources=Sum('total_budgetary_resources_cpe')).values('fiscal_year', 'total_budgetary_resources')
        return results
    def get_periods_by_year(self):
        """Map fiscal year -> list of {period, obligated} for this agency."""
        periods = {}
        fabpaoc = FinancialAccountsByProgramActivityObjectClass.objects.filter(treasury_account__funding_toptier_agency=self.toptier_agency, submission__submission_window__submission_reveal_date__lte=now()).values('submission__reporting_fiscal_year', 'submission__reporting_fiscal_period').annotate(fiscal_year=F('submission__reporting_fiscal_year'), fiscal_period=F('submission__reporting_fiscal_period'), obligation_sum=Sum('obligations_incurred_by_program_object_class_cpe')).order_by('fiscal_year', 'fiscal_period')
        for val in fabpaoc:
            # Before FY2022 only quarterly periods (3, 6, 9, 12) are reported.
            if ((val['fiscal_year'] < 2022) and (val['fiscal_period'] not in (3, 6, 9, 12))):
                continue
            if (periods.get(val['fiscal_year']) is not None):
                periods[val['fiscal_year']].append({'period': val['fiscal_period'], 'obligated': val['obligation_sum']})
            else:
                periods.update({val['fiscal_year']: [{'period': val['fiscal_period'], 'obligated': val['obligation_sum']}]})
        return periods
    def get_agency_budgetary_resources(self):
        """Build the per-year results list (newest year first).

        Years with no agency submission still appear, carrying the
        government-wide total and null agency figures.
        """
        aab = AppropriationAccountBalances.objects.filter(treasury_account_identifier__funding_toptier_agency=self.toptier_agency, submission__submission_window__submission_reveal_date__lte=now(), submission__is_final_balances_for_fy=True).values('submission__reporting_fiscal_year').annotate(agency_budgetary_resources=Sum('total_budgetary_resources_amount_cpe'))
        aab_by_year = {val['submission__reporting_fiscal_year']: val for val in aab}
        fabpaoc = FinancialAccountsByProgramActivityObjectClass.objects.filter(treasury_account__funding_toptier_agency=self.toptier_agency, submission__submission_window__submission_reveal_date__lte=now(), submission__is_final_balances_for_fy=True).values('submission__reporting_fiscal_year').annotate(agency_total_obligated=Sum('obligations_incurred_by_program_object_class_cpe'))
        fabpaoc_by_year = {val['submission__reporting_fiscal_year']: val for val in fabpaoc}
        fbr = self.get_total_federal_budgetary_resources()
        resources = {val['fiscal_year']: val['total_budgetary_resources'] for val in fbr}
        periods_by_year = self.get_periods_by_year()
        results = []
        for year in range(2017, (current_fiscal_year() + 1)):
            if (year not in aab_by_year):
                results.append({'fiscal_year': year, 'agency_budgetary_resources': None, 'agency_total_obligated': None, 'total_budgetary_resources': resources.get(year), 'agency_obligation_by_period': []})
            else:
                results.append({'fiscal_year': year, 'agency_budgetary_resources': aab_by_year[year]['agency_budgetary_resources'], 'agency_total_obligated': fabpaoc_by_year.get(year, {}).get('agency_total_obligated'), 'total_budgetary_resources': resources.get(year), 'agency_obligation_by_period': periods_by_year.get(year, [])})
        return sorted(results, key=(lambda x: x['fiscal_year']), reverse=True)
# NOTE(review): a bare name on its own line has no runtime effect — this looks
# like a decorator whose leading "@" was lost in extraction; confirm against
# the original source.
_comparable
class Post(HideableCRUDMixin, db.Model):
    """A single post in a forum topic.

    Inherits soft-delete ("hidden") behaviour from HideableCRUDMixin and
    maintains the denormalized post counters and last-post pointers that
    live on the owning Topic and Forum rows.
    """
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    # use_alter defers creation of this FK to break the circular dependency
    # between the posts and topics tables.
    topic_id = db.Column(db.Integer, db.ForeignKey('topics.id', ondelete='CASCADE', use_alter=True), nullable=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=True)
    # Denormalized author name — required even if user_id is null.
    username = db.Column(db.String(200), nullable=False)
    content = db.Column(db.Text, nullable=False)
    date_created = db.Column(UTCDateTime(timezone=True), default=time_utcnow, nullable=False)
    date_modified = db.Column(UTCDateTime(timezone=True), nullable=True)
    modified_by = db.Column(db.String(200), nullable=True)

    def url(self):
        """Return the URL of this post's view page."""
        return url_for('forum.view_post', post_id=self.id)

    def __init__(self, content=None, user=None, topic=None):
        """Initialize the post; fields may also be supplied later via save().

        :param content: the raw post body.
        :param user: the authoring user; copies its id and username.
        :param topic: the owning topic, either a Topic instance or a raw id.
        """
        if content:
            self.content = content
        if user:
            self.user_id = user.id
            self.username = user.username
        if topic:
            # Accept either a topic id or a Topic object.
            self.topic_id = (topic if isinstance(topic, int) else topic.id)
        self.date_created = time_utcnow()

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, self.id)

    def is_first_post(self):
        """Return True if this post opens its topic."""
        return self.topic.is_first_post(self)

    def save(self, user=None, topic=None):
        """Save a new post (requires user and topic) or update an existing one.

        Fires the flaskbb post-save plugin hooks before and after the commit.
        For a brand-new post on a visible topic, also advances the topic's
        and forum's last-post pointers and increments the post counters.

        :param user: the authoring user (new posts only).
        :param topic: the owning topic (new posts only).
        :returns: the saved post.
        """
        current_app.pluggy.hook.flaskbb_event_post_save_before(post=self)
        if self.id:
            # Existing post: just persist the changes.
            db.session.add(self)
            db.session.commit()
            current_app.pluggy.hook.flaskbb_event_post_save_after(post=self, is_new=False)
            return self
        if (user and topic):
            # no_autoflush: relationship assignments below would otherwise
            # flush a half-initialized row to the database.
            with db.session.no_autoflush:
                created = time_utcnow()
                self.user = user
                self.username = user.username
                self.topic = topic
                self.date_created = created
                if (not topic.hidden):
                    # Advance the "last post" pointers on topic and forum.
                    topic.last_updated = created
                    topic.last_post = self
                    topic.forum.last_post = self
                    topic.forum.last_post_user = self.user
                    topic.forum.last_post_title = topic.title
                    topic.forum.last_post_username = user.username
                    topic.forum.last_post_created = created
                    # Keep the denormalized counters in sync.
                    user.post_count += 1
                    topic.post_count += 1
                    topic.forum.post_count += 1
            db.session.add(self)
            db.session.commit()
            current_app.pluggy.hook.flaskbb_event_post_save_after(post=self, is_new=True)
            return self

    def delete(self):
        """Delete this post, fixing up last-post pointers and counters.

        Deleting a topic's first post deletes the whole topic instead.

        :returns: self.
        """
        # The first post cannot exist without its topic — delegate.
        if (self.topic.first_post == self):
            self.topic.delete()
            return self
        db.session.delete(self)
        self._deal_with_last_post()
        self._update_counts()
        db.session.commit()
        return self

    def hide(self, user):
        """Soft-delete this post, recording `user` as the hider.

        Hiding a topic's first post hides the whole topic instead.
        No-op (returns None) if the post is already hidden.
        """
        if self.hidden:
            return
        if (self.topic.first_post == self):
            self.topic.hide(user)
            return self
        super(Post, self).hide(user)
        self._deal_with_last_post()
        self._update_counts()
        db.session.commit()
        return self

    def unhide(self):
        """Reverse a hide(); unhiding a first post unhides the whole topic.

        No-op (returns None) if the post is not hidden.
        """
        if (not self.hidden):
            return
        if (self.topic.first_post == self):
            self.topic.unhide()
            return self
        self._restore_post_to_topic()
        super(Post, self).unhide()
        self._update_counts()
        db.session.commit()
        return self

    def _deal_with_last_post(self):
        """Repoint topic/forum last-post references away from this post.

        Called while this post is being deleted or hidden; only acts when
        this post currently is the topic's last post.
        """
        if (self.topic.last_post == self):
            if (self.topic.last_post == self.topic.forum.last_post):
                # This post is also the forum's last post — find the newest
                # visible post in the forum that is not this one.
                second_last_post = Post.query.filter((Post.topic_id == Topic.id), (Topic.forum_id == self.topic.forum.id), (Post.hidden != True), (Post.id != self.id)).order_by(Post.id.desc()).limit(1).first()
                if second_last_post:
                    self.topic.forum.last_post = second_last_post
                    self.topic.forum.last_post_title = second_last_post.topic.title
                    self.topic.forum.last_post_user = second_last_post.user
                    self.topic.forum.last_post_username = second_last_post.username
                    self.topic.forum.last_post_created = second_last_post.date_created
                else:
                    # No other visible post in the forum at all.
                    self.topic.forum.last_post = None
                    self.topic.forum.last_post_title = None
                    self.topic.forum.last_post_user = None
                    self.topic.forum.last_post_username = None
                    self.topic.forum.last_post_created = None
            # Fall back to the topic's second-last post, or its first post.
            if (self.topic.second_last_post is not None):
                self.topic.last_post_id = self.topic.second_last_post
            else:
                self.topic.last_post = self.topic.first_post
            self.topic.last_updated = self.topic.last_post.date_created

    def _update_counts(self):
        """Recompute user/topic/forum post counts from scratch.

        The filter clauses depend on this post's hidden state because the
        session may not have flushed yet when this runs.
        """
        if self.hidden:
            # Exclude hidden posts AND this (just-hidden) post itself.
            clauses = [(Post.hidden != True), (Post.id != self.id)]
        else:
            # Count visible posts, but always include this (just-unhidden) one.
            clauses = [db.or_((Post.hidden != True), (Post.id == self.id))]
        user_post_clauses = (clauses + [(Post.user_id == self.user.id), (Topic.id == Post.topic_id), (Topic.hidden != True)])
        self.user.post_count = Post.query.filter(*user_post_clauses).count()
        if self.topic.hidden:
            self.topic.post_count = 0
        else:
            topic_post_clauses = (clauses + [(Post.topic_id == self.topic.id)])
            self.topic.post_count = Post.query.filter(*topic_post_clauses).count()
        forum_post_clauses = (clauses + [(Post.topic_id == Topic.id), (Topic.forum_id == self.topic.forum.id), (Topic.hidden != True)])
        self.topic.forum.post_count = Post.query.filter(*forum_post_clauses).count()

    def _restore_post_to_topic(self):
        """Restore last-post pointers when this post is being unhidden."""
        last_unhidden_post = Post.query.filter((Post.topic_id == self.topic_id), (Post.id != self.id), (Post.hidden != True)).limit(1).first()
        # If this post is newer than the newest visible post, it becomes
        # the topic's last post again.
        if (last_unhidden_post and (self.date_created > last_unhidden_post.date_created)):
            self.topic.last_post = self
            # NOTE(review): this assigns second_last_post on the Post, not on
            # self.topic — looks unintended; confirm against the Topic schema.
            self.second_last_post = last_unhidden_post
        if ((not self.topic.hidden) and ((not self.topic.forum.last_post) or (self.date_created > self.topic.forum.last_post.date_created))):
            self.topic.forum.last_post = self
            self.topic.forum.last_post_title = self.topic.title
            self.topic.forum.last_post_user = self.user
            self.topic.forum.last_post_username = self.username
            self.topic.forum.last_post_created = self.date_created
class OptionPlotoptionsVariwideDragdrop(Options):
    """Highcharts `plotOptions.variwide.dragDrop` configuration options.

    Each option appears as a pair of methods with the same name: a
    no-argument reader that returns the current (or default) value via
    `_config_get`, followed by a one-argument writer that stores the value
    via `_config`.

    NOTE(review): as written, each second `def` shadows the first at class
    creation time. In generated option wrappers like this these pairs are
    normally `@property` / `@<name>.setter` definitions — the decorators
    appear to have been lost in extraction; confirm against the original
    source before relying on reader behaviour.
    """

    def draggableX(self):
        # Default: horizontal dragging disabled (no value configured).
        return self._config_get(None)

    def draggableX(self, flag: bool):
        self._config(flag, js_type=False)

    def draggableY(self):
        # Default: vertical dragging disabled (no value configured).
        return self._config_get(None)

    def draggableY(self, flag: bool):
        self._config(flag, js_type=False)

    def dragHandle(self) -> 'OptionPlotoptionsVariwideDragdropDraghandle':
        # Nested options for the drag handle's appearance.
        return self._config_sub_data('dragHandle', OptionPlotoptionsVariwideDragdropDraghandle)

    def dragMaxX(self):
        return self._config_get(None)

    def dragMaxX(self, num: float):
        self._config(num, js_type=False)

    def dragMaxY(self):
        return self._config_get(None)

    def dragMaxY(self, num: float):
        self._config(num, js_type=False)

    def dragMinX(self):
        return self._config_get(None)

    def dragMinX(self, num: float):
        self._config(num, js_type=False)

    def dragMinY(self):
        return self._config_get(None)

    def dragMinY(self, num: float):
        self._config(num, js_type=False)

    def dragPrecisionX(self):
        # Default precision 0: no snapping along the X axis.
        return self._config_get(0)

    def dragPrecisionX(self, num: float):
        self._config(num, js_type=False)

    def dragPrecisionY(self):
        # Default precision 0: no snapping along the Y axis.
        return self._config_get(0)

    def dragPrecisionY(self, num: float):
        self._config(num, js_type=False)

    def dragSensitivity(self):
        # Default: drag starts after 2 pixels of mouse movement.
        return self._config_get(2)

    def dragSensitivity(self, num: float):
        self._config(num, js_type=False)

    def groupBy(self):
        return self._config_get(None)

    def groupBy(self, text: str):
        self._config(text, js_type=False)

    def guideBox(self) -> 'OptionPlotoptionsVariwideDragdropGuidebox':
        # Nested options for the guide box shown while dragging.
        return self._config_sub_data('guideBox', OptionPlotoptionsVariwideDragdropGuidebox)

    def liveRedraw(self):
        # Default: redraw the chart continuously while dragging.
        return self._config_get(True)

    def liveRedraw(self, flag: bool):
        self._config(flag, js_type=False)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.