code stringlengths 281 23.7M |
|---|
class Task():
    """Submitit task that bootstraps torch.distributed (NCCL) and runs a
    sanity all-reduce across all ranks.

    Supports submitit checkpointing by resubmitting itself unchanged.
    """

    def __call__(self):
        # Export MASTER_ADDR/MASTER_PORT/RANK/... so torch.distributed can
        # bootstrap from the submitit job environment.
        print('exporting PyTorch distributed environment variables')
        dist_env = submitit.helpers.TorchDistributedEnvironment().export()
        print(f'master: {dist_env.master_addr}:{dist_env.master_port}')
        print(f'rank: {dist_env.rank}')
        print(f'world size: {dist_env.world_size}')
        print(f'local rank: {dist_env.local_rank}')
        print(f'local world size: {dist_env.local_world_size}')
        torch.distributed.init_process_group(backend='nccl')
        # Sanity check: exported env must agree with the process group view.
        assert (dist_env.rank == torch.distributed.get_rank())
        assert (dist_env.world_size == torch.distributed.get_world_size())
        # Each rank contributes its rank id; the all-reduce sums them.
        tensor = (dist_env.rank * torch.ones(1).cuda())
        time.sleep(120)  # NOTE(review): presumably to exercise preemption/checkpointing -- confirm
        torch.distributed.all_reduce(tensor)
        if (dist_env.rank == 0):
            # Only rank 0 returns the reduced value (others return None).
            result = list(tensor)
            print(result)
            return result

    def checkpoint(self):
        """Requeue this task unchanged when preempted/timed out (submitit hook)."""
        print('checkpointing')
        return submitit.helpers.DelayedSubmission(self)
class OptionSeriesGaugeSonificationDefaultinstrumentoptionsMapping(Options):
    """Accessors for the gauge-series sonification default-instrument mapping
    option tree.

    Generated boilerplate: each method lazily creates/returns the matching
    sub-options object via ``_config_sub_data`` keyed by the option name.
    """

    def frequency(self) -> 'OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingFrequency':
        return self._config_sub_data('frequency', OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingFrequency)

    def gapBetweenNotes(self) -> 'OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingGapbetweennotes':
        return self._config_sub_data('gapBetweenNotes', OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingGapbetweennotes)

    def highpass(self) -> 'OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingHighpass':
        return self._config_sub_data('highpass', OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingHighpass)

    def lowpass(self) -> 'OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingLowpass':
        return self._config_sub_data('lowpass', OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingLowpass)

    def noteDuration(self) -> 'OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingNoteduration':
        return self._config_sub_data('noteDuration', OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingNoteduration)

    def pan(self) -> 'OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingPan':
        return self._config_sub_data('pan', OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingPan)

    def pitch(self) -> 'OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingPitch':
        return self._config_sub_data('pitch', OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingPitch)

    def playDelay(self) -> 'OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingPlaydelay':
        return self._config_sub_data('playDelay', OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingPlaydelay)

    def time(self) -> 'OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingTime':
        return self._config_sub_data('time', OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingTime)

    def tremolo(self) -> 'OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingTremolo':
        return self._config_sub_data('tremolo', OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingTremolo)

    def volume(self) -> 'OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingVolume':
        return self._config_sub_data('volume', OptionSeriesGaugeSonificationDefaultinstrumentoptionsMappingVolume)
class OptionPlotoptionsPictorialSonificationContexttracksMappingVolume(Options):
    """Volume-mapping options for pictorial-plot sonification context tracks.

    NOTE(review): each option name appears twice -- a no-argument getter
    followed by a one-argument setter. In plain Python the later ``def``
    shadows the earlier one; this looks like generated code whose
    ``@property``/``@x.setter`` decorators were stripped. Confirm against the
    code generator before relying on the getter variants.
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class RegisterGenericAliasCallableField(RegisterFieldTemplate):
    """Field registrar for generic-alias typed attributes (List/Tuple/Dict)
    whose elements are callables referenced by string in the config."""

    def __init__(self, salt: str, key: ByteString):
        super().__init__(salt, key)

    def handle_attribute_from_config(self, attr_space: AttributeSpace, builder_space: BuilderSpace):
        """Resolve the configured string value(s) for this attribute into callables."""
        type_name = _get_name_py_version(attr_space.attribute.metadata['type'])
        mapped = None
        if type_name in ('List', 'Tuple'):
            values = builder_space.arguments[attr_space.config_space.name][attr_space.attribute.name]
            mapped = [_recurse_callables(item, _str_2_callable) for item in values]
        elif type_name == 'Dict':
            values = builder_space.arguments[attr_space.config_space.name][attr_space.attribute.name]
            mapped = {key: _recurse_callables(item, _str_2_callable) for key, item in values.items()}
        attr_space.field = mapped

    def handle_optional_attribute_type(self, attr_space: AttributeSpace, builder_space: BuilderSpace):
        """Optional generic-alias callable fields are not supported."""
        raise _SpockNotOptionalError(f'Parameter `{attr_space.attribute.name}` within `{attr_space.config_space.name}` is of type `{type(attr_space.attribute.type)}` which seems to be unsupported -- are you missing an decorator on a base python class?')
def test_object_nonexisting(f):
    """Object lookups that cannot be satisfied must raise ValueError."""
    # Unknown type/name: the message should identify every lookup key.
    with pytest.raises(ValueError) as exc:
        _ = f.object('UNKNOWN_TYPE', 'SOME_OBJECT', 0, 0)
    assert ('Object not found: type=UNKNOWN_TYPE, name=SOME_OBJECT, origin=0, copynumber=0' in str(exc.value))
    # Known type/name but non-matching origin.
    with pytest.raises(ValueError):
        _ = f.object('CHANNEL', 'CHANN1', 11, 0)
    # String origin/copynumber are rejected as well.
    with pytest.raises(ValueError):
        _ = f.object('CHANNEL', 'CHANN1', '-1', '-1')
def transform_correlation_to_index(df: pd.DataFrame) -> pd.DataFrame:
    """Add an ``index_correlation`` column that links correlated CPU and GPU rows.

    Rows whose ``correlation`` id matches a row on the other side (CPU rows
    have ``stream == -1``, GPU rows any other stream) get the counterpart's
    ``index`` value; rows with ``correlation == -1`` get ``-1``; everything
    else gets ``0``. Returns ``df`` unchanged if it has no ``correlation``
    column. Mutates and returns ``df``.
    """
    if 'correlation' not in df.columns:
        return df
    # Rows that participate in correlation matching (correlation != -1).
    corr_df = df.loc[df['correlation'].ne(-1), ['index', 'correlation', 'stream']]
    # Fix: build the stream masks on corr_df itself. The original indexed
    # corr_df with masks computed on the full df, silently relying on pandas
    # boolean-indexer alignment.
    on_cpu = corr_df.loc[corr_df['stream'].eq(-1)]
    on_gpu = corr_df.loc[corr_df['stream'].ne(-1)]
    # Pair each side with its counterpart via the shared correlation id,
    # in both directions, then map index_x -> index_y.
    merged_cpu_idx = on_cpu.merge(on_gpu, on='correlation', how='inner')
    merged_gpu_idx = on_gpu.merge(on_cpu, on='correlation', how='inner')
    matched = pd.concat([merged_cpu_idx, merged_gpu_idx], axis=0)[['index_x', 'index_y']].set_index('index_x')
    corr_index_map: Dict[int, int] = matched['index_y'].to_dict()

    def _set_corr_index(row):
        # Matched rows point at their counterpart; unmatched rows with
        # correlation == -1 get -1; everything else gets 0.
        idx = row.name
        if idx in corr_index_map:
            return corr_index_map[idx]
        if df.loc[idx, 'correlation'] == -1:
            return -1
        return 0

    df['index_correlation'] = df.apply(_set_corr_index, axis=1)
    # Downcast to the smallest integer dtype that fits.
    df['index_correlation'] = pd.to_numeric(df['index_correlation'], downcast='integer')
    return df
class TokenBucket():
    """Async token-bucket rate limiter.

    Tokens accrue continuously at ``rate`` per second up to ``capacity``.
    ``take`` may drive the balance negative and then sleeps off the debt;
    ``take_nowait`` refuses instead of waiting.
    """

    def __init__(self, rate: Union[int, float], capacity: Union[int, float]) -> None:
        self._rate = rate
        self._capacity = capacity
        # Bucket starts full; refill is computed lazily from _last_refill.
        self._num_tokens = self._capacity
        self._last_refill = time.perf_counter()
        self._seconds_per_token = 1 / self._rate
        self._take_lock = asyncio.Lock()

    async def __aiter__(self) -> AsyncGenerator[None, None]:
        """Infinite pacing iterator: each step waits for one token."""
        while True:
            await self.take()
            yield

    def get_num_tokens(self) -> float:
        """Tokens currently available (never negative)."""
        return max(0, self._get_num_tokens(time.perf_counter()))

    def _get_num_tokens(self, when: float) -> float:
        # Balance plus tokens accrued since the last refill, capped at capacity.
        accrued = self._rate * (when - self._last_refill)
        return min(self._capacity, self._num_tokens + accrued)

    def _take(self, num: Union[int, float] = 1) -> None:
        """Debit ``num`` tokens after refilling; balance may go negative."""
        if num < 0:
            raise ValueError('Cannot take negative token quantity')
        now = time.perf_counter()
        self._num_tokens = self._get_num_tokens(now)
        self._last_refill = now
        self._num_tokens -= num

    async def take(self, num: Union[int, float] = 1) -> None:
        """Take ``num`` tokens, sleeping (lock held) until the debt is repaid."""
        async with self._take_lock:
            self._take(num)
            if self._num_tokens < 0:
                await asyncio.sleep(abs(self._num_tokens) * self._seconds_per_token)

    def take_nowait(self, num: Union[int, float] = 1) -> None:
        """Take ``num`` tokens immediately or raise ``NotEnoughTokens``."""
        available = self.get_num_tokens()
        if available < num:
            raise NotEnoughTokens(f'Insufficient capacity. Needed {num:.2f} but only has {available:.2f}')
        self._take(num)

    def can_take(self, num: Union[int, float] = 1) -> bool:
        """True when at least ``num`` tokens are currently available."""
        return num <= self.get_num_tokens()
class Structure(BaseType):
    """Parser/serializer for a C-style structure definition.

    Holds an ordered list of ``Field`` objects (including bit fields),
    computes per-field offsets and the total size, and reads/writes instances
    from a binary stream, honoring explicit field offsets and flattening
    anonymous sub-structures.
    """

    def __init__(self, cstruct, name, fields=None, anonymous=False):
        super().__init__(cstruct)
        self.name = name
        self.size = None  # computed lazily in __len__; invalidated by add_field
        self.lookup = OrderedDict()  # field name -> Field
        self.fields = fields
        self.anonymous = anonymous
        for field in self.fields:
            self.lookup[field.name] = field
        self._calc_offsets()

    def __len__(self):
        # Cached total byte size of the structure.
        if (self.size is None):
            self.size = self._calc_size()
        return self.size

    def __repr__(self):
        return '<Structure {}>'.format(self.name)

    def _calc_offsets(self):
        """Assign a byte offset to every field.

        Consecutive bit fields of the same underlying type share one storage
        unit (only the first field of a unit carries the offset). A field
        whose length cannot be determined (dynamic types) makes every
        following offset unknown (None).
        """
        offset = 0
        bits_type = None       # storage type of the current bit-field run
        bits_remaining = 0     # bits left in the current storage unit
        for field in self.fields:
            if field.bits:
                # Start a new storage unit when the previous one is exhausted
                # or the bit field's underlying type changes.
                if ((bits_remaining == 0) or (field.type != bits_type)):
                    bits_type = field.type
                    bits_remaining = (bits_type.size * 8)
                    if (offset is not None):
                        field.offset = offset
                        offset += bits_type.size
                    else:
                        field.offset = None
                bits_remaining -= field.bits
                continue
            field.offset = offset
            if (offset is not None):
                try:
                    offset += len(field.type)
                except TypeError:
                    # Dynamically sized type: offsets beyond here are unknown.
                    offset = None

    def _calc_size(self):
        """Total byte size; explicit field offsets can extend past the natural end."""
        size = 0
        bits_type = None
        bits_remaining = 0
        for field in self.fields:
            if field.bits:
                # A bit-field storage unit contributes its size only once.
                if ((bits_remaining == 0) or (field.type != bits_type)):
                    bits_type = field.type
                    bits_remaining = (bits_type.size * 8)
                    size += bits_type.size
                bits_remaining -= field.bits
                continue
            field_len = len(field.type)
            size += field_len
            if (field.offset is not None):
                # Explicitly offset fields may place data beyond the running size.
                size = max(size, (field.offset + field_len))
        return size

    def _read(self, stream, *args, **kwargs):
        """Read one instance of this structure from ``stream``."""
        bit_buffer = BitBuffer(stream, self.cstruct.endian)
        struct_start = stream.tell()
        result = OrderedDict()
        sizes = {}
        for field in self.fields:
            start = stream.tell()
            field_type = self.cstruct.resolve(field.type)
            # Honor explicit offsets by seeking relative to the struct start.
            if field.offset:
                if (start != (struct_start + field.offset)):
                    stream.seek((struct_start + field.offset))
                    start = (struct_start + field.offset)
            if field.bits:
                result[field.name] = bit_buffer.read(field_type, field.bits)
                continue
            else:
                # A regular field terminates any pending bit-field unit.
                bit_buffer.reset()
            if isinstance(field_type, (Array, Pointer)):
                # Arrays/pointers get the partial result (e.g. dynamic lengths).
                v = field_type._read(stream, result)
            else:
                v = field_type._read(stream)
            if (isinstance(field_type, Structure) and field_type.anonymous):
                # Anonymous sub-structures flatten their members into this result.
                sizes.update(v._sizes)
                result.update(v._values)
            else:
                sizes[field.name] = (stream.tell() - start)
                result[field.name] = v
        return Instance(self, result, sizes)

    def _write(self, stream, data):
        """Write ``data`` (attribute access per field name) to ``stream``.

        Returns the number of bytes accounted to non-bit fields.
        """
        bit_buffer = BitBuffer(stream, self.cstruct.endian)
        num = 0
        for field in self.fields:
            offset = stream.tell()
            if field.bits:
                bit_buffer.write(field.type, getattr(data, field.name), field.bits)
                continue
            if bit_buffer._type:
                # Flush a pending bit-field unit before the next regular field.
                bit_buffer.flush()
            if (isinstance(field.type, Structure) and field.type.anonymous):
                # Anonymous sub-structures read their members from `data` directly.
                field.type._write(stream, data)
            else:
                field.type._write(stream, getattr(data, field.name))
            num += (stream.tell() - offset)
        if bit_buffer._type:
            # Flush trailing bit fields.
            bit_buffer.flush()
        return num

    def add_field(self, name, type_, offset=None):
        """Append a field and invalidate the cached size."""
        field = Field(name, type_, offset=offset)
        self.fields.append(field)
        self.lookup[name] = field
        self.size = None

    def default(self):
        """Instance with every field set to its type's default value."""
        result = OrderedDict()
        for field in self.fields:
            result[field.name] = field.type.default()
        return Instance(self, result)

    def show(self, indent=0):
        """Pretty-print the layout (recurses into nested structures)."""
        if (indent == 0):
            print('struct {}'.format(self.name))
        for field in self.fields:
            if (field.offset is None):
                offset = '0x??'  # unknown offset (after a dynamic field)
            else:
                offset = '0x{:02x}'.format(field.offset)
            print('{}+{} {} {}'.format((' ' * indent), offset, field.name, field.type))
            if isinstance(field.type, Structure):
                field.type.show((indent + 1))
def plot_data(column, i, title):
    """Draw one scatter panel of the module-level ``data`` table on a Mercator map.

    Args:
        column: Column of ``data`` used for point colors.
        i: 1-based subplot index in a 2x2 grid.
        title: Axes title.

    Returns:
        The scatter mappable (for attaching a colorbar).
    """
    # Point coordinates are plain lon/lat; the axes use a Mercator projection.
    crs = ccrs.PlateCarree()
    ax = plt.subplot(2, 2, i, projection=ccrs.Mercator())
    ax.set_title(title)
    # Symmetric color limits around zero so the diverging colormap is centered.
    maxabs = vd.maxabs(data.air_temperature_c)
    mappable = ax.scatter(data.longitude, data.latitude, c=data[column], s=50, cmap='seismic', vmin=(- maxabs), vmax=maxabs, transform=crs)
    vd.datasets.setup_texas_wind_map(ax)
    return mappable
class OptionPlotoptionsDependencywheelSonificationContexttracksMappingPitch(Options):
    """Pitch-mapping options for dependency-wheel sonification context tracks.

    Defaults: mapTo='y', range 'c2'..'c6', within='yAxis'.

    NOTE(review): each option name appears twice -- a getter followed by a
    setter of the same name. In plain Python the later ``def`` shadows the
    earlier; this looks like generated code with stripped ``@property``
    decorators -- confirm against the generator.
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get('y')

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get('c6')

    def max(self, text: str):
        self._config(text, js_type=False)

    def min(self):
        return self._config_get('c2')

    def min(self, text: str):
        self._config(text, js_type=False)

    def scale(self):
        return self._config_get(None)

    def scale(self, value: Any):
        self._config(value, js_type=False)

    def within(self):
        return self._config_get('yAxis')

    def within(self, text: str):
        self._config(text, js_type=False)
# NOTE(review): the lines below appear to be '@pytest.mark.parametrize' and
# '@mock.patch(...)' decorators whose '@...' prefixes were lost during
# extraction -- restore before use.
.parametrize('status', UpdateStatus)
('os.path.exists', side_effect=OSError('os_error'))
('os.path.expanduser', return_value=temp_dir)
('subprocess.check_call')
('Updater.sdlog.error')
('Updater.sdlog.info')
def test_write_updates_status_flag_to_disk_failure_dom0(mocked_info, mocked_error, mocked_call, mocked_expand, mocked_open, status):
    """A failing dom0 status-flag write must log the context and the OSError."""
    # mock.patch arguments are injected bottom-up: info, error, check_call,
    # expanduser, exists (the OSError side effect).
    error_calls = [call('Error writing update status flag to dom0'), call('os_error')]
    updater._write_updates_status_flag_to_disk(status)
    mocked_error.assert_has_calls(error_calls)
# NOTE(review): the lines below appear to be click decorators
# ('@root.command', '@click.argument', '@click.option') whose '@...'
# prefixes were lost during extraction -- restore before use.
('rule-search')
('query', required=False)
('--columns', '-c', multiple=True, help='Specify columns to add the table')
('--language', type=click.Choice(['eql', 'kql']), default='kql')
('--count', is_flag=True, help='Return a count rather than table')
def search_rules(query, columns, language, count, verbose=True, rules: Dict[(str, TOMLRule)]=None, pager=False):
    """Search detection rules with a KQL or EQL query and print/return matches.

    Returns the list of flattened rule dicts that matched ``query``.
    """
    from kql import get_evaluator
    from eql.table import Table
    from eql.build import get_engine
    from eql import parse_query
    from eql.pipes import CountPipe
    from .rule import get_unique_query_fields
    # Flatten each rule (file path + metadata + rule body + MITRE ATT&CK
    # info) into a single dict so the query languages see flat field names.
    flattened_rules = []
    rules = (rules or {str(rule.path): rule for rule in RuleCollection.default()})
    for (file_name, rule) in rules.items():
        flat: dict = {'file': os.path.relpath(file_name)}
        flat.update(rule.contents.to_dict())
        flat.update(flat['metadata'])
        flat.update(flat['rule'])
        tactic_names = []
        technique_ids = []
        subtechnique_ids = []
        # Collect ATT&CK tactic/technique ids from the threat mapping.
        for entry in flat['rule'].get('threat', []):
            if (entry['framework'] != 'MITRE ATT&CK'):
                continue
            techniques = entry.get('technique', [])
            tactic_names.append(entry['tactic']['name'])
            technique_ids.extend([t['id'] for t in techniques])
            subtechnique_ids.extend([st['id'] for t in techniques for st in t.get('subtechnique', [])])
        flat.update(techniques=technique_ids, tactics=tactic_names, subtechniques=subtechnique_ids, unique_fields=get_unique_query_fields(rule))
        flattened_rules.append(flat)
    flattened_rules.sort(key=(lambda dct: dct['name']))
    filtered = []
    if (language == 'kql'):
        # No query means match everything.
        evaluator = (get_evaluator(query) if query else (lambda x: True))
        filtered = list(filter(evaluator, flattened_rules))
    elif (language == 'eql'):
        parsed = parse_query(query, implied_any=True, implied_base=True)
        evaluator = get_engine(parsed)
        filtered = [result.events[0].data for result in evaluator(flattened_rules)]
        # Count pipes change the output shape; show count columns by default.
        if ((not columns) and any((isinstance(pipe, CountPipe) for pipe in parsed.pipes))):
            columns = ['key', 'count', 'percent']
    if count:
        click.echo(f'{len(filtered)} rules')
        return filtered
    if columns:
        # Support both repeated -c flags and comma-separated values.
        columns = ','.join(columns).split(',')
    else:
        columns = ['rule_id', 'file', 'name']
    table = Table.from_list(columns, filtered)
    if verbose:
        (click.echo_via_pager(table) if pager else click.echo(table))
    return filtered
def mock_user_db_oauth(user_oauth: UserOAuthModel, verified_user_oauth: UserOAuthModel, inactive_user_oauth: UserOAuthModel, superuser_oauth: UserOAuthModel, verified_superuser_oauth: UserOAuthModel) -> BaseUserDatabase[(UserOAuthModel, IDType)]:
    """Build an in-memory ``BaseUserDatabase`` over the five OAuth fixture users.

    Lookup methods compare ids/emails/OAuth accounts against the fixtures;
    mutating methods update the given objects in place.
    """

    class MockUserDatabase(BaseUserDatabase[(UserOAuthModel, IDType)]):
        async def get(self, id: UUID4) -> Optional[UserOAuthModel]:
            """Return the fixture user with this id, or None."""
            if (id == user_oauth.id):
                return user_oauth
            if (id == verified_user_oauth.id):
                return verified_user_oauth
            if (id == inactive_user_oauth.id):
                return inactive_user_oauth
            if (id == superuser_oauth.id):
                return superuser_oauth
            if (id == verified_superuser_oauth.id):
                return verified_superuser_oauth
            return None

        async def get_by_email(self, email: str) -> Optional[UserOAuthModel]:
            """Case-insensitive email lookup across the fixture users."""
            lower_email = email.lower()
            if (lower_email == user_oauth.email.lower()):
                return user_oauth
            if (lower_email == verified_user_oauth.email.lower()):
                return verified_user_oauth
            if (lower_email == inactive_user_oauth.email.lower()):
                return inactive_user_oauth
            if (lower_email == superuser_oauth.email.lower()):
                return superuser_oauth
            if (lower_email == verified_superuser_oauth.email.lower()):
                return verified_superuser_oauth
            return None

        async def get_by_oauth_account(self, oauth: str, account_id: str) -> Optional[UserOAuthModel]:
            """Match against the first OAuth account of the active/inactive fixtures."""
            user_oauth_account = user_oauth.oauth_accounts[0]
            if ((user_oauth_account.oauth_name == oauth) and (user_oauth_account.account_id == account_id)):
                return user_oauth
            inactive_user_oauth_account = inactive_user_oauth.oauth_accounts[0]
            if ((inactive_user_oauth_account.oauth_name == oauth) and (inactive_user_oauth_account.account_id == account_id)):
                return inactive_user_oauth
            return None

        async def create(self, create_dict: Dict[(str, Any)]) -> UserOAuthModel:
            return UserOAuthModel(**create_dict)

        async def update(self, user: UserOAuthModel, update_dict: Dict[(str, Any)]) -> UserOAuthModel:
            for (field, value) in update_dict.items():
                setattr(user, field, value)
            return user

        async def delete(self, user: UserOAuthModel) -> None:
            pass

        async def add_oauth_account(self, user: UserOAuthModel, create_dict: Dict[(str, Any)]) -> UserOAuthModel:
            oauth_account = OAuthAccountModel(**create_dict)
            user.oauth_accounts.append(oauth_account)
            return user

        async def update_oauth_account(self, user: UserOAuthModel, oauth_account: OAuthAccountModel, update_dict: Dict[(str, Any)]) -> UserOAuthModel:
            """Apply ``update_dict`` to ``oauth_account`` and swap it into the
            user's account list (matched by oauth_name + account_id)."""
            for (field, value) in update_dict.items():
                setattr(oauth_account, field, value)
            updated_oauth_accounts = []
            for existing_oauth_account in user.oauth_accounts:
                if ((existing_oauth_account.account_id == oauth_account.account_id) and (existing_oauth_account.oauth_name == oauth_account.oauth_name)):
                    updated_oauth_accounts.append(oauth_account)
                else:
                    updated_oauth_accounts.append(existing_oauth_account)
            # Bug fix: the rebuilt list was previously discarded, so the
            # replacement never landed on the user object.
            user.oauth_accounts = updated_oauth_accounts
            return user

    return MockUserDatabase()
class TestIssue62():
    """Regression tests for ruamel.yaml issue 62: inputs with flow-collection
    edge cases must fail to parse under a '%YAML 1.1' directive but round-trip
    fine without it (default 1.2 rules)."""

    def test_00(self):
        import srsly.ruamel_yaml
        # The '{}' placeholder receives an optional '%YAML 1.1' header.
        s = dedent(' {}# Outside flow collection:\n - ::vector\n - ": - ()"\n - Up, up, and away!\n - -123\n - # Inside flow collection:\n - [::vector, ": - ()", "Down, down and away!", -456, ')
        with pytest.raises(srsly.ruamel_yaml.parser.ParserError):
            round_trip(s.format('%YAML 1.1\n---\n'), preserve_quotes=True)
        round_trip(s.format(''), preserve_quotes=True)

    def test_00_single_comment(self):
        import srsly.ruamel_yaml
        # Same document but with only the leading comment.
        s = dedent(' {}# Outside flow collection:\n - ::vector\n - ": - ()"\n - Up, up, and away!\n - -123\n - - [::vector, ": - ()", "Down, down and away!", -456, ')
        with pytest.raises(srsly.ruamel_yaml.parser.ParserError):
            round_trip(s.format('%YAML 1.1\n---\n'), preserve_quotes=True)
        round_trip(s.format(''), preserve_quotes=True)

    def test_01(self):
        import srsly.ruamel_yaml
        # '?' inside a plain scalar: invalid in 1.1, fine in 1.2.
        s = dedent(' {}[random plain value that contains a ? character]\n ')
        with pytest.raises(srsly.ruamel_yaml.parser.ParserError):
            round_trip(s.format('%YAML 1.1\n---\n'), preserve_quotes=True)
        round_trip(s.format(''), preserve_quotes=True)
        # An explicit 1.2 directive must also work.
        round_trip(s.format('%YAML 1.2\n--- '), preserve_quotes=True, version='1.2')

    def test_so_(self):
        # Flow mapping of empty mappings loads without error.
        round_trip_load('{"in":{},"out":{}}')
class TestSalesGPT():
    """End-to-end smoke tests for SalesGPT inference paths: plain step, tools,
    streaming, async streaming, and use_tools accepting str or bool.

    These tests call a live LLM through ChatLiteLLM (credentials come from the
    ``load_env`` fixture) and only assert that a non-empty string reply is
    produced -- they do not check reply content.
    """

    def test_valid_inference_no_tools(self, load_env):
        """step() without tools appends a non-empty string to the history."""
        llm = ChatLiteLLM(temperature=0.9)
        sales_agent = SalesGPT.from_llm(llm, verbose=False, use_tools=False, salesperson_name='Ted Lasso', salesperson_role='Sales Representative', company_name='Sleep Haven', company_business='Sleep Haven \n is a premium mattress company that provides\n customers with the most comfortable and\n supportive sleeping experience possible. \n We offer a range of high-quality mattresses,\n pillows, and bedding accessories \n that are designed to meet the unique \n needs of our customers.')
        sales_agent.seed_agent()
        sales_agent.determine_conversation_stage()
        sales_agent.step()
        # The agent's reply is the last entry in the conversation history.
        agent_output = sales_agent.conversation_history[(- 1)]
        assert (agent_output is not None), 'Agent output cannot be None.'
        assert isinstance(agent_output, str), 'Agent output needs to be of type str'
        assert (len(agent_output) > 0), 'Length of output needs to be greater than 0.'

    def test_valid_inference_with_tools(self, load_env):
        """step() with a product-catalog tool still yields a string reply."""
        llm = ChatLiteLLM(temperature=0.9)
        data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_data')
        # use_tools is deliberately the string 'True' here (str form is
        # accepted -- see test_accept_json_or_args_config).
        sales_agent = SalesGPT.from_llm(llm, verbose=False, use_tools='True', product_catalog=f'{data_dir}/sample_product_catalog.txt', salesperson_name='Ted Lasso', salesperson_role='Sales Representative', company_name='Sleep Haven', company_business='Sleep Haven \n is a premium mattress company that provides\n customers with the most comfortable and\n supportive sleeping experience possible. \n We offer a range of high-quality mattresses,\n pillows, and bedding accessories \n that are designed to meet the unique \n needs of our customers.')
        sales_agent.seed_agent()
        sales_agent.determine_conversation_stage()
        sales_agent.step()
        agent_output = sales_agent.conversation_history[(- 1)]
        assert (agent_output is not None), 'Agent output cannot be None.'
        assert isinstance(agent_output, str), 'Agent output needs to be of type str'
        assert (len(agent_output) > 0), 'Length of output needs to be greater than 0.'

    def test_valid_inference_stream(self, load_env):
        """step(stream=True) yields chunks that concatenate to a non-empty reply."""
        llm = ChatLiteLLM(temperature=0.9, model_name='gpt-3.5-turbo')
        sales_agent = SalesGPT.from_llm(llm, verbose=False, salesperson_name='Ted Lasso', salesperson_role='Sales Representative', company_name='Sleep Haven', company_business='Sleep Haven \n is a premium mattress company that provides\n customers with the most comfortable and\n supportive sleeping experience possible. \n We offer a range of high-quality mattresses,\n pillows, and bedding accessories \n that are designed to meet the unique \n needs of our customers.')
        sales_agent.seed_agent()
        sales_agent.determine_conversation_stage()
        stream_generator = sales_agent.step(stream=True)
        agent_output = ''
        for chunk in stream_generator:
            # OpenAI-style delta chunks; missing content counts as ''.
            token = (chunk['choices'][0]['delta'].get('content', '') or '')
            agent_output += token
        assert (agent_output is not None), 'Agent output cannot be None.'
        assert isinstance(agent_output, str), 'Agent output needs to be of type str'
        assert (len(agent_output) > 0), 'Length of output needs to be greater than 0.'

    # NOTE(review): '.asyncio' appears to be '@pytest.mark.asyncio' with the
    # prefix lost during extraction.
    .asyncio
    async def test_valid_async_inference_stream(self, load_env):
        """astep(stream=True) returns an async generator of delta chunks."""
        llm = ChatLiteLLM(temperature=0.9)
        model_name = 'gpt-3.5-turbo'
        sales_agent = SalesGPT.from_llm(llm, verbose=False, salesperson_name='Ted Lasso', salesperson_role='Sales Representative', company_name='Sleep Haven', company_business='Sleep Haven \n is a premium mattress company that provides\n customers with the most comfortable and\n supportive sleeping experience possible. \n We offer a range of high-quality mattresses,\n pillows, and bedding accessories \n that are designed to meet the unique \n needs of our customers.')
        sales_agent.seed_agent()
        sales_agent.determine_conversation_stage()
        astream_generator = (await sales_agent.astep(stream=True))
        import inspect
        is_async_generator = inspect.isasyncgen(astream_generator)
        assert (is_async_generator == True), 'This needs to be an async generator!'
        agent_output = ''
        async for chunk in astream_generator:
            token = (chunk['choices'][0]['delta'].get('content', '') or '')
            agent_output += token
        assert (agent_output is not None), 'Agent output cannot be None.'
        assert isinstance(agent_output, str), 'Agent output needs to be of type str'
        assert (len(agent_output) > 0), 'Length of output needs to be greater than 0.'

    def test_accept_json_or_args_config(self, load_env):
        """use_tools may be passed as the string 'True' or the bool True."""
        llm = ChatLiteLLM()
        sales_agent_passing_str = SalesGPT.from_llm(llm, verbose=False, use_tools='True', product_catalog='tests/test_data/sample_product_catalog.txt', salesperson_name='Ted Lasso', salesperson_role='Sales Representative', company_name='Sleep Haven', company_business='Sleep Haven\n is a premium mattress company that provides\n customers with the most comfortable and\n supportive sleeping experience possible.\n We offer a range of high-quality mattresses,\n pillows, and bedding accessories\n that are designed to meet the unique\n needs of our customers.')
        assert isinstance(sales_agent_passing_str, SalesGPT)
        assert (sales_agent_passing_str.seed_agent() is None)
        assert (sales_agent_passing_str.step() is None)
        sales_agent_passing_bool = SalesGPT.from_llm(llm, verbose=False, use_tools=True, product_catalog='tests/test_data/sample_product_catalog.txt', salesperson_name='Ted Lasso', salesperson_role='Sales Representative', company_name='Sleep Haven', company_business='Sleep Haven\n is a premium mattress company that provides\n customers with the most comfortable and\n supportive sleeping experience possible.\n We offer a range of high-quality mattresses,\n pillows, and bedding accessories\n that are designed to meet the unique\n needs of our customers.')
        assert isinstance(sales_agent_passing_bool, SalesGPT)
        assert (sales_agent_passing_bool.seed_agent() is None)
        assert (sales_agent_passing_bool.step() is None)
class OptionSeriesSplineSonificationTracksMappingTime(Options):
    """Time-mapping options for spline-series sonification tracks.

    NOTE(review): each option name appears twice -- a getter followed by a
    setter of the same name. In plain Python the later ``def`` shadows the
    earlier; this looks like generated code with stripped ``@property``
    decorators -- confirm against the generator.
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class PN532_I2C(PN532):
    """PN532 NFC reader driver over I2C using Raspberry Pi GPIO control lines
    (optional IRQ, reset and request pins, BCM numbering)."""

    def __init__(self, irq=None, reset=None, req=None, debug=False, i2c_c=1):
        """Configure GPIO pins and the I2C device, then run the base init."""
        self.debug = debug
        self._gpio_init(irq=irq, req=req, reset=reset)
        self._i2c = I2CDevice(i2c_c, I2C_ADDRESS)
        super().__init__(debug=debug, reset=reset)

    def _gpio_init(self, reset, irq=None, req=None):
        # Configure whichever control pins were provided.
        self._irq = irq
        self._req = req
        GPIO.setmode(GPIO.BCM)
        if reset:
            GPIO.setup(reset, GPIO.OUT)
            GPIO.output(reset, True)  # hold reset line high (inactive)
        if irq:
            GPIO.setup(irq, GPIO.IN)
        if req:
            GPIO.setup(req, GPIO.OUT)
            GPIO.output(req, True)

    def _reset(self, pin):
        """Hard-reset the chip by pulsing the reset pin low."""
        if pin:
            GPIO.output(pin, True)
            time.sleep(0.1)
            GPIO.output(pin, False)  # active-low reset pulse
            time.sleep(0.5)
            GPIO.output(pin, True)
            time.sleep(0.1)

    def _wakeup(self):
        """Wake the PN532 by toggling the request line (if wired)."""
        if self._req:
            GPIO.output(self._req, True)
            time.sleep(0.1)
            GPIO.output(self._req, False)
            time.sleep(0.1)
            GPIO.output(self._req, True)
            time.sleep(0.5)

    def _wait_ready(self, timeout=10):
        """Poll the I2C status byte until the chip reports ready (0x01).

        Returns True when ready, False after ``timeout`` seconds.
        """
        time.sleep(0.01)
        status = bytearray(1)
        timestamp = time.monotonic()
        while ((time.monotonic() - timestamp) < timeout):
            try:
                status[0] = self._i2c.read(1)[0]
            except OSError:
                # Chip not responding yet; nudge it awake and retry.
                self._wakeup()
                continue
            if (status == b'\x01'):
                return True
            time.sleep(0.005)  # back off between polls
        return False

    def _read_data(self, count):
        """Read a response frame: 1 status byte + ``count`` payload bytes.

        Returns the payload without the status byte, or None on I2C error.
        """
        try:
            status = self._i2c.read(1)[0]
            if (status != 1):
                # Chip not ready to deliver a frame.
                raise BusyError
            frame = bytes(self._i2c.read((count + 1)))
        except OSError as err:
            if self.debug:
                print(err)
            return
        if self.debug:
            print('Reading: ', [hex(i) for i in frame[1:]])
        else:
            # NOTE(review): delay only happens when NOT debugging -- presumably
            # bus pacing, but confirm the asymmetry is intentional.
            time.sleep(0.1)
        return frame[1:]

    def _write_data(self, framebytes):
        """Write a raw frame to the chip over I2C."""
        self._i2c.write(framebytes)
def read_response_file(file_name: "str | None", to_json: bool = True) -> "dict | list | str | bytes | None":
    """Load a mock API response from the adjacent ``api_responses`` directory.

    Args:
        file_name: Name of the file under ``api_responses``; ``None`` returns ``None``.
        to_json: When True, parse the file contents as JSON; otherwise return
            the raw bytes.

    Raises:
        AttributeError: If the file does not exist. (Kept for backward
            compatibility with existing callers, though FileNotFoundError
            would be more conventional.)

    Note: the original annotations ``(str or None)`` and
    ``(json or bytes or None)`` evaluated at runtime to ``str`` and the
    ``json`` module respectively; they are replaced with string annotations
    that are never evaluated.
    """
    if file_name is None:
        return None
    import os
    directory = os.path.dirname(__file__)
    file_path = os.path.join(directory, 'api_responses', file_name)
    if not os.path.isfile(file_path):
        raise AttributeError("Couldn't find file containing response mock data: {}".format(file_path))
    # Text mode for JSON parsing, binary otherwise.
    mode = 'r' if to_json else 'rb'
    with open(file_path, mode) as myfile:
        api_response_text = myfile.read()
    return json.loads(api_response_text) if to_json else api_response_text
class PyTorchEstimatorFlinkStreamDataset(FlinkStreamDataset):
    """Flink stream dataset that parses CSV records into (features, label)
    tensor pairs using column/type information from the PyTorch context."""

    def __init__(self, context: PyTorchContext):
        super().__init__(context)
        self.pytorch_context = context

    def parse_record(self, record):
        """Parse one CSV line into ``([feature tensors], label tensor)``."""
        # Column names/types and the feature/label split come from job properties.
        input_cols: List[str] = self.pytorch_context.get_property(INPUT_COL_NAMES).split(',')
        input_types: List[str] = self.pytorch_context.get_property(INPUT_TYPES).split(',')
        feature_cols: List[str] = self.pytorch_context.get_property(FEATURE_COLS).split(',')
        label_col = self.pytorch_context.get_property(LABEL_COL)
        df = pd.read_csv(StringIO(record), header=None, names=input_cols)
        # One single-element tensor per feature column, typed per the Flink map.
        # NOTE(review): the dtype for feature i is looked up by its position in
        # feature_cols but indexes into input_types -- this assumes feature
        # columns lead (and are ordered like) input_cols; confirm upstream.
        feature_tensors = [torch.tensor([df[key][0]], dtype=DL_ON_FLINK_TYPE_TO_PYTORCH_TYPE[input_types[idx]]) for (idx, key) in enumerate(feature_cols)]
        label_tensor = torch.tensor([df[label_col][0]], dtype=DL_ON_FLINK_TYPE_TO_PYTORCH_TYPE[input_types[input_cols.index(label_col)]])
        return (feature_tensors, label_tensor)
# Restored: this decorator had lost its '@pytest.mark.' prefix during extraction.
@pytest.mark.parametrize('resources', parameterizers.LIST_OF_RESOURCES)
def test_container(resources):
    """A task.Container keeps its fields and round-trips through the flyte IDL."""
    obj = task.Container('my_image', ['this', 'is', 'a', 'cmd'], ['this', 'is', 'an', 'arg'], resources, {'a': 'b'}, {'d': 'e'})
    # Bug fix: these comparisons were bare expressions (silent no-ops);
    # they now actually assert.
    assert obj.image == 'my_image'
    assert obj.command == ['this', 'is', 'a', 'cmd']
    assert obj.args == ['this', 'is', 'an', 'arg']
    assert obj.resources == resources
    assert obj.env == {'a': 'b'}
    assert obj.config == {'d': 'e'}
    assert (obj == task.Container.from_flyte_idl(obj.to_flyte_idl()))
class CustomSession(CallbackDict, SessionMixin):
    """Server-side session dict that tracks its own modification state plus
    session id, expiry, newness and whether the presented id was invalid."""

    def __init__(self, initial=None, session_id=None, new=False, was_invalid=False, expires=0):
        def mark_modified(session):
            # CallbackDict invokes the callback with the dict itself on
            # every mutation; flag the session as dirty.
            session.modified = True

        CallbackDict.__init__(self, initial, mark_modified)
        self.session_id = session_id
        self.expires = expires
        self.new = new
        self.was_invalid = was_invalid
        # Fresh sessions start clean regardless of the initial payload.
        self.modified = False
# NOTE(review): the line below appears to be '@pytest.mark.skipif' with the
# prefix lost during extraction -- restore before use.
.skipif((backend_default == 'numba'), reason='Not supported by Numba')
def test_jit_method2():
    """JIT-compiled methods work both before and after the extension builds."""
    from _transonic_testing.for_test_justintime import MyClass2
    obj = MyClass2()
    obj.check()  # first call may run the pure-Python fallback
    if (not can_import_accelerator()):
        # Without an accelerator there is nothing to compile; stop here.
        return
    # With an accelerator, results must still hold once compilation finishes.
    obj = MyClass2()
    obj.check()
    wait_for_all_extensions()
    obj.check()
class arraydata(TestCase):
    """Tests for ``nutils.types.arraydata``: an immutable, hashable wrapper
    around numpy array data."""

    def _check(self, array, dtype):
        # Wrapping must preserve shape/ndim, normalize the dtype to the
        # abstract python type, and round-trip back to an equal array.
        arraydata = nutils.types.arraydata(array)
        self.assertEqual(arraydata.shape, array.shape)
        self.assertEqual(arraydata.ndim, array.ndim)
        self.assertEqual(arraydata.dtype, dtype)
        self.assertAllEqual(numpy.asarray(arraydata), array)

    def test_bool(self):
        self._check(numpy.array([True, False, True]), bool)

    def test_int(self):
        # All integer widths collapse to python int.
        for d in ('int32', 'int64', 'uint32'):
            with self.subTest(d):
                self._check(numpy.array([[1, 2, 3], [4, 5, 6]], dtype=d), int)

    def test_float(self):
        # Both float widths collapse to python float.
        for d in ('float32', 'float64'):
            with self.subTest(d):
                self._check(numpy.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=d), float)

    def test_complex(self):
        self._check(numpy.array([(1 + 2j), (3 + 4j)]), complex)

    def test_rewrap(self):
        # Wrapping an existing arraydata is idempotent (same object back).
        w = nutils.types.arraydata(numpy.array([1, 2, 3]))
        self.assertIs(w, nutils.types.arraydata(w))

    def test_pickle(self):
        # Pickling round-trips equality and contents.
        import pickle
        orig = nutils.types.arraydata([1, 2, 3])
        s = pickle.dumps(orig)
        unpickled = pickle.loads(s)
        self.assertEqual(orig, unpickled)
        self.assertAllEqual(numpy.asarray(unpickled), [1, 2, 3])

    def test_hash(self):
        # Equal content with different integer widths hashes/compares equal;
        # a different shape does not.
        a = nutils.types.arraydata(numpy.array([1, 2, 3], dtype=numpy.int32))
        b = nutils.types.arraydata(numpy.array([1, 2, 3], dtype=numpy.int64))
        c = nutils.types.arraydata(numpy.array([[1, 2, 3]], dtype=numpy.int64))
        self.assertEqual(hash(a), hash(b))
        self.assertEqual(a, b)
        self.assertNotEqual(hash(a), hash(c))
        self.assertNotEqual(a, c)
# NOTE(review): '.django_db' appears to be '@pytest.mark.django_db' with the
# prefix lost during extraction -- restore before use.
.django_db
def test_update_assistance_linkages_uri():
    """A File C record sharing a URI with an award gets linked to that award_id."""
    # One award (id 999) and one File C record (id 777) sharing 'RANDOM_URI'.
    models_to_mock = [{'model': AwardSearch, 'award_id': 999, 'uri': 'RANDOM_URI'}, {'model': FinancialAccountsByAwards, 'financial_accounts_by_awards_id': 777, 'uri': 'RANDOM_URI'}]
    for entry in models_to_mock:
        baker.make(entry.pop('model'), **entry)
    call_command('update_file_c_linkages', '--recalculate-linkages', '--file-d-table=award_search')
    expected_results = 999
    file_c_award = FinancialAccountsByAwards.objects.filter(financial_accounts_by_awards_id=777).first()
    assert (file_c_award is not None)
    # The linkage command must have filled in the matching award_id.
    assert (expected_results == file_c_award.award_id)
class Fireworks(GraphCanvas.Canvas):
name = 'Skin Fireworks'
_option_cls = OptSkins.OptionsSkin
_js__builder__ = '\n window.addEventListener("resize", resizeCanvas, false);\n window.addEventListener("DOMContentLoaded", onLoad, false);\n \n window.requestAnimationFrame = \n window.requestAnimationFrame || \n window.webkitRequestAnimationFrame || \n window.mozRequestAnimationFrame || \n window.oRequestAnimationFrame || \n window.msRequestAnimationFrame || \n function (callback) {\n window.setTimeout(callback, 1000/60);\n };\n \n var ctx, w, h, particles = [], probability = 0.04, xPoint, yPoint;\n \n function onLoad() {\n ctx = htmlObj.getContext("2d");\n resizeCanvas();\n \n window.requestAnimationFrame(updateWorld);\n } \n \n function resizeCanvas() {\n if (!!htmlObj) {\n w = htmlObj.width = window.innerWidth;\n h = htmlObj.height = window.innerHeight;\n }\n } \n \n function updateWorld() {\n update();\n paint();\n window.requestAnimationFrame(updateWorld);\n } \n \n function update() {\n if (particles.length < 500 && Math.random() < probability) {\n createFirework();\n }\n var alive = [];\n for (var i=0; i<particles.length; i++) {\n if (particles[i].move()) {\n alive.push(particles[i]);\n }\n }\n particles = alive;\n } \n \n function paint() {\n ctx.globalCompositeOperation = \'source-over\';\n ctx.fillStyle = "rgba(0,0,0,0.2)";\n ctx.fillRect(0, 0, w, h);\n ctx.globalCompositeOperation = \'lighter\';\n for (var i=0; i<particles.length; i++) {\n particles[i].draw(ctx);\n }\n } \n \n function createFirework() {\n xPoint = Math.random()*(w-200)+100;\n yPoint = Math.random()*(h-200)+100;\n var nFire = Math.random()*50+100;\n var c = "rgb("+(~~(Math.random()*200+55))+","\n +(~~(Math.random()*200+55))+","+(~~(Math.random()*200+55))+")";\n for (var i=0; i<nFire; i++) {\n var particle = new Particle();\n particle.color = c;\n var vy = Math.sqrt(25-particle.vx*particle.vx);\n if (Math.abs(particle.vy) > vy) {\n particle.vy = particle.vy>0 ? 
vy: -vy;\n }\n particles.push(particle);\n }\n } \n \n function Particle() {\n this.w = this.h = Math.random()*4+1;\n \n this.x = xPoint-this.w/2;\n this.y = yPoint-this.h/2;\n \n this.vx = (Math.random()-0.5)*10;\n this.vy = (Math.random()-0.5)*10;\n \n this.alpha = Math.random()*.5+.5;\n \n this.color;\n } \n \n Particle.prototype = {\n gravity: 0.05,\n move: function () {\n this.x += this.vx;\n this.vy += this.gravity;\n this.y += this.vy;\n this.alpha -= 0.01;\n if (this.x <= -this.w || this.x >= screen.width ||\n this.y >= screen.height ||\n this.alpha <= 0) {\n return false;\n }\n return true;\n },\n draw: function (c) {\n c.save();\n c.beginPath();\n \n c.translate(this.x+this.w/2, this.y+this.h/2);\n c.arc(0, 0, this.w, 0, Math.PI*2);\n c.fillStyle = this.color;\n c.globalAlpha = this.alpha;\n \n c.closePath();\n c.fill();\n c.restore();\n }\n } \n '
def __str__(self):
    """Render the canvas tag, registering the JS animation builder on the page."""
    # Make sure the firework builder runs when the page refreshes this component.
    self.page.properties.js.add_builders(self.refresh())
    attrs = self.get_attrs(css_class_names=self.style.get_classes())
    return ('<canvas %s>Your browser does not support the HTML5 canvas tag.</canvas>' % attrs)
def c_generate_net_loadable(activations, weights, biases, prefix):
    """Emit C source for a loadable EmlNet model.

    Generates: per-layer weight/bias declarations, the two static work
    buffers, the EmlNetLayer array, the EmlNet struct, and the
    predict/regress/regress1 entry points.  *prefix* namespaces every
    generated C identifier.
    """
    def _net_decl(name, n_layers, layers_name, buf1_name, buf2_name, buf_length):
        # Static EmlNet struct tying the layer array to the work buffers.
        init = cgen.struct_init(n_layers, layers_name, buf1_name, buf2_name, buf_length)
        return 'static EmlNet {name} = {init};'.format(**locals())

    def _layer_init(name, n_outputs, n_inputs, weights_name, biases_name, activation_func):
        # Struct initializer for one EmlNetLayer.
        return cgen.struct_init(n_outputs, n_inputs, weights_name, biases_name, activation_func)

    cgen.assert_valid_identifier(prefix)
    # The shared buffers must fit the widest layer on either side (inputs or outputs).
    buffer_size = max(dim for w in weights for dim in (w.shape[0], w.shape[1]))
    n_layers = len(activations)
    layers_name = prefix + '_layers'
    buf1_name = prefix + '_buf1'
    buf2_name = prefix + '_buf2'
    head_lines = ['#include <eml_net.h>']
    # Per-layer parameter declarations (constants are emitted separately).
    declarations = c_generate_layer_data(activations, weights, biases, prefix, include_constants=False)
    layer_lines = [d['code'] for d in declarations]
    layers = []
    for layer_no, (act, w) in enumerate(zip(activations, weights)):
        n_in, n_out = w.shape
        layer = f'{prefix}_layer_{layer_no}'
        init = _layer_init(layer, n_out, n_in, f'{layer}_weights', f'{layer}_biases',
                           'EmlNetActivation' + act.title())
        layers.append('\n' + init)
    net_lines = [
        cgen.array_declare(buf1_name, buffer_size, modifiers='static'),
        cgen.array_declare(buf2_name, buffer_size, modifiers='static'),
        cgen.array_declare(layers_name, n_layers, dtype='EmlNetLayer', values=layers),
        _net_decl(prefix, n_layers, layers_name, buf1_name, buf2_name, buffer_size),
    ]
    name = prefix
    predict_function = f'''
int32_t
{name}_predict(const float *features, int32_t n_features)
{{
    return eml_net_predict(&{name}, features, n_features);
}}
'''
    regress_function = f'''
int32_t
{name}_regress(const float *features, int32_t n_features, float *out, int32_t out_length)
{{
    return eml_net_regress(&{name}, features, n_features, out, out_length);
}}
'''
    regress1_function = f'''
float
{name}_regress1(const float *features, int32_t n_features)
{{
    return eml_net_regress1(&{name}, features, n_features);
}}
'''
    all_lines = head_lines + layer_lines + net_lines + [predict_function, regress_function, regress1_function]
    return '\n'.join(all_lines)
class ADB(PlatformUtilBase):
    """Wrapper around the `adb` CLI for a single Android device.

    All commands go through PlatformUtilBase.run with an `adb [-s <device>]`
    prefix.  NOTE(review): `shell()`/`run()` appear to return a list of
    output lines (see get_user/getprop indexing `[0]`), but _setOneCPUFrequency
    calls `.strip()` on the result as if it were a string -- confirm the
    contract of PlatformUtilBase.run.
    """
    def __init__(self, device=None, tempdir=None):
        super(ADB, self).__init__(device, tempdir)
    def run(self, *args, **kwargs):
        """Run an adb command; prepends the adb binary and optional -s <device>."""
        adb = self._addADB()
        return super(ADB, self).run(adb, *args, **kwargs)
    def push(self, src, tgt):
        """Copy *src* to the device at *tgt*, removing any existing target first."""
        self.deleteFile(tgt)
        return self.run('push', src, tgt)
    def pull(self, src, tgt):
        """Copy *src* from the device to local path *tgt*."""
        return self.run('pull', src, tgt)
    def logcat(self, *args, timeout=30, retry=1):
        """Run `adb logcat` with the given extra arguments."""
        return self.run('logcat', *args, timeout=timeout, retry=retry)
    def reboot(self):
        """Reboot the device; returns True on success, False (and logs) on failure."""
        try:
            self.run('reboot')
            return True
        except Exception:
            getLogger().critical(f'Rebooting failure for device {self.device}.', exc_info=True)
            return False
    def root(self, silent=False):
        """Restart adbd with root privilege."""
        return self.restart_adbd(root=True, silent=silent)
    def unroot(self, silent=False):
        """Restart adbd without root privilege."""
        return self.restart_adbd(root=False, silent=silent)
    def user_is_root(self):
        """True when the adbd shell user is 'root'."""
        return (self.get_user() == 'root')
    def get_user(self):
        """Return the shell user name (first output line of `whoami`), or None on error."""
        try:
            return self.shell('whoami', retry=1, silent=True)[0]
        except Exception:
            getLogger().exception('whoami failed.')
            return None
    def restart_adbd(self, root=False, silent=False):
        """Ensure adbd runs with (root=True) or without (root=False) root.

        Returns True when the resulting user matches the request, False on
        failure or when the current user could not be determined.
        """
        user = self.get_user()
        if (user is not None):
            try:
                if (root and (user != 'root')):
                    if (not silent):
                        getLogger().info('Restarting adbd with root privilege.')
                    self.run(['root'], retry=1, silent=True)
                elif ((not root) and (user == 'root')):
                    if (not silent):
                        getLogger().info('Restarting adbd with nonroot privilege.')
                    self.run(['unroot'], retry=1, silent=True)
                else:
                    # Already in the requested state.
                    return True
                # Re-query to verify the restart took effect.
                user = self.get_user()
                if (not silent):
                    getLogger().info(f'adbd user is now: {user}.')
                return ((user == 'root') if root else (user != 'root'))
            except Exception:
                err_text = f"Error while restarting adbd with {('non' if (not root) else '')}root privilege."
                if silent:
                    getLogger().error(err_text, exc_info=True)
                else:
                    getLogger().critical(err_text, exc_info=True)
        return False
    def deleteFile(self, file, **kwargs):
        """Recursively remove *file* on the device."""
        return self.shell(['rm', '-rf', file], **kwargs)
    def shell(self, cmd, **kwargs):
        """Run `adb shell <cmd>`; a 'default' kwarg is returned when output is None."""
        dft = None
        if ('default' in kwargs):
            dft = kwargs.pop('default')
        val = self.run('shell', cmd, **kwargs)
        if ((val is None) and (dft is not None)):
            val = dft
        return val
    def su_shell(self, cmd, **kwargs):
        """Run *cmd* through `su -c` on the device."""
        su_cmd = ['su', '-c']
        su_cmd.extend(cmd)
        return self.shell(su_cmd, **kwargs)
    def getprop(self, property: str, **kwargs) -> str:
        """Read an Android system property; returns '' on any unexpected result.

        NOTE(review): the 'default' kwarg is forwarded to self.run here, not
        self.shell, which is the method that pops it -- confirm run() accepts it.
        """
        if ('default' not in kwargs):
            kwargs['default'] = ['']
        result = self.run(['shell', 'getprop', property], **kwargs)
        if (type(result) is not list):
            getLogger().error(f"""adb.getprop("{property}") unexpectedly returned {type(result)} '{result}'.""")
            return ''
        if (len(result) == 0):
            getLogger().error(f'adb.getprop("{property}") returned an empty list.')
            return ''
        retval = result[0].strip()
        getLogger().info(f"""adb.getprop("{property}") returned '{retval}'.""")
        return retval
    def setprop(self, property, value, **kwargs):
        """Set an Android system property."""
        self.shell(['setprop', property, value], **kwargs)
    def getBatteryProp(self, property: str, silent=True) -> str:
        """Read a battery sysfs attribute (root only); '' when unavailable.

        NOTE(review): the `not` on the `[ -f path ]` test gates the cat on the
        test command's *output* being falsy, not its exit status -- verify intent.
        """
        if self.user_is_root():
            path = ('/sys/class/power_supply/battery/' + property)
            if (not self.shell(['[', '-f', (('"' + path) + '"'), ']'], retry=1, silent=silent)):
                return self.shell(['cat', path], retry=1, silent=silent)[0]
        return ''
    def isRootedDevice(self, silent=True) -> bool:
        """Heuristically detect a rooted device via `id -u` and `which su`."""
        try:
            ret = self.shell(['id', '-u', '2>&1'], retry=1, silent=silent, ignore_status=silent)
            if (not silent):
                getLogger().info(f"id -u returned '{ret}'.")
            if ('0' in ret):
                return True
            ret = self.shell(['which', 'su', '2>&1'], retry=1, silent=silent, ignore_status=silent)
            if (not silent):
                getLogger().info(f"which su returned '{ret}'.")
            is_rooted = ((ret is not None) and (len(ret) > 0) and (ret[0].find('not found') == (- 1)))
            return is_rooted
        except Exception:
            return False
    def setFrequency(self, target):
        """Pin CPU frequencies; *target* is 'min'/'mid'/'max'/digits or a per-cpu dict."""
        if (not self.isRootedDevice()):
            getLogger().warning(f'Cannot set frequency on unrooted device {self.device}.')
            return
        cpus = self._getCPUs()
        for cpu in cpus:
            freq_target = None
            if isinstance(target, dict):
                if (cpu in target):
                    freq_target = target[cpu]
                else:
                    # CPUs absent from the dict default to a mid-range frequency.
                    freq_target = 'mid'
            elif isinstance(target, string_types):
                freq_target = target
            else:
                raise AssertionError('Unsupported frequency target')
            self._setOneCPUFrequency(cpu, freq_target)
    def _addADB(self):
        # Build the adb command prefix, selecting the device when one is set.
        adb = ['adb']
        if self.device:
            adb.extend(['-s', self.device])
        return adb
    def _setOneCPUFrequency(self, cpu, freq_target):
        """Force one CPU to a fixed frequency via the userspace governor sysfs knobs."""
        directory = os.path.join(*['/sys/devices/system/cpu/', cpu, '/'])
        scaling_governor = (directory + 'cpufreq/scaling_governor')
        self.su_shell(['"echo userspace > {}"'.format(scaling_governor)])
        set_scaling_governor = self.su_shell(['cat', scaling_governor]).strip()
        assert (set_scaling_governor == 'userspace'), getLogger().fatal('Cannot set scaling governor to userspace')
        avail_freq = (directory + 'cpufreq/scaling_available_frequencies')
        freqs = self.su_shell(['cat', avail_freq]).strip().split(' ')
        assert (len(freqs) > 0), 'No available frequencies'
        freq = None
        if (freq_target == 'max'):
            freq = freqs[(- 1)]
        elif (freq_target == 'min'):
            freq = freqs[0]
        elif (freq_target == 'mid'):
            freq = freqs[int((len(freqs) / 2))]
        else:
            # A literal frequency must be all digits.
            assert re.match('^\\d+$', freq_target), 'Frequency target is not integer'
            freq = freq_target
        # Clamp min and max to the same value to pin the frequency.
        minfreq = (directory + 'cpufreq/scaling_min_freq')
        self.su_shell(['"echo {} > {}"'.format(freq, minfreq)])
        maxfreq = (directory + 'cpufreq/scaling_max_freq')
        self.su_shell(['"echo {} > {}"'.format(freq, maxfreq)])
        curr_speed = (directory + 'cpufreq/scaling_cur_freq')
        set_freq = self.su_shell(['cat', curr_speed]).strip()
        assert (set_freq == freq), 'Unable to set frequency {} for {}'.format(freq_target, cpu)
        getLogger().info('On {}, set {} frequency to {}'.format(self.device, cpu, freq))
    def _getCPUs(self):
        """List cpu sysfs entries (cpu0, cpu1, ...) present on the device."""
        dirs = self.su_shell(['ls', '/sys/devices/system/cpu/'])
        dirs = dirs.split('\n')
        return [x for x in dirs if re.match('^cpu\\d+$', x)]
def get_host_profile(ip):
    """Fetch a Shodan host profile for *ip* and print it twice.

    Each line goes to the console (ANSI-colorized) and, as plain text, to
    host_profiles/<ip-with-dashes>_profile (append mode).  Shodan API errors
    are caught and reported.

    Fixes vs. the original: the profile file is managed with `with` (it leaked
    on exceptions), the local that shadowed the `os` module is renamed, and the
    heavy print duplication is factored into helpers.  Output is unchanged.
    """
    def _pair(console_line, plain_line, f):
        # One line to the terminal, its uncolored twin to the profile file.
        print(console_line)
        print(plain_line, file=f)

    def _field(label, value, f):
        # 'Label: value' with the label highlighted on the console.
        _pair('\x1b[1;92m' + label + ':\x1b[0;39m ' + str(value),
              label + ': ' + str(value), f)

    def _header(title, f):
        # Blank line, title, blank line -- on both streams, console first.
        for line in ('\n', title, '\n'):
            print(line)
        for line in ('\n', title, '\n'):
            print(line, file=f)

    try:
        filename = ('host_profiles/' + str(ip).replace('.', '-') + '_' + 'profile')
        with open(filename, 'a') as f:
            results = api.host(ip)
            _header(' Basic Host Info ', f)
            for label, key in (('IP', 'ip_str'), ('Vulns', 'vulns'), ('Ports', 'ports'),
                               ('OS', 'os'), ('Domains', 'domains'), ('Hostnames', 'hostnames'),
                               ('ISP', 'isp'), ('Org', 'org'), ('ASN', 'asn'),
                               ('Country', 'country_name')):
                _field(label, results.get(key), f)
            print('\n')
            print('\n', file=f)
            for result in results.get('data'):
                _header(' Host Service ', f)
                for label, key in (('Software', 'product'), ('Version', 'version'), ('Port', 'port')):
                    _field(label, result.get(key), f)
                data = result.get('data')
                print('\n')
                print('\n', file=f)
                _pair('\x1b[1;92mData:\x1b[0;39m\n' + str(data), 'Data:\n' + str(data), f)
            print('\n\x1b[1;92mResults saved to file:\x1b[0;39m ' + filename)
    except shodan.APIError as e:
        print(f'Error: {e}')
class bsn_get_switch_pipeline_reply(bsn_header):
    """Generated (loxigen-style) OpenFlow message: BSN get_switch_pipeline reply.

    Wire format: version/type/length header, xid, experimenter id, subtype,
    then the pipeline name as a NUL-padded 256-byte string.
    NOTE(review): Python-2-era generated code -- pack() joins struct.pack
    results with ''.join and rstrip('\\x00') is applied to a str; it will not
    run unmodified on Python 3.
    """
    version = 6
    type = 4
    experimenter = 6035143
    subtype = 52
    def __init__(self, xid=None, pipeline=None):
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        if (pipeline != None):
            self.pipeline = pipeline
        else:
            self.pipeline = ''
        return
    def pack(self):
        """Serialize to the wire format; the length field is back-patched at index 2."""
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))  # length placeholder
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!L', self.experimenter))
        packed.append(struct.pack('!L', self.subtype))
        packed.append(struct.pack('!256s', self.pipeline))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)
        return ''.join(packed)
    def unpack(reader):
        """Parse a reply from *reader*, validating the fixed header fields.

        NOTE(review): defined without `self` -- likely a @staticmethod whose
        decorator was lost upstream; verify before calling on an instance.
        """
        obj = bsn_get_switch_pipeline_reply()
        _version = reader.read('!B')[0]
        assert (_version == 6)
        _type = reader.read('!B')[0]
        assert (_type == 4)
        _length = reader.read('!H')[0]
        # Restrict further reads to this message's byte range (header already consumed).
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _experimenter = reader.read('!L')[0]
        assert (_experimenter == 6035143)
        _subtype = reader.read('!L')[0]
        assert (_subtype == 52)
        # Strip the NUL padding of the fixed-width pipeline field.
        obj.pipeline = reader.read('!256s')[0].rstrip('\x00')
        return obj
    def __eq__(self, other):
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.pipeline != other.pipeline):
            return False
        return True
    def pretty_print(self, q):
        """Render through the pretty-printer protocol object *q*."""
        q.text('bsn_get_switch_pipeline_reply {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('pipeline = ')
                q.pp(self.pipeline)
            q.breakable()
        q.text('}')
def dlp_data_type(data, fos):
    """Create, update or delete a FortiOS dlp data-type object.

    'present' (or True) pushes the filtered configuration; 'absent' deletes
    by name; anything else fails the Ansible module.
    """
    vdom = data['vdom']
    state = data['state']
    payload = underscore_to_hyphen(filter_dlp_data_type_data(data['dlp_data_type']))
    if state == 'present' or state is True:
        return fos.set('dlp', 'data-type', data=payload, vdom=vdom)
    if state == 'absent':
        return fos.delete('dlp', 'data-type', mkey=payload['name'], vdom=vdom)
    fos._module.fail_json(msg='state must be present or absent!')
def db_reads(dbstate, db_dir):
    """Load the segbit databases for every supported tile type into *dbstate*.

    Each tile family has a left and a right variant; the original listed all
    twelve calls by hand -- this keeps the exact same order, data-driven.
    """
    # interconnect, horizontal clock, CLB (LL/LM), DSP and BRAM columns.
    for tile in ('int', 'hclk', 'clbll', 'clblm', 'dsp', 'bram'):
        for side in ('l', 'r'):
            db_read(dbstate, f'{tile}_{side}', db_dir)
def get_stiff_stress(bandwidth: float, kappa: Union[(float, Sequence)], image_coords: np.ndarray, tangents: np.ndarray, straight_thresh: float=1e-08) -> np.ndarray:
    """Compute per-image stiffness forces along a path of images.

    bandwidth -- positive offset used to displace probe points along the
        perpendicular tangents
    kappa -- stiffness constant(s): a scalar or one value per segment
        (nimages - 1 entries)
    image_coords, tangents -- (nimages, ncoords) arrays of identical shape
    straight_thresh -- tolerance on tangent overlap below which the path is
        treated as straight (zero force everywhere)
    Returns an (nimages, ncoords) array; endpoint rows remain zero.
    NOTE(review): input validation uses `assert`, stripped under -O.
    """
    assert (bandwidth > 0.0)
    assert (image_coords.shape == tangents.shape)
    (nimages, ncoords) = image_coords.shape
    Fstlist = np.zeros((nimages, ncoords))
    # Unit vector pointing from the first image to the last.
    start_end_tangent = (image_coords[(- 1)] - image_coords[0])
    start_end_tangent /= np.linalg.norm(start_end_tangent)
    ovlps = np.einsum('u,vu->v', start_end_tangent, tangents)
    # Every tangent aligned with the end-to-end direction: straight path, no force.
    if (np.abs((ovlps - 1.0)) <= straight_thresh).all():
        return Fstlist
    (tangents_perp, _) = calcEholo_vert(image_coords, tangents)
    # Probe points displaced half a bandwidth on each side of every image.
    offset = ((0.5 * tangents_perp) * bandwidth)
    plus_coords = (image_coords.copy() + offset)
    minus_coords = (image_coords.copy() - offset)
    # Broadcast a scalar kappa to one value per segment (EAFP: non-sequences raise).
    try:
        len(kappa)
    except TypeError:
        kappa = ([kappa] * (nimages - 1))
    assert (len(kappa) == (nimages - 1))
    # Interior images only; the two endpoints keep zero stiffness force.
    for k in range(1, (nimages - 1)):
        Fstlist[k] = Fstiffness_k(k, kappa[(k - 1)], kappa[k], minus_coords, plus_coords, tangents_perp)
    return Fstlist
def test_read_request_body_in_app_after_middleware_calls_stream(test_client_factory: Callable[([ASGIApp], TestClient)]) -> None:
    """After middleware fully consumes request.stream(), the endpoint's
    request.body() must come back empty rather than erroring or re-reading."""
    async def homepage(request: Request):
        # The middleware already drained the stream, so body() yields b''.
        assert ((await request.body()) == b'')
        return PlainTextResponse('Homepage')
    class ConsumingMiddleware(BaseHTTPMiddleware):
        async def dispatch(self, request: Request, call_next: RequestResponseEndpoint):
            # Expect exactly one data chunk then the empty end-of-stream marker.
            expected = [b'a', b'']
            async for chunk in request.stream():
                assert (chunk == expected.pop(0))
            assert (expected == [])
            return (await call_next(request))
    app = Starlette(routes=[Route('/', homepage, methods=['POST'])], middleware=[Middleware(ConsumingMiddleware)])
    client: TestClient = test_client_factory(app)
    response = client.post('/', content=b'a')
    assert (response.status_code == 200)
def log_syslogd2_override_filter(data, fos):
    """Push the log.syslogd2 override-filter configuration to FortiOS."""
    payload = underscore_to_hyphen(
        filter_log_syslogd2_override_filter_data(data['log_syslogd2_override_filter']))
    return fos.set('log.syslogd2', 'override-filter', data=payload, vdom=data['vdom'])
def test_task_set_image(task_definition):
task_definition.set_images(webserver=u'new-image:123', application=u'app-image:latest')
for container in task_definition.containers:
if (container[u'name'] == u'webserver'):
assert (container[u'image'] == u'new-image:123')
if (container[u'name'] == u'application'):
assert (container[u'image'] == u'app-image:latest') |
class CoconutXontribLoader(object):
    """Xontrib that routes xonsh input through the Coconut compiler.

    On load (__call__), monkey-patches xonsh's parsers and context
    transformer with Coconut-aware wrappers; unload() just flips `loaded`
    off so the wrappers become pass-throughs.
    """
    # Class-level shared state; a single loader instance is expected.
    loaded = False
    compiler = None
    runner = None
    timing_info = []
    # NOTE(review): this bare call looks like a decorator for
    # memoized_parse_xonsh that lost its '@' (and possibly part of its
    # name) during extraction -- verify against the upstream source.
    _with_exceptions(128)
    def memoized_parse_xonsh(self, code):
        """Compile a xonsh input string with Coconut, keeping compiler state."""
        return self.compiler.parse_xonsh(code, keep_state=True)
    def compile_code(self, code, log_name='parse'):
        """Compile *code*; return (compiled_source, success).

        On a CoconutException the original code is returned with the first
        line of the formatted error appended as a comment, so xonsh still
        has something syntactically inert to show/execute.
        """
        from coconut.exceptions import CoconutException
        from coconut.terminal import format_error
        from coconut.util import get_clock_time
        from coconut.terminal import logger
        parse_start_time = get_clock_time()
        # Silence the Coconut logger for the duration of the parse.
        (quiet, logger.quiet) = (logger.quiet, True)
        success = False
        try:
            compiled = self.memoized_parse_xonsh(code.strip())
        except CoconutException as err:
            err_str = format_error(err).splitlines()[0]
            compiled = ((code + ' #') + err_str)
        else:
            success = True
        finally:
            logger.quiet = quiet
            self.timing_info.append((log_name, (get_clock_time() - parse_start_time)))
        return (compiled, success)
    def new_try_subproc_toks(self, ctxtransformer, node, *args, **kwargs):
        """Wrapper for ctxtransformer.try_subproc_toks forcing 'eval' mode while loaded."""
        mode = ctxtransformer.mode
        if self.loaded:
            ctxtransformer.mode = 'eval'
        try:
            return ctxtransformer.__class__.try_subproc_toks(ctxtransformer, node, *args, **kwargs)
        finally:
            # Always restore the transformer's original mode.
            ctxtransformer.mode = mode
    def new_parse(self, parser, code, mode='exec', *args, **kwargs):
        """Wrapper for parser.parse that compiles Coconut first when loaded."""
        if (self.loaded and (mode in enabled_xonsh_modes)):
            (code, _) = self.compile_code(code)
        return parser.__class__.parse(parser, code, *args, mode=mode, **kwargs)
    def new_ctxvisit(self, ctxtransformer, node, inp, ctx, mode='exec', *args, **kwargs):
        """Wrapper for ctxtransformer.ctxvisit.

        Rebuilds *inp* so each compiled output line maps back to its
        originating logical source line (via Coconut's line-number
        comments), keeping xonsh's context transformations aligned with
        what the user actually typed.
        """
        if (self.loaded and (mode in enabled_xonsh_modes)):
            from xonsh.tools import get_logical_line
            from coconut.terminal import logger
            from coconut.compiler.util import extract_line_num_from_comment
            (compiled, success) = self.compile_code(inp, log_name='ctxvisit')
            if success:
                original_lines = tuple(inp.splitlines())
                # original line number -> not-yet-consumed ';'-separated pieces
                remaining_ln_pieces = {}
                new_inp_lines = []
                last_ln = 1
                for compiled_line in compiled.splitlines():
                    ln = extract_line_num_from_comment(compiled_line, default=(last_ln + 1))
                    try:
                        (line, _, _) = get_logical_line(original_lines, (ln - 1))
                    except IndexError:
                        logger.log_exc()
                        line = original_lines[(- 1)]
                    remaining_pieces = remaining_ln_pieces.get(ln)
                    if (remaining_pieces is None):
                        # First visit of this source line: split on ';' outside of
                        # string literals so each piece pairs with one compiled line.
                        with self.compiler.inner_environment():
                            line_no_strs = self.compiler.remove_strs(line, inner_environment=False)
                        if ((line_no_strs is not None) and (';' in line_no_strs)):
                            remaining_pieces = [self.compiler.reformat(piece, ignore_errors=True) for piece in line_no_strs.split(';')]
                        else:
                            remaining_pieces = [line]
                    if remaining_pieces:
                        new_line = remaining_pieces.pop(0)
                    else:
                        new_line = ''
                    remaining_ln_pieces[ln] = remaining_pieces
                    new_inp_lines.append(new_line)
                    last_ln = ln
                inp = '\n'.join(new_inp_lines)
            inp += '\n'
        return ctxtransformer.__class__.ctxvisit(ctxtransformer, node, inp, ctx, mode, *args, **kwargs)
    def __call__(self, xsh, **kwargs):
        """Load the xontrib into the xonsh session *xsh*; returns the runner's vars."""
        from coconut.util import get_clock_time
        start_time = get_clock_time()
        # Lazily build (and warm up) the Coconut compiler and runner once.
        if (self.compiler is None):
            from coconut.compiler import Compiler
            self.compiler = Compiler(**coconut_kernel_kwargs)
            self.compiler.warm_up(enable_incremental_mode=interpreter_uses_incremental)
        if (self.runner is None):
            from coconut.command.util import Runner
            self.runner = Runner(self.compiler)
        self.runner.update_vars(xsh.ctx)
        # Patch the main parser, the transformer hooks, and the ctx parser.
        main_parser = xsh.execer.parser
        main_parser.parse = MethodType(self.new_parse, main_parser)
        ctxtransformer = xsh.execer.ctxtransformer
        ctxtransformer.try_subproc_toks = MethodType(self.new_try_subproc_toks, ctxtransformer)
        ctxtransformer.ctxvisit = MethodType(self.new_ctxvisit, ctxtransformer)
        ctx_parser = ctxtransformer.parser
        ctx_parser.parse = MethodType(self.new_parse, ctx_parser)
        self.timing_info.append(('load', (get_clock_time() - start_time)))
        self.loaded = True
        return self.runner.vars
    def unload(self, xsh):
        """Deactivate the xontrib; patched hooks then act as pass-throughs."""
        if (not self.loaded):
            from coconut.terminal import logger
            logger.warn('attempting to unload Coconut xontrib but it was already unloaded')
        self.loaded = False
def test_multiple_regex():
    """Several ignore patterns must each apply, without over-matching."""
    ignored = get_ignore([], ['\\.md', 'one\\.rst'])
    cases = [
        ('amazing-file.txt', False),
        ('module.pyc', False),
        ('one.rst', True),
        ('two.rst', False),
        ('one.md', True),
        ('foo/random.txt', False),
        ('bar/__pycache__/file.pyc', False),
    ]
    for path, should_ignore in cases:
        assert bool(ignored(path)) is should_ignore
def extractVannie03WordpressCom(item):
    """Map a vannie03.wordpress.com release title to a series message.

    Returns None for previews/unparseable titles, a release message for a
    known series prefix, and False when no prefix matched.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    title_lower = item['title'].lower()
    if not (chp or vol) or 'preview' in title_lower:
        return None
    titlemap = [('MPR Chapter ', 'My Pervert Roommate', 'translated')]
    for prefix, series, tl_type in titlemap:
        if prefix.lower() in title_lower:
            return buildReleaseMessageWithType(item, series, vol, chp,
                                               frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def test_moving_std_returns_correct_array_with_1d_uint8_data(data_uint8):
    """moving_std with window 10 must match a naive per-window std computation."""
    window = 10
    moved_data = scared.signal_processing.moving_std(data_uint8, window)
    # Naive reference: std of every length-`window` sliding slice
    # (comprehension instead of the original append loop -- PERF401).
    reference_data = np.array([
        data_uint8[i:(i + window)].std()
        for i in range((data_uint8.shape[-1] - window) + 1)
    ])
    assert (max_diff_percent(moved_data, reference_data) < 1e-06)
class DashboardConfig(BaseModel):
    """Pydantic model for a dashboard: panels, tabs, and the tab->panel mapping."""
    name: str
    panels: List[DashboardPanel]
    # Mutable class defaults are safe on pydantic models (copied per instance).
    tabs: List[DashboardTab] = []
    # Maps str(tab.id) -> list of str(panel.id).
    tab_id_to_panel_ids: Dict[(str, List[str])] = {}
    def add_panel(self, panel: DashboardPanel, *, tab: Optional[Union[(str, uuid.UUID, DashboardTab)]]=None, create_if_not_exists=True):
        """Append *panel*; optionally attach it to a tab given by object, id or title."""
        self.panels.append(panel)
        if (tab is None):
            return
        result_tab = self._get_or_create_tab(tab, create_if_not_exists)
        tab_id_str = str(result_tab.id)
        panel_id_str = str(panel.id)
        # Register the panel under the tab, avoiding duplicates.
        tab_panel_ids = self.tab_id_to_panel_ids.get(tab_id_str, [])
        if (panel_id_str not in tab_panel_ids):
            tab_panel_ids.append(panel_id_str)
        self.tab_id_to_panel_ids[tab_id_str] = tab_panel_ids
    def create_tab(self, title) -> DashboardTab:
        """Return the tab titled *title*, creating it if it does not exist."""
        return self._get_or_create_tab(title)
    def _raise_if_tab_title_exists(self, tab_title: Optional[str]):
        # Guard helper: ValueError on duplicate tab titles.
        if any(((tab.title == tab_title) for tab in self.tabs)):
            raise ValueError(f'tab with title "{tab_title}" already exists')
    def _find_tab_by_id(self, tab_id: uuid.UUID) -> Optional[DashboardTab]:
        """First tab with the given id, or None."""
        tabs = [t for t in self.tabs if (t.id == tab_id)]
        if (len(tabs) == 0):
            return None
        return tabs[0]
    def _find_tab_by_title(self, title: str) -> Optional[DashboardTab]:
        """First tab with the given title, or None."""
        tabs = [t for t in self.tabs if (t.title == title)]
        if (len(tabs) == 0):
            return None
        return tabs[0]
    def _get_or_create_tab(self, tab_descriptor: Union[(DashboardTab, uuid.UUID, str)], create_if_not_exists=True) -> DashboardTab:
        """Resolve *tab_descriptor* (tab object, UUID, UUID-string or title) to a tab.

        A string is tried as a UUID first, then as a title.  Only string and
        DashboardTab descriptors can trigger creation; a bare UUID that is
        not found raises ValueError.
        """
        tab: Optional[DashboardTab] = None
        to_create: Optional[DashboardTab] = None
        if isinstance(tab_descriptor, DashboardTab):
            tab = self._find_tab_by_id(tab_descriptor.id)
            to_create = tab_descriptor
        if isinstance(tab_descriptor, str):
            try:
                tab = self._find_tab_by_id(uuid.UUID(tab_descriptor))
            except ValueError:
                # Not a UUID string: fall back to a title lookup.
                tab = self._find_tab_by_title(tab_descriptor)
            # NOTE(review): if the string parsed as a UUID but matched no tab,
            # a new tab is created whose *title* is the UUID string -- verify
            # this is intended.
            to_create = DashboardTab(title=tab_descriptor)
        if isinstance(tab_descriptor, uuid.UUID):
            tab = self._find_tab_by_id(tab_descriptor)
        if (tab is not None):
            return tab
        if ((not create_if_not_exists) or (to_create is None)):
            raise ValueError(f'tab "{tab_descriptor}" not found')
        self.tabs.append(to_create)
        return to_create
    def build(self, data_storage: 'DataStorage', project_id: ProjectID, timestamp_start: Optional[datetime.datetime], timestamp_end: Optional[datetime.datetime]):
        """Materialize the dashboard: build each panel's widget for the time window."""
        widgets = [p.safe_build(data_storage, project_id, timestamp_start, timestamp_end) for p in self.panels]
        return DashboardInfo(name=self.name, widgets=widgets)
def _run_server_isolated(process_factory, host, port):
    """Generator fixture body: start the server process, yield it, then shut
    it down gracefully (CTRL+C on Windows, SIGTERM elsewhere); if the process
    ignores the signal for 10s, kill it and fail the test."""
    def _force_kill(proc):
        # Last resort: the process ignored the polite shutdown signal.
        print('\n[Killing stubborn server process...]')
        proc.kill()
        proc.communicate()
        pytest.fail('Server process did not exit in a timely manner and had to be killed.')

    print('\n[Starting server process...]')
    server = process_factory(host, port)
    yield server
    if _WIN32:
        import signal
        print('\n[Sending CTRL+C (SIGINT) to server process...]')
        server.send_signal(signal.CTRL_C_EVENT)
        try:
            server.wait(timeout=10)
        except KeyboardInterrupt:
            # The CTRL_C_EVENT can also reach this process; ignore it.
            pass
        except subprocess.TimeoutExpired:
            _force_kill(server)
    else:
        print('\n[Sending SIGTERM to server process...]')
        server.terminate()
        try:
            server.communicate(timeout=10)
        except subprocess.TimeoutExpired:
            _force_kill(server)
('chromadb.api.models.Collection.Collection.add', MagicMock)
def test_query_with_where_in_params(app):
    """app.query must forward the query text and `where` clause to retrieval
    and return the mocked LLM answer."""
    with patch.object(app, '_retrieve_from_database') as retrieve_mock, \
            patch.object(app.llm, 'get_llm_model_answer') as answer_mock:
        retrieve_mock.return_value = ['Test context']
        answer_mock.return_value = 'Test answer'
        where_clause = {'attribute': 'value'}
        answer = app.query('Test query', where=where_clause)
        assert answer == 'Test answer'
        _, kwargs = retrieve_mock.call_args
        assert kwargs.get('input_query') == 'Test query'
        assert kwargs.get('where') == where_clause
        answer_mock.assert_called_once()
(scope='session', autouse=True)
def hide_window(request):
    """Session fixture body: force Qt to render off-screen unless --show-gui
    was passed; restores QT_QPA_PLATFORM afterwards."""
    if request.config.getoption('--show-gui'):
        yield
        return
    previous = os.environ.get('QT_QPA_PLATFORM')
    # macOS uses the 'offscreen' plugin; other platforms use 'minimal'.
    os.environ['QT_QPA_PLATFORM'] = 'offscreen' if sys.platform == 'darwin' else 'minimal'
    yield
    if previous is None:
        del os.environ['QT_QPA_PLATFORM']
    else:
        os.environ['QT_QPA_PLATFORM'] = previous
class Lists():
def __init__(self, ui):
    # Keep a handle on the enclosing page object from the *ui* entry point.
    self.page = ui.page
def select(self, records=None, html_code: str=None, selected: str=None, width: types.SIZE_TYPE=(100, '%'), height: types.SIZE_TYPE=(None, '%'), profile: types.PROFILE_TYPE=None, multiple: bool=False, options: dict=None) -> html.HtmlSelect.Select:
    """Create an HTML select component.

    records -- list of option dicts, or a comma-separated string of values
        (each value then becomes a pre-selected option)
    selected -- value(s) to mark selected; compared with `in` for multiple
        selects and with `==` for single selects
    multiple -- truthy enables multi-select; a dict passes through as the
        multiple-configuration (otherwise defaults to {'max': 2})
    Note: the caller's *options* dict is mutated in place.
    """
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='%')
    records = (records or [])
    if (not isinstance(records, list)):
        # Expand a comma-separated string into pre-selected value records.
        records = [{'text': v, 'value': v, 'selected': True} for v in records.split(',')]
    options = (options or {})
    options['selected'] = selected
    if multiple:
        if (not isinstance(multiple, dict)):
            multiple = {'max': 2}
        if (selected is not None):
            for rec in records:
                # NOTE(review): membership test -- presumably *selected* is a
                # collection of values here (string membership would match
                # substrings); confirm against callers.
                if (rec['value'] in selected):
                    rec['selected'] = True
        # Multi-select uses the page's check icon for ticked entries.
        icon_details = self.page.icons.get('check')
        options['iconBase'] = 'iconBase'
        options['tickIcon'] = icon_details['icon']
        html_select = html.HtmlSelect.Select(self.page, records, html_code, width, height, profile, multiple, options)
        html.Html.set_component_skin(html_select)
        return html_select
    if (selected is not None):
        for rec in records:
            if (rec['value'] == selected):
                rec['selected'] = True
    html_select = html.HtmlSelect.Select(self.page, records, html_code, width, height, profile, multiple, options)
    html.Html.set_component_skin(html_select)
    return html_select
def lookup(self, lookup=None, html_code: str=None, width: types.SIZE_TYPE=(100, '%'), height: types.SIZE_TYPE=(None, '%'), profile: types.PROFILE_TYPE=None, multiple: bool=False, options: dict=None) -> html.HtmlSelect.Lookup:
    """Create a Lookup select component driven by the *lookup* mapping."""
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='%')
    opts = {} if options is None else options
    component = html.HtmlSelect.Lookup(self.page, lookup, html_code, width, height, profile, multiple, opts)
    html.Html.set_component_skin(component)
    return component
def item(self, text: str=None, tag: str=None, options: dict=None) -> html.HtmlList.Li:
    """Create a standalone list item (<li>); *tag* overrides the item type."""
    opts = options or {}
    if tag is not None:
        opts['item_type'] = tag
    component = html.HtmlList.Li(self.page, text, options=opts)
    html.Html.set_component_skin(component)
    return component
def list(self, data=None, color: str=None, width: types.SIZE_TYPE=('auto', ''), height: types.SIZE_TYPE=(None, 'px'), html_code: str=None, helper: str=None, options: dict=None, profile: types.PROFILE_TYPE=None):
    """Create a plain HTML list without browser bullet styling."""
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    merged_options = {'item_type': 'li'}
    if options is not None:
        merged_options.update(options)
    component = html.HtmlList.List(self.page, data or [], color, width, height,
                                   html_code, helper, merged_options, profile)
    # Items style themselves; drop the default bullets.
    component.css({'list-style': 'none'})
    html.Html.set_component_skin(component)
    return component
def drop(self, data=None, color=None, width: types.SIZE_TYPE=(100, '%'), height: types.SIZE_TYPE=(None, 'px'), html_code: str=None, helper: str=None, options: dict=None, profile: types.PROFILE_TYPE=None):
    """Create a drop-target list styled with a dashed border."""
    component = self.list(data, color, width, height, html_code, helper, options, profile)
    component.style.css.min_height = 40
    component.css({'display': 'inline-block', 'margin-top': '5px',
                   'border': ('1px dashed %s' % self.page.theme.greys[4])})
    if component.style.css.width.endswith('%') and component.style.css.margin.endswith('px'):
        # Shrink the percent width by the pixel margins so the box stays inside its parent.
        margin_px = int(component.style.css.margin[:-2])
        component.style.css.width = 'calc({} - {}px)'.format(component.style.css.width, 2 * margin_px)
    component.style.css.padding = 5
    html.Html.set_component_skin(component)
    return component
def items(self, records: list=None, width: types.SIZE_TYPE=(100, '%'), height: types.SIZE_TYPE=('auto', ''), options: dict=None, html_code: str=None, profile: types.PROFILE_TYPE=None, helper: str=None) -> html.HtmlList.Items:
    """Create a generic items list; rendering is driven by options['items_type']."""
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    merged = {'items_type': 'text'}
    if options is not None:
        merged.update(options)
    merged['li_height'] = Defaults_css.Font.header_size
    component = html.HtmlList.Items(self.page, records or [], width, height,
                                    merged, html_code, profile, helper)
    component.css({'list-style-type': 'none'})
    if (height[0] is not None) and (height[1] == 'px'):
        # A fixed pixel height scrolls instead of overflowing.
        component.css({'overflow-y': 'auto'})
    html.Html.set_component_skin(component)
    return component
def links(self, records=None, width: types.SIZE_TYPE=(100, '%'), height: types.SIZE_TYPE=('auto', ''), options: dict=None, html_code: str=None, profile: types.PROFILE_TYPE=None, helper: str=None):
    """Items list whose entries render as hyperlinks."""
    component = self.items(records, width, height, options, html_code, profile, helper)
    component.options.items_type = 'link'
    html.Html.set_component_skin(component)
    return component
def icons(self, records=None, width: types.SIZE_TYPE=(100, '%'), height: types.SIZE_TYPE=('auto', ''), options: dict=None, html_code: str=None, profile: types.PROFILE_TYPE=None, helper: str=None):
    """Items list whose entries render as icons."""
    component = self.items(records, width, height, options, html_code, profile, helper)
    component.options.items_type = 'icon'
    html.Html.set_component_skin(component)
    return component
def pills(self, records=None, width: types.SIZE_TYPE=(100, '%'), height: types.SIZE_TYPE=(None, '%'), options: dict=None, html_code: str=None, profile: types.PROFILE_TYPE=None, helper: str=None):
    """Inline 'pill' items: rounded, padded labels on a grey background."""
    component = self.items(records, width, height, options, html_code, profile, helper)
    component.options.li_style = {'display': 'inline-block', 'margin': '0 2px',
                                  'padding': '1px 4px', 'border-radius': '10px',
                                  'background': self.page.theme.greys[2]}
    html.Html.set_component_skin(component)
    return component
def box(self, records: list=None, width: types.SIZE_TYPE=(100, '%'), height: types.SIZE_TYPE=(None, '%'), options: dict=None, html_code: str=None, profile: types.PROFILE_TYPE=None, helper: str=None):
    """Create a 'box' items list.

    Fix: default *records* to an empty list (consistent with `items`) so the
    underlying component never receives None.
    """
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='%')
    dflt_options = {'items_type': 'box'}
    if (options is not None):
        dflt_options.update(options)
    html_item = html.HtmlList.Items(self.page, (records or []), width, height, dflt_options, html_code, profile, helper)
    html_item.css({'list-style-type': 'none'})
    html_item.style.css.padding_left = '15px'
    html.Html.set_component_skin(html_item)
    return html_item
def numbers(self, data=None, width: types.SIZE_TYPE=('auto', ''), height: types.SIZE_TYPE=(None, 'px'), html_code: str=None, options: dict=None, profile: types.PROFILE_TYPE=None, helper: str=None):
    """Ordered list rendered with decimal numbering."""
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    component = self.items(data, width, height, options, html_code, profile, helper)
    component.style.css.list_style_type = 'decimal'
    component.style.css.margin_left = 20
    html.Html.set_component_skin(component)
    return component
def alpha(self, data=None, width: types.SIZE_TYPE=('auto', ''), height: types.SIZE_TYPE=(None, 'px'), html_code: str=None, options: dict=None, profile: types.PROFILE_TYPE=None, helper: str=None):
    """Ordered list rendered with lower-case letters."""
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    component = self.items(data, width, height, options, html_code, profile, helper)
    component.style.css.list_style_type = 'lower-alpha'
    component.style.css.margin_left = 20
    html.Html.set_component_skin(component)
    return component
def roman(self, data=None, width: types.SIZE_TYPE=('auto', ''), height: types.SIZE_TYPE=(None, 'px'), html_code: str=None, options: dict=None, profile: types.PROFILE_TYPE=None, helper: str=None):
    """Build an ordered list rendered with lower-case roman (i., ii., ...) markers.

    Fix: *options* was annotated ``str`` although it is passed straight to
    ``self.items`` as an options dict, like every sibling method (numbers,
    alpha, points, ...); the annotation is corrected to ``dict``.
    """
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    html_list = self.items(data, width, height, options, html_code, profile, helper)
    html_list.style.css.list_style_type = 'lower-roman'
    # Indent so the markers are not clipped by the container.
    html_list.style.css.margin_left = 20
    html.Html.set_component_skin(html_list)
    return html_list
def points(self, data=None, width: types.SIZE_TYPE=('auto', ''), height: types.SIZE_TYPE=(None, 'px'), align: str=None, html_code: str=None, options: dict=None, profile: types.PROFILE_TYPE=None, helper: str=None):
    """Build a bullet list rendered with hollow circle markers, optionally centred."""
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    component = self.items(data, width, height, options, html_code, profile, helper)
    component.style.css.list_style_type = 'circle'
    component.style.css.margin_left = 20
    if align == 'center':
        # auto margins on a block element centre the whole list.
        component.style.css.margin = 'auto'
        component.style.css.display = 'block'
    html.Html.set_component_skin(component)
    return component
def disc(self, data=None, width: types.SIZE_TYPE=('auto', ''), height: types.SIZE_TYPE=(None, 'px'), html_code: str=None, helper: str=None, options: dict=None, profile: types.PROFILE_TYPE=None):
    """Build a bullet list rendered with filled disc markers."""
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    component = self.items(data, width, height, options, html_code, profile, helper)
    component.style.css.list_style_type = 'disc'
    # Indent so the markers are not clipped by the container.
    component.style.css.margin_left = 20
    html.Html.set_component_skin(component)
    return component
def squares(self, data=None, width: types.SIZE_TYPE=('auto', ''), height: types.SIZE_TYPE=(None, 'px'), html_code: str=None, helper: str=None, options: dict=None, profile: types.PROFILE_TYPE=None):
    """Build a bullet list rendered with square markers."""
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    component = self.items(data, width, height, options, html_code, profile, helper)
    component.style.css.list_style_type = 'square'
    # Indent so the markers are not clipped by the container.
    component.style.css.margin_left = 20
    html.Html.set_component_skin(component)
    return component
def groups(self, data=None, categories=None, color: str=None, width: types.SIZE_TYPE=('auto', ''), height: types.SIZE_TYPE=(None, 'px'), html_code: str=None, helper: str=None, options: dict=None, profile: types.PROFILE_TYPE=None):
    """Build a grouped (categorised) list component."""
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    data = data or []
    categories = categories or ['']
    if data:
        if isinstance(data[0], list):
            # One sub-list per group: give each group an empty category label.
            categories = [''] * len(data)
        else:
            # A flat list is treated as a single group.
            data = [data]
    component = html.HtmlList.Groups(self.page, data, None, categories, color, width, height, html_code, helper, options, profile)
    html.Html.set_component_skin(component)
    return component
def tree(self, data=None, width: types.SIZE_TYPE=('auto', ''), height: types.SIZE_TYPE=(None, 'px'), html_code: str=None, helper: str=None, options: dict=None, profile: types.PROFILE_TYPE=None) -> html.HtmlTrees.Tree:
    """Build a tree component from nested *data*."""
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    component = html.HtmlTrees.Tree(self.page, data or [], width, height, html_code, helper, options, profile)
    html.Html.set_component_skin(component)
    return component
def dropdown(self, records=None, text: str='', width: types.SIZE_TYPE=('auto', ''), height: types.SIZE_TYPE=(None, 'px'), html_code: str=None, helper: str=None, options: dict=None, profile: types.PROFILE_TYPE=None):
    """Build an inline dropdown menu component."""
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    # Caller options override the default menu width (px).
    settings = {'width': 90}
    settings.update(options or {})
    component = html.HtmlTrees.DropDown(self.page, records, text, width, height, html_code, helper, settings, profile)
    component.style.css.display = 'inline-block'
    html.Html.set_component_skin(component)
    return component
def checks(self, data=None, width: types.SIZE_TYPE=('auto', ''), height: types.SIZE_TYPE=(None, 'px'), html_code: str=None, helper: str=None, options: dict=None, profile: types.PROFILE_TYPE=None):
    """Build an items list rendered as checkboxes (unchecked by default)."""
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    # Caller options override the defaults.
    settings = {'checked': False, 'items_type': 'check'}
    settings.update(options or {})
    component = html.HtmlList.Items(self.page, data or [], width, height, settings, html_code, profile, helper)
    component.css({'list-style': 'none'})
    html.Html.set_component_skin(component)
    return component
def badges(self, data=None, width: types.SIZE_TYPE=('auto', ''), height: types.SIZE_TYPE=(None, 'px'), html_code: str=None, helper: str=None, options: dict=None, profile: types.PROFILE_TYPE=None):
    """Build an items list rendered with the 'badge' item template.

    Fix: ``'items_type': 'badge'`` was nested inside the ``'badge'`` styling
    sub-dict, unlike every sibling method (checks, icons, radios) which sets
    ``items_type`` at the top level of the options dict. It is now a top-level
    option so the Items component selects the badge template.
    """
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    dft_options = {'badge': {'background': 'red', 'color': 'white'}, 'items_type': 'badge'}
    if (options is not None):
        dft_options.update(options)
    html_list = html.HtmlList.Items(self.page, (data or []), width, height, dft_options, html_code, profile, helper)
    html_list.css({'list-style': 'none'})
    html.Html.set_component_skin(html_list)
    return html_list
def icons(self, data=None, width: types.SIZE_TYPE=('auto', ''), height: types.SIZE_TYPE=(None, 'px'), html_code: str=None, helper: str=None, options: dict=None, profile: types.PROFILE_TYPE=None):
    """Build an items list where each entry is prefixed with an icon (check by default)."""
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    icon_details = self.page.icons.get('check')
    # Caller options override the defaults.
    settings = {'icon': icon_details['icon'], 'markdown': True, 'items_type': 'icon'}
    settings.update(options or {})
    component = html.HtmlList.Items(self.page, data or [], width, height, settings, html_code, profile, helper)
    component.css({'list-style': 'none'})
    html.Html.set_component_skin(component)
    return component
def radios(self, data=None, group_name: str='group', width: types.SIZE_TYPE=('auto', ''), height: types.SIZE_TYPE=(None, 'px'), html_code: str=None, helper: str=None, options: dict=None, profile: types.PROFILE_TYPE=None):
    """Build an items list rendered as radio buttons sharing *group_name*."""
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    # Caller options override the defaults.
    settings = {'items_type': 'radio'}
    settings.update(options or {})
    component = html.HtmlList.Items(self.page, data or [], width, height, settings, html_code, profile, helper)
    # All radios in the same group are mutually exclusive.
    component.options.group = group_name
    component.css({'list-style': 'none'})
    html.Html.set_component_skin(component)
    return component
def brackets(self, records=None, width: types.SIZE_TYPE=(100, '%'), height: types.SIZE_TYPE=(550, 'px'), options: dict=None, profile: types.PROFILE_TYPE=None):
    """Build a tournament-brackets component from *records*."""
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    settings = {}
    settings.update(options or {})
    component = html.HtmlList.ListTournaments(self.page, records, width, height, settings, profile)
    html.Html.set_component_skin(component)
    return component
def chips(self, items=None, category: str='group', placeholder: str='', width: types.SIZE_TYPE=(100, '%'), height: types.SIZE_TYPE=(60, 'px'), html_code: str=None, helper: str=None, options: dict=None, profile: types.PROFILE_TYPE=None):
    """Build a filter-chips component: an input box plus removable chip items.

    :param items: Initial chip entries.
    :param category: Category label attached to each chip.
    :param placeholder: Placeholder text for the input field.
    """
    width = Arguments.size(width, unit='%')
    height = Arguments.size(height, unit='px')
    # Default chip styling; 'visible' controls whether the category label is
    # displayed on each chip.
    dfl_options = {'item_css': {'padding': '5px', 'border': ('1px solid %s' % self.page.theme.success.light), 'border-radius': '5px', 'margin': '2px', 'width': 'auto', 'display': 'inline-block', 'background': 'inherit', 'white-space': 'nowrap'}, 'category': category, 'visible': True, 'value_css': {'font-size': self.page.body.style.globals.font.normal(), 'font-weight': 'bold', 'vertical-align': 'bottom'}, 'category_css': {'display': 'inline', 'margin-right': '2px', 'vertical-align': 'top', 'font-size': self.page.body.style.globals.font.normal((- 3))}, 'icon_css': {'color': self.page.theme.success.base, 'margin-left': '5px', 'cursor': 'pointer'}}
    # Hide the category label when the default 'group' category is used
    # (a JS object exposing toStr is kept visible).
    if ((not hasattr(category, 'toStr')) and (category == 'group')):
        dfl_options['visible'] = False
    if (options is not None):
        dfl_options.update(options)
    html_f = html.HtmlEvent.Filters(self.page, (items or []), width, height, html_code, helper, dfl_options, profile)
    html_f.input.attr['placeholder'] = placeholder
    html.Html.set_component_skin(html_f)
    return html_f
def menu(self, component, title: Union[(str, dict)]=None, add: bool=False, height=(18, 'px'), save_funcs=None, update_funcs=None, editable: bool=False, options: dict=None, profile: types.PROFILE_TYPE=None, checks: tuple=('fas fa-check-square', 'far fa-square')):
    """Attach a small command menu (Add / Check / Save icons) to *component*.

    :param component: The list component the menu operates on.
    :param add: When True, include an "Add" command that appends an empty item.
    :param save_funcs: Extra JS statements to run when Save is clicked.
    :param checks: (checked, unchecked) icon class names for the Check toggle.
    """
    commands = ([('Add ', 'fas fa-plus')] if add else [])
    # Check-type item lists additionally get a select-all / unselect-all toggle.
    if (getattr(component.options, 'items_type', '') in ('check',)):
        commands.append(('Check', checks))
    options = (options or {})
    menu_items = []
    for (typ, icon) in commands:
        if icon:
            # A (checked, unchecked) tuple starts in the first (checked) state.
            if isinstance(icon, tuple):
                icon = icon[0]
            r = self.page.ui.icons.awesome(icon, text=typ, align='center', height=height, width=(35, 'px'), options=options, profile=profile)
            r.span.style.css.line_height = r.style.css.height
            r.icon.style.css.font_factor(options.get('icon_size', Defaults_css.MENU_ICON_SIZE))
            r.style.css.font_factor(options.get('icon_size', Defaults_css.MENU_ICON_SIZE))
            r.span.style.css.margin = '0 0 -3px -3px'
            r.icon.style.add_classes.div.color_hover()
            if (typ == 'Add '):
                # Append an empty item and flash the button background for 2s.
                r.click([component.dom.add(''), r.dom.css({'background': self.page.theme.greys[2], 'border-radius': '10px'}).r, self.page.js.window.setTimeout([r.dom.css({'background': 'none'}).r], 2000)])
            if (typ == 'Check'):
                # Toggle between select-all and unselect-all, swapping label and icon.
                r.click([r.dom.css({'background': self.page.theme.greys[2], 'border-radius': '10px'}).r, self.page.js.window.setTimeout([r.dom.css({'background': 'none'}).r], 2000), self.page.js.if_((r.span.dom.innerText() == 'Check'), [r.span.build('None'), component.dom.selectAll(), r.icon.build(checks[1])]).else_([r.span.build('Check'), component.dom.unSelectAll(), r.icon.build(checks[0])])], profile=profile)
            menu_items.append(r)
    if (save_funcs is not None):
        r = self.page.ui.icons.awesome('save', align='center', text='Save', height=height, width=(35, 'px'), options=options, profile=profile)
        r.span.style.css.line_height = r.style.css.height
        r.icon.style.css.font_factor(options.get('icon_size', Defaults_css.MENU_ICON_SIZE))
        r.style.css.font_factor(options.get('icon_size', Defaults_css.MENU_ICON_SIZE))
        r.span.style.css.margin = '0 2px -3px -3px'
        r.icon.style.add_classes.div.color_hover()
        # Flash green for 2s, then run the caller-supplied save statements.
        r.click(([r.dom.css({'background': self.page.theme.success.light, 'border-radius': '10px'}).r, self.page.js.window.setTimeout([r.dom.css({'background': 'none'}).r], 2000)] + save_funcs), profile=profile)
        menu_items.append(r)
    # NOTE(review): the first and last branches pass identical arguments
    # (editable is forwarded either way) — presumably kept explicit for
    # readability; confirm whether ui.menu treats editable=False specially.
    if (not editable):
        container = self.page.ui.menu(component, update_funcs=update_funcs, title=title, menu_items=menu_items, editable=editable, options=options)
    elif (editable is True):
        container = self.page.ui.menu(component, update_funcs=update_funcs, title=title, menu_items=menu_items, options=options)
    else:
        container = self.page.ui.menu(component, update_funcs=update_funcs, title=title, menu_items=menu_items, editable=editable, options=options)
    html.Html.set_component_skin(container)
    return container
def filters(self, items=None, button=None, width=('auto', ''), height=(60, 'px'), html_code: str=None, helper: str=None, options: dict=None, autocomplete: bool=False, profile: Union[(bool, dict)]=None):
    """Build a composite filter bar: category select + value input + add button
    + clear icon, wired to a filters panel underneath.

    :param button: Optional custom "add" button; a default one is created when None.
    :param autocomplete: When True, use an autocomplete input instead of a plain one.
    """
    options = (options or {})
    container = self.page.ui.div(width=width)
    # Category selector; derived html_code keeps sub-components addressable.
    container.select = self.page.ui.select(html_code=(('%s_select' % html_code) if (html_code is not None) else html_code))
    container.select.attr['data-width'] = ('%spx' % options.get('width', Defaults.TEXTS_SPAN_WIDTH))
    container.select.options.liveSearch = True
    if autocomplete:
        container.input = self.page.ui.inputs.autocomplete(html_code=(('%s_input' % html_code) if (html_code is not None) else html_code), width=(Defaults.INPUTS_MIN_WIDTH, 'px'), options={'select': True})
    else:
        container.input = self.page.ui.input(html_code=(('%s_input' % html_code) if (html_code is not None) else html_code), width=(Defaults.INPUTS_MIN_WIDTH, 'px'), options={'select': True})
    container.input.style.css.text_align = 'left'
    container.input.style.css.padding_left = 5
    container.input.style.css.margin_left = 10
    if (button is None):
        button = self.page.ui.buttons.colored('add')
        button.style.css.margin_left = 10
    container.button = button
    container.clear = self.page.ui.icon('times')
    container.clear.style.css.color = self.page.theme.danger.base
    container.clear.style.css.margin_left = 20
    container.clear.tooltip('Clear all filters')
    # First row: controls; second row: the filters panel itself.
    container.add(self.page.ui.div([container.select, container.input, container.button, container.clear]))
    container.filters = self.page.ui.panels.filters(items, container.select.dom.content, (100, '%'), height, html_code, helper, options, profile)
    container.add(container.filters)
    # Wire the interactions: clear-all, add-filter, and Enter-to-add.
    container.clear.click([container.filters.dom.clear()])
    container.button.click([container.filters.dom.add(container.input.dom.content, container.select.dom.content), container.input.js.empty()])
    container.input.enter(container.button.dom.events.trigger('click'))
    html.Html.set_component_skin(container)
    return container
class GraphThread(QObject, threading.Thread):
    """Background worker that resolves the include tree of a ROS launch file.

    Results and failures are reported back to the Qt side via signals.
    """

    graph = Signal(list)  # emitted with the list of resolved include files
    error = Signal(str)  # emitted with a short failure description
    info_signal = Signal(str, bool)  # (progress message, is-warning flag)

    def __init__(self, current_path, root_path):
        # NOTE(review): current_path is accepted but unused here — kept for
        # interface compatibility with existing callers.
        QObject.__init__(self)
        threading.Thread.__init__(self)
        # Fix: Thread.setDaemon() is deprecated since Python 3.10; assign the
        # daemon attribute directly so the thread never blocks shutdown.
        self.daemon = True
        self.root_path = root_path

    def run(self):
        """Collect all files included (recursively) below root_path and emit them."""
        try:
            self.info_signal.emit(('build tree: start for %s' % self.root_path), False)
            result = []
            filelist = nm.nmd().launch.get_included_files(self.root_path, recursive=True, search_in_ext=nm.settings().SEARCH_IN_EXT)
            for inc_file in filelist:
                rospy.logdebug(('build tree: append file: %s' % inc_file.inc_path))
                inc_file.unset_default_args = self.find_default_args(inc_file.inc_path, inc_file.args)
                result.append(inc_file)
                if (not inc_file.exists):
                    self.info_signal.emit(('build tree: skip parse %s, not exist' % inc_file.inc_path), True)
            self.graph.emit(result)
        except exceptions.GrpcTimeout as tout:
            rospy.logwarn(('Build launch tree failed! Daemon not responded within %.2f seconds while get configuration file: %s\nYou can try to increase the timeout for GRPC requests in node manager settings.' % (nm.settings().timeout_grpc, tout.remote)))
            self.error.emit('failed: timeout')
        except Exception:
            import traceback
            formatted_lines = traceback.format_exc(1).splitlines()
            try:
                # The 5th-from-last traceback line usually names the failing file.
                rospy.logwarn('Error while parse launch file for includes:\n\t%s', formatted_lines[(- 5)])
            except Exception:
                # Best effort: a short traceback may not have that many lines.
                pass
            self.error.emit(('failed: %s' % formatted_lines[(- 1)]))

    def find_default_args(self, path, inc_args):
        """Return {arg: default} for args declared in *path* but not overridden by *inc_args*."""
        not_set_args = {}
        # Only XML .launch files declare <arg> defaults.
        if (path and (not (path.endswith('.launch') or (path.find('.launch.') > 0)))):
            return not_set_args
        if rospy.is_shutdown():
            return not_set_args
        try:
            (_, _, data) = nm.nmd().file.get_file_content(path)
            launch_node = None
            xml_nodes = minidom.parseString(data.encode('utf-8')).getElementsByTagName('launch')
            if xml_nodes:
                launch_node = xml_nodes[(- 1)]
            if (launch_node is not None):
                default_args = get_internal_args(data, only_default=True)
                for (arg_in_file, arg_value) in default_args.items():
                    if (arg_in_file not in inc_args):
                        not_set_args[arg_in_file] = arg_value
        except Exception as err:
            msg = ("can't get default arguments for %s: %s" % (path, utf8(err)))
            self.error.emit(msg)
            rospy.logwarn(msg)
        return not_set_args
# NOTE(review): the next line looks like a decorator whose '@' prefix was lost
# (presumably '@requires_toolkit([ToolkitName.wx])') — confirm upstream.
_toolkit([ToolkitName.wx])
class TestWxControlWidgetRegistry(unittest.TestCase):
    """Exercise the wx control-widget registry against a plain wx.Window target."""

    def setUp(self):
        self.widget = wx.Window()
        self.registry = get_widget_registry()
        self.target = TargetWithControl(self.widget)
        self.good_wrapper = UIWrapper(target=self.target, registries=[self.registry])

    def test_is_enabled(self):
        self.widget.Enable(True)
        self.assertTrue(self.good_wrapper.inspect(IsEnabled()))

    def test_is_visible(self):
        self.widget.Show(True)
        self.assertTrue(self.good_wrapper.inspect(IsVisible()))

    def test_is_invisible(self):
        self.widget.Hide()
        self.assertFalse(self.good_wrapper.inspect(IsVisible()))

    def test_get_interactions_good_target(self):
        # The registry advertises exactly the two inspection interactions.
        self.assertEqual(self.registry._get_interactions(self.target), set([IsEnabled, IsVisible]))

    def test_get_interactions_bad_target(self):
        self.assertEqual(self.registry._get_interactions(None), set())

    def test_get_interaction_doc(self):
        self.assertGreater(len(self.registry._get_interaction_doc(self.target, IsEnabled)), 0)
        self.assertGreater(len(self.registry._get_interaction_doc(self.target, IsVisible)), 0)

    def test_get_location_solver(self):
        # No location support is registered for this target type.
        with self.assertRaises(LocationNotSupported):
            self.registry._get_solver(self.target, None)

    def test_get_locations(self):
        self.assertEqual(self.registry._get_locations(self.target), set())

    def test_error_get_location_doc(self):
        with self.assertRaises(LocationNotSupported):
            self.registry._get_location_doc(self.target, None)
class World(BaseWorld, Mapping[(RVIdentifier, torch.Tensor)]):
    """Dependency graph of random-variable values, usable as a read-only
    mapping from RVIdentifier to its current value tensor."""

    def __init__(self, observations: Optional[RVDict]=None, initialize_fn: InitializeFn=init_from_prior) -> None:
        # Observed values are fixed; all other nodes get values from initialize_fn.
        self.observations: RVDict = (observations or {})
        self._initialize_fn: InitializeFn = initialize_fn
        # node -> Variable record (value, distribution, parents, children).
        self._variables: Dict[(RVIdentifier, Variable)] = {}
        # Stack of variables currently being materialised; used to record edges.
        self._call_stack: List[_TempVar] = []

    # NOTE(review): takes ``cls`` — presumably decorated @classmethod upstream
    # (the decorator appears stripped in this copy); confirm.
    def initialize_world(cls: type[T], queries: Iterable[RVIdentifier], observations: Optional[RVDict]=None, initialize_fn: InitializeFn=init_to_uniform, max_retries: int=100, **kwargs) -> T:
        """Build a world whose joint log-prob is finite, retrying up to max_retries times."""
        observations = (observations or {})
        for _ in range(max_retries):
            world = cls(observations, initialize_fn, **kwargs)
            # Materialise every queried and observed node.
            for node in queries:
                world.call(node)
            for node in observations:
                world.call(node)
            log_prob = world.log_prob()
            # Accept only initialisations with a finite joint density.
            if ((not torch.isinf(log_prob)) and (not torch.isnan(log_prob))):
                return world
        raise ValueError(f'Cannot find a valid initialization after {max_retries} retries. The model might be misspecified.')

    def __getitem__(self, node: RVIdentifier) -> torch.Tensor:
        """Return the current value tensor of *node*."""
        return self._variables[node].value

    def get_variable(self, node: RVIdentifier) -> Variable:
        """Return the full Variable record for *node*."""
        return self._variables[node]

    def replace(self, values: RVDict) -> World:
        """Return a copy with *values* substituted; observed nodes may not be replaced."""
        assert (not any(((node in self.observations) for node in values)))
        new_world = self.copy()
        for (node, value) in values.items():
            new_world._variables[node] = new_world._variables[node].replace(value=value.clone())
        # Re-run every child of a changed node so its distribution and parent
        # set reflect the new values.
        nodes_to_update = set().union(*(self._variables[node].children for node in values))
        for node in nodes_to_update:
            (new_distribution, new_parents) = new_world._run_node(node)
            old_node_var = new_world._variables[node]
            new_world._variables[node] = old_node_var.replace(parents=new_parents, distribution=new_distribution)
            # Detach this node from parents it no longer depends on.
            dropped_parents = (old_node_var.parents - new_parents)
            for parent in dropped_parents:
                parent_var = new_world._variables[parent]
                new_world._variables[parent] = parent_var.replace(children=(parent_var.children - {node}))
        return new_world

    def __iter__(self) -> Iterator[RVIdentifier]:
        return iter(self._variables)

    def __len__(self) -> int:
        return len(self._variables)

    def latent_nodes(self) -> Set[RVIdentifier]:
        """All instantiated nodes that are not observed."""
        return (self._variables.keys() - self.observations.keys())

    def copy(self) -> World:
        """Shallow copy: Variable records are shared until individually replaced."""
        world_copy = World(self.observations.copy(), self._initialize_fn)
        world_copy._variables = self._variables.copy()
        return world_copy

    def initialize_value(self, node: RVIdentifier) -> None:
        """Create the Variable for *node*, using its observation when present."""
        (distribution, parents) = self._run_node(node)
        if (node in self.observations):
            node_val = self.observations[node]
        else:
            node_val = self._initialize_fn(distribution)
        self._variables[node] = Variable(value=node_val, distribution=distribution, parents=parents)

    def update_graph(self, node: RVIdentifier) -> torch.Tensor:
        """Materialise *node* if needed, wire the edge to the calling variable,
        and return its value."""
        if (node not in self._variables):
            self.initialize_value(node)
        node_var = self._variables[node]
        if (len(self._call_stack) > 0):
            # The variable on top of the stack is the one currently calling us.
            tmp_child_var = self._call_stack[(- 1)]
            tmp_child_var.parents.add(node)
            node_var.children.add(tmp_child_var.node)
        return node_var.value

    def log_prob(self, nodes: Optional[Collection[RVIdentifier]]=None) -> torch.Tensor:
        """Joint log density over *nodes* (all instantiated nodes when None)."""
        if (nodes is None):
            nodes = self._variables.keys()
        log_prob = torch.tensor(0.0)
        for node in set(nodes):
            log_prob = (log_prob + torch.sum(self._variables[node].log_prob))
        return log_prob

    def enumerate_node(self, node: RVIdentifier) -> torch.Tensor:
        """Return the support of a discrete node; raise for non-enumerable ones."""
        distribution = self._variables[node].distribution
        if (not distribution.has_enumerate_support):
            raise ValueError((str(node) + ' is not enumerable'))
        return distribution.enumerate_support()

    def _run_node(self, node: RVIdentifier) -> Tuple[(dist.Distribution, Set[RVIdentifier])]:
        """Invoke the model function of *node* inside this world's context and
        return its distribution plus the parents recorded during the call."""
        self._call_stack.append(_TempVar(node))
        with self:
            distribution = node.function(*node.arguments)
        temp_var = self._call_stack.pop()
        if (not isinstance(distribution, dist.Distribution)):
            raise TypeError('A random_variable is required to return a distribution.')
        return (distribution, temp_var.parents)
class OptionSeriesScatterSonificationContexttracksMappingPitch(Options):
    """Highcharts sonification pitch-mapping options for scatter context tracks.

    NOTE(review): each option appears twice (getter then setter of the same
    name) — presumably @property/@name.setter decorators were stripped in this
    copy; confirm against the generated source.
    """

    def mapFunction(self):
        # Default: no mapping function.
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        # Default: map pitch to the 'y' value.
        return self._config_get('y')

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        # Default upper note: C6.
        return self._config_get('c6')

    def max(self, text: str):
        self._config(text, js_type=False)

    def min(self):
        # Default lower note: C2.
        return self._config_get('c2')

    def min(self, text: str):
        self._config(text, js_type=False)

    def scale(self):
        return self._config_get(None)

    def scale(self, value: Any):
        self._config(value, js_type=False)

    def within(self):
        # Default: values are scaled within the yAxis range.
        return self._config_get('yAxis')

    def within(self, text: str):
        self._config(text, js_type=False)
# NOTE(review): the next line looks like a decorator with its prefix lost
# (presumably '@pytest.mark.parametrize(...)') — confirm upstream.
.parametrize('examples_file', ['ner.json', 'ner.yml', 'ner.jsonl'])
def test_jinja_template_rendering_with_examples(examples_dir: Path, examples_file: str):
    """The NER v3 prompt template must render identically from each examples format."""
    labels = 'PER,ORG,LOC'
    nlp = spacy.blank('en')
    doc = nlp.make_doc('Alice and Bob went to the supermarket')
    examples = fewshot_reader((examples_dir / examples_file))
    llm_ner = make_ner_task_v3(examples=examples, labels=labels)
    prompt = list(llm_ner.generate_prompts([doc]))[0]
    # Compare against the full expected prompt text, whitespace-trimmed.
    assert (prompt.strip() == '\nYou are an expert Named Entity Recognition (NER) system.\nYour task is to accept Text as input and extract named entities.\nEntities must have one of the following labels: LOC, ORG, PER.\nIf a span is not an entity label it: `==NONE==`.\n\nQ: Given the paragraph below, identify a list of entities, and for each entry explain why it is or is not an entity:\n\nParagraph: Jack and Jill went up the hill.\nAnswer:\n1. Jack | True | PER | is the name of a person\n2. Jill | True | PER | is the name of a person\n3. went up | False | ==NONE== | is a verb\n4. hill | True | LOC | is a location\n\nParagraph: Alice and Bob went to the supermarket\nAnswer:\n'.strip())
def exposed_delete_feed(feed_name, do_delete, search_str):
    """Scan RSS posts of *feed_name*, print entries matching every '|'-separated
    needle in *search_str*, and delete them when *do_delete* contains 'true'."""
    with db.session_context() as sess:
        posts = sess.query(db.RssFeedPost).filter((db.RssFeedPost.feed_entry.feed_name == feed_name)).all()
        delete_enabled = 'true' in do_delete.lower()
        needles = search_str.split('|')
        for post in posts:
            # Match against the title plus all tags, space-joined.
            haystack = ' '.join([post.title] + post.tags)
            if all(needle in haystack for needle in needles):
                print(haystack)
                if delete_enabled:
                    print('Deleting item')
                    sess.delete(post)
        sess.commit()
def downgrade():
    """Revert the migration: restore block_number indexes and the previous primary keys."""
    # transfers: primary key back to (transaction_hash, trace_address).
    op.execute('ALTER TABLE transfers DROP CONSTRAINT transfers_pkey')
    op.create_index('ix_transfers_block_number', 'transfers', ['block_number'])
    op.create_primary_key('transfers_pkey', 'transfers', ['transaction_hash', 'trace_address'])
    # miner_payments: primary key back to transaction_hash only.
    op.execute('ALTER TABLE miner_payments DROP CONSTRAINT miner_payments_pkey')
    op.create_index('ix_block_number', 'miner_payments', ['block_number'])
    op.create_primary_key('miner_payments_pkey', 'miner_payments', ['transaction_hash'])
def compose_breakpoint_graph(base_dot, predicted_dot, true_edges):
    """Merge base genome edges, predicted edges, and true edges into one
    MultiGraph with distinguishing colors/styles for rendering."""
    base_graph = nx.read_dot(base_dot)
    predicted = nx.read_dot(predicted_dot)
    result = nx.MultiGraph()
    # Base edges keep their genome colour; infinity edges are labelled 'oo'.
    for v1, v2, attrs in base_graph.edges(data=True):
        result.add_edge(v1, v2, color=g2c(attrs['genome_id']),
                        label='oo' if attrs['infinity'] == 'True' else '')
    # Predicted edges: dashed red.
    for v1, v2 in predicted.edges:
        result.add_edge(v1, v2, color='red', style='dashed')
    # True edges: bold red, labelled when infinite.
    for v1, v2, infinite in true_edges:
        result.add_edge(str(v1), str(v2), color='red', style='bold',
                        label='oo' if infinite else '')
    return result
class OptionSeriesScatterSonificationContexttracksMappingLowpassFrequency(Options):
    """Highcharts sonification lowpass-frequency mapping options.

    NOTE(review): each option appears twice (getter then setter of the same
    name) — presumably @property/@name.setter decorators were stripped in this
    copy; confirm against the generated source.
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def parse_block_identifier_no_extra_call(w3: Union[('Web3', 'AsyncWeb3')], block_identifier: BlockIdentifier) -> BlockIdentifier:
    """Normalize a user-supplied block identifier without making any RPC call."""
    if block_identifier is None:
        # No identifier given: use the client-wide default block.
        return w3.eth.default_block
    if isinstance(block_identifier, int) and block_identifier >= 0:
        return block_identifier
    if block_identifier in ('latest', 'earliest', 'pending', 'safe', 'finalized'):
        return block_identifier
    if isinstance(block_identifier, bytes):
        return HexBytes(block_identifier)
    if is_hex_encoded_block_hash(block_identifier):
        return HexStr(str(block_identifier))
    raise BlockNumberOutofRange
class CMYK(Space):
    """Uncalibrated CMYK colour space layered on top of sRGB."""

    BASE = 'srgb'  # conversions to/from this space go through sRGB
    NAME = 'cmyk'
    SERIALIZE = ('--cmyk',)  # custom-space serialization identifier
    # All four channels are bound to the [0, 1] range.
    CHANNELS = (Channel('c', 0.0, 1.0, bound=True), Channel('m', 0.0, 1.0, bound=True), Channel('y', 0.0, 1.0, bound=True), Channel('k', 0.0, 1.0, bound=True))
    CHANNEL_ALIASES = {'cyan': 'c', 'magenta': 'm', 'yellow': 'y', 'black': 'k'}
    WHITE = WHITES['2deg']['D65']

    def to_base(self, coords: Vector) -> Vector:
        """Convert CMYK coordinates to sRGB."""
        return cmyk_to_srgb(coords)

    def from_base(self, coords: Vector) -> Vector:
        """Convert sRGB coordinates to CMYK."""
        return srgb_to_cmyk(coords)
def test():
    """Validate a two-token Matcher pattern: an exact-text token followed by a
    PROPN part-of-speech token.

    NOTE(review): the assertion messages below look like garbled placeholders
    in this copy — confirm the intended feedback strings upstream.
    """
    assert (len(pattern) == 2), '()'
    assert (isinstance(pattern[0], dict) and isinstance(pattern[1], dict)), ''
    assert ((len(pattern[0]) == 1) and (len(pattern[1]) == 1)), ''
    # Token attribute keys are case-insensitive in spaCy patterns.
    assert any(((pattern[0].get(key) == '') for key in ['text', 'TEXT'])), '?'
    assert any(((pattern[1].get(key) == 'PROPN') for key in ['pos', 'POS'])), '??'
    __msg__.good('!')
def __handle_plugin_missing_message(message, engine):
    """Handle a GStreamer missing-plugin bus message: stop playback, notify the
    user, and offer the platform plugin installer when available."""
    desc = GstPbutils.missing_plugin_message_get_description(message)
    installer_details = GstPbutils.missing_plugin_message_get_installer_detail(message)
    LOGGER.warning('A plugin for %s is missing, stopping playback', desc)
    user_message = (_('A GStreamer 1.x plugin for %s is missing. Without this software installed, Exaile will not be able to play the current file. Please install the required software on your computer. See %s for details.') % (desc, MISSING_PLUGIN_URL))
    engine.stop()
    __notify_user_on_error(user_message, engine)
    if GstPbutils.install_plugins_supported():
        # Helper returns truthy when it managed to start an install session.
        if __run_installer_helper(installer_details):
            return
    LOGGER.warning('Installation of GStreamer plugins not supported on this platform.')
class EmailBodyTemplate():
    """Render the OSCI change-ranking e-mail body from a Jinja template."""

    def __init__(self, template_path: Path=DEFAULT_TEMPLATE_PATH):
        self.template = self.__load_template(path=template_path)

    # NOTE(review): defined without 'self' — presumably @staticmethod was
    # stripped in this copy (as written, the instance call above would bind
    # self to 'path' and fail); confirm upstream.
    def __load_template(path: Path):
        log.debug(f'Read template from {path}')
        with open(str(path)) as template_file:
            return Template(template_file.read())

    def render(self, date: datetime, compared_date: datetime, shift_up: pd.DataFrame, shift_down: pd.DataFrame, company: str, company_position: pd.DataFrame, solutionshub_osci_change_ranking: str, osci_reports_urls: dict) -> str:
        """Render the e-mail body; the DataFrames are embedded as index-free HTML tables."""
        return self.template.render(date=date, compared_date=compared_date, shift_up=shift_up.to_html(index=False), shift_down=shift_down.to_html(index=False), company=company, company_position=company_position.to_html(index=False), solutionshub_osci_change_ranking=solutionshub_osci_change_ranking, **osci_reports_urls)
def emit_instance(op, for_profiler, f_instance_convertor=gemm_permute_instance, emit_kernel=False, func_attrs=None):
    """Emit the CUTLASS instance definition for *op* and post-process it with
    *f_instance_convertor*; emit_kernel selects the gemm-permute emitter."""
    import cutlass_lib

    if emit_kernel:
        emitter = cutlass_lib.gemm_operation.EmitGemmPermuteInstance()
    else:
        emitter = cutlass_lib.gemm_operation.EmitGemmInstance()
    rendered = emitter.emit(op)
    return f_instance_convertor(rendered, func_attrs, for_profiler)
def test_get_extended_debug_logger_if_other_logger_in_cache():
    """A plain logger already in the cache must still yield an ExtendedDebugLogger
    when the extended variant is requested for the same name."""
    # Unique name so the logging cache starts empty for this path.
    name = f'testing.{uuid.uuid4()}'
    plain_logger = get_logger(name)
    assert not isinstance(plain_logger, ExtendedDebugLogger)
    assert plain_logger.name == name
    upgraded_logger = get_extended_debug_logger(name)
    assert isinstance(upgraded_logger, ExtendedDebugLogger)
    assert upgraded_logger.name == name
# NOTE(review): the next two lines look like mangled decorators (presumably a
# click command registration such as "@cli.command('transaction')" plus an
# auth/session decorator) — confirm upstream.
('transaction')
_decorator
def transaction():
    """Interactively prompt for transfer details and submit the transaction."""
    iban = click.prompt("Recipient's IBAN (spaces are allowed): ", type=str)
    bic = click.prompt("Recipient's BIC (optional): ", type=str, default='', show_default=False)
    name = click.prompt("Recipient's name: ", type=str)
    reference = click.prompt('Transfer reference (optional): ', type=str, default='', show_default=False)
    amount = click.prompt('Transfer amount (only numeric value, dot separated): ', type=str)
    # PIN input is hidden from the terminal echo.
    pin = click.prompt('Please enter your PIN (input is hidden): ', hide_input=True, type=str)
    response = API_CLIENT.create_transaction(iban, bic, name, reference, amount, pin)
    if JSON_OUTPUT:
        _print_json(response)
def test_add_inside_old_new_behaviour(reekset):
    """Compare add_inside (old behaviour) with add_inside_polygons (new) on
    identical zeroed point sets."""
    (_poi, pol) = reekset
    poi1 = _poi.copy()
    poi1.dataframe.Z_TVDSS = 0.0
    poi2 = _poi.copy()
    poi2.dataframe.Z_TVDSS = 0.0
    # Old behaviour: some value reaches 2.0 — presumably points on overlapping
    # polygon boundaries are incremented twice; confirm against xtgeo docs.
    poi1.add_inside(pol, 1)
    print(poi1.dataframe)
    zvec = poi1.dataframe['Z_TVDSS'].values
    assert (2.0 in zvec.tolist())
    zvec = zvec[(zvec < 1)]
    assert (zvec.size == 19)
    # New behaviour: no 2.0 entries, but the same count of untouched points.
    poi2.add_inside_polygons(pol, 1)
    zvec = poi2.dataframe['Z_TVDSS'].values
    assert (2.0 not in zvec.tolist())
    zvec = zvec[(zvec < 1)]
    print(poi2.dataframe)
    assert (zvec.size == 19)
class PixelBuf():
def __init__(self, size: int, *, byteorder: str='BGR', brightness: float=1.0, auto_write: bool=False, header: Optional[bytes]=None, trailer: Optional[bytes]=None):
    """Allocate the pixel byte buffer, with optional protocol header/trailer.

    :param size: Number of pixels.
    :param byteorder: Channel order string, e.g. 'BGR', 'GRBW', 'PBGR'.
    :param auto_write: When True, every pixel change triggers a transmit.
    """
    (bpp, byteorder_tuple, has_white, dotstar_mode) = self.parse_byteorder(byteorder)
    # Keep auto_write off during construction; the requested value is restored at the end.
    self.auto_write = False
    # DotStar frames always use 4 bytes per pixel regardless of declared bpp.
    effective_bpp = (4 if dotstar_mode else bpp)
    _bytes = (effective_bpp * size)
    buf = bytearray(_bytes)
    offset = 0
    if (header is not None):
        if (not isinstance(header, bytearray)):
            raise TypeError('header must be a bytearray')
        buf = (header + buf)
        # Pixel data starts after the header.
        offset = len(header)
    if (trailer is not None):
        if (not isinstance(trailer, bytearray)):
            raise TypeError('trailer must be a bytearray')
        buf += trailer
    self._pixels = size
    self._bytes = _bytes
    self._byteorder = byteorder_tuple
    self._byteorder_string = byteorder
    self._has_white = has_white
    self._bpp = bpp
    # Unscaled copy of the buffer; created lazily on the first brightness change.
    self._pre_brightness_buffer = None
    self._post_brightness_buffer = buf
    self._offset = offset
    self._dotstar_mode = dotstar_mode
    self._pixel_step = effective_bpp
    if dotstar_mode:
        # Shift RGB indices past the per-pixel start byte; slot 0 carries luminance.
        self._byteorder_tuple = ((byteorder_tuple[0] + 1), (byteorder_tuple[1] + 1), (byteorder_tuple[2] + 1), 0)
        for i in range(self._offset, (self._bytes + self._offset), 4):
            self._post_brightness_buffer[i] = DOTSTAR_LED_START_FULL_BRIGHT
    self._brightness = 1.0
    # Assigning through the brightness property rescales the buffer if needed.
    self.brightness = brightness
    self.auto_write = auto_write
# NOTE(review): defined without 'self' — presumably @staticmethod was stripped
# in this copy (it is invoked as self.parse_byteorder(...) above); confirm.
def parse_byteorder(byteorder: str) -> Tuple[(int, str, bool, bool)]:
    """Parse a byteorder string such as 'RGB', 'GRBW' or 'PBGR'.

    Returns (bpp, channel index tuple, has_white, dotstar_mode); 'W' denotes a
    white channel, 'P' a DotStar luminance byte.
    """
    bpp = len(byteorder)
    dotstar_mode = False
    has_white = False
    # Only R, G, B, W and P are legal channel letters.
    if (byteorder.strip('RGBWP') != ''):
        raise ValueError('Invalid Byteorder string')
    try:
        r = byteorder.index('R')
        g = byteorder.index('G')
        b = byteorder.index('B')
    except ValueError as exc:
        raise ValueError('Invalid Byteorder string') from exc
    if ('W' in byteorder):
        w = byteorder.index('W')
        byteorder = (r, g, b, w)
        has_white = True
    elif ('P' in byteorder):
        lum = byteorder.index('P')
        byteorder = (r, g, b, lum)
        dotstar_mode = True
    else:
        byteorder = (r, g, b)
    return (bpp, byteorder, has_white, dotstar_mode)
# NOTE(review): presumably @property (decorator stripped in this copy).
def bpp(self):
    """Bytes per pixel as declared by the byteorder string."""
    return self._bpp
# NOTE(review): presumably @property (decorator stripped in this copy).
def brightness(self):
    """Current overall brightness in [0.0, 1.0]."""
    return self._brightness
# NOTE(review): presumably the @brightness.setter (decorator stripped in this copy).
def brightness(self, value: float):
    """Clamp to [0, 1] and rescale the output buffer from the unscaled copy."""
    value = min(max(value, 0.0), 1.0)
    change = (value - self._brightness)
    # Ignore sub-0.1% changes to avoid needless full-buffer rewrites.
    if ((- 0.001) < change < 0.001):
        return
    self._brightness = value
    if (self._pre_brightness_buffer is None):
        # Lazily snapshot the unscaled pixel data the first time brightness changes.
        self._pre_brightness_buffer = bytearray(self._post_brightness_buffer)
    offset_check = (self._offset % self._pixel_step)
    for i in range(self._offset, (self._bytes + self._offset)):
        # In DotStar mode skip the per-pixel start/luminance byte.
        if (self._dotstar_mode and ((i % 4) != offset_check)):
            continue
        self._post_brightness_buffer[i] = int((self._pre_brightness_buffer[i] * self._brightness))
    if self.auto_write:
        self.show()
def byteorder(self):
    """The byteorder string this buffer was created with (e.g. 'RGB', 'GRBW')."""
    return self._byteorder_string
def __len__(self):
    """Number of pixels in the buffer."""
    return self._pixels
def show(self):
    """Transmit the brightness-scaled buffer to the hardware via ``_transmit``."""
    return self._transmit(self._post_brightness_buffer)
def fill(self, color: ColorUnion):
    """Set every pixel to *color*, then flush once if auto_write is enabled."""
    red, green, blue, white = self._parse_color(color)
    for pixel_index in range(self._pixels):
        self._set_item(pixel_index, red, green, blue, white)
    if self.auto_write:
        self.show()
def _parse_color(self, value: ColorUnion) -> Tuple[(int, int, int, int)]:
    """Normalize *value* (packed ``0xRRGGBB`` int or 3/4 sequence) to ``(r, g, b, w)``.

    In DotStar mode ``w`` is folded into the 5-bit per-LED brightness header
    byte; for RGBW strips a pure-grey RGB color is moved onto the white channel.
    """
    red = green = blue = 0
    white = 0
    if isinstance(value, int):
        red = value >> 16
        green = (value >> 8) & 255
        blue = value & 255
        # DotStar luminance defaults to full (1.0) for packed ints.
        white = 1.0 if self._dotstar_mode else 0
    else:
        if not (3 <= len(value) <= 4):
            raise ValueError('Expected tuple of length {}, got {}'.format(self._bpp, len(value)))
        if len(value) == self._bpp:
            if self._bpp == 3:
                red, green, blue = value
            else:
                red, green, blue, white = value
        elif len(value) == 3:
            red, green, blue = value
            if self._dotstar_mode:
                white = 1.0
    if self._bpp == 4:
        if self._dotstar_mode:
            # Fold the 0..1 luminance into the 5-bit DotStar header byte.
            white = (int(white * 31) & 31) | DOTSTAR_LED_START
        elif (self._has_white
              and (isinstance(value, int) or len(value) == 3)
              and red == green == blue):
            # Grey RGB maps onto the dedicated white channel for RGBW strips.
            white = red
            red = green = blue = 0
    return (red, green, blue, white)
def _set_item(self, index: int, r: int, g: int, b: int, w: int):
    """Write one pixel's channel values into the internal buffer(s).

    Negative indices wrap from the end; IndexError when still out of range.
    Raw values go into the pre-brightness snapshot (when one exists) and
    brightness-scaled values into the transmit buffer.
    """
    if (index < 0):
        index += len(self)
    if ((index >= self._pixels) or (index < 0)):
        raise IndexError
    # Byte offset of this pixel within the backing buffer.
    offset = (self._offset + (index * self._bpp))
    if (self._pre_brightness_buffer is not None):
        if (self._bpp == 4):
            self._pre_brightness_buffer[(offset + self._byteorder[3])] = w
        self._pre_brightness_buffer[(offset + self._byteorder[0])] = r
        self._pre_brightness_buffer[(offset + self._byteorder[1])] = g
        self._pre_brightness_buffer[(offset + self._byteorder[2])] = b
    if (self._bpp == 4):
        if (not self._dotstar_mode):
            # White channel is brightness-scaled; in DotStar mode w already
            # encodes the per-LED brightness header and is stored as-is.
            w = int((w * self._brightness))
        self._post_brightness_buffer[(offset + self._byteorder[3])] = w
    self._post_brightness_buffer[(offset + self._byteorder[0])] = int((r * self._brightness))
    self._post_brightness_buffer[(offset + self._byteorder[1])] = int((g * self._brightness))
    self._post_brightness_buffer[(offset + self._byteorder[2])] = int((b * self._brightness))
def __setitem__(self, index: Union[(int, slice)], val: Union[(ColorUnion, Sequence[ColorUnion])]):
    """Assign one pixel (int index) or several (slice + sequence of colors),
    then flush once if auto_write is enabled."""
    if isinstance(index, slice):
        start, stop, step = index.indices(self._pixels)
        for val_index, pixel_index in enumerate(range(start, stop, step)):
            red, green, blue, white = self._parse_color(val[val_index])
            self._set_item(pixel_index, red, green, blue, white)
    else:
        red, green, blue, white = self._parse_color(val)
        self._set_item(index, red, green, blue, white)
    if self.auto_write:
        self.show()
def _getitem(self, index: int):
    """Read one pixel back as a [r, g, b] (+ optional white / 0..1 luminance) list.

    Reads from the pre-brightness snapshot when it exists, so values round-trip
    unscaled even after brightness changes.
    """
    start = (self._offset + (index * self._bpp))
    buffer = (self._pre_brightness_buffer if (self._pre_brightness_buffer is not None) else self._post_brightness_buffer)
    value = [buffer[(start + self._byteorder[0])], buffer[(start + self._byteorder[1])], buffer[(start + self._byteorder[2])]]
    if self._has_white:
        value.append(buffer[(start + self._byteorder[3])])
    elif self._dotstar_mode:
        # Decode the 5-bit DotStar brightness header back to a 0..1 float.
        value.append(((buffer[(start + self._byteorder[3])] & DOTSTAR_LED_BRIGHTNESS) / 31.0))
    return value
def __getitem__(self, index: Union[(int, slice)]):
    """Return one pixel value (int index) or a list of pixel values (slice).

    Bugfix: the slice path previously bounded the range with
    ``len(self._post_brightness_buffer) // self._bpp``, which over-counts when
    the buffer carries an offset/header or trailer bytes; use ``self._pixels``
    so slicing agrees with ``__len__`` and ``__setitem__``.
    """
    if isinstance(index, slice):
        out = []
        for in_i in range(*index.indices(self._pixels)):
            out.append(self._getitem(in_i))
        return out
    if (index < 0):
        index += len(self)
    if ((index >= self._pixels) or (index < 0)):
        raise IndexError
    return self._getitem(index)
def _transmit(self, buffer: bytearray):
    """Send *buffer* to the physical device; subclasses must override."""
    raise NotImplementedError('Must be subclassed')
class TimeActivity(DeleteMixin, QuickbooksManagedObject, QuickbooksTransactionEntity, LinkedTxnMixin):
    """QuickBooks Online TimeActivity entity: a block of recorded time against
    a customer/vendor/employee, with optional billing information."""

    class_dict = {
        'VendorRef': Ref,
        'CustomerRef': Ref,
        'DepartmentRef': Ref,
        'EmployeeRef': Ref,
        'ItemRef': Ref,
        'ClassRef': Ref,
        'AttachableRef': AttachableRef,
    }
    qbo_object_name = 'TimeActivity'

    def __init__(self):
        super(TimeActivity, self).__init__()
        self.NameOf = ''
        self.Taxable = False
        # Scalar fields start unset.
        for field in ('TxnDate', 'BillableStatus', 'HourlyRate', 'Hours', 'Minutes',
                      'BreakHours', 'BreakMinutes', 'StartTime', 'EndTime', 'Description'):
            setattr(self, field, None)
        # Reference fields (deserialized via class_dict) start unset.
        for ref_name in ('VendorRef', 'CustomerRef', 'DepartmentRef', 'EmployeeRef',
                         'ItemRef', 'ClassRef', 'AttachableRef'):
            setattr(self, ref_name, None)

    def __str__(self):
        return self.NameOf
def load(manifest_name: str) -> str:
    """Return the text of ``<manifest_name>.yaml`` bundled beside this module.

    Manifests live in a directory named after this module (``foo.py`` ->
    ``foo/``). Results are memoized in the module-level ``_file_cache`` so each
    manifest is read from disk at most once.
    """
    if manifest_name in _file_cache:
        return _file_cache[manifest_name]
    manifest_dir = __file__[:-len('.py')]
    manifest_file = os.path.join(manifest_dir, manifest_name + '.yaml')
    # Bugfix: close the file deterministically instead of leaking the handle
    # (was a bare open(...).read()); pin the encoding for portability.
    with open(manifest_file, 'r', encoding='utf-8') as handle:
        manifest_content = handle.read()
    _file_cache[manifest_name] = manifest_content
    return manifest_content
class isDecimal(isFloat):
    """Validator accepting anything convertible to ``decimal.Decimal``.

    Like ``isFloat`` but yields a ``Decimal``; the locale-specific decimal
    separator (``self.dot``) is translated to '.' before conversion. Returns
    ``(converted_value, None)`` on success, ``(original, message)`` on failure.
    """

    def check(self, value):
        try:
            if isinstance(value, decimal.Decimal):
                converted = value
            else:
                converted = decimal.Decimal(str(value).replace(self.dot, '.'))
        except (ValueError, TypeError, decimal.InvalidOperation):
            return (value, translate(self.message))
        return (converted, None)
def process_line(x):
    """Generate (or reuse) the cached BERT feature file for one dataset line.

    *x* is a ``(line, add_blank)`` tuple where *line* is a pipe-separated
    dataset row: ``wav_path|_|language|text|phones|tones|word2ph``.
    Side effect: writes ``<wav_path>.bert.pt`` when no valid cache exists.
    """
    (line, add_blank) = x
    device = config.bert_gen_config.device
    if config.bert_gen_config.use_multi_device:
        # Spread worker processes across the available GPUs by process rank.
        rank = mp.current_process()._identity
        rank = rank[0] if len(rank) > 0 else 0
        if torch.cuda.is_available():
            gpu_id = rank % torch.cuda.device_count()
            device = torch.device(f'cuda:{gpu_id}')
        else:
            device = torch.device('cpu')
    (wav_path, _, language_str, text, phones, tone, word2ph) = line.strip().split('|')
    phone = phones.split(' ')
    tone = [int(i) for i in tone.split(' ')]
    # Bugfix/cleanup: the redundant `word2ph = [i for i in word2ph]` copy that
    # followed this line did nothing and has been removed.
    word2ph = [int(i) for i in word2ph.split(' ')]
    (phone, tone, language) = cleaned_text_to_sequence(phone, tone, language_str)
    if add_blank:
        # Interleave blanks between tokens; each word then spans twice as many
        # phones (plus one leading blank on the first word).
        phone = commons.intersperse(phone, 0)
        tone = commons.intersperse(tone, 0)
        language = commons.intersperse(language, 0)
        for i in range(len(word2ph)):
            word2ph[i] = word2ph[i] * 2
        word2ph[0] += 1
    bert_path = wav_path.replace('.WAV', '.wav').replace('.wav', '.bert.pt')
    try:
        # Reuse a previously generated feature file when its length matches.
        bert = torch.load(bert_path)
        assert bert.shape[-1] == len(phone)
    except Exception:
        bert = get_bert(text, word2ph, language_str, device)
        assert bert.shape[-1] == len(phone)
        torch.save(bert, bert_path)
def test_custom_fieldnames(db, db_path, geocoder):
    """Geocoding with --latitude/--longitude writes to the custom column names."""
    runner = CliRunner()
    table = db[TABLE_NAME]
    geo_table = db[GEO_TABLE]
    result = runner.invoke(cli, ['test', str(db_path), str(TABLE_NAME), '-p', str(db_path), '-l', '{id}', '-d', '0', '--latitude', 'lat', '--longitude', 'lng'])
    print(result.stdout)
    assert 0 == result.exit_code
    for row in table.rows:
        # isinstance is the idiomatic type check (was: type(...) == float).
        assert isinstance(row.get('lat'), float)
        assert isinstance(row.get('lng'), float)
        # .get raises when the id is missing, so the bare call doubles as an
        # existence check; the unused `result =` assignment was dropped.
        geo_table.get(row['id'])
class OptionSeriesAreasplinerangeSonificationDefaultinstrumentoptions(Options):
    """Generated Highcharts option wrapper for
    ``series.areasplinerange.sonification.defaultInstrumentOptions``.

    NOTE(review): each getter/setter pair shares a name; upstream these are
    generated ``@property``/``@<name>.setter`` pairs whose decorators appear to
    have been stripped. Getter arguments are the Highcharts defaults.
    """
    def activeWhen(self) -> 'OptionSeriesAreasplinerangeSonificationDefaultinstrumentoptionsActivewhen':
        # Sub-config controlling when this track is active.
        return self._config_sub_data('activeWhen', OptionSeriesAreasplinerangeSonificationDefaultinstrumentoptionsActivewhen)
    def instrument(self):
        # Default instrument is 'piano'.
        return self._config_get('piano')
    def instrument(self, text: str):
        self._config(text, js_type=False)
    def mapping(self) -> 'OptionSeriesAreasplinerangeSonificationDefaultinstrumentoptionsMapping':
        # Sub-config mapping data values to sound parameters.
        return self._config_sub_data('mapping', OptionSeriesAreasplinerangeSonificationDefaultinstrumentoptionsMapping)
    def midiName(self):
        return self._config_get(None)
    def midiName(self, text: str):
        self._config(text, js_type=False)
    def pointGrouping(self) -> 'OptionSeriesAreasplinerangeSonificationDefaultinstrumentoptionsPointgrouping':
        return self._config_sub_data('pointGrouping', OptionSeriesAreasplinerangeSonificationDefaultinstrumentoptionsPointgrouping)
    def roundToMusicalNotes(self):
        return self._config_get(True)
    def roundToMusicalNotes(self, flag: bool):
        self._config(flag, js_type=False)
    def showPlayMarker(self):
        return self._config_get(True)
    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)
    def type(self):
        return self._config_get('instrument')
    def type(self, text: str):
        self._config(text, js_type=False)
class OptionSeriesAreaSonificationTracksMappingGapbetweennotes(Options):
    """Generated Highcharts option wrapper for
    ``series.area.sonification.tracks.mapping.gapBetweenNotes``.

    NOTE(review): getter/setter pairs share a name — upstream these are
    generated ``@property``/``@<name>.setter`` pairs with decorators stripped.
    """
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class recursive_iterator(_coconut_baseclass):
    """Coconut runtime helper: memoize an iterator-producing function so each
    distinct argument set maps to a single shared reiterable.

    Hashable keys use a dict store; unhashable keys are pickled, and keys that
    are neither hashable nor picklable fall back to a linear-scan list store.
    """
    __slots__ = ('func', 'reit_store', 'backup_reit_store')

    def __init__(self, func):
        self.func = func
        self.reit_store = _coconut.dict()
        self.backup_reit_store = []

    def __call__(self, *args, **kwargs):
        key = (args, _coconut.frozenset(kwargs.items()))
        use_backup = False
        try:
            _coconut.hash(key)
        except _coconut.Exception:
            try:
                key = _coconut.pickle.dumps(key, -1)
            except _coconut.Exception:
                use_backup = True
        if use_backup:
            for (k, v) in self.backup_reit_store:
                if k == key:
                    # Bugfix: return the stored reiterable (was `return reit`,
                    # an unbound local -> NameError on every backup-store hit).
                    return v
            reit = reiterable(self.func(*args, **kwargs))
            self.backup_reit_store.append([key, reit])
            return reit
        else:
            reit = self.reit_store.get(key)
            if reit is None:
                reit = reiterable(self.func(*args, **kwargs))
                self.reit_store[key] = reit
            return reit

    def __repr__(self):
        return ('recursive_iterator(%r)' % (self.func,))

    def __reduce__(self):
        return (self.__class__, (self.func,))

    def __get__(self, obj, objtype=None):
        # Descriptor protocol so the decorator also works on methods.
        if obj is None:
            return self
        if _coconut_sys.version_info < (3,):
            # Python 2 bound methods require the class argument.
            return _coconut.types.MethodType(self, obj, objtype)
        else:
            return _coconut.types.MethodType(self, obj)
def DoSinkAlloc(alloc_cursor, scope_cursor):
    """Sink an allocation into the top of a following scope (if/for body).

    Safety: refuses when the buffer is read or written after the scope, since
    sinking would shrink its lifetime past those accesses. For an `if` with an
    else branch, an alpha-renamed copy of the alloc is inserted at the top of
    the orelse as well. Returns the rewritten IR plus a forwarding function.
    """
    alloc_stmt = alloc_cursor._node
    scope_stmt = scope_cursor._node
    assert isinstance(alloc_stmt, LoopIR.Alloc)
    assert isinstance(scope_stmt, (LoopIR.If, LoopIR.For))
    # Collect every access occurring after the target scope in the same block.
    after_scope = [s._node for s in get_rest_of_block(scope_cursor)]
    accesses = (get_reads_of_stmts(after_scope) + get_writes_of_stmts(after_scope))
    if (alloc_stmt.name in [name for (name, _) in accesses]):
        raise SchedulingError(f'Cannot sink allocation {alloc_stmt} because the buffer is accessed outside of the scope provided.')
    # Move the alloc to the first position of the scope's body.
    (ir, fwd) = alloc_cursor._move(scope_cursor.body()[0].before())
    if (isinstance(scope_stmt, LoopIR.If) and (len(scope_stmt.orelse) > 0)):
        # The else branch needs its own fresh (renamed) copy of the allocation.
        else_alloc = Alpha_Rename([alloc_stmt]).result()
        (ir, fwd_ins) = fwd(scope_cursor).orelse()[0].before()._insert(else_alloc)
        fwd = _compose(fwd_ins, fwd)
    return (ir, fwd)
class PostHandler(SimpleHTTPRequestHandler):
    """HTTP handler that silences request logging and can append POST bodies
    to the file named by ``server.influx_log`` (when that attribute exists)."""

    def log_message(self, format, *args):
        # Suppress the default per-request stderr logging.
        return

    def _log_post(self):
        length = int(self.headers.get('content-length', 0))
        body = self.rfile.read(length).decode().strip()
        if not body:
            return
        if not hasattr(self.server, 'influx_log'):
            return
        with open(self.server.influx_log, 'a', encoding='utf-8') as influx_log:
            influx_log.write(body + '\n')
@pytest.mark.parametrize('pipe_name', ['experimental_char_tagger_tokenizer', 'experimental_char_ner_tokenizer'])
def test_char_tokenizer_overfitting(pipe_name):
    """Both experimental char-tokenizer pipes should overfit a tiny corpus and
    reproduce the gold tokenization exactly.

    Bugfix: restore the ``@pytest.mark.`` prefix on the parametrize decorator,
    which had been mangled to a bare ``.parametrize(...)`` expression
    (a syntax error).
    """
    texts = ['This is a sentence.', 'Here is a short, boring sentence.', 'Here is another!']
    nlp = spacy.blank('en')
    train_docs = [nlp.make_doc(text) for text in texts]
    # Re-create the pipeline with the character pre-tokenizer, reusing the vocab.
    nlp = spacy.blank('en', vocab=nlp.vocab, config={'nlp': {'tokenizer': {'': 'spacy-experimental.char_pretokenizer.v1'}}})
    nlp.add_pipe(pipe_name)
    train_examples = [Example(nlp.make_doc(doc.text), doc) for doc in train_docs]
    optimizer = nlp.initialize(get_examples=(lambda: train_examples))
    for i in range(50):
        nlp.update(train_examples, sgd=optimizer)
    for (train_doc, test_doc) in zip(train_docs, nlp.pipe(texts)):
        assert (train_doc.text == test_doc.text)
        assert ([t.text for t in train_doc] == [t.text for t in test_doc])
class TestSamplingGeneration(TestCase):
    """Smoke tests for induced-sampling quadrature point generation."""

    def test_sampling(self):
        num_dims = 4
        poly_order = 5
        uniform_param = Parameter(distribution='uniform', order=poly_order, lower=(-1.0), upper=1.0)
        param_list = [uniform_param for _ in range(num_dims)]
        total_order_basis = Basis('total-order')
        poly = Poly(param_list, total_order_basis, method='least-squares',
                    sampling_args={'mesh': 'induced', 'subsampling-algorithm': 'qr', 'sampling-ratio': 1})
        # With a sampling ratio of 1, expect one quadrature point per basis term.
        assert poly._quadrature_points.shape == (poly.basis.cardinality, num_dims)

    def test_induced_sampling(self):
        num_dims = 3
        param_list = [Parameter(3, 'Uniform', upper=1, lower=(-1))] * num_dims
        basis = Basis('total-order', [3] * num_dims)
        sampler = Induced(param_list, basis)
        points = sampler.get_points()
        assert points.shape == (sampler.samples_number, 3)
class Ejs(javascript.Javascript):
    """Tplmap-style plugin for the EJS (Embedded JavaScript) template engine.

    Extends the generic Javascript plugin with EJS-specific payload templates
    (``<%% ... %%>`` delimiters, doubled ``%`` for later %-formatting) and the
    context prefixes/suffixes needed to break out of existing template tags.
    """
    def init(self):
        # Payload templates per capability; placeholders (%(path)s, %(chunk_b64)s,
        # %(code_b64)s, %(delay)i, ...) are filled in by the base plugin.
        self.update_actions({'render': {'header': "<%%- '%(header)s'+", 'trailer': "+'%(trailer)s' %%>"}, 'write': {'write': "<%%global.process.mainModule.require('fs').appendFileSync('%(path)s', Buffer('%(chunk_b64)s', 'base64'), 'binary')%%>", 'truncate': "<%%global.process.mainModule.require('fs').writeFileSync('%(path)s', '')%%>"}, 'read': {'read': "global.process.mainModule.require('fs').readFileSync('%(path)s').toString('base64')"}, 'md5': {'md5': 'global.process.mainModule.require(\'crypto\').createHash(\'md5\').update(global.process.mainModule.require(\'fs\').readFileSync(\'%(path)s\')).digest("hex")'}, 'evaluate': {'test_os': "global.process.mainModule.require('os').platform()"}, 'execute_blind': {'execute_blind': "<%%global.process.mainModule.require('child_process').execSync(Buffer('%(code_b64)s', 'base64').toString() + ' && sleep %(delay)i')%%>"}, 'execute': {'execute': "global.process.mainModule.require('child_process').execSync(Buffer('%(code_b64)s', 'base64').toString())"}})
        # Injection contexts at increasing nesting levels: plain text, inside a
        # tag, inside a quoted string within a tag, and inside a comment.
        self.set_contexts([{'level': 0}, {'level': 1, 'prefix': '%(closure)s%%>', 'suffix': '<%%#', 'closures': javascript.ctx_closures}, {'level': 2, 'prefix': '%(closure)s%%>', 'suffix': '<%%#', 'closures': {1: ["'", ')'], 2: ['"', ')']}}, {'level': 3, 'prefix': '*/%%>', 'suffix': '<%%#'}])
class OptionSeriesVariwideDataDragdropGuideboxDefault(Options):
    """Generated Highcharts option wrapper for
    ``series.variwide.data.dragDrop.guideBox.default``.

    NOTE(review): getter/setter pairs share a name — upstream these are
    generated ``@property``/``@<name>.setter`` pairs with decorators stripped.
    Getter arguments reflect the Highcharts defaults.
    """
    def className(self):
        return self._config_get('highcharts-drag-box-default')
    def className(self, text: str):
        self._config(text, js_type=False)
    def color(self):
        return self._config_get('rgba(0, 0, 0, 0.1)')
    def color(self, text: str):
        self._config(text, js_type=False)
    def cursor(self):
        return self._config_get('move')
    def cursor(self, text: str):
        self._config(text, js_type=False)
    def lineColor(self):
        return self._config_get('#888')
    def lineColor(self, text: str):
        self._config(text, js_type=False)
    def lineWidth(self):
        return self._config_get(1)
    def lineWidth(self, num: float):
        self._config(num, js_type=False)
    def zIndex(self):
        return self._config_get(900)
    def zIndex(self, num: float):
        self._config(num, js_type=False)
@pytest.mark.parametrize('transaction_info', ETH_TEST_TRANSACTIONS)
def test_eth_account_sign_transaction_from_eth_test(acct, transaction_info):
    """Signing each reference transaction reproduces the expected signature and
    the recoverable sender address.

    Bugfix: restore the ``@pytest.mark.`` prefix on the parametrize decorator,
    which had been mangled to a bare ``.parametrize(...)`` expression.
    """
    expected_raw_txn = transaction_info['signed']
    key = transaction_info['key']
    # Strip bookkeeping keys; what remains is the unsigned transaction dict.
    transaction = dissoc(transaction_info, 'signed', 'key', 'unsigned')
    signed = acct.sign_transaction(transaction, key)
    # hex chars [-130:-66] are 32 bytes — presumably the `r` component of the
    # signature tail in the raw transaction.
    assert (signed.r == Web3.to_int(hexstr=expected_raw_txn[(-130):(-66)]))
    expected_sender = acct.from_key(key).address
    assert (acct.recover_transaction(signed.rawTransaction) == expected_sender)
def test_counts_episodes_that_skip_and_error_in_reset():
    """An episode whose reset() raises (after step-skipping) is still counted once."""
    env = build_dummy_maze_env()
    env = _StepSkippingAndErrorInResetWrapper.wrap(env)
    env = LogStatsWrapper.wrap(env)
    with pytest.raises(RuntimeError):
        env.reset()
    # Exactly one step event was recorded despite the failed reset.
    assert (len(env.episode_event_log.step_event_logs) == 1)
    env.write_epoch_stats()
    assert (env.get_stats_value(RewardEvents.reward_original, LogStatsLevel.EPOCH, name='total_step_count') == 1)
    assert (env.get_stats_value(RewardEvents.reward_original, LogStatsLevel.EPOCH, name='episode_count') == 1)
    assert (env.get_stats_value(RewardEvents.reward_original, LogStatsLevel.EPOCH, name='total_episode_count') == 1)
    # Base reward stats were never produced for the aborted episode.
    with pytest.raises(KeyError):
        env.get_stats_value(BaseEnvEvents.reward, LogStatsLevel.EPOCH, name='total_step_count')
class DockerSession(object):
    """Run shell commands inside a long-lived 's3it/pythonista' container that
    bind-mounts *work_dir* at /src. Usable as a context manager; every command
    is also appended to ``session.log``."""

    def __init__(self, work_dir=None):
        if (work_dir is None):
            work_dir = os.getcwd()
        self._docker = docker.Client()
        # Keep the container alive for a day; actual commands run via exec.
        self._container = self._docker.create_container(image='s3it/pythonista', command='/bin/sleep 1d', working_dir='/src', user=os.geteuid(), volumes=['/src'], host_config=self._docker.create_host_config(binds={work_dir: {'bind': '/src', 'mode': 'rw'}}))
        self._container_id = self._container[u'Id']
        self._docker.start(self._container_id)
        self._running = True
        logging.info("Session %s will execute commands in container '%s'", self, self._container_id)

    def run(self, cmd, shell=True):
        """Execute *cmd* in the container and return ``(exitcode, output)``.

        With shell=True the command is wrapped in ``sh -c "..."``; note the
        command is interpolated unquoted, so embedded double quotes will break.
        """
        assert self._running
        if shell:
            cmd = 'sh -c "{cmd}"'.format(**locals())
        e = self._docker.exec_create(self._container_id, cmd)
        e_id = e[u'Id']
        logging.info("Running '%s' ...", cmd)
        output = self._docker.exec_start(e_id)
        details = self._docker.exec_inspect(e_id)
        exitcode = details[u'ExitCode']
        logging.debug("Command '%s' exited with code %d and output '%s'", cmd, exitcode, output)
        # Append a transcript entry for offline inspection.
        with open('session.log', 'a') as w:
            w.write('\nCOMMAND: {cmd}\nEXITCODE: {exitcode}\nOUTPUT:\n{output}\n '.format(**locals()))
        return (exitcode, output)

    def done(self):
        """Stop and remove the container; the session cannot be reused afterwards."""
        logging.info('Terminating session %s ...', self)
        self._docker.stop(self._container_id, timeout=1)
        self._running = False
        self._docker.remove_container(self._container_id, force=True)
        logging.debug("Removed Docker container '%s'", self._container_id)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.done()
@pytest.mark.usefixtures('use_tmpdir')
def test_that_private_over_global_args_does_not_give_logging_message_for_argpassing(caplog):
    """Passing <ARG>=<ARG> (identity substitution) must not log the
    'Private arg ... chosen over global' message.

    Bugfix: restore the ``@pytest.mark.`` prefix on the usefixtures decorator,
    which had been mangled to a bare ``.usefixtures(...)`` expression.
    """
    caplog.set_level(logging.INFO)
    with open('job_file', 'w', encoding='utf-8') as fout:
        fout.write(dedent('\n EXECUTABLE echo\n ARGLIST <ARG>\n ARG_TYPE 0 STRING\n '))
    with open('config_file.ert', 'w', encoding='utf-8') as fout:
        fout.write('NUM_REALIZATIONS 1\n')
        fout.write('DEFINE <ARG> A\n')
        fout.write('INSTALL_JOB job_name job_file\n')
        fout.write('FORWARD_MODEL job_name(<ARG>=<ARG>)')
    ert_config = ErtConfig.from_file('config_file.ert')
    job_data = ert_config.forward_model_data_to_json('', 0, 0)['jobList'][0]
    assert (len(ert_config.forward_model_list) == 1)
    # <ARG> resolves to the globally-DEFINEd value 'A'.
    assert (job_data['argList'] == ['A'])
    assert ("Private arg '<ARG>':'<ARG>' chosen over global 'A'" not in caplog.text)
class OptionPlotoptionsAreaOnpointConnectoroptions(Options):
    """Generated Highcharts option wrapper for
    ``plotOptions.area.onPoint.connectorOptions``.

    NOTE(review): getter/setter pairs share a name — upstream these are
    generated ``@property``/``@<name>.setter`` pairs with decorators stripped.
    """
    def dashstyle(self):
        return self._config_get(None)
    def dashstyle(self, text: str):
        self._config(text, js_type=False)
    def stroke(self):
        return self._config_get(None)
    def stroke(self, text: str):
        self._config(text, js_type=False)
    def width(self):
        # Default connector line width is 1.
        return self._config_get(1)
    def width(self, num: float):
        self._config(num, js_type=False)
class TestsRGBLinear(util.ColorAssertsPyTest):
    """Round-trip conversion tests for the ``srgb-linear`` color space.

    Bugfix: restore the ``@pytest.mark.`` prefix on the parametrize decorator,
    which had been mangled to a bare ``.parametrize(...)`` expression.
    """

    COLORS = [('red', 'color(srgb-linear 1 0 0)'), ('orange', 'color(srgb-linear 1 0.37626 0)'), ('yellow', 'color(srgb-linear 1 1 0)'), ('green', 'color(srgb-linear 0 0.21586 0)'), ('blue', 'color(srgb-linear 0 0 1)'), ('indigo', 'color(srgb-linear 0.07036 0 0.22323)'), ('violet', 'color(srgb-linear 0.85499 0.22323 0.85499)'), ('white', 'color(srgb-linear 1 1 1)'), ('gray', 'color(srgb-linear 0.21586 0.21586 0.21586)'), ('black', 'color(srgb-linear 0 0 0)'), ('color(srgb-linear 0 0.50196 0)', 'color(srgb-linear 0 0.50196 0)'), ('color(srgb-linear 0 0.50196 0 / 0.5)', 'color(srgb-linear 0 0.50196 0 / 0.5)'), ('color(srgb-linear 50% 50% 50% / 50%)', 'color(srgb-linear 0.5 0.5 0.5 / 0.5)'), ('color(srgb-linear none none none / none)', 'color(srgb-linear none none none / none)'), ('color(srgb-linear 0% 0% 0%)', 'color(srgb-linear 0 0 0)'), ('color(srgb-linear 100% 100% 100%)', 'color(srgb-linear 1 1 1)'), ('color(srgb-linear -100% -100% -100%)', 'color(srgb-linear -1 -1 -1)')]

    @pytest.mark.parametrize('color1,color2', COLORS)
    def test_colors(self, color1, color2):
        self.assertColorEqual(Color(color1).convert('srgb-linear'), Color(color2))
class active(bsn_tlv):
    """Generated (loxigen-style) BSN TLV 'active', wire type 192 — an empty TLV
    carrying only its type/length header."""
    # Wire type identifier for this TLV.
    type = 192

    def __init__(self):
        return

    def pack(self):
        """Serialize: 2-byte type, then a 2-byte length back-patched after packing."""
        packed = []
        packed.append(struct.pack('!H', self.type))
        # Length placeholder; patched below once the total size is known.
        packed.append(struct.pack('!H', 0))
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack('!H', length)
        return ''.join(packed)

    def unpack(reader):
        # NOTE(review): takes `reader` in the self slot — loxigen emits this as
        # a @staticmethod; the decorator appears to have been stripped.
        obj = active()
        _type = reader.read('!H')[0]
        assert (_type == 192)
        _length = reader.read('!H')[0]
        orig_reader = reader
        # Constrain further reads to this TLV's payload (length minus 4 header bytes).
        reader = orig_reader.slice(_length, 4)
        return obj

    def __eq__(self, other):
        # Stateless TLV: any two instances of the same class are equal.
        if (type(self) != type(other)):
            return False
        return True

    def pretty_print(self, q):
        """Render via the pretty-printer *q* as an empty 'active { }' node."""
        q.text('active {')
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')
def location_img_upload_to(instance, filename):
    """Return a unique MEDIA-relative upload path for a location image.

    The original extension is kept (lower-cased) and the basename is replaced
    with a uuid4 so uploads never collide. The target directory is created
    on demand.
    """
    ext = filename.split('.')[-1]
    filename = '%s.%s' % (uuid.uuid4(), ext.lower())
    upload_path = 'locations/'
    upload_abs_path = os.path.join(settings.MEDIA_ROOT, upload_path)
    # Bugfix: exist_ok avoids the exists()-then-makedirs race when two uploads
    # arrive concurrently (the old check could raise FileExistsError).
    os.makedirs(upload_abs_path, exist_ok=True)
    return os.path.join(upload_path, filename)
def _get_ir_config(yaml):
    """Parse *yaml* into an Ambassador config and compile it into an IR.

    Uses a NullSecretHandler (placeholder secrets) and a file checker that
    accepts every path, so compilation succeeds without real cluster state.
    """
    aconf = Config()
    fetcher = ResourceFetcher(logger, aconf)
    fetcher.parse_yaml(yaml)
    aconf.load_all(fetcher.sorted())
    secret_handler = NullSecretHandler(logger, None, None, '0')
    ir = IR(aconf, file_checker=(lambda path: True), secret_handler=secret_handler)
    assert ir
    return ir
class StableDiffusion_1_Inpainting(StableDiffusion_1):
    """SD 1.x pipeline specialized for inpainting: the UNet input is the noisy
    latents concatenated channel-wise with a downsampled binary mask and the
    masked-image latents."""

    def __init__(self, unet: (SD1UNet | None)=None, lda: (SD1Autoencoder | None)=None, clip_text_encoder: (CLIPTextEncoderL | None)=None, scheduler: (Scheduler | None)=None, device: (Device | str)='cpu', dtype: DType=torch.float32) -> None:
        # Conditioning tensors are produced later by set_inpainting_conditions().
        self.mask_latents: (Tensor | None) = None
        self.target_image_latents: (Tensor | None) = None
        super().__init__(unet=unet, lda=lda, clip_text_encoder=clip_text_encoder, scheduler=scheduler, device=device, dtype=dtype)

    def forward(self, x: Tensor, step: int, *, clip_text_embedding: Tensor, condition_scale: float=7.5, **_: Tensor) -> Tensor:
        """Denoise one step; set_inpainting_conditions() must have run first."""
        assert (self.mask_latents is not None)
        assert (self.target_image_latents is not None)
        # Concatenate along channels: (latents | mask | masked-image latents).
        x = torch.cat(tensors=(x, self.mask_latents, self.target_image_latents), dim=1)
        return super().forward(x=x, step=step, clip_text_embedding=clip_text_embedding, condition_scale=condition_scale)

    def set_inpainting_conditions(self, target_image: Image.Image, mask: Image.Image, latents_size: tuple[(int, int)]=(64, 64)) -> tuple[(Tensor, Tensor)]:
        """Prepare mask latents and masked-image latents from PIL inputs.

        The mask is binarized at 0.5 and resized to *latents_size*; the target
        image is scaled to [-1, 1], masked, then encoded with the autoencoder.
        Returns ``(mask_latents, target_image_latents)``.
        """
        target_image = target_image.convert(mode='RGB')
        mask = mask.convert(mode='L')
        mask_tensor = torch.tensor(data=(np.array(object=mask).astype(dtype=np.float32) / 255.0)).to(device=self.device)
        mask_tensor = (mask_tensor > 0.5).unsqueeze(dim=0).unsqueeze(dim=0).to(dtype=self.dtype)
        self.mask_latents = interpolate(x=mask_tensor, factor=torch.Size(latents_size))
        init_image_tensor = ((image_to_tensor(image=target_image, device=self.device, dtype=self.dtype) * 2) - 1)
        # Zero out the region to be inpainted before encoding.
        masked_init_image = (init_image_tensor * (1 - mask_tensor))
        self.target_image_latents = self.lda.encode(x=masked_init_image)
        return (self.mask_latents, self.target_image_latents)

    def compute_self_attention_guidance(self, x: Tensor, noise: Tensor, step: int, *, clip_text_embedding: Tensor, **kwargs: Tensor) -> Tensor:
        """SAG term: predict noise from SAG-degraded latents (negative prompt
        only) and return the scaled difference against the original noise."""
        sag = self._find_sag_adapter()
        assert (sag is not None)
        assert (self.mask_latents is not None)
        assert (self.target_image_latents is not None)
        degraded_latents = sag.compute_degraded_latents(scheduler=self.scheduler, latents=x, noise=noise, step=step, classifier_free_guidance=True)
        # Only the negative (unconditional) half of the CFG embedding is used here.
        (negative_embedding, _) = clip_text_embedding.chunk(2)
        timestep = self.scheduler.timesteps[step].unsqueeze(dim=0)
        self.set_unet_context(timestep=timestep, clip_text_embedding=negative_embedding, **kwargs)
        x = torch.cat(tensors=(degraded_latents, self.mask_latents, self.target_image_latents), dim=1)
        degraded_noise = self.unet(x)
        return (sag.scale * (noise - degraded_noise))
class AtmosphericLayer(OpticalElement):
    """Abstract base class for a single turbulent atmospheric layer.

    Bugfix: restore the ``@property`` / ``@<name>.setter`` decorators that had
    been stripped (bare ``_squared.setter`` / ``_scale.setter`` / ``.setter``
    remnants were left behind). With the decorators restored, the attribute
    assignments in ``__init__`` dispatch through the setters again; the
    Cn_squared / outer_scale setters stay abstract for subclasses to override.
    """

    def __init__(self, input_grid, Cn_squared=None, L0=np.inf, velocity=0, height=0):
        self.input_grid = input_grid
        self.Cn_squared = Cn_squared
        self.L0 = L0
        self._velocity = None
        self.velocity = velocity
        self.height = height
        self._t = 0

    def evolve_until(self, t):
        """Advance the layer's internal state to time *t*; subclass responsibility."""
        raise NotImplementedError()

    def reset(self):
        """Reset the layer to its initial state; subclass responsibility."""
        raise NotImplementedError()

    @property
    def t(self):
        """Current time of the layer."""
        return self._t

    @t.setter
    def t(self, t):
        # Setting the time evolves the layer to that time.
        self.evolve_until(t)

    @property
    def Cn_squared(self):
        """Integrated Cn^2 turbulence strength of this layer."""
        return self._Cn_squared

    @Cn_squared.setter
    def Cn_squared(self, Cn_squared):
        raise NotImplementedError()

    @property
    def outer_scale(self):
        """Outer scale (L0) of the turbulence spectrum."""
        return self._outer_scale

    @outer_scale.setter
    def outer_scale(self, L0):
        raise NotImplementedError()

    @property
    def L0(self):
        # Alias for outer_scale.
        return self.outer_scale

    @L0.setter
    def L0(self, L0):
        self.outer_scale = L0

    @property
    def velocity(self):
        """Wind velocity as a 2-element array."""
        return self._velocity

    @velocity.setter
    def velocity(self, velocity):
        # A scalar is interpreted as purely horizontal wind.
        if np.isscalar(velocity):
            self._velocity = np.array([velocity, 0])
        else:
            self._velocity = np.array(velocity)

    def phase_for(self, wavelength):
        """Phase screen for *wavelength*; subclass responsibility."""
        raise NotImplementedError()

    @property
    def output_grid(self):
        # The layer does not resample: output grid equals input grid.
        return self.input_grid

    def forward(self, wf):
        """Apply the layer's phase screen to wavefront *wf* (returns a copy)."""
        wf = wf.copy()
        wf.electric_field *= np.exp(1j * self.phase_for(wf.wavelength))
        return wf

    def backward(self, wf):
        """Apply the inverse (conjugate) phase screen to wavefront *wf*."""
        wf = wf.copy()
        wf.electric_field *= np.exp(-1j * self.phase_for(wf.wavelength))
        return wf
class TestAddProtocolFailsWhenProtocolWithSameAuthorAndNameButDifferentVersion():
    """aea CLI: `add protocol` must fail when a protocol with the same author
    and name (but a different version) is already present in the agent.

    Bugfix: restore decorators that had been stripped/mangled — the bare
    ``.patch(...)`` lines are ``@mock.patch(...)`` decorators, and the
    setup_class/teardown_class hooks are classmethods.
    """

    @classmethod
    def setup_class(cls):
        """Create an agent, add the protocol, then try adding a different version of it."""
        cls.runner = CliRunner()
        cls.agent_name = 'myagent'
        cls.cwd = os.getcwd()
        cls.t = tempfile.mkdtemp()
        cls.protocol_id = GymMessage.protocol_id
        cls.protocol_name = cls.protocol_id.name
        cls.protocol_author = cls.protocol_id.author
        cls.protocol_version = cls.protocol_id.version
        shutil.copytree(Path(CUR_PATH, '..', 'packages'), Path(cls.t, 'packages'))
        os.chdir(cls.t)
        result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, 'init', '--author', AUTHOR])
        assert (result.exit_code == 0)
        result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, 'create', '--local', cls.agent_name], standalone_mode=False)
        assert (result.exit_code == 0)
        os.chdir(cls.agent_name)
        result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, 'add', '--local', 'protocol', str(cls.protocol_id)], standalone_mode=False)
        assert (result.exit_code == 0)
        # Rewrite the local package config to advertise a different version,
        # then attempt the conflicting add; the result is inspected in tests.
        different_version = '0.1.1'
        different_id = ((((cls.protocol_author + '/') + cls.protocol_name) + ':') + different_version)
        config_path = Path(cls.t, 'packages', cls.protocol_author, 'protocols', cls.protocol_name, DEFAULT_PROTOCOL_CONFIG_FILE)
        config = yaml.safe_load(config_path.open())
        config['version'] = different_version
        yaml.safe_dump(config, config_path.open(mode='w'))
        cls.result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, 'add', '--local', 'protocol', different_id], standalone_mode=False)

    def test_exit_code_equal_to_1(self):
        assert (self.result.exit_code == 1)

    def test_error_message_protocol_already_existing(self):
        s = f"A protocol with id '{self.protocol_id}' already exists. Aborting..."
        assert (self.result.exception.message == s)

    @mock.patch('aea.cli.add.get_package_path', return_value='dest/path')
    @mock.patch('aea.cli.add.fetch_package')
    def test_add_protocol_from_registry_positive(self, fetch_package_mock, *mocks):
        """Adding from the remote registry succeeds when fetch is mocked out."""
        fetch_package_mock.return_value = Path('vendor/{}/protocols/{}'.format(self.protocol_author, self.protocol_name))
        public_id = '{}/{}:{}'.format(AUTHOR, self.protocol_name, self.protocol_version)
        obj_type = 'protocol'
        result = self.runner.invoke(cli, [*CLI_LOG_OPTION, 'add', obj_type, public_id], standalone_mode=False)
        assert (result.exit_code == 0)
        public_id_obj = PublicId.from_str(public_id)
        fetch_package_mock.assert_called_once_with(obj_type, public_id=public_id_obj, cwd='.', dest='dest/path')

    @classmethod
    def teardown_class(cls):
        os.chdir(cls.cwd)
        try:
            shutil.rmtree(cls.t)
        except (OSError, IOError):
            pass
@pytest.mark.parametrize('manager', [PopupConfig], indirect=True)
def test_popup_mixin(manager, backend_name):
    """Calling update_or_show_popup twice opens exactly one popup window and
    sets its text; a second call updates in place.

    Bugfix: restore the ``@pytest.mark.`` prefix on the parametrize decorator,
    which had been mangled to a bare ``.parametrize(...)`` expression.
    """
    number = len(manager.c.internal_windows())
    widget = manager.c.widget['moddedwidget']
    assert (not widget.info()['text'])
    widget.eval('self.update_or_show_popup()')
    widget.eval('self.update_or_show_popup()')
    # Exactly one extra internal window: the popup.
    assert_window_count(manager, (number + 1))
    assert (widget.info()['text'] == 'Text set ok')
    if (backend_name == 'x11'):
        pytest.xfail('X11 fails last check.')
    assert_window_count(manager, number)
class Select2Field(fields.SelectField):
    """``SelectField`` variant rendered with the Select2 widget, with optional
    blank ("no choice") entry support.

    When ``allow_blank`` is true a synthetic ``'__None'`` choice is emitted
    and posting it maps the field data back to ``None``.
    """
    widget = admin_widgets.Select2Widget()

    def __init__(self, label=None, validators=None, coerce=text_type, choices=None, allow_blank=False, blank_text=None, **kwargs):
        super(Select2Field, self).__init__(label, validators, coerce, choices, **kwargs)
        self.allow_blank = allow_blank
        # Fall back to a single space so the blank option still renders.
        self.blank_text = (blank_text or ' ')

    def iter_choices(self):
        """Yield (value, label, selected) triples, including the blank entry."""
        if self.allow_blank:
            (yield (u'__None', self.blank_text, (self.data is None)))
        for choice in self.choices:
            if isinstance(choice, tuple):
                (yield (choice[0], choice[1], (self.coerce(choice[0]) == self.data)))
            else:
                # Enum-style choice objects expose .value / .name.
                (yield (choice.value, choice.name, (self.coerce(choice.value) == self.data)))

    def process_data(self, value):
        """Coerce incoming Python-side data; uncoercible values become None."""
        if (value is None):
            self.data = None
        else:
            try:
                self.data = self.coerce(value)
            except (ValueError, TypeError):
                self.data = None

    def process_formdata(self, valuelist):
        """Coerce posted form data; '__None' selects the blank choice."""
        if valuelist:
            if (valuelist[0] == '__None'):
                self.data = None
            else:
                try:
                    self.data = self.coerce(valuelist[0])
                except ValueError:
                    raise ValueError(self.gettext(u'Invalid Choice: could not coerce'))

    def pre_validate(self, form):
        # A blank selection is valid when allow_blank is set; skip choice validation.
        if (self.allow_blank and (self.data is None)):
            return
        super(Select2Field, self).pre_validate(form)
class CopyPayload(Payload):
IDCOLNAME = '_osc_ID_'
DMLCOLNAME = '_osc_dml_type_'
DML_TYPE_INSERT = 1
DML_TYPE_DELETE = 2
DML_TYPE_UPDATE = 3
def __init__(self, *args, **kwargs):
    """Initialize the copy payload: internal runtime state plus a large set of
    behavior flags/limits sourced from **kwargs (typically CLI options)."""
    super(CopyPayload, self).__init__(*args, **kwargs)
    # --- Runtime state (populated as the schema change progresses) ---
    self._pk_for_filter = []
    self._idx_name_for_filter = 'PRIMARY'
    self._new_table = None
    self._old_table = None
    self._replayed_chg_ids = util.RangeChain()
    self.select_chunk_size = 0
    self.select_checksum_chunk_size = 0
    self.bypass_replay_timeout = False
    self.is_ttl_disabled_by_me = False
    self.stop_before_swap = False
    self.outfile_suffix_end = 0
    self.outfile_suffix_start = 0
    self.last_replayed_id = 0
    self.last_checksumed_id = 0
    self.current_checksum_record = (- 1)
    self.table_size = 0
    self.session_overrides = []
    self.disable_replication = kwargs.get('disable_replication', True)
    self._cleanup_payload = CleanupPayload(*args, **kwargs)
    self.stats = {}
    self.partitions = {}
    self.eta_chunks = 1
    self._last_kill_timer = None
    self.table_swapped = False
    self.current_catchup_start_time = 0
    self.current_catchup_end_time = 0
    self.max_id_to_replay_upto_for_good2go = (- 1)
    self.under_transaction = False
    self.checksum_required_for_replay = False
    # --- Behavior flags/limits from kwargs ---
    self.repl_status = kwargs.get('repl_status', '')
    self.outfile_dir = kwargs.get('outfile_dir', '')
    self.allow_new_pk = kwargs.get('allow_new_pk', False)
    self.allow_drop_column = kwargs.get('allow_drop_column', False)
    self.detailed_mismatch_info = kwargs.get('detailed_mismatch_info', False)
    self.dump_after_checksum = kwargs.get('dump_after_checksum', False)
    self.eliminate_dups = kwargs.get('eliminate_dups', False)
    self.rm_partition = kwargs.get('rm_partition', False)
    self.force_cleanup = kwargs.get('force_cleanup', False)
    self.skip_cleanup_after_kill = kwargs.get('skip_cleanup_after_kill', False)
    self.pre_load_statement = kwargs.get('pre_load_statement', '')
    self.post_load_statement = kwargs.get('post_load_statement', '')
    # Replay tuning knobs.
    self.replay_max_attempt = kwargs.get('replay_max_attempt', constant.DEFAULT_REPLAY_ATTEMPT)
    self.replay_timeout = kwargs.get('replay_timeout', constant.REPLAY_DEFAULT_TIMEOUT)
    self.replay_batch_size = kwargs.get('replay_batch_size', constant.DEFAULT_BATCH_SIZE)
    self.replay_group_size = kwargs.get('replay_group_size', constant.DEFAULT_REPLAY_GROUP_SIZE)
    self.skip_pk_coverage_check = kwargs.get('skip_pk_coverage_check', False)
    self.pk_coverage_size_threshold = kwargs.get('pk_coverage_size_threshold', constant.PK_COVERAGE_SIZE_THRESHOLD)
    self.skip_long_trx_check = kwargs.get('skip_long_trx_check', False)
    self.ddl_file_list = kwargs.get('ddl_file_list', '')
    self.free_space_reserved_percent = kwargs.get('free_space_reserved_percent', constant.DEFAULT_RESERVED_SPACE_PERCENT)
    self.long_trx_time = kwargs.get('long_trx_time', constant.LONG_TRX_TIME)
    self.max_running_before_ddl = kwargs.get('max_running_before_ddl', constant.MAX_RUNNING_BEFORE_DDL)
    self.ddl_guard_attempts = kwargs.get('ddl_guard_attempts', constant.DDL_GUARD_ATTEMPTS)
    self.lock_max_attempts = kwargs.get('lock_max_attempts', constant.LOCK_MAX_ATTEMPTS)
    self.lock_max_wait_before_kill_seconds = kwargs.get('lock_max_wait_before_kill_seconds', constant.LOCK_MAX_WAIT_BEFORE_KILL_SECONDS)
    self.session_timeout = kwargs.get('mysql_session_timeout', constant.SESSION_TIMEOUT)
    self.idx_recreation = kwargs.get('idx_recreation', False)
    self.rocksdb_bulk_load_allow_sk = kwargs.get('rocksdb_bulk_load_allow_sk', False)
    self.unblock_table_creation_without_pk = kwargs.get('unblock_table_creation_without_pk', False)
    self.rebuild = kwargs.get('rebuild', False)
    self.keep_tmp_table = kwargs.get('keep_tmp_table_after_exception', False)
    # Checksum behavior.
    self.skip_checksum = kwargs.get('skip_checksum', False)
    self.skip_checksum_for_modified = kwargs.get('skip_checksum_for_modified', False)
    self.skip_delta_checksum = kwargs.get('skip_delta_checksum', False)
    self.skip_named_lock = kwargs.get('skip_named_lock', False)
    self.skip_affected_rows_check = kwargs.get('skip_affected_rows_check', False)
    self.where = kwargs.get('where', None)
    self.session_overrides_str = kwargs.get('session_overrides', '')
    self.fail_for_implicit_conv = kwargs.get('fail_for_implicit_conv', False)
    self.max_wait_for_slow_query = kwargs.get('max_wait_for_slow_query', constant.MAX_WAIT_FOR_SLOW_QUERY)
    self.max_replay_batch_size = kwargs.get('max_replay_batch_size', constant.MAX_REPLAY_BATCH_SIZE)
    self.allow_unsafe_ts_bootstrap = kwargs.get('allow_unsafe_ts_bootstrap', False)
    self.is_full_table_dump = False
    self.replay_max_changes = kwargs.get('replay_max_changes', constant.MAX_REPLAY_CHANGES)
    self.use_sql_wsenv = kwargs.get('use_sql_wsenv', False)
    # NOTE(review): reads the 'chunk_size' kwarg, not 'checksum_chunk_size' —
    # looks like a copy/paste slip; confirm against upstream before relying on it.
    self.checksum_chunk_size = kwargs.get('chunk_size', constant.CHECKSUM_CHUNK_BYTES)
    if self.use_sql_wsenv:
        # wsenv requires an explicit outfile dir and forbids local disk checks.
        self.chunk_size = kwargs.get('chunk_size', constant.WSENV_CHUNK_BYTES)
        self.skip_disk_space_check = kwargs.get('skip_disk_space_check', True)
        if (not self.skip_disk_space_check):
            raise OSCError('SKIP_DISK_SPACE_CHECK_VALUE_INCOMPATIBLE_WSENV')
        if (not self.outfile_dir):
            raise OSCError('OUTFILE_DIR_NOT_SPECIFIED_WSENV')
    else:
        self.chunk_size = kwargs.get('chunk_size', constant.CHUNK_BYTES)
        self.skip_disk_space_check = kwargs.get('skip_disk_space_check', False)
    self.enable_outfile_compression = kwargs.get('enable_outfile_compression', False)
    self.compressed_outfile_extension = kwargs.get('compressed_outfile_extension', None)
    self.max_id_now = 0
    self.parse_function = parse_create
def current_db(self):
    """Return the name of the database currently being operated on."""
    db_name = self._current_db
    return db_name
def old_pk_list(self):
    """Return the column names that make up the old table's primary key."""
    pk_columns = self._old_table.primary_key.column_list
    return [pk_col.name for pk_col in pk_columns]
def dropped_column_name_list(self):
    """Names of columns present in the old schema but absent from the new one."""
    new_names = {col.name for col in self._new_table.column_list}
    return [
        col.name
        for col in self._old_table.column_list
        if col.name not in new_names
    ]
def old_column_list(self):
    """Old-table column names, excluding columns that are being dropped."""
    dropped = self.dropped_column_name_list
    return [col.name for col in self._old_table.column_list if col.name not in dropped]
def old_non_pk_column_list(self):
    """Old-table column names that are neither filter-PK columns nor dropped."""
    excluded = self.dropped_column_name_list
    pk_cols = self._pk_for_filter
    return [
        col.name
        for col in self._old_table.column_list
        if col.name not in pk_cols and col.name not in excluded
    ]
def checksum_column_list(self):
    """Return non-PK, non-dropped old-table column names eligible for checksum.

    A column whose definition changed between old and new schema is skipped
    when skip_checksum_for_modified is set.
    """
    column_list = []
    old_pk_name_list = [c.name for c in self._old_table.primary_key.column_list]
    dropped = self.dropped_column_name_list
    # Perf fix: build the name->column lookup once, not once per iteration.
    new_columns = {col.name: col for col in self._new_table.column_list}
    for col in self._old_table.column_list:
        if col.name in old_pk_name_list:
            continue
        if col.name in dropped:
            continue
        if col != new_columns[col.name]:
            # Definition differs between schemas; optionally skip it.
            if self.skip_checksum_for_modified:
                continue
        column_list.append(col.name)
    return column_list
def delta_table_name(self):
    """Change-capture table name, shortened/genericized to fit the length cap."""
    name_len = len(self._old_table.name)
    if name_len < constant.MAX_TABLE_LENGTH - 10:
        return constant.DELTA_TABLE_PREFIX + self._old_table.name
    if name_len < constant.MAX_TABLE_LENGTH - 2:
        return constant.SHORT_DELTA_TABLE_PREFIX + self._old_table.name
    return constant.DELTA_TABLE_PREFIX + constant.GENERIC_TABLE_NAME
def table_name(self):
    """Name of the table being altered, as given by the new schema object."""
    target_table = self._new_table
    return target_table.name
def new_table_name(self):
    """Shadow-table name, shortened/genericized to respect the length cap."""
    name_len = len(self._old_table.name)
    if name_len < constant.MAX_TABLE_LENGTH - 10:
        return constant.NEW_TABLE_PREFIX + self.table_name
    if name_len < constant.MAX_TABLE_LENGTH - 2:
        return constant.SHORT_NEW_TABLE_PREFIX + self.table_name
    return constant.NEW_TABLE_PREFIX + constant.GENERIC_TABLE_NAME
def renamed_table_name(self):
    """Name the old table is renamed to, shortened to fit the length cap."""
    name_len = len(self._old_table.name)
    if name_len < constant.MAX_TABLE_LENGTH - 10:
        return constant.RENAMED_TABLE_PREFIX + self._old_table.name
    if name_len < constant.MAX_TABLE_LENGTH - 2:
        return constant.SHORT_RENAMED_TABLE_PREFIX + self._old_table.name
    return constant.RENAMED_TABLE_PREFIX + constant.GENERIC_TABLE_NAME
def insert_trigger_name(self):
    """Name for the AFTER INSERT trigger, shortened to fit the length cap."""
    name_len = len(self._old_table.name)
    if name_len < constant.MAX_TABLE_LENGTH - 10:
        return constant.INSERT_TRIGGER_PREFIX + self._old_table.name
    if name_len < constant.MAX_TABLE_LENGTH - 2:
        return constant.SHORT_INSERT_TRIGGER_PREFIX + self._old_table.name
    return constant.INSERT_TRIGGER_PREFIX + constant.GENERIC_TABLE_NAME
def update_trigger_name(self):
    """Name for the AFTER UPDATE trigger, shortened to fit the length cap."""
    name_len = len(self._old_table.name)
    if name_len < constant.MAX_TABLE_LENGTH - 10:
        return constant.UPDATE_TRIGGER_PREFIX + self._old_table.name
    if name_len < constant.MAX_TABLE_LENGTH - 2:
        return constant.SHORT_UPDATE_TRIGGER_PREFIX + self._old_table.name
    return constant.UPDATE_TRIGGER_PREFIX + constant.GENERIC_TABLE_NAME
def delete_trigger_name(self):
    """Name for the AFTER DELETE trigger, shortened to fit the length cap."""
    name_len = len(self._old_table.name)
    if name_len < constant.MAX_TABLE_LENGTH - 10:
        return constant.DELETE_TRIGGER_PREFIX + self._old_table.name
    if name_len < constant.MAX_TABLE_LENGTH - 2:
        return constant.SHORT_DELETE_TRIGGER_PREFIX + self._old_table.name
    return constant.DELETE_TRIGGER_PREFIX + constant.GENERIC_TABLE_NAME
def outfile(self):
    """Full path prefix for table-dump chunk files."""
    filename = constant.OUTFILE_TABLE + self.table_name
    return os.path.join(self.outfile_dir, filename)
def tmp_table_exclude_id(self):
    """Temp table that holds delta ids which must not be replayed."""
    table_name = '__osc_temp_ids_to_exclude'
    return table_name
def tmp_table_include_id(self):
    """Temp table that holds delta ids which should be replayed."""
    table_name = '__osc_temp_ids_to_include'
    return table_name
def outfile_exclude_id(self):
    """Path of the dump file listing delta ids to exclude from replay."""
    filename = constant.OUTFILE_EXCLUDE_ID + self.table_name
    return os.path.join(self.outfile_dir, filename)
def outfile_include_id(self):
    """Path of the dump file listing delta ids to include in replay."""
    filename = constant.OUTFILE_INCLUDE_ID + self.table_name
    return os.path.join(self.outfile_dir, filename)
def droppable_indexes(self):
    """Indexes we may drop before load and recreate afterwards.

    Empty unless index re-creation was requested.
    """
    return (
        self._new_table.droppable_indexes(keep_unique_key=self.eliminate_dups)
        if self.idx_recreation
        else []
    )
def _outfile_extension(self, skip_compressed_extension: bool=False) -> str:
    """Return the filename extension appended to compressed dump chunks.

    Empty string when compression is disabled, no extension is configured,
    or the caller explicitly asks to skip it.
    """
    if ((not skip_compressed_extension) and self.enable_outfile_compression and self.compressed_outfile_extension):
        # NOTE(review): the hard-coded `0` produces extensions like ".0.zst";
        # looks suspicious -- confirm this literal is intentional.
        return '.{}.{}'.format(0, self.compressed_outfile_extension)
    else:
        return ''
def _outfile_name(self, chunk_id: int, suffix: Optional[str]=None, skip_compressed_extension: bool=False) -> str:
return '{}{}.{}{}'.format(self.outfile, (suffix or ''), chunk_id, self._outfile_extension(skip_compressed_extension=skip_compressed_extension))
def set_tx_isolation(self):
    """Force REPEATABLE-READ isolation for this session.

    MySQL 8.0 renamed the session variable tx_isolation to
    transaction_isolation, hence the version switch.
    """
    if self.mysql_version.is_mysql8:
        self.execute_sql(sql.set_session_variable('transaction_isolation'), ('REPEATABLE-READ',))
    else:
        self.execute_sql(sql.set_session_variable('tx_isolation'), ('REPEATABLE-READ',))
def set_sql_mode(self):
    """Enable strict SQL mode for the session so invalid data fails loudly."""
    self.execute_sql(sql.set_session_variable('sql_mode'), ('STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO',))
def parse_session_overrides_str(self, overrides_str):
    """Parse 'var1=val1;var2=val2' into a list of [name, value] pairs.

    Raises OSCError('INCORRECT_SESSION_OVERRIDE') for any malformed section.
    """
    if not overrides_str:
        return []
    overrides = []
    for section in overrides_str.split(';'):
        parts = section.split('=')
        if len(parts) != 2 or not parts[0] or not parts[1]:
            raise OSCError('INCORRECT_SESSION_OVERRIDE', {'section': section})
        overrides.append(parts)
    return overrides
def override_session_vars(self):
    """Apply user-supplied session variable overrides to this connection."""
    self.session_overrides = self.parse_session_overrides_str(self.session_overrides_str)
    for (var_name, var_value) in self.session_overrides:
        log.info('Override session variable {} with value: {}'.format(var_name, var_value))
        self.execute_sql(sql.set_session_variable(var_name), (var_value,))
def is_var_enabled(self, var_name):
    """True when a known MySQL variable holds anything other than OFF/0."""
    if var_name not in self.mysql_vars:
        return False
    return self.mysql_vars[var_name] not in ('OFF', '0')
def is_trigger_rbr_safe(self):
    """Whether creating triggers is safe under the current binlog format.

    Always safe with statement-based logging.  Under ROW format it is only
    safe on Facebook builds where sql_log_bin_triggers is disabled.
    """
    if self.mysql_vars['binlog_format'] != 'ROW':
        return True
    if not self.mysql_version.is_fb:
        return False
    return not self.is_var_enabled('sql_log_bin_triggers')
def is_myrocks_table(self):
    """True when the new table's storage engine is RocksDB."""
    engine = self._new_table.engine
    return bool(engine) and engine.upper() == 'ROCKSDB'
def is_myrocks_ttl_table(self):
    """True when the new table definition uses MyRocks TTL."""
    new_table = self._new_table
    return new_table.is_myrocks_ttl_table
def sanity_checks(self):
    """Abort early when trigger creation would be unsafe for replication.

    NOTE(review): `is_trigger_rbr_safe` is referenced without parentheses,
    so it is presumably a property on this class -- confirm; on a plain
    bound method this check would always pass.
    """
    if (not self.is_trigger_rbr_safe):
        raise OSCError('NOT_RBR_SAFE')
def skip_cache_fill_for_myrocks(self):
    """Keep bulk reads from this session out of the MyRocks block cache, if supported."""
    if ('rocksdb_skip_fill_cache' in self.mysql_vars):
        self.execute_sql(sql.set_session_variable('rocksdb_skip_fill_cache'), (1,))
_hook
def init_connection(self, db):
    """Stage 1: prepare the session (isolation, sql_mode, overrides) and take the OSC lock."""
    log.info('== Stage 1: Init ==')
    self.use_db(db)
    self.set_no_binlog()
    self.get_mysql_settings()
    self.init_mysql_version()
    self.sanity_checks()
    self.set_tx_isolation()
    self.set_sql_mode()
    self.enable_priority_ddl()
    self.skip_cache_fill_for_myrocks()
    self.enable_sql_wsenv()
    self.override_session_vars()
    self.get_osc_lock()
def table_exists(self, table_name):
    """True when `table_name` exists in the current database."""
    found = self.query(sql.table_existence, (table_name, self._current_db))
    return bool(found)
def fetch_table_schema(self, table_name):
    """Parse SHOW CREATE TABLE output into a schema object.

    Returns None when SHOW CREATE TABLE yields no rows (presumably cannot
    happen for an existing table -- confirm).  Raises OSCError on parse
    failure.
    """
    ddl = self.query(sql.show_create_table(table_name))
    if ddl:
        try:
            return self.parse_function(ddl[0]['Create Table'])
        except ParseError as e:
            raise OSCError('TABLE_PARSING_ERROR', {'db': self._current_db, 'table': self.table_name, 'msg': str(e)})
def fetch_partitions(self, table_name):
    """Return the partition names of `table_name`, or [] when unpartitioned.

    information_schema reports a NULL PARTITION_NAME for unpartitioned
    tables, which arrives in Python as None -- filter that out as well as
    the literal 'None' string the previous code compared against.
    """
    partition_result = self.query(sql.fetch_partition, (self._current_db, table_name))
    return [
        partition_entry['PARTITION_NAME']
        for partition_entry in partition_result
        # Bug fix: a NULL name is Python None, not the string 'None'.
        if partition_entry['PARTITION_NAME'] not in (None, 'None')
    ]
_hook
def init_table_obj(self):
    """Load the old table's schema and partitions; carry engine and auto_increment into the new schema."""
    if (not self.table_exists(self.table_name)):
        raise OSCError('TABLE_NOT_EXIST', {'db': self._current_db, 'table': self.table_name})
    self._old_table = self.fetch_table_schema(self.table_name)
    self.partitions[self.table_name] = self.fetch_partitions(self.table_name)
    # The renamed table is the old table under another name; same partitions.
    self.partitions[self.renamed_table_name] = self.partitions[self.table_name]
    if self._old_table.auto_increment:
        self._new_table.auto_increment = self._old_table.auto_increment
    # Preserve the existing engine; the new schema may not specify one.
    self._new_table.engine = self._old_table.engine
    self.populate_charset_collation(self._old_table)
    self.populate_charset_collation(self._new_table)
def cleanup_with_force(self):
    """Best-effort removal of files, triggers and helper tables left by a previous run."""
    log.info('--force-cleanup specified, cleaning up things that may left behind by last run')
    cleanup_payload = CleanupPayload(charset=self.charset, sudo=self.sudo, disable_replication=self.disable_replication)
    # Stale dump chunks and id files from the previous attempt.
    for filepath in (self.outfile_exclude_id, self.outfile_include_id):
        cleanup_payload.add_file_entry(filepath)
    cleanup_payload.add_file_entry('{}*'.format(self._outfile_name(suffix='.old', chunk_id=0, skip_compressed_extension=True)))
    cleanup_payload.add_file_entry('{}*'.format(self._outfile_name(suffix='.new', chunk_id=0, skip_compressed_extension=True)))
    file_prefixes = [self.outfile, '{}.old'.format(self.outfile), '{}.new'.format(self.outfile)]
    for file_prefix in file_prefixes:
        log.debug('globbing {}'.format(file_prefix))
        for outfile in glob.glob('{}.[0-9]*'.format(file_prefix)):
            cleanup_payload.add_file_entry(outfile)
    # Triggers and helper tables that may still exist on the server.
    for trigger in (self.delete_trigger_name, self.update_trigger_name, self.insert_trigger_name):
        cleanup_payload.add_drop_trigger_entry(self._current_db, trigger)
    for tbl in (self.new_table_name, self.delta_table_name, self.renamed_table_name):
        partitions = self.fetch_partitions(tbl)
        cleanup_payload.add_drop_table_entry(self._current_db, tbl, partitions)
    # Hand the payload our connection settings and run the cleanup.
    cleanup_payload.mysql_user = self.mysql_user
    cleanup_payload.mysql_pass = self.mysql_pass
    cleanup_payload.socket = self.socket
    cleanup_payload.get_conn_func = self.get_conn_func
    cleanup_payload.cleanup(self._current_db)
    cleanup_payload.close_conn()
_hook
def determine_outfile_dir(self):
    """Decide where dump chunks will be written, unless a directory was given.

    NOTE(review): the probed variable names ('_file_priv', '') look
    truncated -- presumably secure_file_priv and datadir; confirm against
    the sql.select_as helper.
    """
    if self.outfile_dir:
        return
    for var_name in ('_file_priv', ''):
        result = self.query(sql.select_as(var_name, 'folder'))
        if (not result):
            raise Exception('Failed to get {} system variable'.format(var_name))
        if result[0]['folder']:
            if (var_name == '_file_priv'):
                self.outfile_dir = result[0]['folder']
            else:
                # datadir-style fallback: dumps live under the db's directory.
                self.outfile_dir = os.path.join(result[0]['folder'], self._current_db_dir)
            log.info('Will use {} storing dump outfile'.format(self.outfile_dir))
            return
    raise Exception('Cannot determine output dir for dump')
def table_check(self):
    """Ensure helper table names are free and the new schema has a primary key."""
    tables_to_check = (self.new_table_name, self.delta_table_name, self.renamed_table_name)
    for table_name in tables_to_check:
        if self.table_exists(table_name):
            raise OSCError('TABLE_ALREADY_EXIST', {'db': self._current_db, 'table': table_name})
    # A PK on the new schema is mandatory for the copy/replay machinery.
    if (not all((self._new_table.primary_key, self._new_table.primary_key.column_list))):
        raise OSCError('NO_PK_EXIST', {'db': self._current_db, 'table': self.table_name})
def trigger_check(self):
    """Fail when the table already has triggers; OSC needs to install its own."""
    triggers = self.query(sql.trigger_existence, (self.table_name, self._current_db))
    if triggers:
        trigger_desc = ['Trigger name: {}, Action: {} {}'.format(trigger['TRIGGER_NAME'], trigger['ACTION_TIMING'], trigger['EVENT_MANIPULATION']) for trigger in triggers]
        raise OSCError('TRIGGER_ALREADY_EXIST', {'triggers': '\n'.join(trigger_desc)})
def foreign_key_check(self):
    """Fail when the table participates in any foreign key (unsupported by OSC)."""
    if self.is_myrocks_table:
        # MyRocks has no foreign key support, so there is nothing to check.
        # Fixed garbled log message ("SKip" -> "Skip").
        log.info("Skip foreign key check because MyRocks doesn't support this yet")
        return True
    foreign_keys = self.query(sql.foreign_key_cnt, (self.table_name, self._current_db, self.table_name, self._current_db))
    if foreign_keys:
        fk = 'CONSTRAINT `{}` FOREIGN KEY (`{}`) REFERENCES `{}` (`{}`)'.format(foreign_keys[0]['constraint_name'], foreign_keys[0]['col_name'], foreign_keys[0]['ref_tab'], foreign_keys[0]['ref_col_name'])
        raise OSCError('FOREIGN_KEY_FOUND', {'db': self._current_db, 'table': self.table_name, 'fk': fk})
def get_table_size_from_IS(self, table_name):
    """Data + index size in bytes for `table_name`, per information_schema.

    Bug fix: query for the `table_name` argument instead of always reading
    self.table_name, so callers can size arbitrary tables (behavior is
    unchanged for existing callers, which all pass self.table_name).
    """
    result = self.query(sql.show_table_stats(self._current_db), (table_name,))
    if result:
        return result[0]['Data_length'] + result[0]['Index_length']
    return 0
def get_table_size_for_myrocks(self, table_name):
    """Raw (uncompressed) dump size estimate for a MyRocks table.

    Bug fix: query for the `table_name` argument instead of always reading
    self.table_name (existing callers all pass self.table_name, so
    behavior is unchanged for them).
    """
    result = self.query(sql.get_myrocks_table_dump_size(), (self._current_db, table_name))
    if result:
        return result[0]['raw_size'] or 0
    return 0
def get_table_size(self, table_name):
    """Size of `table_name` in bytes, based on information_schema stats."""
    size_reader = self.get_table_size_from_IS
    return size_reader(table_name)
def get_expected_compression_ratio_pct(self) -> int:
    """Expected compressed/raw dump size ratio in percent (100 = no gain)."""
    default_ratio_pct = 100
    return default_ratio_pct
def get_expected_dump_size(self, table_name):
    """Estimated number of bytes the outfile dump of `table_name` will occupy."""
    dump_size = self.get_table_size(table_name)
    if self.is_myrocks_table:
        # MyRocks compresses on disk; the raw dump size is a better estimate.
        dump_size = self.get_table_size_for_myrocks(table_name)
    if self.enable_outfile_compression:
        dump_size = (dump_size * self.get_expected_compression_ratio_pct()) // 100
    return dump_size
def check_disk_size(self):
    """Ensure the outfile partition can hold the dump (plus a rebuilt table when adding a PK)."""
    self.table_size = int(self.get_table_size(self.table_name))
    if self.skip_disk_space_check:
        return True
    dump_size = int(self.get_expected_dump_size(self.table_name))
    disk_space = int(util.disk_partition_free(self.outfile_dir))
    if (self.allow_new_pk and (not self._old_table.primary_key.column_list)):
        # Full-table dump plus a complete rebuilt table must both fit.
        required_size = (self.table_size + dump_size)
    else:
        # 10% headroom over the larger of table size and dump size.
        required_size = (max(self.table_size, dump_size) * 1.1)
    log.info('Disk space required: {}, available: {}'.format(util.readable_size(required_size), util.readable_size(disk_space)))
    if (required_size > disk_space):
        raise OSCError('NOT_ENOUGH_SPACE', {'need': util.readable_size(required_size), 'avail': util.readable_size(disk_space)})
def check_disk_free_space_reserved(self):
    """Abort when free space on the outfile partition drops below the reserved percentage."""
    if self.skip_disk_space_check:
        return True
    disk_partition_size = util.disk_partition_size(self.outfile_dir)
    free_disk_space = util.disk_partition_free(self.outfile_dir)
    free_space_factor = (self.free_space_reserved_percent / 100)
    free_space_reserved = (disk_partition_size * free_space_factor)
    if (free_disk_space < free_space_reserved):
        raise OSCError('NOT_ENOUGH_SPACE', {'need': util.readable_size(free_space_reserved), 'avail': util.readable_size(free_disk_space)})
def validate_post_alter_pk(self):
    """Check whether some index on the new table can serve old-PK lookups.

    True when an index prefix equals exactly the old PK column set, or a
    unique index's prefix columns are a strict subset of the old PK.
    """
    old_pk_set = set(self._pk_for_filter)
    prefix_len = len(self._pk_for_filter)
    for idx in [self._new_table.primary_key] + self._new_table.indexes:
        log.debug('Checking prefix for {}'.format(idx.name))
        prefix_names = {col.name for col in idx.column_list[:prefix_len]}
        if prefix_names == old_pk_set:
            log.info('PK prefix on new table can cover PK from old table')
            return True
        if idx.is_unique and old_pk_set > prefix_names:
            log.info('old PK can uniquely identify rows from new schema')
            return True
    return False
def find_coverage_index(self):
    """Return the name of a unique index whose prefix equals the old PK columns, else None."""
    prefix_len = len(self._pk_for_filter)
    candidates = [self._new_table.primary_key] + self._new_table.indexes
    for idx in candidates:
        prefix_names = [col.name for col in idx.column_list[:prefix_len]]
        if prefix_names == self._pk_for_filter and idx.is_unique:
            return idx.name
    return None
def init_range_variables(self):
    """Create the per-PK-column MySQL user variable names used for chunking."""
    pk_count = len(self._pk_for_filter)
    self.range_start_vars_array = ['_start_{}'.format(i) for i in range(pk_count)]
    self.range_end_vars_array = ['_end_{}'.format(i) for i in range(pk_count)]
    self.range_start_vars = ','.join(self.range_start_vars_array)
    self.range_end_vars = ','.join(self.range_end_vars_array)
def make_chunk_size_odd(self):
    """Bump even chunk sizes to the next odd number, for dump and checksum chunking."""
    if self.select_checksum_chunk_size % 2 == 0:
        self.select_checksum_chunk_size += 1
    if self.select_chunk_size % 2 == 0:
        self.select_chunk_size += 1
def get_table_chunk_size(self):
    """Derive per-chunk row counts from the table's average row length.

    select_chunk_size / select_checksum_chunk_size end up as rows-per-chunk
    so each chunk is roughly chunk_size / checksum_chunk_size bytes.
    Raises OSCError when table stats are unavailable.
    """
    result = self.query(sql.table_avg_row_len, (self._current_db, self.table_name))
    if result:
        tbl_avg_length = result[0]['AVG_ROW_LENGTH']
        # Guard against tiny/zero averages on near-empty tables.
        if (tbl_avg_length < 20):
            tbl_avg_length = 20
        self.select_chunk_size = (self.chunk_size // tbl_avg_length)
        # A chunk must contain at least one row.
        if (not self.select_chunk_size):
            self.select_chunk_size = 1
        self.select_checksum_chunk_size = (self.checksum_chunk_size // tbl_avg_length)
        if (not self.select_checksum_chunk_size):
            self.select_checksum_chunk_size = 1
        log.info('TABLE contains {} rows, table_avg_row_len: {} bytes,chunk_size: {} bytes checksum chunk_size {} bytes, '.format(result[0]['TABLE_ROWS'], tbl_avg_length, self.chunk_size, self.checksum_chunk_size))
        log.info('Outfile will contain {} rows each'.format(self.select_chunk_size))
        self.eta_chunks = max(int((result[0]['TABLE_ROWS'] / self.select_chunk_size)), 1)
    else:
        raise OSCError('FAIL_TO_GUESS_CHUNK_SIZE')
def has_desired_schema(self):
    """True when the table already matches the target schema and no rebuild was requested."""
    if self._new_table != self._old_table:
        return False
    if self.rebuild:
        log.info('Table already has the desired schema. However --rebuild is specified, doing a rebuild instead')
        return False
    log.info('Table already has the desired schema. ')
    return True
def decide_pk_for_filter(self):
    """Choose the column set used to identify rows when replaying changes.

    Preference order: old PK (when complete and unprefixed) -> first unique
    key -> all columns with a full table dump, when --allow-new-pk permits.
    """
    all_col_def = {col.name: col for col in self._old_table.column_list}
    if (not all((self._old_table.primary_key, self._old_table.primary_key.column_list))):
        # No usable PK on the old table: fall back to the first unique key.
        for idx in self._old_table.indexes:
            if idx.is_unique:
                log.info("Old table doesn't have a PK but has an UK: {}".format(idx.name))
                self._pk_for_filter = [col.name for col in idx.column_list]
                self._idx_name_for_filter = idx.name
                break
        else:
            # No unique key either: only proceed if a brand new PK is allowed.
            if self.allow_new_pk:
                self._pk_for_filter = [col.name for col in self._old_table.column_list]
                self.is_full_table_dump = True
            else:
                raise OSCError('NEW_PK')
    else:
        # A prefixed PK column (e.g. varchar keyed on its first N chars)
        # cannot be used for chunking; dump the whole table instead.
        for col in self._old_table.primary_key.column_list:
            if col.length:
                log.info('Found prefixed column/s as part of the PK. Will do full table dump (no chunking).')
                self._pk_for_filter = [c.name for c in self._old_table.column_list]
                self.is_full_table_dump = True
                break
        else:
            self._pk_for_filter = [col.name for col in self._old_table.primary_key.column_list]
    self._pk_for_filter_def = [all_col_def[col_name] for col_name in self._pk_for_filter]
def ts_bootstrap_check(self):
    """Block changes that would bootstrap a timestamp column with the current time, unless allowed."""
    if (not need_default_ts_bootstrap(self._old_table, self._new_table)):
        return
    if self.allow_unsafe_ts_bootstrap:
        log.warning('Bootstraping timestamp column using current time is required. Bypassing the safety check as requested')
        return
    raise OSCError('UNSAFE_TS_BOOTSTRAP')
_hook
def pre_osc_check(self):
    """Run all sanity checks before making any change on the database side."""
    self.table_check()
    self.decide_pk_for_filter()
    # If no index on the new table covers the old PK, binlog replay would be
    # inefficient; allow it only for small tables or when explicitly skipped.
    if (not self.validate_post_alter_pk()):
        self.table_size = self.get_table_size(self.table_name)
        if self.skip_pk_coverage_check:
            log.warning('Indexes on new table cannot cover current PK of the old schema, which will make binary logs replay in an inefficient way.')
        elif (self.table_size < self.pk_coverage_size_threshold):
            log.warning('No index on new table can cover old pk. Since this is a small table: {}, we fallback to a full table dump'.format(self.table_size))
            self.is_full_table_dump = True
            self._pk_for_filter = [col.name for col in self._old_table.column_list]
            self._pk_for_filter_def = self._old_table.column_list.copy()
        elif self.is_full_table_dump:
            log.warning('Skipping coverage index test, since we are doing full table dump')
        else:
            old_pk_names = ', '.join(('`{}`'.format(col.name) for col in self._old_table.primary_key.column_list))
            raise OSCError('NO_INDEX_COVERAGE', {'pk_names': old_pk_names})
    log.info('PK filter for replaying changes later: {}'.format(self._pk_for_filter))
    self.foreign_key_check()
    self.trigger_check()
    self.init_range_variables()
    self.get_table_chunk_size()
    self.make_chunk_size_odd()
    self.check_disk_size()
    self.ts_bootstrap_check()
    self.drop_columns_check()
def drop_columns_check(self):
    """Validate dropped columns: require --allow-drop-column and protect filter-PK columns."""
    dropped = self.dropped_column_name_list
    if dropped:
        if not self.allow_drop_column:
            raise OSCError('MISSING_COLUMN', {'column': ', '.join(dropped)})
        for diff_column in dropped:
            log.warning('Column `{}` is missing in the new schema, but --allow-drop-column is specified. Will drop this column.'.format(diff_column))
    for col in self._pk_for_filter:
        if col in dropped:
            raise OSCError('PRI_COL_DROPPED', {'pri_col': col})
def add_drop_table_entry(self, table_name):
    """Queue `table_name` (with its known partitions) for cleanup-time DROP."""
    known_partitions = self.partitions.get(table_name, [])
    self._cleanup_payload.add_drop_table_entry(self._current_db, table_name, known_partitions)
def get_collations(self):
    """Map every collation name known to the server to its character set."""
    rows = self.query(sql.all_collation)
    return {r['COLLATION_NAME']: r['CHARACTER_SET_NAME'] for r in rows}
def get_default_collations(self):
    """Map each charset to its default collation, honoring the utf8mb4 server override."""
    rows = self.query(sql.default_collation)
    charset_collations = {r['CHARACTER_SET_NAME']: r['COLLATION_NAME'] for r in rows}
    utf8_override = self.query(sql.get_global_variable('default_collation_for_utf8mb4'))
    if utf8_override and 'utf8mb4' in charset_collations:
        charset_collations['utf8mb4'] = utf8_override[0]['Value']
    return charset_collations
def populate_charset_collation(self, schema_obj):
    """Fill in missing charset/collation on a schema object and its text columns.

    Uses the server's default collation per charset and the
    collation->charset map so later schema comparisons are not tripped up
    by implicit defaults.  Returns the (mutated) schema_obj.
    """
    default_collations = self.get_default_collations()
    collation_charsets = self.get_collations()
    if ((schema_obj.charset is not None) and (schema_obj.collate is None)):
        # utf8mb3 is an alias of utf8 in the server's collation table.
        if (schema_obj.charset == 'utf8mb3'):
            schema_obj.collate = default_collations.get('utf8', None)
        else:
            schema_obj.collate = default_collations.get(schema_obj.charset, None)
    if ((schema_obj.charset is None) and (schema_obj.collate is not None)):
        # NOTE(review): this assignment is a no-op (charset is already None);
        # presumably it should derive the charset from collation_charsets --
        # confirm against upstream.
        schema_obj.charset = None
    text_types = {'CHAR', 'VARCHAR', 'TEXT', 'MEDIUMTEXT', 'LONGTEXT', 'ENUM'}
    for column in schema_obj.column_list:
        if (column.column_type in text_types):
            if (column.collate is None):
                if (column.charset and default_collations.get(column.charset, None)):
                    column.collate = default_collations[column.charset]
                else:
                    column.collate = schema_obj.collate
            if (column.charset is None):
                if (column.collate and collation_charsets.get(column.collate, None)):
                    column.charset = collation_charsets[column.collate]
                else:
                    column.charset = schema_obj.charset
    return schema_obj
def remove_using_hash_for_80(self):
    """Strip explicit USING HASH from new-table indexes (not honored on MySQL 8.0)."""
    hash_indexes = [idx for idx in self._new_table.indexes if idx.using == 'HASH']
    for idx in hash_indexes:
        idx.using = None
_hook
def create_copy_table(self):
    """Create the shadow (copy) table from the new schema.

    Optionally preserves the old table's partitioning, and, when
    fail_for_implicit_conv is set, re-reads the created table to detect
    definitions MySQL silently altered.
    """
    tmp_sql_obj = deepcopy(self._new_table)
    tmp_sql_obj.name = self.new_table_name
    if self.rm_partition:
        # Keep the old table's partitioning instead of the new schema's.
        tmp_sql_obj.partition = self._old_table.partition
        tmp_sql_obj.partition_config = self._old_table.partition_config
    tmp_table_ddl = tmp_sql_obj.to_sql()
    log.info('Creating copy table using: {}'.format(tmp_table_ddl))
    self.execute_sql(tmp_table_ddl)
    self.partitions[self.new_table_name] = self.fetch_partitions(self.new_table_name)
    self.add_drop_table_entry(self.new_table_name)
    if self.fail_for_implicit_conv:
        obj_after = self.fetch_table_schema(self.new_table_name)
        # Normalize name/partitioning so only real definition diffs remain.
        obj_after.name = self._new_table.name
        obj_after.partition = self._new_table.partition
        obj_after.partition_config = self._new_table.partition_config
        self.populate_charset_collation(obj_after)
        if self.mysql_version.is_mysql8:
            # 8.0 ignores USING HASH; strip it before comparing.
            self.remove_using_hash_for_80()
        if self.is_myrocks_table:
            log.warning(f'Ignore BTREE indexes in table `{self._new_table.name}` on RocksDB')
            for idx in self._new_table.indexes:
                if (idx.using == 'BTREE'):
                    idx.using = None
        if (obj_after != self._new_table):
            raise OSCError('IMPLICIT_CONVERSION_DETECTED', {'diff': str(SchemaDiff(self._new_table, obj_after))})
_hook
def create_delta_table(self):
    """Create the change-capture (delta) table, plus a filter-PK index when chunking."""
    self.execute_sql(sql.create_delta_table(self.delta_table_name, self.IDCOLNAME, self.DMLCOLNAME, self._old_table.engine, self.old_column_list, self._old_table.name))
    self.add_drop_table_entry(self.delta_table_name)
    # No index needed for a full table dump -- replay scans everything anyway.
    if (self._pk_for_filter_def and (not self.is_full_table_dump)):
        self.execute_sql(sql.create_idx_on_delta_table(self.delta_table_name, [col.name for col in self._pk_for_filter_def]))
def create_insert_trigger(self):
    """Install the AFTER INSERT trigger that mirrors new rows into the delta table."""
    self.execute_sql(sql.create_insert_trigger(self.insert_trigger_name, self.table_name, self.delta_table_name, self.DMLCOLNAME, self.old_column_list, self.DML_TYPE_INSERT))
    self._cleanup_payload.add_drop_trigger_entry(self._current_db, self.insert_trigger_name)
_hook
def create_delete_trigger(self):
    """Install the AFTER DELETE trigger that records deletions into the delta table."""
    self.execute_sql(sql.create_delete_trigger(self.delete_trigger_name, self.table_name, self.delta_table_name, self.DMLCOLNAME, self.old_column_list, self.DML_TYPE_DELETE))
    self._cleanup_payload.add_drop_trigger_entry(self._current_db, self.delete_trigger_name)
def create_update_trigger(self):
    """Install the AFTER UPDATE trigger; PK-changing updates are logged as delete+insert."""
    self.execute_sql(sql.create_update_trigger(self.update_trigger_name, self.table_name, self.delta_table_name, self.DMLCOLNAME, self.old_column_list, self.DML_TYPE_UPDATE, self.DML_TYPE_DELETE, self.DML_TYPE_INSERT, self._pk_for_filter))
    self._cleanup_payload.add_drop_trigger_entry(self._current_db, self.update_trigger_name)
def get_long_trx(self):
    """Return the first long-running process touching our table, else None/False.

    Also normalizes each process's Info field to str in place.
    """
    if self.skip_long_trx_check:
        return False
    processes = self.query(sql.show_processlist)
    for proc in processes:
        # Info may come back as bytes depending on the driver; normalize to str.
        if (not proc['Info']):
            sql_statement = ''
        elif isinstance(proc['Info'], bytes):
            sql_statement = proc['Info'].decode('utf-8', 'replace')
        else:
            sql_statement = proc['Info']
        proc['Info'] = sql_statement
        # NOTE(review): the '--' prefix before the statement looks odd but is
        # harmless for a substring test -- confirm it is intentional.
        if (((proc.get('Time') or 0) > self.long_trx_time) and (proc.get('db', '') == self._current_db) and (self.table_name in ('--' + sql_statement)) and (not (proc.get('Command', '') == 'Sleep'))):
            return proc
def wait_until_slow_query_finish(self):
    """Poll for long-running queries on our table, up to max_wait_for_slow_query attempts.

    Returns True as soon as no slow query is found; raises
    OSCError('LONG_RUNNING_TRX') describing the offender after exhausting
    all attempts (polling every 5 seconds).
    """
    for _ in range(self.max_wait_for_slow_query):
        slow_query = self.get_long_trx()
        if not slow_query:
            return True
        log.info('Slow query pid={} is still running'.format(slow_query.get('Id', 0)))
        time.sleep(5)
    raise OSCError('LONG_RUNNING_TRX', {
        'pid': slow_query.get('Id', 0),
        'user': slow_query.get('User', ''),
        'host': slow_query.get('Host', ''),
        'time': slow_query.get('Time', ''),
        'command': slow_query.get('Command', ''),
        # Bug fix: 'Info' is already a str here (normalized by get_long_trx)
        # and the old bytes default b'' has no .encode(); use the text as-is.
        'info': str(slow_query.get('Info', '')),
    })
def kill_selects(self, table_names, conn=None):
    """Kill running SELECT/ALTER statements that touch any of `table_names`.

    Used as a last resort before LOCK TABLES would time out; statements
    against information_schema are spared.
    """
    conn = (conn or self.conn)
    table_names = [tbl.lower() for tbl in table_names]
    keyword_pattern = '(\\s|^)({})(\\s|$)'
    table_pattern = '(\\s|`)({})(\\s|`|$)'
    alter_or_select_pattern = re.compile(keyword_pattern.format('select|alter'))
    information_schema_pattern = re.compile(keyword_pattern.format('information_schema'))
    any_tables_pattern = re.compile(table_pattern.format('|'.join(table_names)))
    processlist = conn.get_running_queries()
    for proc in processlist:
        # Info may be bytes or missing; normalize to lowercase str.
        sql_statement = (proc.get('Info') or ''.encode('utf-8'))
        sql_statement = sql_statement.decode('utf-8', 'replace').lower()
        if ((proc['db'] == self._current_db) and sql_statement and (not information_schema_pattern.search(sql_statement)) and any_tables_pattern.search(sql_statement) and alter_or_select_pattern.search(sql_statement)):
            try:
                conn.kill_query_by_id(int(proc['Id']))
            except MySQLdb.MySQLError as e:
                (errcode, errmsg) = e.args
                # 1094: thread already gone -- the query finished on its own.
                if (errcode == 1094):
                    log.info('Trying to kill query id: {}, but it has already finished'.format(proc['Id']))
                else:
                    raise
def start_transaction(self):
    """Open an explicit transaction and mark the payload as inside one."""
    self.execute_sql(sql.start_transaction)
    self.under_transaction = True
def commit(self):
    """Commit the open transaction and clear the in-transaction flag."""
    self.execute_sql(sql.commit)
    self.under_transaction = False
def ddl_guard(self):
    """Wait until server load (Threads_running) drops enough to run DDL safely.

    Polls up to ddl_guard_attempts times, sleeping one second between
    attempts; raises OSCError('DDL_GUARD_ATTEMPTS') when load never drops.
    """
    for _ in range(self.ddl_guard_attempts):
        result = self.query(sql.show_status, ('Threads_running',))
        if result:
            threads_running = int(result[0]['Value'])
            if (threads_running > self.max_running_before_ddl):
                log.warning('Threads running: {}, bigger than allowed: {}. Sleep 1 second before check again.'.format(threads_running, self.max_running_before_ddl))
                time.sleep(1)
            else:
                log.debug('Threads running: {}, less than: {}. We are good to go'.format(threads_running, self.max_running_before_ddl))
                return
    # Fixed garbled log message ("dropbelow" -> "drop below").
    log.error("Hit max attempts: {}, but the threads running still don't drop below: {}.".format(self.ddl_guard_attempts, self.max_running_before_ddl))
    raise OSCError('DDL_GUARD_ATTEMPTS')
_hook
def lock_tables(self, tables):
    """LOCK TABLES ... WRITE with retries, killing blocking SELECT/ALTERs after a grace period.

    A timer on a second connection fires kill_selects if the lock is not
    acquired within lock_max_wait_before_kill_seconds.  Retries on lock
    wait timeout (1205) and deadlock (1213) up to lock_max_attempts, then
    raises OSCError('FAILED_TO_LOCK_TABLE').
    """
    for _ in range(self.lock_max_attempts):
        another_conn = self.get_conn(self._current_db)
        kill_timer = Timer(self.lock_max_wait_before_kill_seconds, self.kill_selects, args=(tables, another_conn))
        # Exposed so tests/debugging can reach the active timer.
        self._last_kill_timer = kill_timer
        kill_timer.start()
        try:
            self.execute_sql(sql.lock_tables(tables))
            kill_timer.cancel()
            log.info('Successfully lock table(s) for write: {}'.format(', '.join(tables)))
            break
        except MySQLdb.MySQLError as e:
            (errcode, errmsg) = e.args
            # 1205 lock wait timeout / 1213 deadlock: worth retrying.
            if (errcode in (1205, 1213)):
                log.warning('Retry locking because of error: {}'.format(e))
            else:
                raise
        finally:
            # Cancelling twice is harmless; ensure the timer thread is gone.
            kill_timer.cancel()
            kill_timer.join()
            another_conn.close()
    else:
        raise OSCError('FAILED_TO_LOCK_TABLE', {'tables': ', '.join(tables)})
def unlock_tables(self):
    """Release any table locks held by this session."""
    self.execute_sql(sql.unlock_tables)
    log.info('Table(s) unlocked')
_hook
def create_triggers(self):
    """Install insert/update/delete triggers that mirror changes into the delta table.

    Without high-priority DDL support the table must be write-locked (and
    slow queries drained first) so trigger creation cannot deadlock with
    concurrent DML.  Slave SQL thread is stopped for the duration.
    """
    self.stop_slave_sql()
    self.ddl_guard()
    log.debug('Locking table: {} before creating trigger'.format(self.table_name))
    if (not self.is_high_pri_ddl_supported):
        self.wait_until_slow_query_finish()
        self.lock_tables(tables=[self.table_name])
    try:
        log.info('Creating triggers')
        self.create_insert_trigger()
        self.create_delete_trigger()
        self.create_update_trigger()
    except Exception as e:
        # Undo locking/slave state before surfacing the failure.
        if (not self.is_high_pri_ddl_supported):
            self.unlock_tables()
        self.start_slave_sql()
        log.error('Failed to execute sql for creating triggers')
        raise OSCError('CREATE_TRIGGER_ERROR', {'msg': str(e)})
    if (not self.is_high_pri_ddl_supported):
        self.unlock_tables()
    self.start_slave_sql()
def disable_ttl_for_myrocks(self):
    """Turn off global MyRocks TTL during the copy, remembering whether we did."""
    if (self.mysql_vars.get('rocksdb_enable_ttl', 'OFF') == 'ON'):
        self.execute_sql(sql.set_global_variable('rocksdb_enable_ttl'), ('OFF',))
        # Remember so enable_ttl_for_myrocks only re-enables what we disabled.
        self.is_ttl_disabled_by_me = True
    else:
        log.debug('TTL not enabled for MyRocks, skip')
def enable_ttl_for_myrocks(self):
    """Re-enable global MyRocks TTL, but only if this run disabled it."""
    if self.is_ttl_disabled_by_me:
        self.execute_sql(sql.set_global_variable('rocksdb_enable_ttl'), ('ON',))
    else:
        log.debug('TTL not enabled for MyRocks before schema change, skip')
_hook
def start_snapshot(self):
    """Open a transaction with a consistent snapshot and record the replay watermark.

    Delta rows with id <= the recorded max were committed before the
    snapshot, so they must not be replayed again later.
    """
    if (self.is_myrocks_table and self.is_myrocks_ttl_table):
        # TTL compaction could delete rows mid-copy; pause it for the run.
        log.debug("It's schema change for MyRocks table which is using TTL")
        self.disable_ttl_for_myrocks()
    self.execute_sql(sql.start_transaction_with_snapshot)
    current_max = self.get_max_delta_id()
    log.info('Changes with id <= {} committed before dump snapshot, and should be ignored.'.format(current_max))
    new_changes = self.query(sql.get_replay_row_ids(self.IDCOLNAME, self.DMLCOLNAME, self.delta_table_name, None, self.mysql_version.is_mysql8), (self.last_replayed_id, current_max))
    self._replayed_chg_ids.extend([r[self.IDCOLNAME] for r in new_changes])
    self.last_replayed_id = current_max
def affected_rows(self):
    """Number of rows affected by the last statement on the raw connection."""
    raw_conn = self._conn.conn
    return raw_conn.affected_rows()
def refresh_range_start(self):
    """Copy the chunk-end user variables into the chunk-start ones for the next chunk."""
    self.execute_sql(sql.select_into(self.range_end_vars, self.range_start_vars))
def select_full_table_into_outfile(self):
    """Stage-2 variant: dump the entire table into a single outfile chunk."""
    stage_start_time = time.time()
    try:
        outfile = self._outfile_name(chunk_id=1)
        sql_string = sql.select_full_table_into_file((self._pk_for_filter + self.old_non_pk_column_list), self.table_name, self.where, enable_outfile_compression=self.enable_outfile_compression)
        affected_rows = self.execute_sql(sql_string, (self._outfile_name(chunk_id=1, skip_compressed_extension=True),))
        self.outfile_suffix_start = 1
        self.outfile_suffix_end = 1
        self.stats['outfile_lines'] = affected_rows
        # File size is not meaningful/available when dumping through wsenv.
        self.stats['outfile_size'] = (os.path.getsize(outfile) if (not self.use_sql_wsenv) else 0)
        self._cleanup_payload.add_file_entry(outfile)
        self.commit()
    except MySQLdb.OperationalError as e:
        (errnum, errmsg) = e.args
        # 1086: destination file already exists on the server side.
        if (errnum == 1086):
            raise OSCError('FILE_ALREADY_EXIST', {'file': outfile})
        else:
            raise
    self.stats['time_in_dump'] = (time.time() - stage_start_time)
_hook
def select_chunk_into_outfile(self, use_where):
    """Dump the next chunk of rows into its own outfile.

    use_where bounds the chunk by the range-start session variables
    (False only for the very first chunk).  Returns the number of rows
    written; 0 means the table is exhausted.  Raises
    OSCError('FILE_ALREADY_EXIST') on MySQL errno 1086.
    """
    outfile = self._outfile_name(chunk_id=self.outfile_suffix_end, skip_compressed_extension=True)
    try:
        sql_string = sql.select_full_table_into_file_by_chunk(self.table_name, self.range_start_vars_array, self.range_end_vars_array, self._pk_for_filter, self.old_non_pk_column_list, self.select_chunk_size, use_where, self.where, self._idx_name_for_filter, enable_outfile_compression=self.enable_outfile_compression)
        affected_rows = self.execute_sql(sql_string, (outfile,))
    except MySQLdb.OperationalError as e:
        (errnum, errmsg) = e.args
        if (errnum == 1086):
            raise OSCError('FILE_ALREADY_EXIST', {'file': outfile})
        else:
            raise
    # Re-resolve the name including any compression extension for bookkeeping
    outfile = self._outfile_name(chunk_id=self.outfile_suffix_end)
    log.debug('{} affected'.format(affected_rows))
    self.stats['outfile_lines'] = (affected_rows + self.stats.setdefault('outfile_lines', 0))
    self.stats['outfile_cnt'] = (1 + self.stats.setdefault('outfile_cnt', 0))
    self.stats['outfile_size'] = ((os.path.getsize(outfile) + self.stats.setdefault('outfile_size', 0)) if (not self.use_sql_wsenv) else 0)
    self._cleanup_payload.add_file_entry(outfile)
    return affected_rows
_hook
def log_dump_progress(self, outfile_suffix):
    """Record and log how many chunks have been dumped so far (vs. ETA)."""
    message = 'Dump progress: {}/{}(ETA) chunks'.format(outfile_suffix, self.eta_chunks)
    self.stats['dump_progress'] = message
    log.info(message)
_hook
def select_table_into_outfile(self):
    """Stage 2: dump the source table to outfiles.

    Either one full-table dump, or chunk by chunk until a chunk comes
    back empty, refreshing range variables and checking disk headroom as
    we go.  Progress is logged roughly every 10% when there are more
    than 10 estimated chunks.
    """
    log.info('== Stage 2: Dump ==')
    stage_start_time = time.time()
    if self.is_full_table_dump:
        log.info('Dumping full table in one go.')
        return self.select_full_table_into_outfile()
    outfile_suffix = 1
    self.outfile_suffix_start = 1
    affected_rows = 1
    use_where = False
    printed_chunk = 0
    while affected_rows:
        self.outfile_suffix_end = outfile_suffix
        affected_rows = self.select_chunk_into_outfile(use_where)
        if affected_rows:
            self.refresh_range_start()
            use_where = True
            outfile_suffix += 1
        self.check_disk_free_space_reserved()
        progress_pct = int(((float(outfile_suffix) / self.eta_chunks) * 100))
        progress_chunk = int((progress_pct / 10))
        if ((progress_chunk > printed_chunk) and (self.eta_chunks > 10)):
            self.log_dump_progress(outfile_suffix)
            printed_chunk = progress_chunk
    self.commit()
    log.info('Dump finished')
    self.stats['time_in_dump'] = (time.time() - stage_start_time)
_hook
def drop_non_unique_indexes(self):
    """Drop every droppable (non-unique) index from the intermediate table;
    they are re-created after the bulk load."""
    for index in self.droppable_indexes:
        log.info("Dropping index '{}' on intermediate table".format(index.name))
        self.ddl_guard()
        self.execute_sql(sql.drop_index(index.name, self.new_table_name))
_hook
def load_chunk(self, column_list, chunk_id):
    """LOAD one dumped chunk into the new table, then remove the chunk
    file and its cleanup entry (skipped when running through wsenv)."""
    sql_string = sql.load_data_infile(self.new_table_name, column_list, ignore=self.eliminate_dups, enable_outfile_compression=self.enable_outfile_compression)
    log.debug(sql_string)
    filepath = self._outfile_name(chunk_id)
    self.load_chunk_file(filepath, sql_string, chunk_id)
    # fsync the directory after unlink so the freed space is durable
    if ((not self.use_sql_wsenv) and self.rm_file(filepath)):
        util.sync_dir(self.outfile_dir)
        self._cleanup_payload.remove_file_entry(filepath)
def load_chunk_file(self, filepath, sql_string: str, chunk_id: int):
    """Execute the prepared LOAD statement for a single chunk file.

    Thin wrapper around execute_sql; chunk_id is accepted but unused here.
    """
    self.execute_sql(sql_string, (filepath,))
def change_explicit_commit(self, enable=True):
    """Toggle rocksdb_commit_in_the_middle for this session.

    Skipped entirely (in both directions) when duplicate unique keys are
    possible -- see may_have_dup_unique_keys().  Error 1193 (unknown
    system variable) is tolerated for servers without the variable.
    """
    if self.may_have_dup_unique_keys():
        log.warning('Disable explicit_commit, because there may be duplicate keys.')
        return
    log.info(('explicit_commit is enabled' if enable else 'explicit_commit is disabled'))
    v = (1 if enable else 0)
    try:
        self.execute_sql(sql.set_session_variable('rocksdb_commit_in_the_middle'), (v,))
    except MySQLdb.OperationalError as e:
        (errnum, errmsg) = e.args
        if (errnum == 1193):
            log.warning('Failed to set rocksdb_commit_in_the_middle: {}'.format(errmsg))
        else:
            raise
def change_rocksdb_bulk_load(self, enable=True):
    """Toggle rocksdb_bulk_load for this session.

    No-op when should_disable_bulk_load() says bulk load must stay off.
    Note the ordering: rocksdb_bulk_load_allow_sk (when configured) is
    set before bulk_load on enable and after it on disable.  Error 1193
    (unknown system variable) is tolerated.
    """
    if self.should_disable_bulk_load():
        return
    v = (1 if enable else 0)
    log.info(('Bulk load is enabled' if enable else 'Bulk load is disabled'))
    try:
        if (self.rocksdb_bulk_load_allow_sk and enable):
            self.execute_sql(sql.set_session_variable('rocksdb_bulk_load_allow_sk'), (v,))
        self.execute_sql(sql.set_session_variable('rocksdb_bulk_load'), (v,))
        if (self.rocksdb_bulk_load_allow_sk and (not enable)):
            self.execute_sql(sql.set_session_variable('rocksdb_bulk_load_allow_sk'), (v,))
    except MySQLdb.OperationalError as e:
        (errnum, errmsg) = e.args
        if (errnum == 1193):
            log.warning('Failed to set rocksdb_bulk_load: {}'.format(errmsg))
        else:
            raise
def should_disable_bulk_load(self):
    """Return True (logging why) when rocksdb bulk load must stay off.

    Disqualifiers: the PK definition changes, duplicate unique keys are
    possible, or any PK column's charset/collation changes.
    """
    reason = ''
    if (self._old_table.primary_key != self._new_table.primary_key):
        reason = 'because we are changing PK'
    elif self.may_have_dup_unique_keys():
        reason = 'because there may be duplicated unique keys'
    else:
        new_cols = {col.name: col for col in self._new_table.column_list}
        for (idx, col_name) in enumerate(self._pk_for_filter):
            if ((new_cols[col_name].charset != self._pk_for_filter_def[idx].charset) or (new_cols[col_name].collate != self._pk_for_filter_def[idx].collate)):
                reason = 'because we are changing PK column charset/collate'
                break
    if reason:
        log.warning(('Disable rocksdb_bulk_load, ' + reason))
        return True
    return False
def may_have_dup_unique_keys(self):
    """True when an index becomes unique while duplicate elimination is
    off -- loading could then hit duplicate-key errors."""
    schema_diff = SchemaDiff(self._old_table, self._new_table)
    if self.eliminate_dups:
        return False
    return IndexAlterType.BECOME_UNIQUE_INDEX in schema_diff.alter_types
_hook
def log_load_progress(self, suffix):
    """Record and log how many chunks have been loaded so far."""
    message = 'Load progress: {}/{} chunks'.format(suffix, self.outfile_suffix_end)
    self.stats['load_progress'] = message
    log.info(message)
_hook
def load_data(self):
    """Stage 3: load every dumped chunk into the new table.

    Builds the column list from PK + non-PK columns (raising
    OSC_INTERNAL_ERROR if both are empty) and brackets the load with
    MyRocks bulk-load / explicit-commit toggles when applicable.
    """
    stage_start_time = time.time()
    log.info('== Stage 3: Load data ==')
    if self._pk_for_filter:
        if self.old_non_pk_column_list:
            column_list = (self._pk_for_filter + self.old_non_pk_column_list)
        else:
            column_list = self._pk_for_filter
    elif self.old_non_pk_column_list:
        column_list = self.old_non_pk_column_list
    else:
        raise OSCError('OSC_INTERNAL_ERROR', {'msg': 'Unexpected scenario. Both _pk_for_filter and old_non_pk_column_list are empty'})
    if self.is_myrocks_table:
        self.change_rocksdb_bulk_load(enable=True)
        self.change_explicit_commit(enable=True)
    for suffix in range(self.outfile_suffix_start, (self.outfile_suffix_end + 1)):
        self.load_chunk(column_list, suffix)
        # Log roughly every 20% of chunks, but never more often than every 5
        if ((suffix % max(5, int((self.outfile_suffix_end / 5)))) == 0):
            self.log_load_progress(suffix)
    if self.is_myrocks_table:
        self.change_rocksdb_bulk_load(enable=False)
        self.change_explicit_commit(enable=False)
    self.stats['time_in_load'] = (time.time() - stage_start_time)
def check_max_statement_time_exists(self):
    """Detect support for per-statement timeouts.

    MySQL 8 exposes max_execution_time; older servers are probed with a
    MAX_STATEMENT_TIME query, where any failure means "not supported".
    """
    if self.mysql_version.is_mysql8:
        return self.is_var_enabled('max_execution_time')
    else:
        try:
            self.query(sql.select_max_statement_time)
            return True
        except Exception:
            log.warning("MAX_STATEMENT_TIME doesn't support in this MySQL")
            return False
def append_to_exclude_id(self):
    """Copy all (id, dml_type) pairs from the include table into the
    exclude table so those changes are skipped by later replays."""
    stmt = sql.insert_into_select_from(into_table=self.tmp_table_exclude_id, into_col_list=(self.IDCOLNAME, self.DMLCOLNAME), from_table=self.tmp_table_include_id, from_col_list=(self.IDCOLNAME, self.DMLCOLNAME), enable_outfile_compression=self.enable_outfile_compression)
    self.execute_sql(stmt)
def get_max_delta_id(self):
    """Return the maximum change id recorded in the delta table.

    Returns the forced ceiling (max_id_to_replay_upto_for_good2go, -1
    meaning "no ceiling") when one is set, and 0 when the delta table is
    empty and no ceiling applies.
    """
    result = self.query(sql.get_max_id_from(self.IDCOLNAME, self.delta_table_name))
    # MAX() over an empty table yields SQL NULL, which the driver returns
    # as Python None -- the previous comparison against the string 'None'
    # could never match, so the empty-table branch was unreachable.
    if (result[0]['max_id'] is None):
        return max(0, self.max_id_to_replay_upto_for_good2go)
    elif (self.max_id_to_replay_upto_for_good2go != (- 1)):
        return self.max_id_to_replay_upto_for_good2go
    return result[0]['max_id']
_hook
def replay_delete_row(self, replay_sql, last_id, *ids):
    """Replay one batch of DELETE changes; when affected-row checking is
    active and nothing was deleted, dump the pending delta rows to an
    outfile and abort with REPLAY_WRONG_AFFECTED."""
    rows_touched = self.execute_sql(replay_sql, ids)
    checking_enabled = ((not self.eliminate_dups) and (not self.where) and (not self.skip_affected_rows_check))
    if (checking_enabled and (rows_touched == 0)):
        log.error(f'failed to replay {ids}')
        outfile = self._outfile_name(suffix='.failed_replay', chunk_id=0, skip_compressed_extension=True)
        self.query(sql.get_replay_tbl_in_outfile(self.IDCOLNAME, self.delta_table_name, outfile), (self.last_replayed_id, self.max_id_now))
        raise OSCError('REPLAY_WRONG_AFFECTED', {'num': rows_touched})
_hook
def replay_insert_row(self, sql, last_id, *ids):
    """Replay one batch of INSERT changes; raise REPLAY_WRONG_AFFECTED
    when checking is active and no rows were inserted.

    Note: the `sql` parameter (a prepared statement string) shadows the
    module-level sql helper inside this method.
    """
    rows_touched = self.execute_sql(sql, ids)
    checking_enabled = ((not self.eliminate_dups) and (not self.where) and (not self.skip_affected_rows_check))
    if (checking_enabled and (rows_touched == 0)):
        raise OSCError('REPLAY_WRONG_AFFECTED', {'num': rows_touched})
_hook
def replay_update_row(self, sql, last_id, *ids):
    """Replay one batch of UPDATE changes.

    Unlike the delete/insert replays, no affected-row verification is
    performed here.
    """
    self.execute_sql(sql, ids)
def get_gap_changes(self):
    """Fetch delta rows for previously-missing ("gap") change ids.

    Ids absent from the replayed-id sequence are re-queried; any rows
    that exist now are marked filled and returned for replay.
    """
    delta = []
    log.info('Checking {} gap ids'.format(len(self._replayed_chg_ids.missing_points())))
    for chg_id in self._replayed_chg_ids.missing_points():
        row = self.query(sql.get_chg_row(self.IDCOLNAME, self.DMLCOLNAME, self.delta_table_name), (chg_id,))
        if bool(row):
            log.debug('Change {} appears now!'.format(chg_id))
            delta.append(row[0])
    for row in delta:
        self._replayed_chg_ids.fill(row[self.IDCOLNAME])
    log.info('{} changes before last checkpoint ready for replay'.format(len(delta)))
    return delta
def divide_changes_to_group(self, chg_rows):
    """Yield (dml_type, id_list) batches from an ordered change stream.

    Consecutive changes of the same DML type are grouped together, except
    UPDATEs, which are flushed immediately (one id per group).  A group
    is also flushed when the next change switches type or when it reaches
    replay_group_size ids.
    """
    id_group = []
    type_now = None
    for (idx, chg) in enumerate(chg_rows):
        if (type_now is None):
            type_now = chg[self.DMLCOLNAME]
        id_group.append(chg[self.IDCOLNAME])
        # Last change in the stream: flush whatever we have and stop
        if (idx == (len(chg_rows) - 1)):
            (yield (type_now, id_group))
            return
        elif (type_now == self.DML_TYPE_UPDATE):
            (yield (type_now, id_group))
            type_now = None
            id_group = []
        elif (chg_rows[(idx + 1)][self.DMLCOLNAME] != type_now):
            # Next change switches DML type: close the current group
            (yield (type_now, id_group))
            type_now = None
            id_group = []
        elif (len(id_group) >= self.replay_group_size):
            (yield (type_now, id_group))
            type_now = None
            id_group = []
        else:
            continue
def replay_changes(self, single_trx=False, holding_locks=False, delta_id_limit=None):
    """Replay delta-table changes onto the new table up to a watermark.

    Args:
        single_trx: keep the whole replay inside the current transaction
            (no intermediate commits); also enables a statement timeout
            when the server supports one.
        holding_locks: table locks are held, so replay_timeout is
            enforced per group unless bypass_replay_timeout is set.
        delta_id_limit: replay only up to this change id (defaults to
            the current max delta id, clamped by determine_replay_id).

    Raises OSCError on too many pending deltas, replay timeout while
    holding locks, wrong affected-row counts, or an unknown DML type.
    """
    if (single_trx and (not self.bypass_replay_timeout) and self.check_max_statement_time_exists()):
        replay_ms = (self.replay_timeout * 1000)
    else:
        replay_ms = None
    max_id_now = self.determine_replay_id((delta_id_limit if delta_id_limit else self.get_max_delta_id()))
    stage_start_time = time.time()
    self.current_catchup_start_time = int(stage_start_time)
    log.debug('Timeout for replay changes: {}'.format(self.replay_timeout))
    time_start = stage_start_time
    (deleted, inserted, updated) = (0, 0, 0)
    self.record_currently_replaying_id(max_id_now)
    self.max_id_now = max_id_now
    log.info('max_id_now is %r / %r', max_id_now, self.replay_max_changes)
    if (max_id_now > self.replay_max_changes):
        raise OSCError('REPLAY_TOO_MANY_DELTAS', {'deltas': max_id_now, 'max_deltas': self.replay_max_changes})
    if (self.detailed_mismatch_info or self.dump_after_checksum):
        log.info('Replaying changes happened before change ID: {}'.format(max_id_now))
    # Gap changes first, then the new window (last_replayed_id, max_id_now]
    delta = self.get_gap_changes()
    new_changes = self.query(sql.get_replay_row_ids(self.IDCOLNAME, self.DMLCOLNAME, self.delta_table_name, replay_ms, self.mysql_version.is_mysql8), (self.last_replayed_id, max_id_now))
    self._replayed_chg_ids.extend([r[self.IDCOLNAME] for r in new_changes])
    delta.extend(new_changes)
    log.info('Total {} changes to replay'.format(len(delta)))
    # One prepared statement per DML type; each replays a batch of ids
    delete_sql = sql.replay_delete_row(self.new_table_name, self.delta_table_name, self.IDCOLNAME, self._pk_for_filter)
    update_sql = sql.replay_update_row(self.old_non_pk_column_list, self.new_table_name, self.delta_table_name, self.eliminate_dups, self.IDCOLNAME, self._pk_for_filter)
    insert_sql = sql.replay_insert_row(self.old_column_list, self.new_table_name, self.delta_table_name, self.IDCOLNAME, self.eliminate_dups)
    replayed = 0
    replayed_total = 0
    showed_pct = 0
    for (chg_type, ids) in self.divide_changes_to_group(delta):
        if (holding_locks and (not self.bypass_replay_timeout) and ((time.time() - time_start) > self.replay_timeout)):
            raise OSCError('REPLAY_TIMEOUT')
        replayed_total += len(ids)
        # Commit periodically when not bound to a single transaction
        if ((not single_trx) and (replayed > self.replay_batch_size)):
            self.commit()
            self.start_transaction()
            replayed = 0
        else:
            replayed += len(ids)
        if (chg_type == self.DML_TYPE_DELETE):
            self.replay_delete_row(delete_sql, ids[(- 1)], ids)
            deleted += len(ids)
        elif (chg_type == self.DML_TYPE_UPDATE):
            self.replay_update_row(update_sql, ids[(- 1)], ids)
            updated += len(ids)
        elif (chg_type == self.DML_TYPE_INSERT):
            self.replay_insert_row(insert_sql, ids[(- 1)], ids)
            inserted += len(ids)
        else:
            raise OSCError('UNKOWN_REPLAY_TYPE', {'type_value': chg_type})
        progress_pct = int(((replayed_total / len(delta)) * 100))
        if (progress_pct > showed_pct):
            log.info('Replay progress: {}/{} changes'.format(replayed_total, len(delta)))
            showed_pct += 10
    if (not single_trx):
        self.commit()
    self.last_replayed_id = max_id_now
    end_time = time.time()
    self.current_catchup_end_time = int(end_time)
    time_spent = (end_time - stage_start_time)
    self.stats['time_in_replay'] = (self.stats.setdefault('time_in_replay', 0) + time_spent)
    log.info('Replayed {} INSERT, {} DELETE, {} UPDATE in {:.2f} Seconds'.format(inserted, deleted, updated, time_spent))
    if (time_spent > 0.0):
        self.stats['last_catchup_speed'] = (((inserted + deleted) + updated) / time_spent)
def record_currently_replaying_id(self, max_id_now: int) -> None:
    """No-op placeholder called with the replay watermark before a replay
    pass; this class records nothing."""
    return
def determine_replay_id(self, max_replay_id: int):
    """Clamp max_replay_id to the forced ceiling, when one is configured.

    A ceiling of -1 means "no limit"; otherwise replay never goes past
    max_id_to_replay_upto_for_good2go.
    """
    ceiling = self.max_id_to_replay_upto_for_good2go
    if ceiling == (- 1):
        return max_replay_id
    if (not max_replay_id) or (max_replay_id > ceiling):
        return ceiling
    return max_replay_id
def set_innodb_tmpdir(self, innodb_tmpdir):
    """Point innodb_tmpdir at the given directory for this session.

    Errors 1231 (bad variable value) and 1193 (unknown variable) are
    tolerated: MySQL then just uses its default tmpdir.
    """
    try:
        self.execute_sql(sql.set_session_variable('innodb_tmpdir'), (innodb_tmpdir,))
    except MySQLdb.OperationalError as e:
        (errnum, errmsg) = e.args
        if errnum not in (1231, 1193):
            raise
        log.warning('Failed to set innodb_tmpdir, falling back to tmpdir: {}'.format(errmsg))
_hook
def recreate_non_unique_indexes(self):
    """Re-create the non-unique indexes that were dropped before the load.

    Uses outfile_dir as innodb_tmpdir so index-build sort files land on
    the same disk as the dump.  No-op when nothing was dropped.

    Fix: removed a redundant inner `if self.droppable_indexes:` check --
    it could never be false after the early-return guard above.
    """
    if (not self.droppable_indexes):
        return
    self.set_innodb_tmpdir(self.outfile_dir)
    self.ddl_guard()
    log.info('Recreating indexes: {}'.format(', '.join((col.name for col in self.droppable_indexes))))
    self.execute_sql(sql.add_index(self.new_table_name, self.droppable_indexes))
_hook
def analyze_table(self):
    """Refresh optimizer statistics on the new table and the delta table."""
    for tbl in (self.new_table_name, self.delta_table_name):
        self.query(sql.analyze_table(tbl))
def compare_checksum(self, old_table_checksum, new_table_checksum):
    """Compare per-chunk checksum rows from the old and new tables.

    Raises OSCError('CHECKSUM_MISMATCH') on any chunk-count, row-count or
    column-checksum difference; otherwise folds every value into
    current_checksum_record via XOR.
    """
    if (len(old_table_checksum) != len(new_table_checksum)):
        log.error('The total number of checksum chunks mismatch OLD={}, NEW={}'.format(len(old_table_checksum), len(new_table_checksum)))
        raise OSCError('CHECKSUM_MISMATCH')
    log.info('{} checksum chunks in total'.format(len(old_table_checksum)))
    checksum_xor = 0
    for (idx, checksum_entry) in enumerate(old_table_checksum):
        for col in checksum_entry:
            if (not (old_table_checksum[idx][col] == new_table_checksum[idx][col])):
                log.error('checksum/count mismatch for chunk {} column `{}`: OLD={}, NEW={}'.format(idx, col, old_table_checksum[idx][col], new_table_checksum[idx][col]))
                log.error('Number of rows for the chunk that cause the mismatch: OLD={}, NEW={}'.format(old_table_checksum[idx]['cnt'], new_table_checksum[idx]['cnt']))
                log.error('Current replayed max(__OSC_ID) of chg table {}'.format(self.last_replayed_id))
                raise OSCError('CHECKSUM_MISMATCH')
            else:
                checksum_xor ^= old_table_checksum[idx][col]
    self.current_checksum_record = checksum_xor
def checksum_full_table(self):
    """Checksum old vs new table in one pass (used for full-table dumps)."""
    source_checksum = self.query(sql.checksum_full_table(self.table_name, self._old_table.column_list))
    target_checksum = self.query(sql.checksum_full_table(self.new_table_name, self._old_table.column_list))
    self.commit()
    if source_checksum and target_checksum:
        self.compare_checksum(source_checksum, target_checksum)
def checksum_for_single_chunk(self, table_name, use_where, idx_for_checksum):
    """Checksum one chunk of table_name, saving the range boundary into
    session variables; returns the single result row."""
    stmt = sql.checksum_by_chunk_with_assign(table_name, self.checksum_column_list, self._pk_for_filter, self.range_start_vars_array, self.range_end_vars_array, self.select_chunk_size, use_where, idx_for_checksum)
    rows = self.query(stmt)
    return rows[0]
def dump_current_chunk(self, use_where):
    """Dump the currently-mismatching chunk from both tables to disk
    (suffixes .old / .new) for offline investigation."""
    log.info('Dumping raw data onto local disk for further investigation')
    log.info('Columns will be dumped in following order: ')
    log.info(', '.join((self._pk_for_filter + self.checksum_column_list)))
    for table_name in [self.table_name, self.new_table_name]:
        if (table_name == self.new_table_name):
            # The new table may need a different covering index than PRIMARY
            idx_for_checksum = self.find_coverage_index()
            outfile = self._outfile_name(suffix='.new', chunk_id=0, skip_compressed_extension=True)
        else:
            idx_for_checksum = 'PRIMARY'
            outfile = self._outfile_name(suffix='.old', chunk_id=0, skip_compressed_extension=True)
        log.info('Dump offending chunk from {} into {}'.format(table_name, outfile))
        self.execute_sql(sql.dump_current_chunk(table_name, self.checksum_column_list, self._pk_for_filter, self.range_start_vars_array, self.select_chunk_size, idx_for_checksum, use_where, enable_outfile_compression=self.enable_outfile_compression), (outfile,))
_hook
def detailed_checksum(self):
    """Chunk-by-chunk checksum of old vs new table, stopping at the first
    mismatching chunk (which is dumped to disk before raising)."""
    affected_rows = 1
    use_where = False
    new_idx_for_checksum = self.find_coverage_index()
    old_idx_for_checksum = 'PRIMARY'
    chunk_id = 0
    while affected_rows:
        chunk_id += 1
        old_checksum = self.checksum_for_single_chunk(self.table_name, use_where, old_idx_for_checksum)
        new_checksum = self.checksum_for_single_chunk(self.new_table_name, use_where, new_idx_for_checksum)
        affected_rows = old_checksum['_osc_chunk_cnt']
        if (list(old_checksum.values()) != list(new_checksum.values())):
            log.info('Checksum mismatch detected for chunk {}: '.format(chunk_id))
            log.info('OLD: {}'.format(str(old_checksum)))
            log.info('NEW: {}'.format(str(new_checksum)))
            self.dump_current_chunk(use_where)
            raise OSCError('CHECKSUM_MISMATCH')
        if affected_rows:
            self.refresh_range_start()
            use_where = True
_hook
def checksum_by_chunk(self, table_name, dump_after_checksum=False):
    """Checksum table_name chunk by chunk; return one result row per chunk.

    Optionally dumps each chunk to a numbered outfile right after
    checksumming so mismatches can be diffed against the other table.
    """
    checksum_result = []
    affected_rows = 1
    use_where = False
    outfile_id = 0
    if (table_name == self.new_table_name):
        idx_for_checksum = self.find_coverage_index()
        outfile_prefix = '{}.new'.format(self.outfile)
    else:
        idx_for_checksum = self._idx_name_for_filter
        outfile_prefix = '{}.old'.format(self.outfile)
    while affected_rows:
        checksum = self.query(sql.checksum_by_chunk(table_name, self.checksum_column_list, self._pk_for_filter, self.range_start_vars_array, self.range_end_vars_array, self.select_checksum_chunk_size, use_where, idx_for_checksum))
        if dump_after_checksum:
            self.execute_sql(sql.dump_current_chunk(table_name, self.checksum_column_list, self._pk_for_filter, self.range_start_vars_array, self.select_checksum_chunk_size, idx_for_checksum, use_where, enable_outfile_compression=self.enable_outfile_compression), ('{}.{}'.format(outfile_prefix, str(outfile_id)),))
            outfile_id += 1
        if checksum:
            self.refresh_range_start()
            affected_rows = checksum[0]['cnt']
            checksum_result.append(checksum[0])
        use_where = True
    return checksum_result
def need_checksum(self):
    """Decide whether table checksumming should run at all.

    Checksum is skipped when explicitly disabled, when a --where filter
    makes the row sets incomparable, when a PK column's collation
    changed, or when no unique index on the new table can cover the old
    PK for lookups (unless skip_pk_coverage_check forces the warning
    path).

    Fix: the two "no covering index" warnings carried a stray
    .format(old_column.name) even though the string has no placeholder;
    old_column is unbound when _pk_for_filter is empty, so evaluating
    that argument could raise NameError.  The no-op format calls are
    removed.
    """
    if self.skip_checksum:
        log.warning('Skip checksum because --skip-checksum is specified')
        return False
    if self.where:
        log.warning('Skip checksum because --where is given')
        return False
    for pri_column in self._pk_for_filter:
        old_column_tmp = [col for col in self._old_table.column_list if (col.name == pri_column)]
        if old_column_tmp:
            old_column = old_column_tmp[0]
        new_column_tmp = [col for col in self._new_table.column_list if (col.name == pri_column)]
        if new_column_tmp:
            new_column = new_column_tmp[0]
        if (old_column and new_column):
            if (not is_equal(old_column.collate, new_column.collate)):
                log.warning('Collation of primary key column {} has been changed. Skip checksum '.format(old_column.name))
                return False
    if (not self.validate_post_alter_pk()):
        if self.skip_pk_coverage_check:
            log.warning("Skipping checksuming because there's no unique index in new table schema can perfectly cover old primary key combination for search")
            return False
        elif (not self.find_coverage_index()):
            log.warning("Skipping checksuming because there's no unique index in new table schema can perfectly cover old primary key combination for search")
            return False
    return True
def need_checksum_for_changes(self):
    """Whether replayed changes should be checksummed.

    False when checksums are off entirely, or for full-table dumps.
    """
    if not self.need_checksum():
        return False
    if self.is_full_table_dump:
        log.warning("We're adding new primary key to the table. Skip running checksum for changes, because that's inefficient")
        return False
    return True
_hook
def checksum(self):
    """Stage 4: catch up replay, then verify old and new tables match.

    Skipped when checksums are disabled or --eliminate-duplicate is on.
    Full-table dumps use checksum_full_table; otherwise either chunked
    checksumming or (with detailed_mismatch_info) the detailed per-chunk
    comparison runs.
    """
    log.info('== Stage 4: Checksum ==')
    if (not self.need_checksum()):
        return
    stage_start_time = time.time()
    if self.eliminate_dups:
        log.warning('Skip checksum, because --eliminate-duplicate specified')
        return
    log.info('= Stage 4.1: Catch up before generating checksum =')
    self.replay_till_good2go(checksum=False)
    log.info('= Stage 4.2: Comparing checksum =')
    self.start_transaction()
    log.info('Replay changes to bring two tables to a comparable state')
    self.checksum_required_for_replay = True
    self.replay_changes(single_trx=True)
    if self.is_full_table_dump:
        return self.checksum_full_table()
    if (not self.detailed_mismatch_info):
        log.info('Checksuming data from old table')
        old_table_checksum = self.checksum_by_chunk(self.table_name, dump_after_checksum=self.dump_after_checksum)
        self.commit()
        log.info('Checksuming data from new table')
        new_table_checksum = self.checksum_by_chunk(self.new_table_name, dump_after_checksum=self.dump_after_checksum)
        log.info('Compare checksum')
        self.compare_checksum(old_table_checksum, new_table_checksum)
    else:
        self.detailed_checksum()
    self.last_checksumed_id = self.last_replayed_id
    self.record_checksum()
    log.info('Checksum match between new and old table')
    self.stats['time_in_table_checksum'] = (time.time() - stage_start_time)
def record_checksum(self):
    """No-op placeholder invoked after a successful checksum; this class
    records nothing."""
    return
_hook
def evaluate_replay_progress(self):
    """Bump the replay-attempt counter and publish progress into stats."""
    attempts = self.stats['num_replay_attempts'] + 1
    self.stats['num_replay_attempts'] = attempts
    self.stats['replay_progress'] = f'Replay progress: {attempts}/{self.replay_max_attempt}(MAX ATTEMPTS)'
_hook
def replay_till_good2go(self, checksum, final_catchup: bool=False):
    """Repeatedly replay deltas until one round fits within replay_timeout.

    At most replay_max_attempt rounds.  With checksum=True (and checksum
    enabled) each round also checksums the replayed changes; otherwise a
    large backlog is broken into max_replay_batch_size windows.  The
    for/else raises OSCError('MAX_ATTEMPT_EXCEEDED') when every round
    overran, unless --bypass-replay-timeout is set.  final_catchup is
    accepted but unused here.
    """
    log.info('Replay at most {} more round(s) until we can finish in {} seconds'.format(self.replay_max_attempt, self.replay_timeout))
    self.stats['num_replay_attempts'] = 0
    self.execute_sql(sql.set_session_variable('long_query_time'), (1,))
    for i in range(self.replay_max_attempt):
        log.info('Catchup Attempt: {}'.format((i + 1)))
        self.evaluate_replay_progress()
        start_time = time.time()
        if (checksum and self.need_checksum()):
            self.start_transaction()
            log.info('Catch up in order to compare checksum for the rows that have been changed')
            self.checksum_required_for_replay = True
            self.replay_changes(single_trx=True)
            self.checksum_for_changes(single_trx=False)
        else:
            self.checksum_required_for_replay = False
            max_id_now = self.get_max_delta_id()
            # Break a large backlog into bounded id windows
            while ((max_id_now - self.last_replayed_id) > self.max_replay_batch_size):
                delta_id_limit = (self.last_replayed_id + self.max_replay_batch_size)
                log.info('Replay up to {}'.format(delta_id_limit))
                self.replay_changes(single_trx=False, delta_id_limit=delta_id_limit)
            self.replay_changes(single_trx=False, delta_id_limit=max_id_now)
        time_in_replay = (time.time() - start_time)
        if (time_in_replay < self.replay_timeout):
            log.info('Time spent in last round of replay is {:.2f}, which is less than replay_timeout: {} for final replay. We are good to proceed'.format(time_in_replay, self.replay_timeout))
            break
    else:
        if (not self.bypass_replay_timeout):
            raise OSCError('MAX_ATTEMPT_EXCEEDED', {'timeout': self.replay_timeout})
        else:
            log.warning('Proceed after max replay attempts exceeded. Because --bypass-replay-timeout is specified')
def get_max_replay_batch_size(self) -> int:
    """Upper bound on how many delta ids one replay window may cover."""
    batch_ceiling = self.max_replay_batch_size
    return batch_ceiling
_hook
def checksum_by_replay_chunk(self, table_name):
    """Checksum rows touched by replay, in id windows of replay_batch_size,
    covering (last_checksumed_id, last_replayed_id]."""
    checksum_result = []
    id_limit = self.last_checksumed_id
    while (id_limit < self.last_replayed_id):
        result = self.query(sql.checksum_by_replay_chunk(table_name, self.delta_table_name, self.old_column_list, self._pk_for_filter, self.IDCOLNAME, id_limit, self.last_replayed_id, self.replay_batch_size))
        checksum_result.append(result[0])
        id_limit += self.replay_batch_size
    return checksum_result
_hook
def checksum_for_changes(self, single_trx=False):
    """Checksum only the rows changed since the last checksum point.

    With single_trx=True the caller still needs the transaction open, so
    the commit between old- and new-table checksums is skipped.  No-ops
    for --eliminate-duplicate, disabled delta checksums, or full dumps.
    """
    if self.eliminate_dups:
        log.warning('Skip checksum, because --eliminate-duplicate specified')
        return
    elif (not self.need_checksum_for_changes()):
        return
    elif self.is_full_table_dump:
        return
    else:
        log.info('Running checksum for rows have been changed since last checksum from change ID: {}'.format(self.last_checksumed_id))
        start_time = time.time()
        old_table_checksum = self.checksum_by_replay_chunk(self.table_name)
        new_table_checksum = self.checksum_by_replay_chunk(self.new_table_name)
        if (not single_trx):
            self.commit()
        self.compare_checksum(old_table_checksum, new_table_checksum)
        self.last_checksumed_id = self.last_replayed_id
        self.stats['time_in_delta_checksum'] = (self.stats.setdefault('time_in_delta_checksum', 0) + (time.time() - start_time))
        self.record_checksum()
_hook
def apply_partition_differences(self, parts_to_drop: Optional[Set[str]], parts_to_add: Optional[Set[str]]) -> None:
    """Bring the new table's RANGE partitions in line with the source.

    Missing partitions are added (boundary values looked up from the
    source table); extra ones are dropped.  Either argument may be falsy.
    """
    if parts_to_add:
        clauses = []
        for part_name in parts_to_add:
            boundary = self.partition_value_for_name(self.table_name, part_name)
            clauses.append('PARTITION {} VALUES LESS THAN ({})'.format(part_name, boundary))
        add_sql = 'ALTER TABLE `{}` ADD PARTITION ({})'.format(self.new_table_name, ', '.join(clauses))
        log.info(add_sql)
        self.execute_sql(add_sql)
    if parts_to_drop:
        drop_sql = 'ALTER TABLE `{}` DROP PARTITION {}'.format(self.new_table_name, ', '.join(parts_to_drop))
        log.info(drop_sql)
        self.execute_sql(drop_sql)
_hook
def partition_value_for_name(self, table_name: str, part_name: str) -> str:
    """Look up the VALUES LESS THAN boundary for one named partition;
    raise RuntimeError when the partition is unknown."""
    rows = self.query(sql.fetch_partition_value, (self._current_db, table_name, part_name))
    for row in rows:
        return row['PARTITION_DESCRIPTION']
    raise RuntimeError(f'No partition value found for {table_name} {part_name}')
_hook
def list_partition_names(self, table_name: str) -> List[str]:
    """Return every partition name of table_name; raise if none exist."""
    rows = self.query(sql.fetch_partition, (self._current_db, table_name))
    names = [row['PARTITION_NAME'] for row in rows]
    if not names:
        raise RuntimeError(f'No partition values found for {table_name}')
    return names
_hook
def sync_table_partitions(self) -> None:
    """Stage 5.1: copy RANGE-partition layout changes from the source
    table onto the new table before the swap.

    No-op unless rm_partition is set, partitions exist, and the new
    table is RANGE partitioned.  Failures are logged, never fatal.
    """
    log.info('== Stage 5.1: Check table partitions are up-to-date ==')
    if (not self.rm_partition):
        return
    if (not self.partitions):
        return
    partition_method = self.get_partition_method(self._current_db, self.new_table_name)
    if (partition_method != 'RANGE'):
        return
    try:
        new_tbl_parts = self.list_partition_names(self.new_table_name)
        orig_tbl_parts = self.list_partition_names(self.table_name)
        parts_to_drop = (set(new_tbl_parts) - set(orig_tbl_parts))
        parts_to_add = (set(orig_tbl_parts) - set(new_tbl_parts))
        # A literal 'None' partition name means MySQL reported the table
        # as unpartitioned -- bail out rather than emit bogus DDL
        if (('None' in parts_to_add) or ('None' in parts_to_drop)):
            log.warning('MySQL claims either %s or %s are not partitioned', self.new_table_name, self.table_name)
            return
        if parts_to_drop:
            log.info('Partitions missing from source table to drop from new table %s: %s', self.new_table_name, ', '.join(parts_to_drop))
        if parts_to_add:
            log.info('Partitions in source table to add to new table %s: %s', self.new_table_name, ', '.join(parts_to_add))
        self.apply_partition_differences(parts_to_drop, parts_to_add)
    except Exception:
        log.exception('Unable to sync new table %s with orig table %s partitions', self.new_table_name, self.table_name)
_hook
def swap_tables(self):
    """Stage 6: swap the new table into place under table locks.

    Stops slave SQL, locks all three tables, performs a final locked
    replay, then renames: one atomic multi-table RENAME on MySQL 8, two
    sequential renames otherwise.  Returns True early when
    stop_before_swap is set.
    """
    if self.stop_before_swap:
        return True
    log.info('== Stage 6: Swap table ==')
    self.stop_slave_sql()
    self.execute_sql(sql.set_session_variable('autocommit'), (0,))
    self.start_transaction()
    stage_start_time = time.time()
    self.lock_tables((self.new_table_name, self.table_name, self.delta_table_name))
    log.info('Final round of replay before swap table')
    self.checksum_required_for_replay = False
    self.replay_changes(single_trx=True, holding_locks=True)
    if self.mysql_version.is_mysql8:
        self.execute_sql(sql.rename_all_tables(orig_name=self.table_name, old_name=self.renamed_table_name, new_name=self.new_table_name))
        self.table_swapped = True
        self.add_drop_table_entry(self.renamed_table_name)
        log.info('Renamed {} TO {}, {} TO {}'.format(self.table_name, self.renamed_table_name, self.new_table_name, self.table_name))
    else:
        # Two-step rename: mark swapped after the first rename so a crash
        # in between can be rolled back by rename_back()
        self.execute_sql(sql.rename_table(self.table_name, self.renamed_table_name))
        log.info('Renamed {} TO {}'.format(self.table_name, self.renamed_table_name))
        self.table_swapped = True
        self.add_drop_table_entry(self.renamed_table_name)
        self.execute_sql(sql.rename_table(self.new_table_name, self.table_name))
        log.info('Renamed {} TO {}'.format(self.new_table_name, self.table_name))
    log.info('Table has successfully swapped, new schema takes effect now')
    self._cleanup_payload.remove_drop_table_entry(self._current_db, self.new_table_name)
    self.commit()
    self.unlock_tables()
    self.stats['time_in_lock'] = (self.stats.setdefault('time_in_lock', 0) + (time.time() - stage_start_time))
    self.execute_sql(sql.set_session_variable('autocommit'), (1,))
    self.start_slave_sql()
    self.stats['swap_table_progress'] = 'Swap table finishes'
def rename_back(self):
    """Undo a half-finished swap: restore the original table name when the
    swap renamed it away and nothing has taken its place yet."""
    swap_needs_undo = (self.table_swapped and self.table_exists(self.renamed_table_name) and (not self.table_exists(self.table_name)))
    if swap_needs_undo:
        self.unlock_tables()
        self.execute_sql(sql.rename_table(self.renamed_table_name, self.table_name))
_hook
def cleanup(self):
    """Stage 7: best-effort cleanup of connections, state and outfiles.

    Exceptions in the restore steps are logged and swallowed so the
    payload cleanup (which frees disk space) always runs.
    """
    log.info('== Stage 7: Cleanup ==')
    cleanup_start_time = time.time()
    try:
        self.rename_back()
        self.start_slave_sql()
        if (self.is_myrocks_table and self.is_myrocks_ttl_table):
            self.enable_ttl_for_myrocks()
        self.release_osc_lock()
        self.close_conn()
    except Exception:
        log.exception('Ignore following exception, because we want to try our best to cleanup, and free disk space:')
    self._cleanup_payload.mysql_user = self.mysql_user
    self._cleanup_payload.mysql_pass = self.mysql_pass
    self._cleanup_payload.socket = self.socket
    self._cleanup_payload.get_conn_func = self.get_conn_func
    self._cleanup_payload.cleanup(self._current_db)
    self.stats['time_in_cleanup'] = (time.time() - cleanup_start_time)
def print_stats(self):
    """Log a wall-time breakdown per stage plus outfile statistics."""
    stage_timings = (('dump', 'time_in_dump'), ('load', 'time_in_load'), ('replay', 'time_in_replay'), ('table checksum', 'time_in_table_checksum'), ('delta checksum', 'time_in_delta_checksum'), ('cleanup', 'time_in_cleanup'))
    for label, key in stage_timings:
        log.info('Time in {}: {:.3f}s'.format(label, self.stats.get(key, 0)))
    log.info('Time holding locks: {:.3f}s'.format(self.stats.get('time_in_lock', 0)))
    log.info(f"Outfile count: {self.stats.get('outfile_cnt', 0)}")
    log.info(f"Outfile total rows: {self.stats.get('outfile_lines', 0)}")
    if not self.use_sql_wsenv:
        log.info(f"Outfile total size: {self.stats.get('outfile_size', 0)} bytes")
def execute_steps_to_cutover(self):
    """Run the cutover sequence: sync partitions, swap tables, then
    restore the no-PK-creation setting."""
    for step in (self.sync_table_partitions, self.swap_tables, self.reset_no_pk_creation):
        step()
_hook
def run_ddl(self, db, sql):
    """Entry point: run the whole OSC pipeline for one DDL on database db.

    Note: the `sql` parameter here is the DDL string and shadows the
    module-level sql helper within this method.

    MySQL client/server errors become OSCError('GENERIC_MYSQL_ERROR');
    anything else triggers cleanup (unless keep_tmp_table) and is
    re-raised, wrapped as OSCError('OSC_INTERNAL_ERROR') when it is not
    already an OSCError.
    """
    try:
        time_started = time.time()
        self._new_table = self.parse_function(sql)
        self._cleanup_payload.set_current_table(self.table_name)
        self._current_db = db
        self._current_db_dir = util.dirname_for_db(db)
        self.init_connection(db)
        self.init_table_obj()
        self.determine_outfile_dir()
        if self.force_cleanup:
            self.cleanup_with_force()
        if self.has_desired_schema():
            self.release_osc_lock()
            return
        self.unblock_no_pk_creation()
        self.pre_osc_check()
        self.create_delta_table()
        self.create_copy_table()
        self.create_triggers()
        self.start_snapshot()
        self.select_table_into_outfile()
        self.drop_non_unique_indexes()
        self.load_data()
        self.recreate_non_unique_indexes()
        self.analyze_table()
        self.checksum()
        log.info('== Stage 5: Catch up to reduce time for holding lock ==')
        self.replay_till_good2go(checksum=self.skip_delta_checksum, final_catchup=True)
        self.execute_steps_to_cutover()
        self.cleanup()
        self.print_stats()
        self.stats['wall_time'] = (time.time() - time_started)
    except (MySQLdb.OperationalError, MySQLdb.ProgrammingError, MySQLdb.IntegrityError) as e:
        (errnum, errmsg) = e.args
        log.error('SQL execution error: [{}] {}\nWhen executing: {}\nWith args: {}'.format(errnum, errmsg, self._sql_now, self._sql_args_now))
        # 2006/2013 are the client "server gone away" / "lost connection"
        # codes: optionally keep tmp tables and files for postmortem
        if ((errnum in (2006, 2013)) and self.skip_cleanup_after_kill):
            self._cleanup_payload.remove_drop_table_entry(self._current_db, self.new_table_name)
            self._cleanup_payload.remove_drop_table_entry(self._current_db, self.delta_table_name)
            self._cleanup_payload.remove_all_file_entries()
        if (not self.keep_tmp_table):
            self.cleanup()
        raise OSCError('GENERIC_MYSQL_ERROR', {'stage': "running DDL on db '{}'".format(db), 'errnum': errnum, 'errmsg': errmsg}, mysql_err_code=errnum)
    except Exception as e:
        log.exception('{0} Exception raised, start to cleanup before exit {0}'.format(('-' * 10)))
        if (not self.keep_tmp_table):
            self.cleanup()
        if (not isinstance(e, OSCError)):
            raise OSCError('OSC_INTERNAL_ERROR', {'msg': str(e)})
        else:
            raise
def validate_provider_uid(provider_uid, required=True):
    """Validate a provider UID.

    Returns the UID unchanged when it is a non-empty string, or ``None`` when
    the UID is ``None`` and ``required`` is False. Raises ``ValueError`` for
    anything else (empty string, non-string, or a missing required UID).
    """
    if provider_uid is None and not required:
        return None
    if isinstance(provider_uid, str) and provider_uid:
        return provider_uid
    raise ValueError(
        'Invalid provider UID: "{0}". '
        'Provider UID must be a non-empty string.'.format(provider_uid))
class RtaEvents():
    """Container for events collected while running an RTA, keyed by agent type.

    NOTE(review): `_normalize_event_timing` and `_get_dump_dir` have no `self`
    parameter yet are invoked at instance level (`self._normalize_event_timing`,
    `self._get_dump_dir`) — they were presumably decorated `@staticmethod` in
    the original source and the decorators were lost in extraction; confirm.
    """
    def __init__(self, events):
        # Mapping of agent_type -> list of events, normalized and time-sorted.
        self.events: dict = self._normalize_event_timing(events)
    def _normalize_event_timing(events):
        """Normalize timestamps and sort each agent type's event list."""
        for (agent_type, _events) in events.items():
            events[agent_type] = normalize_timing_and_sort(_events)
        # Mutates and returns the same dict.
        return events
    def _get_dump_dir(rta_name=None, host_id=None, host_os_family=None):
        """Resolve (and create) the directory events should be dumped into.

        With both `rta_name` and `host_os_family`: the unit-test
        true-positives path for that RTA/OS. Otherwise: a timestamped folder
        under COLLECTION_DIR keyed by `host_id` (or 'unknown_host').
        """
        if (rta_name and host_os_family):
            dump_dir = get_path('unit_tests', 'data', 'true_positives', rta_name, host_os_family)
            os.makedirs(dump_dir, exist_ok=True)
            return dump_dir
        else:
            time_str = time.strftime('%Y%m%dT%H%M%SL')
            dump_dir = os.path.join(COLLECTION_DIR, (host_id or 'unknown_host'), time_str)
            os.makedirs(dump_dir, exist_ok=True)
            return dump_dir
    def evaluate_against_rule_and_update_mapping(self, rule_id, rta_name, verbose=True):
        """Evaluate collected events against rule `rule_id`; on matches,
        record the hit count and sources in the RTA rule-mapping file.

        Raises AssertionError when no rule with `rule_id` exists.
        """
        from .utils import combine_sources, evaluate
        rule = RuleCollection.default().id_map.get(rule_id)
        assert (rule is not None), f'Unable to find rule with ID {rule_id}'
        # Merge all agent types' events into a single stream for evaluation.
        merged_events = combine_sources(*self.events.values())
        filtered = evaluate(rule, merged_events)
        if filtered:
            sources = [e['agent']['type'] for e in filtered]
            mapping_update = rta_mappings.add_rule_to_mapping_file(rule, len(filtered), rta_name, *sources)
            if verbose:
                click.echo('Updated rule-mapping file with: \n{}'.format(json.dumps(mapping_update, indent=2)))
        elif verbose:
            click.echo('No updates to rule-mapping file; No matching results')
    def echo_events(self, pager=False, pretty=True):
        """Print all collected events as JSON, optionally through a pager."""
        echo_fn = (click.echo_via_pager if pager else click.echo)
        echo_fn(json.dumps(self.events, indent=(2 if pretty else None), sort_keys=True))
    def save(self, rta_name=None, dump_dir=None, host_id=None):
        """Write collected events to one `<source>.ndjson` file per source.

        Determines host.os.family from the events matching `host_id`
        (prompting interactively if it cannot be inferred), then dumps each
        source's events, one JSON object per line.

        Raises AssertionError when there are no events to save.
        """
        assert self.events, 'Nothing to save. Run Collector.run() method first or verify logging'
        host_os_family = None
        # Infer host.os.family from the first event of whichever source
        # matches host_id. NOTE(review): `self.events.get(key, {})[0]` would
        # KeyError on the `{}` default and `.get('os')` may return None —
        # assumes every key maps to a non-empty list of well-formed events;
        # confirm upstream guarantees.
        for key in self.events.keys():
            if (self.events.get(key, {})[0].get('host', {}).get('id') == host_id):
                host_os_family = self.events.get(key, {})[0].get('host', {}).get('os').get('family')
                break
        if (not host_os_family):
            click.echo('Unable to determine host.os.family for host_id: {}'.format(host_id))
            host_os_family = click.prompt('Please enter the host.os.family for this host_id', type=click.Choice(['windows', 'macos', 'linux']), default='windows')
        dump_dir = (dump_dir or self._get_dump_dir(rta_name=rta_name, host_id=host_id, host_os_family=host_os_family))
        for (source, events) in self.events.items():
            path = os.path.join(dump_dir, (source + '.ndjson'))
            with open(path, 'w') as f:
                f.writelines([(json.dumps(e, sort_keys=True) + '\n') for e in events])
            click.echo('{} events saved to: {}'.format(len(events), path))
class Oklab(Lab):
    """The Oklab color space, serialized as ``--oklab``.

    A Lab-type space whose base conversion target is D65 XYZ. Lightness
    spans [0, 1]; the ``a``/``b`` axes span [-0.4, 0.4] and mirror percent
    input.
    """

    BASE = 'xyz-d65'
    NAME = 'oklab'
    SERIALIZE = ('--oklab',)
    CHANNELS = (
        Channel('l', 0.0, 1.0, flags=FLG_OPT_PERCENT),
        Channel('a', (- 0.4), 0.4, flags=(FLG_MIRROR_PERCENT | FLG_OPT_PERCENT)),
        Channel('b', (- 0.4), 0.4, flags=(FLG_MIRROR_PERCENT | FLG_OPT_PERCENT))
    )
    CHANNEL_ALIASES = {'lightness': 'l'}
    WHITE = WHITES['2deg']['D65']

    def to_base(self, coords: Vector) -> Vector:
        """Convert Oklab coordinates to the D65 XYZ base space."""
        return oklab_to_xyz_d65(coords)

    def from_base(self, coords: Vector) -> Vector:
        """Convert D65 XYZ coordinates to Oklab."""
        return xyz_d65_to_oklab(coords)
def scale_island(island, uv_layer, scale_x, scale_y, pivot=None):
    """Scale the UV coordinates of every face in *island* about *pivot*.

    Args:
        island: iterable of faces, each exposing ``loops`` whose entries are
            indexable by *uv_layer* and carry a ``.uv`` point (``.x``/``.y``).
        uv_layer: the UV loop layer used to reach each loop's ``.uv``.
        scale_x: horizontal scale factor.
        scale_y: vertical scale factor.
        pivot: point with ``.x``/``.y`` to scale around; when None, the
            island's bounding-box center is used.

    Mutates the loops' UVs in place; returns None.
    """
    # Fixed: the previous `if not pivot` check also fired for a *falsy*
    # pivot — e.g. a zero-length mathutils Vector, which evaluates False —
    # silently recomputing the bbox center instead of scaling about (0, 0).
    if pivot is None:
        bbox = get_BBOX(island, None, uv_layer)
        pivot = bbox['center']
    for face in island:
        for loop in face.loops:
            u, v = loop[uv_layer].uv
            # Translate to pivot-local space, scale, translate back.
            loop[uv_layer].uv.x = (u - pivot.x) * scale_x + pivot.x
            loop[uv_layer].uv.y = (v - pivot.y) * scale_y + pivot.y
def match_values_to_active_cells(values, actind, num_cells) -> np.ndarray:
    """Scatter per-active-cell *values* onto a full grid of *num_cells*.

    Inactive cells are filled with the xtgeo "undefined" sentinel matching
    the dtype of *values*. Raises ValueError when *values* and *actind*
    differ in length.
    """
    if len(values) != len(actind):
        raise ValueError(
            f'Unexpected shape of values in init file: {np.asarray(values).shape}, '
            f'expected to match grid dimensions {num_cells} '
            f'or number of active cells {len(actind)}'
        )
    # Choose the integer or float undefined marker to match the data.
    if np.issubdtype(values.dtype, np.integer):
        fill = xtgeo.UNDEF_INT
    else:
        fill = xtgeo.UNDEF
    full_grid = np.full(num_cells, fill, dtype=values.dtype)
    full_grid[actind] = values
    return full_grid
class SplitWaitingDialog(QProgressDialog):
    """Modal progress dialog shown while a split operation runs in a
    background thread.

    `update_signal` lets worker-thread code request a repaint; connecting it
    to `update` means the refresh runs via Qt's signal dispatch.
    `was_rejected` prevents the done-callback from firing after cancel.
    """
    update_signal = pyqtSignal()
    update_label = None   # lazily created QLabel for the stage text
    was_rejected = False  # set True when the user rejects/cancels the dialog
    def __init__(self, parent, splitter, func, on_done, on_cancel):
        """Start `func` on the app thread pool and show the dialog.

        `on_done(future)` runs when the work completes (unless the dialog was
        rejected first); `on_cancel()` runs when the user rejects the dialog.
        """
        self.splitter = splitter
        # Range 0..100; cancel-button text is None (no cancel button).
        super().__init__('', None, 0, 100, parent, (Qt.Window | Qt.WindowTitleHint))
        self.setWindowModality(Qt.WindowModal)
        self.setWindowTitle(_('Please wait'))
        self.stage_progress = 0
        def _on_done(future):
            # Ignore completion that arrives after the user cancelled.
            if self.was_rejected:
                return
            self.accept()
            on_done(future)
        future = app_state.app.run_in_thread(func, self, on_done=_on_done)
        # Accepting the dialog also cancels the (already finished) future.
        self.accepted.connect(future.cancel)
        def _on_rejected():
            self.was_rejected = True
            future.cancel()
            on_cancel()
        self.rejected.connect(_on_rejected)
        self.update_signal.connect(self.update)
        self.update()
        self.show()
    def set_stage_progress(self, stage_progress):
        """Record fractional progress for the current stage (clamped to
        [0, 0.99]) and request a display refresh via the signal."""
        self.stage_progress = max(0, min(0.99, stage_progress))
        self.update_signal.emit()
    def update(self):
        """Refresh the progress bar value and the stage label text."""
        # Floor at 1 so the bar never shows a completely empty state.
        self.setValue(max(1, int((self.stage_progress * 100))))
        update_text = STAGE_NAMES[self.splitter.split_stage]
        if (self.update_label is None):
            # Create the label on first use; QProgressDialog takes ownership.
            self.update_label = QLabel(update_text)
            self.setLabel(self.update_label)
        else:
            self.update_label.setText(update_text)
class TestDeleteComponentTemplateRunner():
    """Unit tests for runner.DeleteComponentTemplate.

    NOTE(review): the bare `('elasticsearch.Elasticsearch')` and `.asyncio`
    lines look like decorators mangled by extraction — presumably
    `@mock.patch('elasticsearch.Elasticsearch')` and `@pytest.mark.asyncio`;
    `.asyncio` alone is not valid Python, so confirm against the original.
    """
    ('elasticsearch.Elasticsearch')
    .asyncio
    async def test_deletes_all_index_templates(self, es):
        """With only-if-exists False, every listed template is deleted
        unconditionally (404s ignored) and the weight equals the count."""
        es.cluster.delete_component_template = mock.AsyncMock()
        r = runner.DeleteComponentTemplate()
        params = {'templates': ['templateA', 'templateB'], 'request-params': {'timeout': 60}, 'only-if-exists': False}
        result = (await r(es, params))
        assert (result == {'weight': 2, 'unit': 'ops', 'success': True})
        es.cluster.delete_component_template.assert_has_awaits([mock.call(name='templateA', params=params['request-params'], ignore=[404]), mock.call(name='templateB', params=params['request-params'], ignore=[404])])
    ('elasticsearch.Elasticsearch')
    .asyncio
    async def test_deletes_only_existing_component_templates(self, es):
        """With only-if-exists True, only templates whose existence check
        returns True are deleted (here the second of two)."""
        # exists: templateA -> False, templateB -> True
        es.cluster.exists_component_template = mock.AsyncMock(side_effect=[False, True])
        es.cluster.delete_component_template = mock.AsyncMock()
        r = runner.DeleteComponentTemplate()
        params = {'templates': ['templateA', 'templateB'], 'request-params': {'timeout': 60}, 'only-if-exists': True}
        result = (await r(es, params))
        assert (result == {'weight': 1, 'unit': 'ops', 'success': True})
        es.cluster.delete_component_template.assert_awaited_once_with(name='templateB', params=params['request-params'])
    ('elasticsearch.Elasticsearch')
    .asyncio
    async def test_param_templates_mandatory(self, es):
        """Omitting the 'templates' param raises DataError and performs no
        delete calls."""
        es.indices.delete_template = mock.AsyncMock()
        r = runner.DeleteComponentTemplate()
        params = {}
        with pytest.raises(exceptions.DataError, match="Parameter source for operation 'delete-component-template' did not provide the mandatory parameter 'templates'. Add it to your parameter source and try again."):
            (await r(es, params))
        assert (es.indices.delete_template.await_count == 0)
class DockPaneToggleGroup(Group):
    """Action group holding one visibility-toggle action per dock pane of the
    current task.

    NOTE(review): the stray `_property` and `('dock_panes.items')` lines look
    like decorators mangled by extraction — presumably `@cached_property` (or
    traits' property getters) and `@observe('dock_panes.items')`; confirm
    against the original source.
    """
    id = 'DockPaneToggleGroup'
    items = List()
    # Task owning this group, resolved through the action manager's controller.
    task = Property(observe='parent.controller')
    _property
    def _get_task(self):
        """Property getter for `task`: the manager's controller's task, or
        None when no manager/controller is available."""
        manager = self.get_manager()
        if ((manager is None) or (manager.controller is None)):
            return None
        return manager.controller.task
    dock_panes = Property(observe='task.window._states.items.dock_panes')
    _property
    def _get_dock_panes(self):
        """Property getter for `dock_panes`: the dock panes recorded in the
        task window's state, or [] when there is no task/window."""
        if ((self.task is None) or (self.task.window is None)):
            return []
        task_state = self.task.window._get_state(self.task)
        return task_state.dock_panes
    def get_manager(self):
        """Walk up `parent` links past Group instances to reach the action
        manager (first non-Group ancestor)."""
        manager = self
        while isinstance(manager, Group):
            manager = manager.parent
        return manager
    ('dock_panes.items')
    def _dock_panes_updated(self, event):
        """Rebuild the toggle ActionItems when the set of dock panes changes,
        then mark the manager as changed so the UI refreshes."""
        from pyface.action.action_item import ActionItem
        # Dispose of the previous action items before rebuilding.
        self.destroy()
        items = []
        for dock_pane in self.dock_panes:
            action = DockPaneToggleAction(dock_pane=dock_pane)
            items.append(ActionItem(action=action))
        # Stable menu order: alphabetical by action name.
        items.sort(key=(lambda item: item.action.name))
        self.items = items
        manager = self.get_manager()
        manager.changed = True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.