code stringlengths 281 23.7M |
|---|
class OptionPlotoptionsAreasplinerangeLowmarkerStatesHover(Options):
    """Hover-state options for the low-value marker of an areasplinerange series.

    Generated Highcharts option wrapper: each option is exposed as a property
    whose getter returns the configured value (or the Highcharts default) and
    whose setter records the value in the option tree via ``_config``.

    NOTE(review): the duplicated getter/setter method names indicate stripped
    ``@property`` / ``@<name>.setter`` decorators (as written, each setter
    silently shadowed its getter); they are restored here.
    """

    @property
    def animation(self) -> 'OptionPlotoptionsAreasplinerangeLowmarkerStatesHoverAnimation':
        """Animation sub-options applied when the hover state kicks in."""
        return self._config_sub_data('animation', OptionPlotoptionsAreasplinerangeLowmarkerStatesHoverAnimation)

    @property
    def enabled(self):
        """Whether the hover state is enabled (default: True)."""
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def fillColor(self):
        """Marker fill color in the hover state (default: None)."""
        return self._config_get(None)

    @fillColor.setter
    def fillColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineColor(self):
        """Marker stroke color in the hover state (default: None)."""
        return self._config_get(None)

    @lineColor.setter
    def lineColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineWidth(self):
        """Marker stroke width in the hover state (default: None)."""
        return self._config_get(None)

    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    @property
    def lineWidthPlus(self):
        """Pixels added to the line width on hover (default: 1)."""
        return self._config_get(1)

    @lineWidthPlus.setter
    def lineWidthPlus(self, num: float):
        self._config(num, js_type=False)

    @property
    def radius(self):
        """Marker radius in the hover state (default: None)."""
        return self._config_get(None)

    @radius.setter
    def radius(self, num: float):
        self._config(num, js_type=False)

    @property
    def radiusPlus(self):
        """Pixels added to the radius on hover (default: 2)."""
        return self._config_get(2)

    @radiusPlus.setter
    def radiusPlus(self, num: float):
        self._config(num, js_type=False)
class ResponseStream():
    """Consumes a streaming OpenAI-style API response in the background and
    fans each chunk out to one ``ResponseStreamSlice`` per prompt in the batch.

    A background asyncio task (``iteration_task``) drives the iteration; on
    success every slice is finished, on any exception every slice is notified
    of the error.
    """

    def __init__(self, scheduler, kwargs, response, n, request_ids, maximum_retries=20, chaos=None, stats: Stats = None):
        """Start consuming *response*.

        :param scheduler: owning API scheduler.
        :param kwargs: the request kwargs; ``prompt`` is a list of n prompts.
        :param response: async-iterable of streamed data chunks.
        :param n: batch size (number of prompts / slices).
        :param request_ids: one request id per prompt.
        :param maximum_retries: forwarded to each slice.
        :param chaos: optional probability in [0, 1] of injecting a
            ChaosException per chunk (for resilience testing).
        :param stats: optional shared counters; may be None.
        """
        self.scheduler: AsyncOpenAIAPI = scheduler
        self.kwargs = kwargs
        self.response = response
        self.request_ids = request_ids
        # One slice per prompt; each slice sees only its own kwargs view.
        self.slices = [ResponseStreamSlice(self, self.view_kwargs(i), maximum_retries=maximum_retries) for i in range(n)]
        self.chaos = chaos
        self.stats = stats
        if self.stats is not None:  # stats defaults to None; guard before mutating
            self.stats.requests += 1
            self.stats.sum_batch_size += n
        trace_metric(kwargs, 'openai.requests', 1)
        trace_metric(kwargs, 'openai.batch_size', n)
        self.iteration_task = asyncio.create_task(self.iter_task())

    def view_kwargs(self, i):
        """Return a copy of the request kwargs narrowed to prompt *i*."""
        kwargs = self.kwargs.copy()
        kwargs['prompt'] = kwargs['prompt'][i]
        kwargs['request_id'] = self.request_ids[i]
        return kwargs

    def __del__(self):
        # Best-effort cleanup; guard against a partially-constructed instance
        # (e.g. __init__ raised before the task was created).
        task = getattr(self, 'iteration_task', None)
        if task is not None:
            task.cancel()

    async def iter_task(self):
        """Background loop: route streamed choices to their slices."""
        try:
            self.response = aiter(self.response)
            async for data in self.response:
                if (self.chaos is not None) and (random.random() > (1.0 - self.chaos)):
                    raise ChaosException('OpenAI API: ChaosException probabilistically triggered by chaos value {}'.format(self.chaos))
                if 'choices' not in data:
                    print('No choices in data', data)
                    continue
                for c in data['choices']:
                    assert (c is not None)
                    index = c['index']
                    n_tokens = len(c['logprobs']['tokens'])
                    if self.stats is not None:
                        self.stats.tokens += n_tokens
                    trace_metric(self.kwargs, 'openai.tokens', n_tokens)
                    self.slices[index].digest(c)
                    self.slices[index].finish_reason = c['finish_reason']
            for c in self.slices:
                c.finish()
        except Exception as e:
            # Propagate any failure (including chaos injection) to every slice.
            for c in self.slices:
                c.error(e)

    def view(self, index):
        """Return the slice for prompt *index* (bounds-checked)."""
        assert (index < len(self.slices)), f'index {index} out of bounds for {len(self.slices)} slices of response stream'
        return self.slices[index]
class TaskWidget(QtWidgets.QGroupBox):
    """Group box widget representing a single task/asset, its takes and child tasks.

    Shows rename/recode fields for assets, hosts child ``TakeWidget`` and
    ``TaskWidget`` instances, and lets the user pick a new parent task.

    Signals:
        add_task (object): request to add a task to the main list.
        remove_task (object): emitted with ``self`` after this widget removed itself.
        version_updated: emitted whenever the contained versions change.
    """

    add_task = QtCore.Signal(object)
    remove_task = QtCore.Signal(object)
    version_updated = QtCore.Signal()

    def __init__(self, parent=None, task=None):
        super(TaskWidget, self).__init__(parent=parent)
        self._task = None
        self.main_layout = None
        self.no_versions_place_holder = None
        self.remove_button = None
        self.pick_new_parent_button = None
        self.asset_new_name_label = None
        self.asset_new_name_line_edit = None
        self.asset_new_name_validation_message_field = None
        self.asset_new_code_label = None
        self.asset_new_code_line_edit = None
        self.asset_new_code_validation_message_field = None
        self.child_widgets_layout = None
        self.take_widgets = []
        self.task_widgets = []
        self._new_parent = None
        self.new_parent_label = None
        self.setup_ui()
        # Assign through the property so the UI is populated from the task.
        self.task = task

    def setup_ui(self):
        """Create the static widgets and layouts (task data is filled later)."""
        self.main_layout = QtWidgets.QVBoxLayout(self)
        self.main_layout.setContentsMargins(0, 0, 0, 0)
        buttons_layout = QtWidgets.QHBoxLayout()
        buttons_layout.setContentsMargins(0, 0, 0, 0)
        self.main_layout.addLayout(buttons_layout)
        self.remove_button = QtWidgets.QPushButton(self)
        self.remove_button.setText('Remove Asset')
        self.remove_button.setToolTip('Removes the asset from the list.')
        self.remove_button.clicked.connect(self.remove_wrapper)
        buttons_layout.addWidget(self.remove_button)
        self.pick_new_parent_button = QtWidgets.QPushButton(self)
        self.pick_new_parent_button.setText('Pick New Parent...')
        self.pick_new_parent_button.setToolTip('Select the new parent...')
        self.pick_new_parent_button.clicked.connect(self.pick_new_parent)
        buttons_layout.addWidget(self.pick_new_parent_button)
        # Child widgets inherit the parent choice, so hide the picker for them.
        if isinstance(self.parent(), TaskWidget) and self.new_parent:
            self.pick_new_parent_button.setVisible(False)
        self.new_parent_label = QtWidgets.QLabel(self)
        buttons_layout.addWidget(self.new_parent_label)
        buttons_layout.addSpacerItem(QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed))
        asset_new_name_and_code_layout = QtWidgets.QHBoxLayout()
        asset_new_name_and_code_layout.setContentsMargins(0, 0, 0, 0)
        self.main_layout.addLayout(asset_new_name_and_code_layout)
        validation_hint = 'First Letter : A-Z\nOther Letters: a-z A-Z 0-9\nNo empty spaces!\nNo underscore, no dash etc.\n'
        self.asset_new_name_label = QtWidgets.QLabel(self)
        self.asset_new_name_label.setText('Asset New Name')
        asset_new_name_and_code_layout.addWidget(self.asset_new_name_label)
        asset_new_name_layout = QtWidgets.QVBoxLayout()
        asset_new_name_layout.setContentsMargins(0, 0, 0, 0)
        self.asset_new_name_validation_message_field = QtWidgets.QLabel(self)
        self.asset_new_name_validation_message_field.setStyleSheet('color: red;')
        self.asset_new_name_line_edit = ValidatedLineEdit(parent=self, message_field=self.asset_new_name_validation_message_field)
        self.asset_new_name_line_edit.setToolTip('New Asset Name\n\n{}'.format(validation_hint))
        self.asset_new_name_line_edit.setFixedWidth(150)
        self.asset_new_name_line_edit.editingFinished.connect(self.validate_asset_new_name)
        asset_new_name_layout.addWidget(self.asset_new_name_line_edit)
        asset_new_name_layout.addWidget(self.asset_new_name_validation_message_field)
        asset_new_name_and_code_layout.addLayout(asset_new_name_layout)
        self.asset_new_code_label = QtWidgets.QLabel(self)
        self.asset_new_code_label.setText('Asset New Code')
        asset_new_name_and_code_layout.addWidget(self.asset_new_code_label)
        asset_new_code_layout = QtWidgets.QVBoxLayout()
        asset_new_code_layout.setContentsMargins(0, 0, 0, 0)
        self.asset_new_code_validation_message_field = QtWidgets.QLabel(self)
        self.asset_new_code_validation_message_field.setStyleSheet('color: red;')
        self.asset_new_code_line_edit = ValidatedLineEdit(parent=self, message_field=self.asset_new_code_validation_message_field)
        self.asset_new_code_line_edit.setToolTip('New Asset Code\n\n{}'.format(validation_hint))
        self.asset_new_code_line_edit.setFixedWidth(150)
        self.asset_new_code_line_edit.editingFinished.connect(self.validate_asset_new_code)
        asset_new_code_layout.addWidget(self.asset_new_code_line_edit)
        asset_new_code_layout.addWidget(self.asset_new_code_validation_message_field)
        asset_new_name_and_code_layout.addLayout(asset_new_code_layout)
        asset_new_name_and_code_layout.addSpacerItem(QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed))
        self.child_widgets_layout = QtWidgets.QVBoxLayout()
        self.child_widgets_layout.setContentsMargins(12, 0, 12, 0)
        self.no_versions_place_holder = QtWidgets.QLabel(self)
        self.no_versions_place_holder.setText('--- No Versions ---')
        self.no_versions_place_holder.setVisible(False)
        self.child_widgets_layout.addWidget(self.no_versions_place_holder)
        self.main_layout.addLayout(self.child_widgets_layout)

    def remove_wrapper(self):
        """Ask for confirmation, then remove this widget and notify listeners."""
        result = QtWidgets.QMessageBox.critical(self, 'Remove Task!', 'Remove this {}?'.format(self.task.entity_type), (QtWidgets.QMessageBox.StandardButton.Yes | QtWidgets.QMessageBox.StandardButton.No))
        if result == QtWidgets.QMessageBox.StandardButton.Yes:
            self.remove()
            self.version_updated.emit()

    def remove(self):
        """Recursively remove child widgets, deregister the task and delete self."""
        # Iterate over copies: child.remove() mutates the backing lists.
        task_widgets = copy.copy(self.task_widgets)
        for child_task_widget in task_widgets:
            child_task_widget.remove()
        take_widgets = copy.copy(self.take_widgets)
        for child_take_widget in take_widgets:
            child_take_widget.remove()
        EntityStorage.remove_entity(self.task)
        self.deleteLater()
        self.remove_task.emit(self)

    def remove_child_task(self, task_widget):
        """Detach *task_widget* from the layout and from ``task_widgets``."""
        for i, t_widget in enumerate(self.task_widgets):
            if t_widget == task_widget:
                self.child_widgets_layout.removeWidget(t_widget)
                self.task_widgets.pop(i)
                break

    def pick_new_parent(self):
        """Open the task picker dialog and validate/apply the chosen parent."""
        dialog = task_picker_dialog.MainDialog(parent=self, project=None, allow_multi_selection=False)
        if self.new_parent:
            dialog.tasks_tree_view.find_and_select_entity_item(self.new_parent)
        dialog.exec()
        # DialogCode moved in newer Qt bindings; support both spellings.
        try:
            accepted = QtWidgets.QDialog.DialogCode.Accepted
        except AttributeError:
            accepted = QtWidgets.QDialog.Accepted
        if dialog.result() == accepted:
            selected_tasks = dialog.tasks_tree_view.get_selected_tasks()
            dialog.deleteLater()
            if not selected_tasks:
                return
            new_parent = selected_tasks[0]
            # Reject cycles: the new parent must not be a descendant.
            if self.task in new_parent.parents:
                QtWidgets.QMessageBox.critical(self, 'Invalid Parent!', 'Selected parent is one of the child of the Asset!!!')
                return
            if new_parent == self.task.parent:
                QtWidgets.QMessageBox.critical(self, 'Invalid Parent!', 'Selected parent is already the current parent!!!')
                return
            self.new_parent = new_parent
            self.validate()

    # Fix: the original had duplicate `new_parent` defs plus a bare
    # `_parent.setter` expression — clearly stripped/garbled property
    # decorators. Restored as a real property.
    @property
    def new_parent(self):
        """The task selected as the new parent (or None)."""
        return self._new_parent

    @new_parent.setter
    def new_parent(self, new_parent):
        self._new_parent = new_parent
        if isinstance(self.parent(), TaskWidget) and self._new_parent:
            self.pick_new_parent_button.setVisible(False)
        self.update_new_parent_label()
        self.validate()

    def update_new_parent_label(self):
        """Refresh the label showing the currently selected new parent."""
        new_parent_hierarchy_name = '-- No New Parent Selected --'
        if self.new_parent:
            new_parent_hierarchy_name = get_task_hierarchy_name(self.new_parent)
        self.new_parent_label.setText('New Parent: {}'.format(new_parent_hierarchy_name))

    def validate_asset_new_name(self):
        """Validate the new-name field; always valid for non-Asset tasks."""
        if not isinstance(self.task, Asset):
            self.asset_new_name_line_edit.set_valid()
            return True
        text = self.asset_new_name_line_edit.text()
        match = TAKE_NAME_VALIDATOR_REGEX.match(text)
        # The regex must consume the whole text, not only a prefix.
        if (not match) or (''.join(match.groups()) != text):
            self.asset_new_name_line_edit.set_invalid('Asset name is not in correct format')
            return False
        else:
            self.asset_new_name_line_edit.set_valid()
            return True

    def validate_asset_new_code(self):
        """Validate the new-code field; always valid unless Asset/Shot/Sequence."""
        if not isinstance(self.task, (Asset, Shot, Sequence)):
            self.asset_new_code_line_edit.set_valid()
            return True
        text = self.asset_new_code_line_edit.text()
        match = TAKE_NAME_VALIDATOR_REGEX.match(text)
        if (not match) or (''.join(match.groups()) != text):
            self.asset_new_code_line_edit.set_invalid('Asset code is not in correct format')
            return False
        else:
            self.asset_new_code_line_edit.set_valid()
            return True

    # Fix: same stripped-decorator pattern as `new_parent`; restored.
    @property
    def task(self):
        """The task this widget represents."""
        return self._task

    @task.setter
    def task(self, task):
        if task is None:
            return
        self._task = task
        self.setTitle(get_task_hierarchy_name(task))
        EntityStorage.add_entity(self._task)
        if isinstance(task, (Asset, Shot, Sequence)):
            self.asset_new_name_line_edit.setText(task.name)
            self.asset_new_code_line_edit.setText(task.code)
        else:
            self.asset_new_name_label.setVisible(False)
            self.asset_new_name_line_edit.setVisible(False)
            # Fix: hide the *name* validation field here (the original hid the
            # code field twice, leaving the name message visible).
            self.asset_new_name_validation_message_field.setVisible(False)
            self.asset_new_code_label.setVisible(False)
            self.asset_new_code_line_edit.setVisible(False)
            self.asset_new_code_validation_message_field.setVisible(False)
        self.remove_button.setText('Remove {}'.format(self._task.entity_type))
        self.setStyleSheet('QGroupBox {{ background-color: {}; color: {}}}'.format(COLORS[self._task.entity_type.lower()]['bg'], COLORS[self._task.entity_type.lower()]['fg']))
        take_names = get_unique_take_names(self._task.id)
        for take in take_names:
            take_widget = TakeWidget(parent=self, task=self._task, take=take)
            self.child_widgets_layout.addWidget(take_widget)
            take_widget.add_references.connect(self.add_task)
            take_widget.version_updated.connect(self.version_updated)
            self.take_widgets.append(take_widget)
        if take_names:
            self.no_versions_place_holder.setVisible(False)
        if task.children:
            self.no_versions_place_holder.setVisible(False)

    def add_child_tasks(self, child_tasks):
        """Insert widgets for *child_tasks*, creating intermediate task widgets
        along the hierarchy between ``self.task`` and each child."""
        tasks_added = []
        for child_task in child_tasks:
            intermediate_tasks = get_intermediate_tasks(self.task, child_task)
            child_task_widget_tasks = [child_task_widget.task for child_task_widget in self.task_widgets]
            for i, task in enumerate(intermediate_tasks):
                if task in self.task.children:
                    if task not in child_task_widget_tasks:
                        task_widget = TaskWidget(parent=self, task=task)
                        task_widget.remove_task.connect(self.remove_child_task)
                        task_widget.version_updated.connect(self.version_updated.emit)
                        task_widget.add_task.connect(self.add_task)
                        task_widget.new_parent = self.task
                        self.task_widgets.append(task_widget)
                        self.child_widgets_layout.addWidget(task_widget)
                        tasks_added.append(task)
                        # Recurse with the remainder of the hierarchy chain.
                        task_widget.add_child_tasks(intermediate_tasks[(i + 1):])
                        break
                    else:
                        # Widget already exists: delegate the rest of the chain.
                        for task_widget in self.task_widgets:
                            if task_widget.task == task:
                                task_widget.add_child_tasks(intermediate_tasks[(i + 1):])
                                break
        if tasks_added:
            self.version_updated.emit()

    def check_versions(self):
        """Recursively re-validate all take and child task widgets."""
        for take_widget in self.take_widgets:
            take_widget.validate()
        for task_widget in self.task_widgets:
            task_widget.check_versions()

    def is_enabled(self):
        """Return True if at least one take widget is enabled."""
        return any((take_widget.is_enabled() for take_widget in self.take_widgets))

    def validate(self):
        """Validate the whole subtree; recolor the box to show the result."""
        is_valid = (self.new_parent and all([take_widget.validate() for take_widget in self.take_widgets]) and all([task_widget.validate() for task_widget in self.task_widgets]) and self.validate_asset_new_name() and self.validate_asset_new_code())
        if is_valid:
            self.setStyleSheet('QGroupBox {{ background-color: {}; color:{}}}'.format(COLORS[self._task.entity_type.lower()]['bg'], COLORS[self._task.entity_type.lower()]['fg']))
        else:
            self.setStyleSheet('QGroupBox {{ background-color: {}; color: {}}}'.format(COLORS['invalid']['bg'], COLORS['invalid']['fg']))
        return is_valid

    def to_dict(self, dict_in=None):
        """Serialize this widget (and children) into a {task_id: data} dict."""
        dict_out = dict_in if (dict_in is not None) else dict()
        dict_out[self.task.id] = {}
        if self.new_parent:
            dict_out[self.task.id]['new_parent_id'] = self.new_parent.id
        if isinstance(self.task, (Asset, Shot, Sequence)):
            new_name = self.asset_new_name_line_edit.text()
            if new_name != self.task.name:
                dict_out[self.task.id]['new_name'] = new_name
            new_code = self.asset_new_code_line_edit.text()
            if new_code != self.task.code:
                dict_out[self.task.id]['new_code'] = new_code
        # Fix: is_enabled is a method; the original tested the bound method
        # object (always truthy). NOTE(review): `any(...)` may be the real
        # intent here, since the comprehension below already filters enabled
        # takes — confirm against callers.
        if self.take_widgets and all([take_widget.is_enabled() for take_widget in self.take_widgets]):
            dict_out[self.task.id]['takes'] = dict(((take_widget.take, take_widget.to_dict()) for take_widget in self.take_widgets if take_widget.is_enabled()))
        for task_widget in self.task_widgets:
            task_widget.to_dict(dict_in=dict_out)
        return dict_out
class VertexNormalWeight(enum.Enum):
    """How each vertex normal is derived from the adjacent face normals."""

    UNIFORM = -1
    ANGLE = 0
    AREA = 1
    COMBINED = 2
    UNWEIGHTED = 3

    # Fix: the method takes `cls` but lacked the @classmethod decorator.
    @classmethod
    def create_property(cls):
        """Build the Blender EnumProperty exposing these weighting modes."""
        return bpy.props.EnumProperty(
            name='Vertex Normal Weight',
            description=('Determines how each vertex normal is calculated as the '
                         'weighted average of adjacent face normals'),
            default='ANGLE',
            items=[
                ('UNIFORM', 'Uniform',
                 'Face normals are averaged evenly.',
                 '', cls.UNIFORM.value),
                ('ANGLE', 'Corner Angle',
                 ('Face normals are averaged according to the corner '
                  'angle of a shared vertex in each face. This is the '
                  'smooth shading approach used by Blender.'),
                 '', cls.ANGLE.value),
                ('AREA', 'Face Area',
                 ('Face normals are averaged according to the area of '
                  'each face.'),
                 '', cls.AREA.value),
                ('COMBINED', 'Combined',
                 ('Face normals are averaged according to both corner '
                  'angle and face area.'),
                 '', cls.COMBINED.value),
                ('UNWEIGHTED', 'Unweighted',
                 ('Face normals are not averaged; vertex normals are '
                  'fixed.'),
                 '', cls.UNWEIGHTED.value),
            ])
class MockSSEClientAdapter(testutils.MockAdapter):
    """Mock transport adapter that replays a canned SSE payload."""

    def __init__(self, payload, recorder):
        # Always answer with HTTP 200.
        super(MockSSEClientAdapter, self).__init__(payload, 200, recorder)

    def send(self, request, **kwargs):
        """Send via the base adapter, then rebuild the fields an SSE client
        reads from a live response (url, status, raw byte stream, encoding)."""
        response = super(MockSSEClientAdapter, self).send(request, **kwargs)
        response.encoding = 'utf-8'
        response.url = request.url
        response.status_code = self.status
        response.raw = io.BytesIO(self.data.encode())
        return response
# NOTE(review): the decorator line was garbled to a bare "()" — restored as
# @frappe.whitelist(), the standard decorator for frappe client-callable
# endpoints. Confirm against the original module.
@frappe.whitelist()
def get_procedure_prescribed(patient, encounter=False):
    """Return non-completed Clinical Procedure service requests for *patient*.

    :param patient: patient name/id to filter Service Requests by.
    :param encounter: accepted for API compatibility; not used in the query.
    :return: rows of (template_dn, order_group, invoiced, practitioner,
        order_date, name), newest creation first.
    """
    hso = frappe.qb.DocType('Service Request')
    return (
        frappe.qb.from_(hso)
        .select(hso.template_dn, hso.order_group, hso.invoiced, hso.practitioner, hso.order_date, hso.name)
        .where(hso.patient == patient)
        .where(hso.status != 'Completed')
        .where(hso.template_dt == 'Clinical Procedure Template')
        .orderby(hso.creation, order=frappe.qb.desc)
        .run()
    )
def move_island(island, dx, dy):
    """Translate every UV coordinate of *island* by (dx, dy).

    :param island: iterable of BMesh faces forming the UV island.
    :param dx: horizontal UV offset.
    :param dy: vertical UV offset.
    """
    mesh = bpy.context.active_object.data
    bm = bmesh.from_edit_mesh(mesh)
    uv_layer = bm.loops.layers.uv.verify()
    for face in island:
        for loop in face.loops:
            uv = loop[uv_layer].uv
            uv[0] += dx
            uv[1] += dy
    # Push the edited UVs back to the mesh so the viewport updates.
    bmesh.update_edit_mesh(mesh)
def _ranged_number(value, minimum, maximum, number_type):
value = number_type(value)
if ((minimum is not None) and (value < minimum)):
raise ValueError('{} is not greater than {}'.format(value, minimum))
if ((maximum is not None) and (value > maximum)):
raise ValueError('{} is not greater than {}'.format(value, minimum))
return value |
class NormalEig(dist.Distribution):
    """Multivariate normal parameterized by an eigendecomposition of its covariance.

    Given mean m, eigenvalues w and orthonormal eigenvectors V (covariance
    C = V diag(w) V^T), samples are drawn as m + (sqrt(w) * V) z with
    z ~ N(0, I).

    Fix: the matrix-multiplication ``@`` operators were stripped from the
    source (``self.sqrt_covar z`` / ``.unsqueeze(0) self.eig_vecs``),
    leaving syntax errors; they are restored here.
    """

    def __init__(self, mean, eig_vals, eig_vecs):
        """
        :param mean: 1-D tensor of length n.
        :param eig_vals: 1-D tensor of the n covariance eigenvalues.
        :param eig_vecs: (n, n) tensor of covariance eigenvectors.
        """
        assert (mean.dim() == 1)
        self.n = mean.shape[0]
        assert (eig_vals.shape == (self.n,))
        assert (eig_vecs.shape == (self.n, self.n))
        self._mean = mean
        self.eig_vecs = eig_vecs
        self.sqrt_eig_vals = eig_vals.sqrt().unsqueeze(0)
        # Square root factor of the covariance: sqrt(w) * V.
        self.sqrt_covar = (self.sqrt_eig_vals * eig_vecs)
        # log sqrt(det C) = sum(log w) / 2, used in log_prob.
        self.log_sqrt_det = (eig_vals.log().sum() / 2.0)
        self.base_dist = torch.distributions.normal.Normal(torch.zeros(1, self.n).to(dtype=eig_vals.dtype), torch.ones(1, self.n).to(dtype=eig_vals.dtype))
        self.singular_eig_decompositions = (eig_vals, eig_vecs)
        event_shape = self._mean.shape[-1:]
        batch_shape = torch.broadcast_shapes(self.sqrt_covar.shape[:-2], self._mean.shape[:-1])
        super().__init__(batch_shape, event_shape)

    def sample(self, sample_shape=torch.Size()):
        """Draw one reparameterized sample: mean + sqrt_covar @ z."""
        with torch.no_grad():
            z = torch.normal(mean=0.0, std=1.0, size=(self.n, 1))
            z = z.to(dtype=self._mean.dtype)
            # Fix: restore the matmul operator.
            return (self._mean + (self.sqrt_covar @ z).squeeze(1))

    def log_prob(self, value):
        """Log density of *value* under this distribution."""
        assert (value.shape == (self.n,))
        # Whiten: project onto eigenvectors and rescale by sqrt eigenvalues.
        # Fix: restore the matmul operator.
        z = (((value - self._mean).unsqueeze(0) @ self.eig_vecs) / self.sqrt_eig_vals)
        return (self.base_dist.log_prob(z).sum() - self.log_sqrt_det)
def test_user_gets_previous_forms_assigned_to_him(client, msend):
    """End-to-end check: forms submitted to an email address before (or after)
    registration become visible on the account only once the address is
    confirmed and the plan allows listing them.

    NOTE(review): relies on the ``client``/``msend`` test fixtures and the
    module-level Form/User/Plan/DB/settings helpers — behavior inferred from
    the assertions below, confirm against the app's form-listing rules.
    """
    # Phase 1: a form is submitted to 'marko' before any account exists.
    client.post(u'/marko', headers={'Referer': 'tomatoes.com'}, data={'name': 'alice'})
    f = Form.query.filter_by(host='tomatoes.com', email=u'marko').first()
    f.confirm_sent = True
    f.confirmed = True
    DB.session.add(f)
    DB.session.commit()
    # Registering alone does not expose the form.
    r = client.post('/register', data={'email': u'marko', 'password': 'russia'})
    r = client.get('/api-int/forms', headers={'Accept': 'application/json', 'Referer': settings.SERVICE_URL})
    forms = json.loads(r.data.decode('utf-8'))['forms']
    assert (0 == len(forms))
    # Confirming the account email is still not enough on the default plan.
    (link, qs) = parse_confirmation_link_sent(msend.call_args[1]['text'])
    client.get(link, query_string=qs)
    r = client.get('/api-int/forms', headers={'Accept': 'application/json', 'Referer': settings.SERVICE_URL})
    forms = json.loads(r.data.decode('utf-8'))['forms']
    assert (0 == len(forms))
    # Upgrading to the gold plan finally reveals the pre-existing form.
    user = User.query.filter_by(email=u'marko').first()
    user.plan = Plan.gold
    DB.session.add(user)
    DB.session.commit()
    r = client.get('/api-int/forms', headers={'Accept': 'application/json', 'Referer': settings.SERVICE_URL})
    forms = json.loads(r.data.decode('utf-8'))['forms']
    assert (1 == len(forms))
    assert (forms[0]['email'] == u'marko')
    assert (forms[0]['host'] == 'tomatoes.com')
    # Phase 2: a form addressed to an empty email stays invisible until that
    # address is added to the account and confirmed.
    r = client.post('/', headers={'Referer': 'mark.com'}, data={'name': 'luke'})
    f = Form.query.filter_by(host='mark.com', email='').first()
    f.confirm_sent = True
    f.confirmed = True
    DB.session.add(f)
    DB.session.commit()
    r = client.get('/api-int/forms', headers={'Accept': 'application/json', 'Referer': settings.SERVICE_URL})
    forms = json.loads(r.data.decode('utf-8'))['forms']
    assert (1 == len(forms))
    client.post('/account/add-email', data={'address': ''})
    (link, qs) = parse_confirmation_link_sent(msend.call_args[1]['text'])
    client.get(link, query_string=qs)
    r = client.get('/api-int/forms', headers={'Accept': 'application/json', 'Referer': settings.SERVICE_URL})
    forms = json.loads(r.data.decode('utf-8'))['forms']
    assert (2 == len(forms))
    # Forms are listed newest-first.
    assert (forms[0]['email'] == '')
    assert (forms[0]['host'] == 'mark.com')
    # Phase 3: a later submission to the (already confirmed) address shows up
    # immediately.
    r = client.post(u'/marko', headers={'Referer': 'elsewhere.com'}, data={'name': 'luke'})
    f = Form.query.filter_by(host='elsewhere.com', email=u'marko').first()
    f.confirm_sent = True
    f.confirmed = True
    DB.session.add(f)
    DB.session.commit()
    r = client.get('/api-int/forms', headers={'Accept': 'application/json', 'Referer': settings.SERVICE_URL})
    forms = json.loads(r.data.decode('utf-8'))['forms']
    assert (3 == len(forms))
    assert (forms[0]['email'] == u'marko')
    assert (forms[0]['host'] == 'elsewhere.com')
class FeeValidity(Document):
    """Fee Validity document: status reflects expiry date and visit count."""

    def validate(self):
        # Recompute the status on every save/validate.
        self.update_status()

    def update_status(self):
        """Set status to Expired, Completed or Active.

        NOTE(review): the visit check uses strict equality with max_visits —
        confirm whether `>=` was intended for over-visited records.
        """
        if getdate(self.valid_till) < getdate():
            self.status = 'Expired'
            return
        if self.visited == self.max_visits:
            self.status = 'Completed'
            return
        self.status = 'Active'
def test():
    """Grade a spaCy-course exercise (German locale).

    Checks the learner's globals (``doc``, ``erster_token``) and the submitted
    source (``__solution__``); feedback strings are shown to German-speaking
    learners and must stay in German.
    """
    assert (doc.text == 'Ich mag niedliche Katzen und Faultiere.'), 'Bist du dir sicher, dass du den Text korrekt verarbeitet hast?'
    # "erster_token" = the first token of the doc.
    assert (erster_token == doc[0]), 'Bist du dir sicher, dass du den ersten Token ausgewahlt hast?'
    assert ('print(erster_token.text)' in __solution__), 'Druckst du den Text des Tokens?'
    assert ('spacy.blank("de")' in __solution__), 'Rufst du spacy.blank mit der richtigen Sprache auf?'
    __msg__.good('Sehr schon!')
def test_embedded_message() -> None:
    """Round-trip a message embedded inside another message.

    Wire bytes: 0x1a is the tag for field 3 with wire type 2
    (length-delimited), 0x03 the payload length, and 0x08 0x96 0x01 encodes
    Child.a = 150 (field 1, varint) — standard protobuf framing.
    """
    class Child(BaseMessage):
        a: Annotated[(int, Field(1))] = 0

    class Parent(BaseMessage):
        c: Annotated[(Child, Field(3))] = field(default_factory=Child)

    message = Parent(c=Child(a=150))
    bytes_ = b'\x1a\x03\x08\x96\x01'
    # An empty payload decodes to a message with all default values.
    assert (Parent.loads(b'') == Parent())
    # Serialization and deserialization are inverse operations.
    assert (bytes(message) == bytes_)
    assert (Parent.loads(bytes_) == message)
# NOTE(review): the decorator line was garbled to a bare "(eq=False)" —
# restored as @dataclass(eq=False), consistent with the use of
# field(default_factory=...) and the hand-written __eq__ below. Confirm
# against the original module (attrs would also accept eq=False).
@dataclass(eq=False)
class BatchObject(MinimalBatchObject):
    """Batch that additionally carries its full request objects.

    ``eq=False`` keeps the custom ``__eq__`` below instead of a generated one.
    """

    # Full request payloads; not included in the serialized minimal form.
    request_objects: List[RequestObject] = field(default_factory=list)

    def serialize(self) -> MinimalBatchObject:
        """Return a minimal copy without the request objects."""
        return MinimalBatchObject(uid=self.uid, requests_info=self.requests_info, model=self.model, source_id=self.source_id, status=self.status, created_at=self.created_at)

    def __eq__(self, other):
        # Equal when the minimal fields match AND the request objects match.
        return (super().__eq__(other) and (self.request_objects == other.request_objects))
class OptionSeriesColumnpyramidSonificationContexttracksMappingTremoloDepth(Options):
    """Tremolo-depth mapping options for columnpyramid sonification context tracks.

    NOTE(review): the duplicated getter/setter method names indicate stripped
    ``@property`` / ``@<name>.setter`` decorators (as written, each setter
    silently shadowed its getter); they are restored here.
    """

    @property
    def mapFunction(self):
        """Mapping function for the tremolo depth (default: None)."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Data property to map the tremolo depth to (default: None)."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Maximum mapped value (default: None)."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Minimum mapped value (default: None)."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Range the mapping operates within (default: None)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class Playblaster(object):
default_view_options = {'cameras': False, 'clipGhosts': False, 'cv': False, 'deformers': False, 'dimensions': False, 'displayAppearance': 'smoothShaded', 'displayLights': 'default', 'shadows': False, 'dynamics': True, 'dynamicConstraints': False, 'fluids': True, 'follicles': False, 'greasePencils': False, 'grid': False, 'handles': False, 'hairSystems': False, 'hulls': False, 'ikHandles': False, 'imagePlane': True, 'joints': False, 'lights': False, 'locators': False, 'manipulators': False, 'motionTrails': False, 'nCloths': True, 'nParticles': True, 'nRigids': False, 'nurbsCurves': False, 'nurbsSurfaces': False, 'particleInstancers': True, 'pivots': False, 'planes': True, 'pluginShapes': False, 'pluginObjects': ('gpuCacheDisplayFilter', True), 'polymeshes': True, 'strokes': False, 'subdivSurfaces': True, 'textures': False}
cam_attribute_names = ['overscan', 'filmFit', 'displayFilmGate', 'displayResolution', 'displayGateMask', 'displayFieldChart', 'displaySafeAction', 'displaySafeTitle', 'displayFilmPivot', 'displayFilmOrigin']
hardware_rendering_globals_attr_names = ['ssaoEnable', 'motionBlurEnable', 'multiSampleEnable']
global_playblast_options = {'fmt': 'image', 'forceOverwrite': 1, 'clearCache': 1, 'showOrnaments': 1, 'percent': 100, 'offScreen': 1, 'viewer': 0, 'compression': 'png', 'quality': 85, 'sequenceTime': 1}
hud_name = 'PlayblasterHUD'
def __init__(self, playblast_view_options=None, force_batch_mode=False):
    """Set up playblast state.

    :param playblast_view_options: dict of modelEditor flags to use while
        playblasting; falls back to ``default_view_options`` when falsy
        (handled by the ``playblast_view_options`` setter).
    :param force_batch_mode: treat the session as batch mode even when a UI
        is present.
    :raises RuntimeError: in UI mode when no Stalker user is logged in.
    """
    self._playblast_view_options = None
    # Goes through the property setter, which substitutes the defaults.
    self.playblast_view_options = playblast_view_options
    self.batch_mode = (force_batch_mode or pm.general.about(batch=1))
    self.logged_in_user = None
    if (not self.batch_mode):
        # Interactive sessions must be tied to a logged-in Stalker user.
        from stalker import LocalSession
        local_session = LocalSession()
        self.logged_in_user = local_session.logged_in_user
        if (not self.logged_in_user):
            raise RuntimeError('Please login first!')
    self.version = None
    from anima.dcc.mayaEnv import Maya
    self.m_env = Maya()
    self.version = self.m_env.get_current_version()
    self.user_view_options = {}
    self.reset_user_view_options_storage()
def check_sequence_name(self):
    """Ensure the scene's local sequencer has a non-empty ``sequence_name``.

    Creates a sequencer when none is local to the scene, adds the custom
    ``sequence_name`` attribute when missing, and (in UI mode only) prompts
    the user to fill it in when it is empty.
    """
    # Only consider sequencers that belong to this scene, not referenced ones.
    local_sequencers = [seq for seq in pm.ls(type='sequencer') if (seq.referenceFile() is None)]
    if (not local_sequencers):
        sequencer = pm.nt.Sequencer()
    else:
        sequencer = local_sequencers[0]
    try:
        sequence_name = sequencer.getAttr('sequence_name')
    except pm.MayaAttributeError:
        # Older scenes may lack the custom attribute; add it on demand.
        from anima.dcc.mayaEnv import previs
        previs.Previs.add_sequence_name_attribute_to_sequencer(sequencer)
        sequence_name = sequencer.getAttr('sequence_name')
    if ((sequence_name == '') and (not self.batch_mode)):
        result = pm.promptDialog(title='Please enter a Sequence Name', message='Sequence Name:', button=['OK', 'Cancel'], defaultButton='OK', cancelButton='Cancel', dismissString='Cancel')
        if (result == 'OK'):
            sequencer.setAttr('sequence_name', pm.promptDialog(query=True, text=True))
def get_hud_data(self):
    """Compose the HUD text shown during playblast.

    :return: one string combining sequence/shot info, camera name, focal
        length, timecode, frame counters and the user name.
    """
    current_shot = pm.sequenceManager(q=1, currentShot=1)
    current_cam_name = 'NoCameraFound'
    if current_shot:
        shot_name = pm.getAttr(('%s.shotName' % current_shot))
        current_cam_name = pm.shot(current_shot, q=1, cc=1)
        if current_cam_name:
            current_cam = pm.PyNode(current_cam_name)
        else:
            # Shot has no camera: fall back to the first camera in the scene.
            current_cam = pm.ls(type=pm.nt.Camera)[0].getParent()
            current_cam_name = current_cam.name()
    else:
        import os
        # No shot context: derive the shot name from the scene file name.
        shot_name = os.path.split(pm.sceneName())[1].split('_')[0]
        current_cam = self.get_active_panel_camera()
        if (current_cam is not None):
            current_cam_name = current_cam.name()
    # Work with the camera shape, not its transform.
    if isinstance(current_cam, pm.nt.Transform):
        current_cam = current_cam.getShape()
    focal_length = 0
    if (current_cam is not None):
        focal_length = current_cam.getAttr('focalLength')
    sequencers = pm.ls(type='sequencer')
    if sequencers:
        sequencer = sequencers[0]
        if (not sequencer.hasAttr('sequence_name')):
            from anima.dcc.mayaEnv import previs
            previs.Previs.add_sequence_name_attribute_to_sequencer(sequencer)
        if (sequencer.getAttr('sequence_name') != ''):
            shot_info = sequencer.getAttr('sequence_name')
        else:
            shot_info = 'INVALID'
    else:
        shot_info = shot_name
    # Timecode is derived from the frame after the current one.
    cf = (int(pm.currentTime(q=1)) + 1)
    import timecode
    frame_rate = 25  # fallback when no version/project fps is available
    from anima.dcc import mayaEnv
    maya_env = mayaEnv.Maya()
    v = maya_env.get_current_version()
    if v:
        frame_rate = v.task.project.fps
    tc = timecode.Timecode(frame_rate, frames=cf)
    if current_shot:
        start_time = pm.shot(current_shot, q=1, st=1)
        end_time = pm.shot(current_shot, q=1, et=1)
    else:
        start_time = pm.playbackOptions(q=1, min=1)
        end_time = pm.playbackOptions(q=1, max=1)
    cs_frame = int((cf - start_time))
    length = (int((end_time - start_time)) + 1)
    # Resolve the user name: version author > logged-in user > scene name tail.
    if self.version:
        user_name = (self.version.updated_by.name if self.version.updated_by else 'None')
    elif self.logged_in_user:
        user_name = self.logged_in_user.name
    else:
        user_name = pm.sceneName().split('_')[(- 1)]
    hud_string = ('%s | %s:%smm | tc:%s [%s] | Shot: %s | Length: %s/%sfr | [%s]' % (shot_info, current_cam_name.split(':')[(- 1)], int(focal_length), tc, str((int(cf) - 1)).zfill(4), shot_name.split(':')[(- 1)], cs_frame, str(length).zfill(3), user_name))
    return hud_string
def get_active_panel_camera(self):
    """Return the camera *shape* of the active model panel, or None."""
    active_panel = self.get_active_panel()
    current_cam = None
    try:
        current_cam = pm.modelEditor(active_panel, q=1, cam=1)
    except pm.MayaNodeError as e:
        # No camera on this panel; fall through with None.
        pass
    # Normalize a transform result to its camera shape.
    if isinstance(current_cam, pm.nt.Transform):
        current_cam = current_cam.getShape()
    return current_cam
def create_hud(self, hud_name):
    """(Re)create the playblast HUD at section 7 / block 1.

    Any previous HUD of the same name is removed first; when the slot is
    occupied by another HUD, the occupant is removed and creation retried
    once via recursion.
    """
    self.remove_hud(hud_name)
    try:
        # command=self.get_hud_data makes the HUD refresh per attribute change (atr=1).
        pm.headsUpDisplay(hud_name, section=7, block=1, ao=1, blockSize='medium', labelFontSize='large', dfs='large', command=self.get_hud_data, atr=1)
    except RuntimeError:
        # The slot is taken by another HUD: free it and retry.
        pm.headsUpDisplay(removePosition=(7, 1))
        self.create_hud(hud_name)
def remove_hud(self, hud_name=None):
    """Delete the named HUD if it currently exists; no-op otherwise."""
    if not hud_name:
        return
    if pm.headsUpDisplay(hud_name, q=1, ex=1):
        pm.headsUpDisplay(hud_name, rem=1)
# Fix: the method takes `cls` but lacked the @classmethod decorator.
@classmethod
def get_shot_cameras(cls):
    """Return the camera *shape* nodes attached to every sequencer shot."""
    cameras = []
    for shot in pm.sequenceManager(listShots=1):
        camera_name = pm.shot(shot, q=1, cc=1)
        camera = pm.PyNode(camera_name)
        # Normalize transforms to their camera shapes.
        if isinstance(camera, pm.nt.Transform):
            camera = camera.getShape()
        cameras.append(camera)
    return cameras
def get_selected_frame_range(self):
    """Return ``[start, end]`` frames.

    Uses the time-slider selection when one exists (UI mode and more than
    one frame selected), otherwise the animation start/end range.
    """
    start_time = int(pm.playbackOptions(q=1, ast=1))
    end_time = int(pm.playbackOptions(q=1, aet=1))
    if (not self.batch_mode):
        (selected_start_time, selected_end_time) = list(map(int, pm.timeControl(pm.melGlobals['$gPlayBackSlider'], q=1, rangeArray=True)))
        # A single-frame "selection" is the slider's idle state; ignore it.
        if ((selected_end_time - selected_start_time) > 1):
            start_time = selected_start_time
            end_time = selected_end_time
    return [start_time, end_time]
def is_frame_range_selected(self):
    """Return True when the user selected more than one frame on the time slider."""
    if self.batch_mode:
        # No UI in batch mode, so nothing can be selected.
        return False
    start, end = [int(t) for t in pm.timeControl(pm.melGlobals['$gPlayBackSlider'], q=1, rangeArray=True)]
    return (end - start) > 1
# Fix: the method takes `cls` but lacked the @classmethod decorator.
@classmethod
def get_audio_node(cls):
    """Return the audio node attached to the time slider, or None."""
    audio_node_name = pm.timeControl(pm.melGlobals['$gPlayBackSlider'], q=1, sound=1)
    try:
        return pm.PyNode(audio_node_name)
    except pm.MayaNodeError:
        # No sound attached to the slider.
        return None
def reset_user_view_options_storage(self):
    """(Re)initialize the dict that snapshots the user's UI state."""
    self.user_view_options = {
        'view_options': {},
        'huds': {},
        'camera_flags': {},
        'hardware_rendering_globals': {},
    }
def store_user_options(self):
    """Snapshot view options, HUD visibility, camera flags and hardware
    rendering globals so ``restore_user_options`` can put them back."""
    active_panel = self.get_active_panel()
    self.reset_user_view_options_storage()
    for flag in self.default_view_options.keys():
        try:
            self.user_view_options['view_options'][flag] = pm.modelEditor(active_panel, **{'q': 1, flag: True})
        except TypeError:
            # Not every flag is queryable on every panel/Maya version.
            pass
    hud_names = pm.headsUpDisplay(lh=1)
    if hud_names:
        for hud_name in hud_names:
            self.user_view_options['huds'][hud_name] = pm.headsUpDisplay(hud_name, q=1, vis=1)
    # Record the display attributes of every camera, keyed by camera name.
    for camera in pm.ls(type='camera'):
        camera_name = camera.name()
        per_camera_attr_dict = {}
        for attr in self.cam_attribute_names:
            per_camera_attr_dict[attr] = camera.getAttr(attr)
        self.user_view_options['camera_flags'][camera_name] = per_camera_attr_dict
    hrg = pm.PyNode('hardwareRenderingGlobals')
    for attr in self.hardware_rendering_globals_attr_names:
        self.user_view_options['hardware_rendering_globals'][attr] = hrg.getAttr(attr)
# Fix: duplicate method names plus the bare `_view_options.setter` line show
# these were property getter/setter whose decorators got garbled; restored.
@property
def playblast_view_options(self):
    """The modelEditor flag dict applied while playblasting."""
    return self._playblast_view_options

@playblast_view_options.setter
def playblast_view_options(self, playblast_view_options):
    # Falsy (None/{}) means "use the class defaults".
    if (not playblast_view_options):
        playblast_view_options = self.default_view_options
    self._playblast_view_options = playblast_view_options
def set_view_options(self):
    """Apply playblast view options to the active panel, hide all HUDs and
    normalize camera/hardware-rendering attributes for a clean blast."""
    active_panel = self.get_active_panel()
    pm.modelEditor(active_panel, e=1, **self.playblast_view_options)
    # Hide every existing HUD so only the playblast HUD remains visible.
    hud_flags = pm.headsUpDisplay(lh=1)
    if hud_flags:
        for flag in hud_flags:
            pm.headsUpDisplay(flag, e=1, vis=0)
    # Normalize camera display attributes. The original repeated four
    # identical try/except blocks; a data-driven loop applies the same
    # values in the same order. setAttr can raise RuntimeError on locked or
    # connected attributes, hence the per-attribute guard.
    camera_attr_values = {
        'overscan': 1,
        'filmFit': 1,
        'displayFilmGate': 1,
        'displayResolution': 0,
    }
    for camera in pm.ls(type='camera'):
        for attr, value in camera_attr_values.items():
            try:
                camera.setAttr(attr, value)
            except RuntimeError:
                pass
    hrg = pm.PyNode('hardwareRenderingGlobals')
    hrg.setAttr('ssaoEnable', False)
    hrg.setAttr('multiSampleEnable', True)
def restore_user_options(self):
    """Restore the viewport state captured by ``store_user_options``.

    Mirrors the snapshot structure: modelEditor flags, HUD visibilities,
    per-camera attributes and hardware rendering globals; finally removes
    the playblast HUD.
    """
    active_panel = self.get_active_panel()
    for (flag, value) in self.user_view_options['view_options'].items():
        try:
            pm.modelEditor(active_panel, **{'e': 1, flag: value})
        except TypeError:
            # Flag not editable on this panel -- skip, as in store step.
            pass
    for (hud, value) in self.user_view_options['huds'].items():
        if pm.headsUpDisplay(hud, q=1, ex=1):
            pm.headsUpDisplay(hud, e=1, vis=value)
    for camera in pm.ls(type='camera'):
        camera_name = camera.name()
        try:
            camera_flags = self.user_view_options['camera_flags'][camera_name]
        except KeyError:
            # Camera created after the snapshot -- nothing to restore.
            continue
        for (attr, value) in camera_flags.items():
            try:
                camera.setAttr(attr, value)
            except RuntimeError:
                # Attribute became locked/connected in the meantime.
                pass
    hrg = pm.PyNode('hardwareRenderingGlobals')
    for attr in self.hardware_rendering_globals_attr_names:
        value = self.user_view_options['hardware_rendering_globals'][attr]
        hrg.setAttr(attr, value)
    self.remove_hud(self.hud_name)
def get_active_panel(cls):
    """Return the first model panel whose editor reports itself active.

    Returns None when no model panel is active.
    NOTE(review): the first parameter is ``cls`` but no @classmethod
    decorator is visible in this chunk -- confirm against the original.
    """
    for panel in pm.getPanel(type='modelPanel'):
        if pm.modelEditor(panel, q=1, av=1):
            return panel
    return None
def playblast(self, extra_playblast_options=None):
    """Entry point: playblast the scene, per shot or as one simple pass.

    With camera-sequencer shots present (and no frame range selected) the
    user chooses between the current camera and the shot cameras; otherwise
    a simple playblast over the selected range is produced.

    :param extra_playblast_options: optional dict merged over the defaults.
    :return: list of generated video descriptors (empty on cancel).
    """
    shots = pm.ls(type='shot')
    if not extra_playblast_options:
        extra_playblast_options = {}
    (start, end) = self.get_selected_frame_range()
    if len(shots) and (not self.is_frame_range_selected()):
        if not self.batch_mode:
            # Interactive session: ask which camera set to use.
            response = pm.confirmDialog(title='Which Camera?', message='Which Camera?', button=['Current', 'Shot Camera', 'Cancel'], defaultButton='Shot Camera', cancelButton='Cancel', dismissString='Cancel')
        else:
            response = 'Shot Camera'
        if response == 'Current':
            extra_playblast_options['sequenceTime'] = 0
        elif response == 'Shot Camera':
            extra_playblast_options['sequenceTime'] = 1
        else:
            # User cancelled the dialog.
            return []
        return self.playblast_all_shots(extra_playblast_options)
    else:
        extra_playblast_options['startTime'] = start
        extra_playblast_options['endTime'] = end
        return self.playblast_simple(extra_playblast_options)
def playblast_simple(self, extra_playblast_options=None):
    """Playblast the current scene without the camera sequencer.

    Resolution falls back to the task's project image format (or 1920x1080),
    audio comes from the scene audio node, and the default output path is a
    ``temp`` folder next to the scene file.

    :param extra_playblast_options: optional dict merged over the defaults.
    :return: list of converted video paths.
    """
    import copy
    playblast_options = copy.copy(self.global_playblast_options)
    playblast_options['sequenceTime'] = False
    playblast_options['percent'] = 100
    if extra_playblast_options:
        playblast_options.update(extra_playblast_options)
    audio_node = self.get_audio_node()
    if audio_node:
        playblast_options['sound'] = audio_node
        playblast_options['useTraxSounds'] = False
    else:
        playblast_options['useTraxSounds'] = True
    if 'wh' not in playblast_options:
        # Fall back to the project image format when a Version is known.
        width = 1920
        height = 1080
        if self.version:
            project = self.version.task.project
            imf = project.image_format
            width = int(imf.width)
            height = int(imf.height)
        playblast_options['wh'] = (width, height)
    import os
    if 'filename' not in playblast_options:
        if self.version:
            current_camera = self.get_active_panel_camera()
            current_camera_name = 'Camera'
            if current_camera is not None:
                # Strip any namespace from the camera transform name.
                current_camera_name = current_camera.getParent().name().split(':')[(- 1)]
            filename = ('%s_%s' % (os.path.splitext(self.version.filename)[0], current_camera_name))
        else:
            filename = os.path.splitext(os.path.basename(pm.sceneName()))[0]
        output_dir = os.path.join(os.path.dirname(pm.sceneName()), 'temp')
        # (Fixed: removed an unused ``import tempfile``.)
        playblast_options['filename'] = os.path.join(output_dir, filename).replace('\\', '/')
    from anima.dcc import mayaEnv
    menv = mayaEnv.Maya()
    # NOTE(review): ``fps`` is never used below -- confirm before removing.
    fps = menv.get_fps()
    result = []
    try:
        self.store_user_options()
        self.set_view_options()
        self.create_hud(self.hud_name)
        import pprint
        pprint.pprint(playblast_options)
        # Match each camera's film aperture to the output aspect ratio so
        # the playblast is not letterboxed.
        for cam in pm.ls(type='camera'):
            try:
                cam.verticalFilmAperture.set(((cam.horizontalFilmAperture.get() * float(playblast_options['wh'][1])) / float(playblast_options['wh'][0])))
            except (AttributeError, RuntimeError):
                # Locked or referenced apertures cannot be changed.
                pass
        result = [{'video': pm.playblast(**playblast_options), 'audio': {'node': audio_node, 'offset': ((playblast_options.get('startTime', 0) - audio_node.offset.get()) if audio_node else 0), 'duration': ((playblast_options.get('endTime', 0) - playblast_options.get('startTime', 0)) + 1)}}]
    finally:
        # Always restore the user's viewport, even if the playblast failed.
        self.restore_user_options()
    video = self.convert_image_sequence_to_video(result, delete_source_sequence=True)
    return video
def convert_image_sequence_to_video(cls, data, delete_source_sequence=False):
    """Convert playblasted image sequences (plus optional audio) to h264.

    :param data: list whose items are either plain paths or dicts with
        ``video``/``audio`` keys as produced by the playblast methods.
    :param delete_source_sequence: remove the source frames after encoding.
    :return: list of generated video file paths.

    NOTE(review): the first parameter is ``cls`` -- this reads as a
    classmethod whose decorator is not visible in this chunk.
    """
    import os
    import glob
    frame_rate = 25
    from anima.dcc import mayaEnv
    maya_env = mayaEnv.Maya()
    v = maya_env.get_current_version()
    if v:
        frame_rate = v.task.project.fps
    new_result = []
    original_image_sequence_path = ''
    for output in data:
        video_file_path = output
        audio_data = None
        if isinstance(output, dict):
            video_file_path = output.get('video')
            audio_data = output.get('audio')
        original_image_sequence_path = video_file_path
        if video_file_path and ('#' in video_file_path):
            temp_str = video_file_path.replace('#', '*')
            sequence = sorted(glob.glob(temp_str))
            options = dict()
            if sequence:
                # Find the lowest frame number so ffmpeg starts on the first
                # existing frame. (Fixed: the scan previously started at 0,
                # so the minimum was never found for positive frame numbers.)
                smallest_start_number = None
                for file_in_seq in sequence:
                    filename = os.path.basename(file_in_seq)
                    filename = filename.replace('.mov', '')
                    start_number = int(filename.split('.')[1])
                    if smallest_start_number is None or start_number < smallest_start_number:
                        smallest_start_number = start_number
                options['start_number'] = smallest_start_number
            options['framerate'] = frame_rate
            options['r'] = frame_rate
            # Convert '#' padding to a printf-style frame template for ffmpeg.
            temp_str = video_file_path.replace('#', '')
            hash_count = (len(video_file_path) - len(temp_str))
            splits = video_file_path.split('#')
            video_file_path = ('%s%s%s' % (splits[0], '%0{hash_count}d'.format(hash_count=hash_count), splits[(- 1)]))
            video_file_path_h264 = splits[0].replace('.mov.', '.')
            if audio_data:
                audio_node = audio_data.get('node')
                if audio_node:
                    audio_file_path = os.path.expandvars(audio_node.filename.get())
                    audio_offset = audio_data.get('offset', 0)
                    audio_duration = audio_data.get('duration', 0)
                    options['i'] = [os.path.normpath(video_file_path), os.path.normpath(audio_file_path)]
                    options['map'] = ['0:0', '1:0']
                    # Frame offsets -> milliseconds for ffmpeg -ss / -to.
                    audio_offset_in_milli_seconds = int(((audio_offset * 1000) / frame_rate))
                    duration_in_milli_seconds = int(((audio_duration * 1000) / frame_rate))
                    from anima import utils
                    options['ss'] = [None, utils.milliseconds_to_tc(abs(audio_offset_in_milli_seconds))]
                    options['to'] = [None, utils.milliseconds_to_tc((abs(audio_offset_in_milli_seconds) + duration_in_milli_seconds))]
            from anima.utils import MediaManager
            mm = MediaManager()
            video_file_path = mm.convert_to_h264(video_file_path, video_file_path_h264, options=options)
        new_result.append(video_file_path)
        if delete_source_sequence and ('#' in original_image_sequence_path):
            # Best-effort cleanup of the source frames.
            # (Fixed: removed a duplicate ``import glob``.)
            try:
                video_file_pattern = original_image_sequence_path.replace('#', '*')
                for filename in glob.glob(video_file_pattern):
                    os.remove(filename)
            except (OSError, AttributeError):
                pass
    return new_result
def playblast_shot(self, shot, extra_playblast_options=None):
    """Playblast a single camera-sequencer *shot*.

    :param shot: the shot node to playblast.
    :param extra_playblast_options: optional dict merged over the defaults.
    :return: output path returned by ``shot.playblast``.
    """
    import copy
    shot_playblast_options = copy.copy(self.global_playblast_options)
    shot_playblast_options.update({'sequenceTime': 1})
    if extra_playblast_options:
        shot_playblast_options.update(extra_playblast_options)
    pm.select(cl=1)
    self.check_sequence_name()
    if 'wh' not in shot_playblast_options:
        # Fall back to the project image format when a Version is known.
        width = 1920
        height = 1080
        if self.version:
            project = self.version.task.project
            imf = project.image_format
            width = int(imf.width)
            height = int(imf.height)
        shot_playblast_options['wh'] = (width, height)
    try:
        self.store_user_options()
        self.set_view_options()
        self.create_hud(self.hud_name)
        temp_video_file_full_path = shot.playblast(options=shot_playblast_options)
    finally:
        # Always restore the user's viewport, even if the playblast failed.
        self.restore_user_options()
    return temp_video_file_full_path
def playblast_all_shots(self, extra_playblast_options=None):
    """Playblast every shot in the camera sequencer and encode the results.

    Shots entirely outside a user-selected frame range are skipped.

    :param extra_playblast_options: optional dict merged over the defaults.
    :return: list of encoded video paths.
    :raises RuntimeError: when the sequencer contains no shots.
    """
    shots = pm.ls(type='shot')
    if len(shots) <= 0:
        raise RuntimeError('There are no Shots in your Camera Sequencer.')
    pdm = ProgressManagerFactory.get_progress_manager()
    pdm.end_progress()
    caller = pdm.register(len(shots), 'Generating Playblasts...')
    generic_playblast_options = {}
    if extra_playblast_options:
        generic_playblast_options.update(extra_playblast_options)
    (range_start, range_end) = self.get_selected_frame_range()
    generic_playblast_options['startTime'] = range_start
    generic_playblast_options['endTime'] = range_end
    audio_node = self.get_audio_node()
    if audio_node:
        generic_playblast_options.update({'useTraxSounds': False, 'sound': audio_node})
    else:
        generic_playblast_options['useTraxSounds'] = True
    temp_video_file_full_paths = []
    import copy
    for shot in shots:
        per_shot_playblast_options = copy.copy(generic_playblast_options)
        shot_start_frame = shot.startFrame.get()
        shot_end_frame = shot.endFrame.get()
        per_shot_playblast_options['startTime'] = shot_start_frame
        per_shot_playblast_options['endTime'] = shot_end_frame
        if self.is_frame_range_selected():
            # Skip shots that fall entirely outside the selected range.
            if ((range_start > shot_start_frame) and (range_start > shot_end_frame)) or ((range_end < shot_start_frame) and (range_end < shot_end_frame)):
                caller.step()
                continue
        temp_video_file_full_path = self.playblast_shot(shot, per_shot_playblast_options)
        temp_video_file_full_paths.append({'video': temp_video_file_full_path[0], 'audio': {'node': audio_node, 'offset': ((audio_node.offset.get() - shot_start_frame) if audio_node else 0), 'duration': ((shot_end_frame - shot_start_frame) + 1)}})
        caller.step()
    return self.convert_image_sequence_to_video(temp_video_file_full_paths, delete_source_sequence=True)
def upload_outputs(cls, version, video_file_full_paths):
    """Upload each rendered playblast for *version*, with progress feedback.

    :return: list of output paths produced by ``upload_output``.
    NOTE(review): the first parameter is ``cls`` but no @classmethod
    decorator is visible in this chunk -- confirm against the original.
    """
    progress_manager = ProgressManagerFactory.get_progress_manager()
    progress_manager.end_progress()
    caller = progress_manager.register(len(video_file_full_paths), 'Uploading Playblasts...')
    outputs = []
    for path in video_file_full_paths:
        outputs.append(cls.upload_output(version=version, output_file_full_path=path))
        caller.step()
    return outputs
def upload_output(cls, version, output_file_full_path):
    """Publish one playblast output for a Stalker *version*.

    Copies the file as a hires .mp4, generates a .webm web version and a
    .png thumbnail under the task's Outputs folders, and links them to the
    version in the database (hires -> web -> thumbnail chain) if not
    already present.

    :param version: a ``stalker.Version`` instance.
    :param output_file_full_path: the rendered video to publish.
    :return: the hires output path.
    :raises RuntimeError: on a non-Version argument or missing source file.
    """
    from stalker import Version
    if not isinstance(version, Version):
        raise RuntimeError('version should be a stalker version instance!')
    hires_extension = '.mp4'
    webres_extension = '.webm'
    thumbnail_extension = '.png'
    import os
    if not os.path.exists(output_file_full_path):
        # NOTE(review): message typo 'exits' kept -- runtime string.
        raise RuntimeError(('Output file does not exits: %s' % output_file_full_path))
    import os
    output_file_name = os.path.basename(output_file_full_path)
    hires_output_file_name = ('%s%s' % (os.path.splitext(output_file_name)[0], hires_extension))
    webres_output_file_name = ('%s%s' % (os.path.splitext(output_file_name)[0], webres_extension))
    thumbnail_output_file_name = ('%s%s' % (os.path.splitext(output_file_name)[0], thumbnail_extension))
    task = version.task
    hires_path = os.path.join(task.absolute_path, 'Outputs', 'Stalker_Pyramid', hires_output_file_name)
    webres_path = os.path.join(task.absolute_path, 'Outputs', 'Stalker_Pyramid', 'ForWeb', webres_output_file_name)
    thumbnail_path = os.path.join(task.absolute_path, 'Outputs', 'Stalker_Pyramid', 'Thumbnail', thumbnail_output_file_name)
    # Ensure the destination folders exist (OSError == already there).
    try:
        os.makedirs(os.path.dirname(hires_path))
    except OSError:
        pass
    try:
        os.makedirs(os.path.dirname(webres_path))
    except OSError:
        pass
    try:
        os.makedirs(os.path.dirname(thumbnail_path))
    except OSError:
        pass
    import shutil
    shutil.copy(output_file_full_path, hires_path)
    from anima.utils import MediaManager
    m = MediaManager()
    # Web and thumbnail derivatives are best-effort: IOError is ignored.
    temp_web_version_full_path = m.generate_media_for_web(output_file_full_path)
    try:
        shutil.copy(temp_web_version_full_path, webres_path)
    except IOError:
        pass
    temp_thumbnail_full_path = m.generate_thumbnail(output_file_full_path)
    try:
        shutil.copy(temp_thumbnail_full_path, thumbnail_path)
    except IOError:
        pass
    project = task.project
    repo = project.repository
    from stalker import Link
    from stalker.db.session import DBSession
    # Only create Link rows when this hires output is not already attached.
    found = None
    hires_os_independent_path = repo.to_os_independent_path(hires_path)
    for output in version.outputs:
        if output.full_path == hires_os_independent_path:
            found = True
            break
    if not found:
        l_hires = Link(full_path=repo.to_os_independent_path(hires_path), original_filename=hires_output_file_name)
        l_for_web = Link(full_path=repo.to_os_independent_path(webres_path), original_filename=hires_output_file_name)
        l_hires.thumbnail = l_for_web
        version.outputs.append(l_hires)
        l_thumb = Link(full_path=repo.to_os_independent_path(thumbnail_path), original_filename=hires_output_file_name)
        l_for_web.thumbnail = l_thumb
        DBSession.add_all([l_hires, l_for_web, l_thumb])
        DBSession.commit()
    return hires_path
def __cookVacmAccessInfo(snmpEngine, groupName, contextName, securityModel, securityLevel):
    """Resolve the vacmAccessEntry MIB symbol and its instance index for the
    given VACM access parameters."""
    controller = snmpEngine.msgAndPduDsp.mibInstrumController
    (vacmAccessEntry,) = controller.mibBuilder.importSymbols('SNMP-VIEW-BASED-ACM-MIB', 'vacmAccessEntry')
    instanceId = vacmAccessEntry.getInstIdFromIndices(groupName, contextName, securityModel, securityLevel)
    return vacmAccessEntry, instanceId
class Execution():
    """Thin ctypes bridge to a native execution shared library.

    Every public method marshals its arguments to/from JSON byte strings and
    delegates to the matching exported C function.
    """

    # (argtypes, restype) for every exported function this wrapper calls.
    _SIGNATURES = {
        'SetBackend': ([ctypes.c_char_p], ctypes.c_char_p),
        'GetContracts': ([], ctypes.c_char_p),
        'GetAccounts': ([], ctypes.c_char_p),
        'CommitTx': ([ctypes.c_char_p], ctypes.c_char_p),
        'JumpState': ([ctypes.c_int], None),
        'SetBalance': ([ctypes.c_char_p], None),
    }

    def __init__(self, path):
        self.path = path
        self.lib = ctypes.cdll.LoadLibrary(path)
        # Declare C signatures once so ctypes marshals args correctly.
        for fn_name, (arg_types, res_type) in self._SIGNATURES.items():
            exported = getattr(self.lib, fn_name)
            exported.argtypes = arg_types
            exported.restype = res_type

    def set_backend(self, proj_path):
        """Point the backend at *proj_path*; return the resulting Loggers."""
        raw = self.lib.SetBackend(proj_path.encode('ascii'))
        return [Logger(**entry) for entry in json.loads(raw.decode())]

    def get_contracts(self):
        """Fetch the contract registry from the native side."""
        raw = self.lib.GetContracts()
        return ContractManager(**json.loads(raw.decode()))

    def get_accounts(self):
        """Fetch the account registry from the native side."""
        raw = self.lib.GetAccounts()
        return AccountManager(**json.loads(raw.decode()))

    def commit_tx(self, tx):
        """Execute *tx* and return its Logger.

        FALLBACK is represented as an empty method name on the wire, so it
        is translated on the way in and restored on the way out.
        """
        if tx.method == Method.FALLBACK:
            tx.method = ''
        raw = self.lib.CommitTx(tx.to_execution_str().encode('ascii'))
        logger = Logger(**json.loads(raw.decode()))
        if logger.tx.method == '':
            logger.tx.method = Method.FALLBACK
        return logger

    def jump_state(self, state_id):
        """Roll the native state machine to *state_id*."""
        self.lib.JumpState(state_id)

    def set_balance(self, address, amount):
        """Set *address*'s balance to *amount* (both sent as strings)."""
        payload = json.dumps({'address': str(address), 'amount': str(amount)}).encode('ascii')
        self.lib.SetBalance(payload)
@pytest.mark.parametrize(
    # (Fixed: the decorator head was corrupted to a bare ``.parametrize(...)``
    # expression, which is a SyntaxError; restored as pytest.mark.parametrize.)
    'current_data, reference_data, metric, expected_json',
    (
        (pd.DataFrame({'col': [1, 2, 3]}), None, ColumnDistributionMetric(column_name='col'), {'column_name': 'col'}),
        (
            pd.DataFrame({'col1': [1, 2, 3], 'col2': [10, 20, 3.5]}),
            pd.DataFrame({'col1': [10, 20, 3.5], 'col2': [1, 2, 3]}),
            ColumnDistributionMetric(column_name='col1'),
            {'column_name': 'col1'},
        ),
    ),
)
def test_column_distribution_metric_with_report(current_data: pd.DataFrame, reference_data: pd.DataFrame, metric: ColumnDistributionMetric, expected_json: dict) -> None:
    """End-to-end: the metric runs inside a Report and serializes to JSON."""
    report = Report(metrics=[metric])
    report.run(current_data=current_data, reference_data=reference_data, column_mapping=ColumnMapping())
    assert report.show()
    result_json = report.json()
    assert (len(result_json) > 0)
    result = json.loads(result_json)
    assert (result['metrics'][0]['metric'] == 'ColumnDistributionMetric')
    assert (result['metrics'][0]['result'] == expected_json)
class OFPFlowStats(StringifyMixin):
    """One flow-stats entry of an OpenFlow 1.0 OFPST_FLOW stats reply.

    Instances are normally produced by :meth:`parser`, which decodes the
    fixed header, the match structure, the counters, and the trailing
    variable-length action list from a raw message buffer.
    """

    def __init__(self):
        super(OFPFlowStats, self).__init__()
        self.length = None        # total length of this entry in bytes
        self.table_id = None
        self.match = None         # OFPMatch decoded from the wire
        self.duration_sec = None
        self.duration_nsec = None
        self.priority = None
        self.idle_timeout = None
        self.hard_timeout = None
        self.cookie = None
        self.packet_count = None
        self.byte_count = None
        self.actions = None       # list of decoded OFPAction objects

    @classmethod
    def parser(cls, buf, offset):
        """Decode one flow-stats entry from *buf* starting at *offset*.

        (Fixed: restored the ``@classmethod`` decorator implied by the
        ``cls`` parameter and the ``cls()`` instantiation; without it,
        ``OFPFlowStats.parser(buf, offset)`` would misbind arguments.)
        """
        flow_stats = cls()
        (flow_stats.length, flow_stats.table_id) = struct.unpack_from(ofproto.OFP_FLOW_STATS_0_PACK_STR, buf, offset)
        offset += ofproto.OFP_FLOW_STATS_0_SIZE
        flow_stats.match = OFPMatch.parse(buf, offset)
        offset += ofproto.OFP_MATCH_SIZE
        (flow_stats.duration_sec, flow_stats.duration_nsec, flow_stats.priority, flow_stats.idle_timeout, flow_stats.hard_timeout, flow_stats.cookie, flow_stats.packet_count, flow_stats.byte_count) = struct.unpack_from(ofproto.OFP_FLOW_STATS_1_PACK_STR, buf, offset)
        offset += ofproto.OFP_FLOW_STATS_1_SIZE
        # The remainder of the entry is a variable-length list of actions.
        flow_stats.actions = []
        length = ofproto.OFP_FLOW_STATS_SIZE
        while length < flow_stats.length:
            action = OFPAction.parser(buf, offset)
            flow_stats.actions.append(action)
            offset += action.len
            length += action.len
        return flow_stats
class TestSwitchedActionValidation(unittest.TestCase):
    """Validation of ``SwitchedAction`` subclass rules.

    Each test defines a deliberately broken subclass and asserts that class
    creation raises with an informative message mentioning the offending
    attribute.
    """

    def test_cannot_instantiate_base(self):
        """The abstract base class itself must refuse instantiation."""
        with self.assertRaises(TypeError) as error_context:
            SwitchedAction(cast(ServerSettings, {}))
        self.assertIn('instantiate', error_context.exception.args[0])

    def test_map_is_none(self):
        """A None switch_to_action_map is rejected at class-creation time."""
        with self.assertRaises(ValueError) as error_context:
            class BadAction(SwitchedAction):
                switch_to_action_map = None
        self.assertIn('switch_to_action_map', error_context.exception.args[0])

    def test_map_is_not_iterable(self):
        """A non-iterable map (int) is rejected."""
        with self.assertRaises(ValueError) as error_context:
            class BadAction(SwitchedAction):
                switch_to_action_map = 7
        self.assertIn('switch_to_action_map', error_context.exception.args[0])

    def test_map_is_iterable_but_has_no_length(self):
        """A generator (iterable without __len__) is rejected."""
        with self.assertRaises(ValueError) as error_context:
            class BadAction(SwitchedAction):
                switch_to_action_map = (x for x in range(10))
        self.assertIn('switch_to_action_map', error_context.exception.args[0])

    def test_map_is_empty(self):
        """An empty map is rejected."""
        with self.assertRaises(ValueError) as error_context:
            class BadAction(SwitchedAction):
                switch_to_action_map = ()
        self.assertIn('switch_to_action_map', error_context.exception.args[0])

    def test_map_has_only_one_valid_item(self):
        """A single-entry map is rejected -- switching needs alternatives."""
        with self.assertRaises(ValueError) as error_context:
            class BadAction(SwitchedAction):
                switch_to_action_map = ((5, ActionOne),)
        self.assertIn('switch_to_action_map', error_context.exception.args[0])

    def test_map_has_multiple_items_but_one_is_invalid_switch(self):
        """A non-numeric switch key in any entry is rejected."""
        with self.assertRaises(ValueError) as error_context:
            class BadAction(SwitchedAction):
                switch_to_action_map = ((5, ActionOne), (TestSwitchedActionValidation, action_two))
        self.assertIn('switch_to_action_map', error_context.exception.args[0])

    def test_map_has_multiple_items_but_one_is_invalid_action(self):
        """A non-action value in any entry is rejected."""
        with self.assertRaises(ValueError) as error_context:
            class BadAction(SwitchedAction):
                switch_to_action_map = ((5, ActionOne), (0, 7))
        self.assertIn('switch_to_action_map', error_context.exception.args[0])
def downgrade():
    """Alembic downgrade: restore the pre-migration ``datasets`` schema.

    Re-adds the ``dataset_type``, ``fields`` and ``location`` columns and
    drops the ``collections`` column introduced by the upgrade.
    """
    op.add_column('datasets', sa.Column('dataset_type', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.add_column('datasets', sa.Column('fields', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True))
    op.add_column('datasets', sa.Column('location', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.drop_column('datasets', 'collections')
class CreateTicketForm(FlaskForm):
    """Form for creating a new Flicket ticket.

    (Fixed: removed an unused ``form =`` assignment of ``super().__init__``,
    which returns None, and replaced a stray mislabeled ``' Log in form. '``
    string -- an apparent copy-paste from the login form -- with this
    docstring.)
    """

    def __init__(self, *args, **kwargs):
        super(CreateTicketForm, self).__init__(*args, **kwargs)
        # Choices are queried per instantiation so newly added priorities and
        # categories show up without an application restart.
        self.priority.choices = [(p.id, p.priority) for p in FlicketPriority.query.all()]
        self.category.choices = [(c.id, '{} - {}'.format(c.department.department, c.category)) for c in FlicketCategory.query.join(FlicketDepartment).order_by(FlicketDepartment.department).order_by(FlicketCategory.category).all() if c.department]

    title = StringField(lazy_gettext('username'), validators=[DataRequired(), Length(min=field_size['title_min_length'], max=field_size['title_max_length'])])
    content = PageDownField(lazy_gettext('content'), validators=[DataRequired(), Length(min=field_size['content_min_length'], max=field_size['content_max_length'])])
    priority = SelectField(lazy_gettext('priority'), validators=[DataRequired()], coerce=int)
    category = SelectField(lazy_gettext('category'), validators=[DataRequired()], coerce=int)
    file = FileField(lazy_gettext('Upload Documents'), validators=[allowed_file_extension], render_kw={'multiple': True})
    hours = DecimalField(lazy_gettext('hours'), default=0)
    submit = SubmitField(lazy_gettext('Submit'), render_kw=form_class_button, validators=[DataRequired()])
@login_required
def change_default_card(cardid):
    """Make *cardid* the current user's default Stripe payment source.

    Flashes a success or generic error message and redirects to the billing
    dashboard either way.

    NOTE(review): the original decorator line was corrupted to the fragment
    ``_required``; restored as ``@login_required`` based on the
    ``current_user`` usage -- confirm against the original source.
    """
    try:
        customer = stripe.Customer.retrieve(current_user.stripe_id)
        customer.default_source = cardid
        customer.save()
        card = customer.sources.retrieve(cardid)
        flash('Successfully changed default payment source to your {} ending in {}'.format(card.brand, card.last4), 'success')
    except Exception as e:
        # Keep the user-facing message generic; log the detail for support.
        flash(u'Sorry something went wrong. If this error persists, please contact support', 'error')
        g.log.warning('Failed to change default card', account=current_user.email, card=cardid)
    return redirect(url_for('billing-dashboard'))
class UserService():
    """User management service: registration, profile, roles and flags.

    NOTE(review): methods take only keyword-only parameters and no ``self``,
    which reads as ``@staticmethod`` definitions whose decorators are not
    visible in this chunk. The empty ``msg=''`` strings also look stripped --
    confirm against the original source.
    """

    async def register(*, obj: RegisterUser) -> None:
        """Self-register a user; username/nickname/email must be unused."""
        async with async_db_session.begin() as db:
            username = (await UserDao.get_by_username(db, obj.username))
            if username:
                raise errors.ForbiddenError(msg='')
            # Fall back to a random numeric nickname when none is given.
            obj.nickname = (obj.nickname if obj.nickname else f'{random.randrange(10000, 99999)}')
            nickname = (await UserDao.get_by_nickname(db, obj.nickname))
            if nickname:
                raise errors.ForbiddenError(msg='')
            email = (await UserDao.check_email(db, obj.email))
            if email:
                raise errors.ForbiddenError(msg='')
            (await UserDao.create(db, obj))

    async def add(*, obj: AddUser) -> None:
        """Admin-create a user; also validates the dept and role ids exist."""
        async with async_db_session.begin() as db:
            username = (await UserDao.get_by_username(db, obj.username))
            if username:
                raise errors.ForbiddenError(msg='')
            obj.nickname = (obj.nickname if obj.nickname else f'{random.randrange(10000, 99999)}')
            nickname = (await UserDao.get_by_nickname(db, obj.nickname))
            if nickname:
                raise errors.ForbiddenError(msg='')
            dept = (await DeptDao.get(db, obj.dept_id))
            if (not dept):
                raise errors.NotFoundError(msg='')
            for role_id in obj.roles:
                role = (await RoleDao.get(db, role_id))
                if (not role):
                    raise errors.NotFoundError(msg='')
            email = (await UserDao.check_email(db, obj.email))
            if email:
                raise errors.ForbiddenError(msg='')
            (await UserDao.add(db, obj))

    async def pwd_reset(*, request: Request, obj: ResetPassword) -> int:
        """Change the requester's password and invalidate all of their tokens."""
        async with async_db_session.begin() as db:
            op = obj.old_password
            if (not (await password_verify((op + request.user.salt), request.user.password))):
                raise errors.ForbiddenError(msg='')
            np1 = obj.new_password
            np2 = obj.confirm_password
            if (np1 != np2):
                raise errors.ForbiddenError(msg='')
            count = (await UserDao.reset_password(db, request.user.id, obj.new_password, request.user.salt))
            # Drop access and refresh tokens so old sessions cannot continue.
            prefix = [f'{settings.TOKEN_REDIS_PREFIX}:{request.user.id}:', f'{settings.TOKEN_REFRESH_REDIS_PREFIX}:{request.user.id}:']
            for i in prefix:
                (await redis_client.delete_prefix(i))
            return count

    async def get_userinfo(*, username: str) -> User:
        """Fetch a user with related objects, or raise NotFound."""
        async with async_db_session() as db:
            user = (await UserDao.get_with_relation(db, username=username))
            if (not user):
                raise errors.NotFoundError(msg='')
            return user

    async def update(*, request: Request, username: str, obj: UpdateUser) -> int:
        """Superuser-only profile update with uniqueness re-checks on change."""
        async with async_db_session.begin() as db:
            (await superuser_verify(request))
            input_user = (await UserDao.get_with_relation(db, username=username))
            if (not input_user):
                raise errors.NotFoundError(msg='')
            # Only re-validate fields the caller is actually changing.
            if (input_user.username != obj.username):
                _username = (await UserDao.get_by_username(db, obj.username))
                if _username:
                    raise errors.ForbiddenError(msg='')
            if (input_user.nickname != obj.nickname):
                nickname = (await UserDao.get_by_nickname(db, obj.nickname))
                if nickname:
                    raise errors.ForbiddenError(msg='')
            if (input_user.email != obj.email):
                email = (await UserDao.check_email(db, obj.email))
                if email:
                    raise errors.ForbiddenError(msg='')
            count = (await UserDao.update_userinfo(db, input_user, obj))
            return count

    async def update_role(*, request: Request, username: str, obj: UpdateUserRole) -> None:
        """Update a user's roles; non-superusers may only touch themselves."""
        async with async_db_session.begin() as db:
            if (not request.user.is_superuser):
                if (request.user.username != username):
                    raise errors.ForbiddenError(msg='')
            input_user = (await UserDao.get_with_relation(db, username=username))
            if (not input_user):
                raise errors.NotFoundError(msg='')
            for role_id in obj.roles:
                role = (await RoleDao.get(db, role_id))
                if (not role):
                    raise errors.NotFoundError(msg='')
            (await UserDao.update_role(db, input_user, obj))

    async def update_avatar(*, request: Request, username: str, avatar: Avatar) -> int:
        """Update a user's avatar; non-superusers may only touch themselves."""
        async with async_db_session.begin() as db:
            if (not request.user.is_superuser):
                if (request.user.username != username):
                    raise errors.ForbiddenError(msg='')
            input_user = (await UserDao.get_by_username(db, username))
            if (not input_user):
                raise errors.NotFoundError(msg='')
            count = (await UserDao.update_avatar(db, input_user, avatar))
            return count

    async def get_select(*, dept: int, username: str=None, phone: str=None, status: int=None) -> Select:
        """Build the (unexecuted) select for the filtered user listing."""
        return (await UserDao.get_all(dept=dept, username=username, phone=phone, status=status))

    async def update_permission(*, request: Request, pk: int) -> int:
        """Toggle superuser on *pk*; callers cannot change their own flag."""
        async with async_db_session.begin() as db:
            (await superuser_verify(request))
            if (not (await UserDao.get(db, pk))):
                raise errors.NotFoundError(msg='')
            else:
                if (pk == request.user.id):
                    raise errors.ForbiddenError(msg='')
                count = (await UserDao.set_super(db, pk))
                return count

    async def update_staff(*, request: Request, pk: int) -> int:
        """Toggle staff on *pk*; callers cannot change their own flag."""
        async with async_db_session.begin() as db:
            (await superuser_verify(request))
            if (not (await UserDao.get(db, pk))):
                raise errors.NotFoundError(msg='')
            else:
                if (pk == request.user.id):
                    raise errors.ForbiddenError(msg='')
                count = (await UserDao.set_staff(db, pk))
                return count

    async def update_status(*, request: Request, pk: int) -> int:
        """Toggle active status on *pk*; callers cannot change their own."""
        async with async_db_session.begin() as db:
            (await superuser_verify(request))
            if (not (await UserDao.get(db, pk))):
                raise errors.NotFoundError(msg='')
            else:
                if (pk == request.user.id):
                    raise errors.ForbiddenError(msg='')
                count = (await UserDao.set_status(db, pk))
                return count

    async def update_multi_login(*, request: Request, pk: int) -> int:
        """Toggle multi-login on *pk* and prune now-invalid cached tokens.

        When multi-login is switched off: for the caller's own account every
        token except the current one is deleted; for another account all of
        its tokens are deleted.
        """
        async with async_db_session.begin() as db:
            (await superuser_verify(request))
            if (not (await UserDao.get(db, pk))):
                raise errors.NotFoundError(msg='')
            else:
                count = (await UserDao.set_multi_login(db, pk))
                token = (await get_token(request))
                user_id = request.user.id
                latest_multi_login = (await UserDao.get_multi_login(db, pk))
                if (pk == user_id):
                    if (not latest_multi_login):
                        prefix = f'{settings.TOKEN_REDIS_PREFIX}:{pk}:'
                        (await redis_client.delete_prefix(prefix, exclude=(prefix + token)))
                elif (not latest_multi_login):
                    prefix = f'{settings.TOKEN_REDIS_PREFIX}:{pk}:'
                    (await redis_client.delete_prefix(prefix))
                return count

    async def delete(*, request: Request, username: str) -> int:
        """Superuser-only delete; also drops the user's cached tokens."""
        async with async_db_session.begin() as db:
            (await superuser_verify(request))
            input_user = (await UserDao.get_by_username(db, username))
            if (not input_user):
                raise errors.NotFoundError(msg='')
            count = (await UserDao.delete(db, input_user.id))
            prefix = [f'{settings.TOKEN_REDIS_PREFIX}:{input_user.id}:', f'{settings.TOKEN_REFRESH_REDIS_PREFIX}:{input_user.id}:']
            for i in prefix:
                (await redis_client.delete_prefix(i))
            return count
def for_c_cpp(source):
    """Wrap loose C/C++ statements in an ``int main()`` body.

    ``#include`` lines are hoisted above the generated main; sources that
    already mention ``main`` are returned untouched.
    """
    if 'main' in source:
        return source
    headers = []
    body = ['int main() {']
    # Put each statement on its own line before classifying.
    for line in source.replace(';', ';\n').split('\n'):
        target = headers if line.lstrip().startswith('#include') else body
        target.append(line)
    body.append('}')
    return '\n'.join(headers + body)
@prodigy.recipe(
    # (Fixed: the decorator head was corrupted to a bare tuple with keyword
    # arguments -- a SyntaxError; restored as the Prodigy recipe decorator
    # implied by the argument-annotation format. Confirm against original.)
    'textcat.teach',
    dataset=('The dataset to use', 'positional', None, str),
    spacy_model=('The base model', 'positional', None, str),
    source=('The source data as a JSONL file', 'positional', None, str),
    label=('One or more comma-separated labels', 'option', 'l', split_string),
    patterns=('Optional match patterns', 'option', 'p', str),
    exclude=('Names of datasets to exclude', 'option', 'e', split_string),
)
def textcat_teach(dataset: str, spacy_model: str, source: str, label: Optional[List[str]]=None, patterns: Optional[str]=None, exclude: Optional[List[str]]=None):
    """Active-learning text classification recipe.

    Streams examples from *source*, scores them with a (possibly freshly
    initialized) textcat pipe, optionally combines with a pattern matcher,
    and serves the most uncertain examples for annotation.
    """
    labels = label
    stream = JSONL(source)
    nlp = spacy.load(spacy_model)
    name = 'textcat_multilabel'
    if name not in nlp.pipe_names:
        # Bootstrap an untrained multilabel textcat pipe with neutral scores.
        pipe = nlp.add_pipe(name)
        doc = nlp.make_doc('hello')
        cats = {label: 0.5 for label in labels}
        pipe.initialize(get_examples=(lambda: [Example.from_dict(doc, {'cats': cats})]))
    else:
        pipe = nlp.get_pipe(name)
    model = TextClassifier(nlp, labels, name)
    if patterns is None:
        predict = model
        update = model.update
    else:
        matcher = PatternMatcher(nlp, prior_correct=5.0, prior_incorrect=5.0, label_span=False, label_task=True)
        matcher = matcher.from_disk(patterns)
        (predict, update) = combine_models(model, matcher)
    # Surface the examples the model is least certain about first.
    stream = prefer_uncertain(predict(stream))
    return {'view_id': 'classification', 'dataset': dataset, 'stream': stream, 'update': update, 'exclude': exclude, 'config': {'lang': nlp.lang}}
class Table(Html.Html):
    """Google Charts table component wrapper."""
    name = 'Google Table'
    tag = 'div'
    requirements = ('google-tables',)

    def __init__(self, page: primitives.PageModel, records, width, height, html_code, options, profile):
        (data, columns, self.__config) = ([], [], None)
        super(Table, self).__init__(page, records, html_code=html_code, profile=profile, css_attrs={'width': width, 'height': height})
        self.__options = options

    def add_column(self, c):
        """Add a column definition -- not implemented for Google tables yet."""
        raise NotImplementedError('Not yet available')

    def _set_js_code(self, html_code: str, js_code: str):
        """Point the DOM accessor at the element identified by *html_code*."""
        self.dom.varName = ("document.getElementById('%s')" % html_code)

    def define(self, options: dict, dataflows: List[dict]=None):
        """Define chart options -- not implemented for Google tables yet."""
        raise NotImplementedError('Not yet available')

    def build(self, data=None, options=None, profile=None, component_id=None, dataflows: List[dict]=None):
        """Return the JS snippet that builds and draws the Google chart.

        NOTE(review): rows are registered as 'string' columns and cols as
        'number' columns in the template below -- verify this mapping is
        intentional.
        """
        self.js_code = component_id
        return ("\n%(chartId)s = google.charts.setOnLoadCallback( (function(){\nvar data = new google.visualization.DataTable();\nvar tableData = %(data)s;\ntableData.rows.forEach(function(c){\n  data.addColumn('string', c)});\ntableData.cols.forEach(function(c){\n  data.addColumn('number', c)});\ndata.addRows(tableData.datasets);\n\nvar chart = new google.visualization.%(type)s(%(varId)s);\nchart.draw(data, {});\nreturn chart\n}))" % {'chartId': self.js_code, 'varId': (component_id or self.dom.varId), 'data': JsUtils.dataFlows(data, dataflows, self.page), 'type': self.__options['type']})

    def __str__(self):
        # Register the builder JS, then emit the container tag.
        self.page.properties.js.add_builders(self.refresh())
        return ('<%s %s></%s>' % (self.tag, self.get_attrs(css_class_names=self.style.get_classes()), self.tag))
class FacebookSAMPredictor():
    """Typed interface stub for Facebook's Segment Anything (SAM) predictor.

    NOTE(review): bodies are ``...`` placeholders -- this appears to be a
    Protocol-style stub used only for static typing.
    """

    # Underlying SAM model instance (project-declared type).
    model: FacebookSAM

    def set_image(self, image: NDArrayUInt8, image_format: str='RGB') -> None:
        """Precompute the embedding for *image* used by later ``predict`` calls."""
        ...

    def predict(self, point_coords: (NDArray | None)=None, point_labels: (NDArray | None)=None, box: (NDArray | None)=None, mask_input: (NDArray | None)=None, multimask_output: bool=True, return_logits: bool=False) -> tuple[(NDArray, NDArray, NDArray)]:
        """Predict masks for the given prompts; returns a 3-array tuple
        (presumably masks, scores, low-res logits -- confirm against SAM docs)."""
        ...
def combining_all_pictures():
    """Move every picture/model file in the current directory into ./Photos.

    Matches files whose final dotted segment is one of jpeg/png/jpg/obj/JPEG
    (case-sensitive, exactly as the original did).
    """
    destination = os.getcwd() + '/Photos'
    for entry in os.listdir():
        extension = entry.split('.')[-1]
        if extension in ('jpeg', 'png', 'jpg', 'obj', 'JPEG'):
            shutil.move(os.getcwd() + '/' + entry, destination)
    print('Success')
class DCGAN():
def __init__(self, batch_size=100, image_shape=[64, 64, 3], dim_z=100, dim_W1=1024, dim_W2=512, dim_W3=256, dim_W4=128, dim_W5=3):
    """Create all generator and discriminator variables for a 64x64 DCGAN.

    NOTE(review): mutable default ``image_shape=[64, 64, 3]`` is only read
    here, but replacing it with a tuple default would be safer -- confirm.

    :param batch_size: fixed batch size baked into the graph shapes.
    :param dim_z: latent vector dimensionality.
    :param dim_W1..dim_W5: channel widths of the five conv stages.
    """
    self.batch_size = batch_size
    self.image_shape = image_shape
    self.dim_z = dim_z
    self.dim_W1 = dim_W1
    self.dim_W2 = dim_W2
    self.dim_W3 = dim_W3
    self.dim_W4 = dim_W4
    self.dim_W5 = dim_W5
    # Generator: dense projection to a 4x4 feature map, then conv-transpose
    # weights plus per-stage batch-norm scale (g) and bias (b) variables.
    self.gen_W1 = tf.Variable(tf.random_normal([dim_z, ((dim_W1 * 4) * 4)], stddev=0.02), name='gen_W1')
    self.gen_bn_g1 = tf.Variable(tf.random_normal([((dim_W1 * 4) * 4)], mean=1.0, stddev=0.02), name='gen_bn_g1')
    self.gen_bn_b1 = tf.Variable(tf.zeros([((dim_W1 * 4) * 4)]), name='gen_bn_b1')
    self.gen_W2 = tf.Variable(tf.random_normal([5, 5, dim_W2, dim_W1], stddev=0.02), name='gen_W2')
    self.gen_bn_g2 = tf.Variable(tf.random_normal([dim_W2], mean=1.0, stddev=0.02), name='gen_bn_g2')
    self.gen_bn_b2 = tf.Variable(tf.zeros([dim_W2]), name='gen_bn_b2')
    self.gen_W3 = tf.Variable(tf.random_normal([5, 5, dim_W3, dim_W2], stddev=0.02), name='gen_W3')
    self.gen_bn_g3 = tf.Variable(tf.random_normal([dim_W3], mean=1.0, stddev=0.02), name='gen_bn_g3')
    self.gen_bn_b3 = tf.Variable(tf.zeros([dim_W3]), name='gen_bn_b3')
    self.gen_W4 = tf.Variable(tf.random_normal([5, 5, dim_W4, dim_W3], stddev=0.02), name='gen_W4')
    self.gen_bn_g4 = tf.Variable(tf.random_normal([dim_W4], mean=1.0, stddev=0.02), name='gen_bn_g4')
    self.gen_bn_b4 = tf.Variable(tf.zeros([dim_W4]), name='gen_bn_b4')
    self.gen_W5 = tf.Variable(tf.random_normal([5, 5, dim_W5, dim_W4], stddev=0.02), name='gen_W5')
    # Discriminator: strided-conv weights mirroring the generator stages;
    # no batch norm on the first layer (standard DCGAN practice).
    self.discrim_W1 = tf.Variable(tf.random_normal([5, 5, dim_W5, dim_W4], stddev=0.02), name='discrim_W1')
    self.discrim_W2 = tf.Variable(tf.random_normal([5, 5, dim_W4, dim_W3], stddev=0.02), name='discrim_W2')
    self.discrim_bn_g2 = tf.Variable(tf.random_normal([dim_W3], mean=1.0, stddev=0.02), name='discrim_bn_g2')
    self.discrim_bn_b2 = tf.Variable(tf.zeros([dim_W3]), name='discrim_bn_b2')
    self.discrim_W3 = tf.Variable(tf.random_normal([5, 5, dim_W3, dim_W2], stddev=0.02), name='discrim_W3')
    self.discrim_bn_g3 = tf.Variable(tf.random_normal([dim_W2], mean=1.0, stddev=0.02), name='discrim_bn_g3')
    self.discrim_bn_b3 = tf.Variable(tf.zeros([dim_W2]), name='discrim_bn_b3')
    self.discrim_W4 = tf.Variable(tf.random_normal([5, 5, dim_W2, dim_W1], stddev=0.02), name='discrim_W4')
    self.discrim_bn_g4 = tf.Variable(tf.random_normal([dim_W1], mean=1.0, stddev=0.02), name='discrim_bn_g4')
    self.discrim_bn_b4 = tf.Variable(tf.zeros([dim_W1]), name='discrim_bn_b4')
    self.discrim_W5 = tf.Variable(tf.random_normal([((4 * 4) * dim_W1), 1], stddev=0.02), name='discrim_W5')
    # Parameter lists for the optimizers (trained alternately).
    self.gen_params = [self.gen_W1, self.gen_bn_g1, self.gen_bn_b1, self.gen_W2, self.gen_bn_g2, self.gen_bn_b2, self.gen_W3, self.gen_bn_g3, self.gen_bn_b3, self.gen_W4, self.gen_bn_g4, self.gen_bn_b4, self.gen_W5]
    self.discrim_params = [self.discrim_W1, self.discrim_W2, self.discrim_bn_g2, self.discrim_bn_b2, self.discrim_W3, self.discrim_bn_g3, self.discrim_bn_b3, self.discrim_W4, self.discrim_bn_g4, self.discrim_bn_b4, self.discrim_W5]
def build_model(self):
    """Build the GAN training graph (TF1 style).

    Returns:
        (Z, image_real, discrim_cost, gen_cost, p_real, p_gen, h_real, h_gen)
        where Z and image_real are placeholders to feed, the costs are the
        scalar losses to minimize, p_* are sigmoid probabilities and h_* the
        pre-sigmoid logits from the discriminator.
    """
    # Latent-noise and real-image inputs.
    Z = tf.placeholder(tf.float32, [self.batch_size, self.dim_z])
    image_real = tf.placeholder(tf.float32, ([self.batch_size] + self.image_shape))
    image_gen = self.generate(Z)
    # Discriminator is applied to both the real and the generated batch.
    (p_real, h_real) = self.discriminate(image_real)
    (p_gen, h_gen) = self.discriminate(image_gen)
    # Standard GAN objective: discriminator labels real as 1 and fake as 0;
    # generator tries to make the discriminator output 1 on fakes.
    discrim_cost_real = bce(p_real, tf.ones_like(p_real))
    discrim_cost_gen = bce(p_gen, tf.zeros_like(p_gen))
    discrim_cost = (tf.reduce_mean(discrim_cost_real) + tf.reduce_mean(discrim_cost_gen))
    gen_cost = tf.reduce_mean(bce(p_gen, tf.ones_like(p_gen)))
    return (Z, image_real, discrim_cost, gen_cost, p_real, p_gen, h_real, h_gen)
def discriminate(self, image):
    """Run the 4-conv discriminator on a batch of images.

    Returns (y, h5): sigmoid probability of "real" and the raw logits.
    """
    # Stride-2 convolutions halve the spatial size at every layer; the first
    # layer has no batch norm (common DCGAN convention in this codebase).
    h1 = lrelu(tf.nn.conv2d(image, self.discrim_W1, strides=[1, 2, 2, 1], padding='SAME'))
    h2 = lrelu(batchnormalize(tf.nn.conv2d(h1, self.discrim_W2, strides=[1, 2, 2, 1], padding='SAME'), g=self.discrim_bn_g2, b=self.discrim_bn_b2))
    h3 = lrelu(batchnormalize(tf.nn.conv2d(h2, self.discrim_W3, strides=[1, 2, 2, 1], padding='SAME'), g=self.discrim_bn_g3, b=self.discrim_bn_b3))
    h4 = lrelu(batchnormalize(tf.nn.conv2d(h3, self.discrim_W4, strides=[1, 2, 2, 1], padding='SAME'), g=self.discrim_bn_g4, b=self.discrim_bn_b4))
    # Flatten to (batch, features) for the final linear classifier.
    h4 = tf.reshape(h4, [self.batch_size, (- 1)])
    h5 = tf.matmul(h4, self.discrim_W5)
    y = tf.nn.sigmoid(h5)
    return (y, h5)
def generate(self, Z):
    """Map latent vectors Z to a batch of images via transposed convolutions.

    Spatial size doubles each layer: 4x4 -> 8x8 -> 16x16 -> 32x32 -> 64x64.
    Output is tanh-activated, i.e. values in [-1, 1].
    """
    # Project the latent code and reshape to the initial 4x4 feature map.
    h1 = tf.nn.relu(batchnormalize(tf.matmul(Z, self.gen_W1), g=self.gen_bn_g1, b=self.gen_bn_b1))
    h1 = tf.reshape(h1, [self.batch_size, 4, 4, self.dim_W1])
    output_shape_l2 = [self.batch_size, 8, 8, self.dim_W2]
    h2 = tf.nn.conv2d_transpose(h1, self.gen_W2, output_shape=output_shape_l2, strides=[1, 2, 2, 1])
    h2 = tf.nn.relu(batchnormalize(h2, g=self.gen_bn_g2, b=self.gen_bn_b2))
    output_shape_l3 = [self.batch_size, 16, 16, self.dim_W3]
    h3 = tf.nn.conv2d_transpose(h2, self.gen_W3, output_shape=output_shape_l3, strides=[1, 2, 2, 1])
    h3 = tf.nn.relu(batchnormalize(h3, g=self.gen_bn_g3, b=self.gen_bn_b3))
    output_shape_l4 = [self.batch_size, 32, 32, self.dim_W4]
    h4 = tf.nn.conv2d_transpose(h3, self.gen_W4, output_shape=output_shape_l4, strides=[1, 2, 2, 1])
    h4 = tf.nn.relu(batchnormalize(h4, g=self.gen_bn_g4, b=self.gen_bn_b4))
    output_shape_l5 = [self.batch_size, 64, 64, self.dim_W5]
    # Final layer: no batch norm, tanh activation.
    h5 = tf.nn.conv2d_transpose(h4, self.gen_W5, output_shape=output_shape_l5, strides=[1, 2, 2, 1])
    x = tf.nn.tanh(h5)
    return x
def samples_generator(self, batch_size):
    """Build a sampling graph mirroring generate() for an arbitrary batch size.

    Returns (Z, x): the latent placeholder to feed and the generated images.

    NOTE(review): unlike generate(), every batchnormalize() call here omits
    the learned g=/b= scale and shift parameters — presumably it falls back to
    defaults, which would make samples differ from training-time outputs.
    Confirm this is intentional.
    """
    Z = tf.placeholder(tf.float32, [batch_size, self.dim_z])
    h1 = tf.nn.relu(batchnormalize(tf.matmul(Z, self.gen_W1)))
    h1 = tf.reshape(h1, [batch_size, 4, 4, self.dim_W1])
    output_shape_l2 = [batch_size, 8, 8, self.dim_W2]
    h2 = tf.nn.conv2d_transpose(h1, self.gen_W2, output_shape=output_shape_l2, strides=[1, 2, 2, 1])
    h2 = tf.nn.relu(batchnormalize(h2))
    output_shape_l3 = [batch_size, 16, 16, self.dim_W3]
    h3 = tf.nn.conv2d_transpose(h2, self.gen_W3, output_shape=output_shape_l3, strides=[1, 2, 2, 1])
    h3 = tf.nn.relu(batchnormalize(h3))
    output_shape_l4 = [batch_size, 32, 32, self.dim_W4]
    h4 = tf.nn.conv2d_transpose(h3, self.gen_W4, output_shape=output_shape_l4, strides=[1, 2, 2, 1])
    h4 = tf.nn.relu(batchnormalize(h4))
    output_shape_l5 = [batch_size, 64, 64, self.dim_W5]
    h5 = tf.nn.conv2d_transpose(h4, self.gen_W5, output_shape=output_shape_l5, strides=[1, 2, 2, 1])
    x = tf.nn.tanh(h5)
    return (Z, x)
def setup_logger(logger, fname, stream=True, mode=logging.ERROR):
    """Attach a LogFileHandler (and optionally a console handler) to `logger`.

    Relative `fname` paths are resolved against ETSConfig.application_home.
    The file handler logs everything at DEBUG; the optional stream handler
    logs at `mode` using the module-level FORMATTER.
    """
    if (not os.path.isabs(fname)):
        path = os.path.join(ETSConfig.application_home, fname)
    else:
        path = fname
    handlers = logger.handlers
    # NOTE(review): this duplicate-handler guard only fires when MORE than one
    # handler is already attached, and only inspects handlers[0]; a single
    # pre-existing LogFileHandler is not detected. `> 0` may have been
    # intended — confirm before changing.
    if (len(handlers) > 1):
        h = handlers[0]
        if (isinstance(h, LogFileHandler) and (h.baseFilename == path)):
            logger.info('Logging handlers already set! Not duplicating.')
            return
    logger.setLevel(logging.DEBUG)
    # Ensure the log directory exists before the handler opens the file.
    basedir = os.path.dirname(path)
    os.makedirs(basedir, exist_ok=True)
    handler = LogFileHandler(path)
    handler.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    if stream:
        s = logging.StreamHandler()
        s.setFormatter(FORMATTER)
        s.setLevel(mode)
        logger.addHandler(s)
    # Banner makes new sessions easy to find in the log file.
    logger.info(('*' * 80))
    logger.info("logfile is: '%s'", os.path.abspath(path))
    logger.info(('*' * 80))
def yes_no_input(prompt, default_answer, timeout=5):
    """Ask a yes/no question, falling back to `default_answer` on empty input.

    Any answer beginning with 'n' (case-insensitive) counts as no; every
    other non-empty answer counts as yes.
    """
    answer = raw_input_with_timeout(prompt=prompt, default_answer=default_answer, timeout=timeout)
    if answer is None or answer == '':
        return default_answer
    # Only an explicit leading 'n'/'N' is treated as a negative reply.
    return not str(answer).lower().startswith('n')
class AddError(BaseGenTableTest):
    """Adding invalid gentable entries must produce a bsn_gentable_error and
    leave the entry table empty."""

    def runTest(self):
        # NOTE(review): both `ipv4=` arguments were corrupted in the source
        # (the value was missing, which is a syntax error); reconstructed
        # with a placeholder address — confirm the intended values against
        # version control before relying on this test.
        # vlan_vid=10000 is outside the valid 12-bit VLAN range -> must fail.
        self.do_add(vlan_vid=10000, ipv4=0xc0a80001, mac=(0, 1, 2, 3, 4, 5))
        do_barrier(self.controller)
        (error, _) = self.controller.poll(ofp.OFPT_ERROR, 0)
        self.assertIsInstance(error, ofp.message.bsn_gentable_error)
        # The failed add must not have created any entry.
        self.assertEqual(len(self.do_entry_desc_stats()), 0)
        self.do_add(vlan_vid=100, ipv4=0xc0a80001, mac=(1, 1, 2, 3, 4, 5))
        do_barrier(self.controller)
        (error, _) = self.controller.poll(ofp.OFPT_ERROR, 0)
        self.assertIsInstance(error, ofp.message.bsn_gentable_error)
        self.assertEqual(len(self.do_entry_desc_stats()), 0)
def SF(name_or_sf, **params):
    """Coerce `name_or_sf` into a ScoreFunction instance.

    Accepts a dict (DSL form), an existing ScoreFunction (returned as-is),
    or a function name plus keyword parameters.
    """
    if isinstance(name_or_sf, collections.abc.Mapping):
        if params:
            raise ValueError('SF() cannot accept parameters when passing in a dict.')
        dsl_kwargs = {}
        remaining = name_or_sf.copy()
        # Pull the shared parameter definitions out of the dict first.
        for param_name in ScoreFunction._param_defs:
            if param_name in name_or_sf:
                dsl_kwargs[param_name] = remaining.pop(param_name)
        if not remaining:
            # Nothing else in the dict -> plain boost factor.
            name = 'boost_factor'
        elif len(remaining) == 1:
            name, params = remaining.popitem()
        else:
            raise ValueError(f'SF() got an unexpected fields in the dictionary: {remaining!r}')
        if not isinstance(params, collections.abc.Mapping):
            # A bare scalar is shorthand for {'value': scalar}.
            params = {'value': params}
        dsl_kwargs.update(params)
        return ScoreFunction.get_dsl_class(name)(**dsl_kwargs)
    if isinstance(name_or_sf, ScoreFunction):
        if params:
            raise ValueError('SF() cannot accept parameters when passing in a ScoreFunction object.')
        return name_or_sf
    # Plain string name: instantiate the matching DSL class directly.
    return ScoreFunction.get_dsl_class(name_or_sf)(**params)
def extractNanodesuZeroKaraHajimeruMahouNoSho(item):
    """Feed-item parser: build a release message for WATTT-tagged items.

    Returns None for previews or un-numbered items, False when no known
    tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip previews and items carrying neither a chapter nor a volume number.
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    if 'WATTT' in item['tags']:
        return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
    return False
def parse_node(tags):
    """Classify an OSM node by its first recognised feature tag.

    Returns 'key:value' for the first matching key in priority order,
    skipping values marked as disused, or None when nothing matches.
    """
    feature_priority = ('amenity', 'natural', 'highway', 'barrier', 'shop',
                        'tourism', 'public_transport', 'emergency', 'man_made')
    for feature in feature_priority:
        if feature not in tags:
            continue
        value = tags[feature]
        # Ignore features explicitly marked as no longer in use.
        if 'disused' in value:
            continue
        return f'{feature}:{value}'
    return None
def test_to_dict():
    """system_info.to_dict() must expose every expected top-level key."""
    result = system_info.to_dict()
    expected_keys = (
        'cli_args', 'is_briefcase_package', 'is_flatpak_package', 'platform',
        'pyside6_version', 'qt_version', 'qt_library_path', 'config_directory',
        'normcap_version', 'tesseract_path', 'tessdata_path', 'envs',
        'desktop_environment', 'display_manager_is_wayland', 'screens',
    )
    for key in expected_keys:
        assert key in result
class _UnionFind():
def __init__(self):
self.lookup = WeakKeyDictionary()
def new_node(self, val):
if (val not in self.lookup):
self.lookup[val] = val
def find(self, val):
parent = self.lookup[val]
while (val is not parent):
grandparent = self.lookup[parent]
self.lookup[val] = grandparent
(val, parent) = (parent, grandparent)
return val
def union(self, val1, val2):
(p1, p2) = (self.find(val1), self.find(val2))
if (p1 is p2):
pass
else:
self.lookup[p2] = p1
def check_eqv(self, val1, val2):
(p1, p2) = (self.find(val1), self.find(val2))
return (p1 is p2)
def copy_entire_UF(self):
copy = _UnionFind()
for (v, p) in self.lookup.items():
copy.lookup[v] = p
return copy |
def exec_cmd(cmd: str) -> bool:
    """Run a shell-style command line; True on exit code 0, False otherwise.

    Output is suppressed unless the module logger is at DEBUG level.
    """
    tokens = shlex.split(cmd)
    try:
        if logger.isEnabledFor(logging.DEBUG):
            # Debug mode: let the child process inherit stdout/stderr.
            check_call(tokens)
        else:
            check_call(tokens, stderr=DEVNULL, stdout=DEVNULL)
    except CalledProcessError as err:
        logger.debug(str(err))
        return False
    return True
def merge_arg_dicts(source, target, parent=''):
    """Copy public entries of `source` into `target` without overwriting.

    Keys starting with '_' are skipped entirely. A key already present in
    `target` is left untouched and a warning is logged instead. Returns the
    (mutated) target dict.
    """
    for key, value in source.items():
        if key.startswith('_'):
            continue
        if key in target:
            log.warning('%s: [%s] argument cannot be manually set', parent, key)
        else:
            target[key] = value
    return target
class ouraSleepSamples(Base):
    """SQLAlchemy model for per-5-minute Oura ring sleep samples."""

    __tablename__ = 'oura_sleep_samples'
    # Local timestamp of the sample; primary key and indexed for range queries.
    timestamp_local = Column('timestamp_local', DateTime(), index=True, primary_key=True)
    summary_date = Column('summary_date', Date())
    report_date = Column('report_date', Date())
    # 5-minute heart-rate variability (RMSSD) reading.
    rmssd_5min = Column('rmssd_5min', Integer())
    # 5-minute heart-rate reading.
    hr_5min = Column('hr_5min', Integer())
    # Numeric sleep-stage code plus its short textual description.
    hypnogram_5min = Column('hypnogram_5min', Integer())
    hypnogram_5min_desc = Column('hypnogram_5min_desc', String(8))
class RoughVizScatter(RoughVizBar):
    """Configuration wrapper for a RoughViz scatter chart.

    NOTE(review): each option below is declared twice (getter form, then
    setter form). Without decorators the later `def` replaces the earlier
    one at class-creation time, so only the setters survive as written.
    This matches a generated getter/setter pattern — property decorators may
    have been lost in extraction; confirm against the generator's output.
    """

    def data(self):
        # Sub-configuration holding the x/y data points.
        return self._config_sub_data('data', OptionDataXY)

    def colors(self):
        return self._config_get()

    def colors(self, values):
        self._config(values)

    def colorVar(self):
        return self._config_get()

    def colorVar(self, text):
        self._config(text)

    def highlightLabel(self):
        return self._config_get()

    def highlightLabel(self, text):
        self._config(text)

    def radius(self):
        # Default point radius is 8.
        return self._config_get(8)

    def radius(self, num):
        self._config(num)
# NOTE(review): the decorator prefix was missing from the source (the line
# began with a bare ".parametrize"); restored as pytest's parametrize marker.
# Confirm against version control.
@pytest.mark.parametrize('delimiter, csv_content', [(None, 'id, env, status\n, prod, active\n, nonprod, active\n, dev, suspended'), (',', 'id, env, status\n, prod, active\n, nonprod, active\n, dev, suspended'), ('\t', 'id\tenv\tstatus\n\tprod\tactive\n\tnonprod\tactive\n\tdev\tsuspended')])
def test_csv_account_loader_with_file_url(tmp_path, mocker, csv_content, delimiter, expected_from_loader):
    """CSVAccountLoader must parse a file:// URL into the expected account dicts
    for the default, comma and tab delimiters."""
    csv_file = tmp_path / 'accts.csv'
    with csv_file.open('w') as f:
        f.write(csv_content)
    # Intercept the base-class constructor to capture the parsed accounts.
    mock_mal = mocker.patch('awsrun.acctload.MetaAccountLoader.__init__')
    url = 'file://' + csv_file.as_posix()
    if delimiter:
        acctload.CSVAccountLoader(url, delimiter=delimiter)
    else:
        acctload.CSVAccountLoader(url)
    ((accts,), _) = mock_mal.call_args
    accts = [dict(a) for a in accts]
    assert accts == expected_from_loader
def build_repository_service(cloud_provider: CloudProvider, env: str='STAGING', unsigned_enabled: bool=False) -> OneDockerRepositoryService:
    """Construct a OneDockerRepositoryService backed by S3 storage and
    DynamoDB-style metadata for the given environment.

    Raises NotImplementedError for any provider other than AWS.
    """
    if cloud_provider != CloudProvider.AWS:
        raise NotImplementedError('Only AWS is supported for building Repository Service for now.')
    repo_path = REPOSITORY_PATHS[env]
    # The S3 region is derived from the repository path itself.
    aws_region = S3Path(repo_path).region
    storage = S3StorageService(aws_region, unsigned_enabled=unsigned_enabled)
    metadata = MetadataService(region=aws_region, table_name=METADATA_TABLES[env], key_name=METADATA_TABLE_KEY_NAME)
    return OneDockerRepositoryService(storage_svc=storage, package_repository_path=repo_path, metadata_svc=metadata)
class TestPluginBaseOffline():
    """Offline checks for AnalysisBasePlugin view-file resolution."""

    def test_get_view_file_path(self):
        # A plugin with a view: code/<name>.py maps to view/<name>.html.
        plugin_code = PLUGIN_PATH / 'file_type' / 'code' / 'file_type.py'
        expected_view = PLUGIN_PATH / 'file_type' / 'view' / 'file_type.html'
        assert AnalysisBasePlugin._get_view_file_path(str(plugin_code)) == expected_view
        # A plugin without a view directory resolves to None.
        viewless_code = PLUGIN_PATH / 'dummy' / 'code' / 'dummy.py'
        assert AnalysisBasePlugin._get_view_file_path(str(viewless_code)) is None
class TestWindowHide():
    """Behaviour of window.hide() with and without a registered tray icon."""

    def test_iconify_when_tray_icon_plugin_is_not_registered(self, window, bus, mocker):
        subscriber = mocker.Mock()
        bus.connect(Events.WINDOW_HIDE, subscriber, weak=False)
        result = window.hide()
        # NOTE(review): `Gtk.true` is a callable attribute; `is` compares
        # identity against that object. Confirm hide() is really expected to
        # return it (rather than a plain boolean) in this code path.
        assert (result is Gtk.true)
        subscriber.assert_called_once_with(Events.WINDOW_HIDE, payload=None)

    def test_deletes_when_tray_icon_plugin_is_registered(self, bus, graph, mocker, window):
        # With a Systray factory registered, hide() should make the widget
        # invisible instead of iconifying.
        graph.register_factory(Systray, mocker.Mock)
        subscriber = mocker.Mock()
        bus.connect(Events.WINDOW_HIDE, subscriber, weak=False)
        window.widget.set_visible(True)
        result = window.hide()
        assert result
        assert (window.widget.get_visible() is False)
        subscriber.assert_called_once_with(Events.WINDOW_HIDE, payload=None)
class RingBuffer():
    """Fixed-size byte ring buffer addressed by absolute stream position.

    `count` is the total number of bytes ever appended and grows forever;
    only the most recent `length` bytes are retained and readable.
    """

    def __init__(self, length):
        # Backing storage; its size must stay exactly `length` forever.
        self.buffer = bytearray(length)
        self.length = length
        # Total number of bytes ever appended (absolute stream position).
        self.count = 0

    def append(self, data):
        """Append `data`, overwriting the oldest bytes on wrap-around."""
        if (len(data) == 0):
            # Guard: with no data, the generic slice math below would assign
            # an empty slice over the whole buffer and shrink it.
            return
        if (len(data) > self.length):
            # Only the last `length` bytes can survive; account for the
            # skipped bytes in `count` so absolute positions stay correct.
            self.count += (int((len(data) / self.length)) * self.length)
            data = data[(- self.length):]
        begbyte = (self.count % self.length)
        endbyte = ((((self.count + len(data)) - 1) % self.length) + 1)
        if (endbyte < begbyte):
            # Write wraps past the end of the buffer: split into two copies.
            # BUG FIX: the tail segment holds exactly (length - begbyte)
            # bytes; the previous "+ 1" made the slice-assignment lengths
            # unequal, which resizes a bytearray and corrupts the ring.
            numbytes = (self.length - begbyte)
            self.buffer[begbyte:self.length] = data[0:numbytes]
            self.buffer[0:endbyte] = data[numbytes:]
        else:
            self.buffer[begbyte:endbyte] = data
        self.count += len(data)

    def read(self, begbyte, endbyte):
        """Return bytes [begbyte, endbyte) by absolute stream position.

        Raises RuntimeError when the requested span has been overwritten,
        has not been written yet, or is inverted.
        """
        if (self.count > self.length):
            begavailable = (self.count - self.length)
        else:
            begavailable = 0
        endavailable = self.count
        if (begbyte < begavailable):
            raise RuntimeError('Cannot read before the start of the available data.')
        elif ((endbyte > endavailable) or (begbyte > (endavailable - 1))):
            raise RuntimeError('Cannot read past the end of the available data.')
        elif (endbyte < begbyte):
            raise RuntimeError('Invalid selection.')
        begbyte = (begbyte % self.length)
        endbyte = (((endbyte - 1) % self.length) + 1)
        if (endbyte <= begbyte):
            # Selection wraps: concatenate tail and head segments.
            data = (self.buffer[begbyte:] + self.buffer[0:endbyte])
        else:
            data = self.buffer[begbyte:endbyte]
        return data
# NOTE(review): the marker line was corrupted in the source (a bare
# ".parallel(nprocs=2)"); restored as the pytest `parallel` marker used by
# MPI-based test suites. Confirm against version control.
@pytest.mark.parallel(nprocs=2)
def test_parallel_gather():
    """Vector.gather() must agree with an MPI allreduce of the local sums."""
    from mpi4py import MPI
    mesh = UnitSquareMesh(2, 2)
    V = FunctionSpace(mesh, 'CG', 1)
    f = Function(V)
    v = f.vector()
    rank = MPI.COMM_WORLD.rank
    # Fill each process's portion with its own rank.
    v[:] = rank
    assert (f.dat.data_ro[:f.dof_dset.size] == rank).all()
    lsum = sum(v.array())
    lsum = MPI.COMM_WORLD.allreduce(lsum, op=MPI.SUM)
    gathered = v.gather()
    gsum = sum(gathered)
    assert lsum == gsum
    assert len(gathered) == v.size()
    # Gathering a single global index returns exactly that entry.
    gathered = v.gather([0])
    assert (len(gathered) == 1) and (gathered[0] == 0)
def test_pubsub_message_to_binary_string_str(mocker):
    """A plain-string payload is passed through base64-encoded."""
    payload = 'helloworld'
    messages = [{'data': payload}]
    expected = base64.b64encode(payload.encode('utf-8')).decode('ascii')
    processor = PubsubMessageDataToBinaryString({})
    result = processor.process_arg(messages, None, {})
    assert result[0]['data'] == expected
def split_large_split_ops(sorted_graph: List[Tensor], _: str) -> List[Tensor]:
    """Graph pass: break up split ops whose CUDA kernel parameter block would
    exceed MAX_CUDA_PARAM_BYTES.

    Each oversized split is replaced by several split ops over the same input,
    each keeping only a contiguous subset of the original outputs; old output
    tensors are rewired to the new ones and the graph is re-toposorted.
    The second (unused) argument matches the common pass signature.
    """
    modified = False
    sorted_ops = graph_utils.get_sorted_ops(sorted_graph)
    for op in sorted_ops:
        if (not op._attrs['op'].startswith('split')):
            continue
        split_op = op
        # Per-output parameter footprint for a single-input/single-output
        # split kernel — assumes the helper returns the per-slice size.
        split_params_size = _split_kernel_single_input_output_param_size(split_op)
        if (split_params_size > MAX_CUDA_PARAM_BYTES):
            # Even one output would not fit; nothing this pass can do.
            raise RuntimeError(f'cannot handle cases: split_params_size={split_params_size!r} > MAX_CUDA_PARAM_BYTES={MAX_CUDA_PARAM_BYTES!r}')
        if ((split_params_size * len(split_op._attrs['outputs'])) <= MAX_CUDA_PARAM_BYTES):
            # Total parameter block already fits; leave the op alone.
            continue
        modified = True
        split_dim = split_op._attrs['split_dim']
        split_sizes = split_op._attrs['split_sizes']
        outputs = split_op._attrs['outputs']
        # How many outputs a single kernel launch can carry.
        num_outputs_per_split = (MAX_CUDA_PARAM_BYTES // split_params_size)
        # Ceil-divide to get the number of replacement split ops.
        num_split_ops = (((len(outputs) + num_outputs_per_split) - 1) // num_outputs_per_split)
        output_mapping = []
        for split_i in range(num_split_ops):
            start = (split_i * num_outputs_per_split)
            end = min(((split_i + 1) * num_outputs_per_split), len(split_op._attrs['outputs']))
            # Each new split initially produces ALL outputs; drop the ones
            # outside [start, end) so it keeps only its own chunk.
            remove_indices = (list(range(start)) + list(range(end, len(split_op._attrs['outputs']))))
            new_split = ops.split()
            new_outputs = new_split(split_op._attrs['inputs'][0], split_sizes, split_dim)
            new_split.remove_output_at(remove_indices)
            new_outputs = new_split._attrs['outputs']
            sorted_graph += list(new_outputs)
            # Remember which old output each surviving new output replaces.
            output_mapping += list(zip(outputs[start:end], new_outputs))
        for (old_output, new_output) in output_mapping:
            transform_utils.replace_tensor(old_output, new_output)
    if (not modified):
        return sorted_graph
    # Rebuild a topological order from the (possibly changed) output tensors.
    new_output_tensors = [tensor for tensor in sorted_graph if tensor._attrs['is_output']]
    sorted_graph = toposort.toposort(new_output_tensors)
    return sorted_graph
class Solution(object):
    """LeetCode 415 'Add Strings' — sum two decimal strings without int()."""

    def addStrings(self, num1, num2):
        """Return the decimal-string sum of two non-negative number strings.

        Digits are added pairwise from the least-significant end with a
        carry, so arbitrarily long inputs are supported.
        """
        def to_digit(c):
            # zip_longest pads the shorter number with None -> treat as 0.
            if (c is None):
                return 0
            return (ord(c) - ord('0'))

        def to_char(d):
            # Split a digit sum (0..19) into its low digit and carry bit.
            if (d > 9):
                carry = 1
                d = (d % 10)
            else:
                carry = 0
            return (chr((ord('0') + d)), carry)

        # BUG FIX: `izip_longest` exists only in Python 2; Python 3 renamed
        # it to `zip_longest`, so the old import raised ImportError.
        from itertools import zip_longest
        carry = 0
        ret = []
        for (c1, c2) in zip_longest(reversed(num1), reversed(num2)):
            d1 = to_digit(c1)
            d2 = to_digit(c2)
            (d, carry) = to_char(((d1 + d2) + carry))
            ret.append(d)
        if (carry > 0):
            # A final carry adds one extra most-significant digit.
            ret.append(to_char(carry)[0])
        return ''.join(reversed(ret))
class OptionPlotoptionsWaterfallSonificationTracksMappingRate(Options):
    """Generated option wrapper for a sonification track-mapping `rate` entry.

    NOTE(review): each name is defined twice (getter form then setter form);
    as written the second `def` overwrites the first at class-creation time.
    This matches a generated property pattern — decorators may have been lost
    in extraction; confirm against the generator output.
    """

    def mapFunction(self):
        # No default map function.
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class TopicMoveView(PermissionRequiredMixin, SingleObjectTemplateResponseMixin, FormMixin, SingleObjectMixin, ProcessFormView):
    """Moderation view that moves a topic to another forum, optionally
    locking it in the process."""

    context_object_name = 'topic'
    form_class = TopicMoveForm
    model = Topic
    success_message = _('This topic has been moved successfully.')
    template_name = 'forum_moderation/topic_move.html'

    def get(self, request, *args, **kwargs):
        # SingleObjectMixin requires self.object before form processing.
        self.object = self.get_object()
        return super().get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        return super().post(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        topic = self.get_object()
        # Expose the source forum for the template breadcrumb/header.
        context['forum'] = topic.forum
        return context

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        # The form needs the topic being moved and the acting user.
        kwargs.update({'topic': self.object, 'user': self.request.user})
        return kwargs

    def form_valid(self, form):
        # Reassign the topic to the chosen forum and update its status.
        topic = self.object
        new_forum = form.cleaned_data['forum']
        topic.forum = new_forum
        if form.cleaned_data['lock_topic']:
            topic.status = Topic.TOPIC_LOCKED
        else:
            topic.status = Topic.TOPIC_MOVED
        topic.save()
        messages.success(self.request, self.success_message)
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        # Redirect to the topic at its new location.
        return reverse('forum_conversation:topic', kwargs={'forum_slug': self.object.forum.slug, 'forum_pk': self.object.forum.pk, 'slug': self.object.slug, 'pk': self.object.pk})

    def get_controlled_object(self):
        # Permission checks apply to the SOURCE forum of the topic.
        return self.get_object().forum

    def perform_permissions_check(self, user, obj, perms):
        return self.request.forum_permission_handler.can_move_topics(obj, user)
class SimpleAccess(access):
    """AST-style node describing a simple access, with source-location info.

    `_fields` and `_attributes` follow the ast.AST node convention used by
    the `access` base class.
    """

    _fields = ('buffer', 'arc', 'tokens')
    _attributes = ('lineno', 'col_offset')

    def __init__(self, buffer, arc, tokens, lineno=0, col_offset=0, **ARGS):
        # Remaining keyword arguments are forwarded to the base node.
        access.__init__(self, **ARGS)
        self.buffer = buffer
        self.arc = arc
        self.tokens = tokens
        # Source positions are normalised to ints.
        self.lineno = int(lineno)
        self.col_offset = int(col_offset)
class DNSUtilsTests(unittest.TestCase):
    """Tests for Utils.Cache size/time eviction and the IPAddr object cache."""

    def testCache(self):
        # Basic get/set/unset semantics, including a default value and a
        # double-unset (which must not raise).
        c = Utils.Cache(maxCount=5, maxTime=60)
        self.assertTrue((c.get('a') is None))
        self.assertEqual(c.get('a', 'test'), 'test')
        for i in range(5):
            c.set(i, i)
        for i in range(5):
            self.assertEqual(c.get(i), i)
        c.unset('a')
        c.unset('a')

    def testCacheMaxSize(self):
        # Exceeding maxCount must evict an older entry.
        c = Utils.Cache(maxCount=5, maxTime=60)
        for i in range(5):
            c.set(i, i)
        self.assertEqual([c.get(i) for i in range(5)], [i for i in range(5)])
        self.assertNotIn((- 1), (c.get(i, (- 1)) for i in range(5)))
        c.set(10, i)
        # One of the first five keys is now gone.
        self.assertIn((- 1), (c.get(i, (- 1)) for i in range(5)))
        for i in range(10):
            c.set(i, 1)
        self.assertEqual(len(c), 5)

    def testCacheMaxTime(self):
        # Entries must expire after maxTime seconds.
        c = Utils.Cache(maxCount=5, maxTime=0.0005)
        for i in range(10):
            c.set(i, 1)
        st = time.time()
        self.assertTrue(Utils.wait_for((lambda : (time.time() >= (st + 0.0005))), 1))
        self.assertTrue((len(c) <= 5))
        for i in range(10):
            self.assertTrue((c.get(i) is None))
        self.assertEqual(len(c), 0)

    def testOverflowedIPCache(self):
        # Hammer the (temporarily tiny) IPAddr cache from three concurrent
        # producers to check it stays bounded and raises nothing.
        from threading import Thread
        from random import shuffle
        _org_cache = IPAddr.CACHE_OBJ
        cache = IPAddr.CACHE_OBJ = Utils.Cache(maxCount=5, maxTime=60)
        result = list()
        # Fewer iterations in fast test mode.
        count = (1 if unittest.F2B.fast else 50)
        try:
            # NOTE(review): the mutable default `result=[]` is never used —
            # every caller passes `result` explicitly — but it is a latent
            # trap; consider `result=None` if this is ever refactored.
            def _TestCacheStr2IP(forw=True, result=[], random=False):
                try:
                    c = count
                    while c:
                        c -= 1
                        s = (range(0, 256, 1) if forw else range(255, (- 1), (- 1)))
                        if random:
                            shuffle([i for i in s])
                        for i in s:
                            IPAddr(('192.0.2.' + str(i)), IPAddr.FAM_IPv4)
                            IPAddr(('2001:db8::' + str(i)), IPAddr.FAM_IPv6)
                    # Success marker; an exception appends the error instead.
                    result.append(None)
                except Exception as e:
                    DefLogSys.debug(e, exc_info=True)
                    result.append(e)
            th1 = Thread(target=_TestCacheStr2IP, args=(True, result))
            th1.start()
            th2 = Thread(target=_TestCacheStr2IP, args=(False, result))
            th2.start()
            # Third producer runs in this thread, with shuffled order.
            _TestCacheStr2IP(True, result, True)
        finally:
            th1.join()
            th2.join()
            # Always restore the real cache object.
            IPAddr.CACHE_OBJ = _org_cache
        # All three producers must have finished without an exception.
        self.assertEqual(result, ([None] * 3))
        self.assertTrue((len(cache) <= cache.maxCount))
class OptionSeriesStreamgraphLabelStyle(Options):
    """Generated option wrapper for streamgraph series label CSS style.

    NOTE(review): each name is defined twice (getter then setter); the second
    `def` overwrites the first as written — property decorators may have been
    lost in extraction; confirm against the generator output.
    """

    def fontSize(self):
        # Default font size is '0.8em'.
        return self._config_get('0.8em')

    def fontSize(self, num: float):
        self._config(num, js_type=False)

    def fontWeight(self):
        # Default font weight is 'bold'.
        return self._config_get('bold')

    def fontWeight(self, text: str):
        self._config(text, js_type=False)
class Migration(migrations.Migration):
    """Auto-generated migration: repoint Availability.resource (related_name
    'availabilities') and enforce uniqueness on (start_date, resource).

    Do not edit by hand beyond what Django's migration framework expects.
    """

    dependencies = [('core', '0024_auto__0702')]
    operations = [migrations.AlterField(model_name='availability', name='resource', field=models.ForeignKey(related_name='availabilities', to='core.Resource')), migrations.AlterUniqueTogether(name='availability', unique_together=set([('start_date', 'resource')]))]
def test_retrieve_from_file():
    """Retrieving this very file yields bytes by default and str on request."""
    # Default mode: binary content.
    with RetrieveFileFromUri(__file__).get_file_object() as fh:
        content = fh.read()
    assert type(content) is bytes
    assert len(content) > 0
    # Text mode requested explicitly.
    with RetrieveFileFromUri(__file__).get_file_object(True) as fh:
        content = fh.read()
    assert type(content) is str
    assert len(content) > 0
# NOTE(review): the decorator prefix was missing from the source (the line
# began with a bare ".parametrize"); restored as pytest's parametrize marker.
# Confirm against version control.
@pytest.mark.parametrize('tst, flt', [(dict(a=Input('a', 'prop'), b=[Input('b', 'prop')], c=dict(ca=[Input('ca1', 'prop'), Input('ca2', 'prop')])), [Input('a', 'prop'), Input('b', 'prop'), Input('ca1', 'prop'), Input('ca2', 'prop')]), ([Input('a', 'prop'), Input('b', 'prop'), Input('c', 'prop')], [Input('a', 'prop'), Input('b', 'prop'), Input('c', 'prop')]), ((Input('a', 'prop'), Input('b', 'prop'), Input('c', 'prop')), [Input('a', 'prop'), Input('b', 'prop'), Input('c', 'prop')])])
def test_dependency_collection(tst, flt):
    """DependencyCollection must flatten nested containers, support index
    access and assignment, and append to the end."""
    dc = DependencyCollection(tst)
    # Iteration yields the flattened dependency list in order.
    for (i, d) in enumerate(dc):
        assert d == flt[i]
    # Index access matches the flattened list.
    for i in range(len(dc)):
        assert dc[i] == flt[i]
    # Items are assignable by index.
    dc[0] = Input('b', 'prop')
    assert dc[0] == Input('b', 'prop')
    # append() adds to the end.
    _ = dc.append(Input('new', 'prop'))
    assert dc[(- 1)] == Input('new', 'prop')
# NOTE(review): the decorator line was corrupted in the source (it began with
# a bare "(description=...)"); restored as Django's admin action decorator.
# Confirm against version control (older code may have used the
# `short_description` attribute instead).
@admin.action(description=_('Export selected media files as zip file'))
def save_as_zipfile(modeladmin, request, queryset):
    """Django admin action: export the selected media files as one ZIP archive
    and redirect to its MEDIA_URL location; report failures via messages."""
    from .zip import export_zipfile
    site = get_current_site(request)
    try:
        zip_name = export_zipfile(site, queryset)
        messages.info(request, (_('ZIP file exported as %s') % zip_name))
    except Exception as e:
        messages.error(request, (_('ZIP file export failed: %s') % str(e)))
        return
    return HttpResponseRedirect(os.path.join(django_settings.MEDIA_URL, zip_name))
class OptionPlotoptionsArcdiagramSonificationDefaultinstrumentoptionsMappingTremoloDepth(Options):
    """Generated option wrapper for a sonification tremolo-depth mapping.

    NOTE(review): each name is defined twice (getter then setter); the second
    `def` overwrites the first as written — property decorators may have been
    lost in extraction; confirm against the generator output.
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class VirtualCheckIn(SoftDeletionModel):
    """Soft-deletable record of virtual event check-ins/check-outs."""

    __tablename__ = 'virtual_check_in'
    id = db.Column(db.Integer, primary_key=True)
    # Array of ticket-holder ids covered by this check-in record.
    ticket_holder_id = db.Column(ARRAY(Integer), nullable=True)
    # Parent event; kept (with NULL fk) if the event is deleted.
    event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='SET NULL'))
    event = db.relationship('Event', backref='virtual_check_ins')
    # Optional room/location within the event.
    microlocation_id = db.Column(db.Integer, db.ForeignKey('microlocations.id', ondelete='SET NULL'), nullable=True, default=None)
    microlocation = db.relationship('Microlocation', backref='virtual_check_ins')
    check_in_type = db.Column(db.String, nullable=False)
    check_in_at = db.Column(db.DateTime(timezone=True))
    check_out_at = db.Column(db.DateTime(timezone=True))
    created_at = db.Column(db.DateTime(timezone=True), default=func.now())
    updated_at = db.Column(db.DateTime(timezone=True))
    is_deleted = db.Column(db.Boolean, default=False)

    def __repr__(self):
        return f'<Virtual Check In {self.id}>'
def test_print_gas(plugintester, mocker):
    """Gas profile output is built only when --gas / -G is passed."""
    mocker.spy(output, '_build_gas_profile_output')
    # Long flag triggers the profile once.
    plugintester.runpytest('--gas')
    assert output._build_gas_profile_output.call_count == 1
    # No flag: count must not change.
    plugintester.runpytest()
    assert output._build_gas_profile_output.call_count == 1
    # Short flag triggers it again.
    plugintester.runpytest('-G')
    assert output._build_gas_profile_output.call_count == 2
class CharType(Type):
    """Debug-info representation of the one-byte signed `char` base type."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.name = CHAR
        self.byte_size = 1

    def debug_info(self):
        """Encode this type as a base-type DIE byte sequence."""
        encoded = bytearray()
        encoded.append(ENUM_ABBREV_CODE['BASE_TYPE_WITH_ENCODING'])
        encoded.append(self.byte_size)
        encoded.append(ENUM_DW_ATE['DW_ATE_signed'])
        # Inline NUL-terminated type name.
        encoded.extend(map(ord, self.name))
        encoded.append(0)
        return encoded
class OptionSeriesBarSonificationContexttracksPointgrouping(Options):
    """Generated option wrapper for sonification point-grouping settings.

    NOTE(review): each name is defined twice (getter then setter); the second
    `def` overwrites the first as written — property decorators may have been
    lost in extraction; confirm against the generator output.
    """

    def algorithm(self):
        # Default grouping algorithm is 'minmax'.
        return self._config_get('minmax')

    def algorithm(self, text: str):
        self._config(text, js_type=False)

    def enabled(self):
        # Grouping is enabled by default.
        return self._config_get(True)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def groupTimespan(self):
        # Default group timespan is 15 (milliseconds in Highcharts docs —
        # confirm unit against the generator source).
        return self._config_get(15)

    def groupTimespan(self, num: float):
        self._config(num, js_type=False)

    def prop(self):
        # Default point property used for grouping is 'y'.
        return self._config_get('y')

    def prop(self, text: str):
        self._config(text, js_type=False)
# NOTE(review): the decorator prefixes were missing from the source (bare
# ".external" / ".skipif" lines); restored as pytest markers. Confirm the
# marker names against the project's pytest configuration.
@pytest.mark.external
@pytest.mark.skipif(has_openai_key is False, reason='OpenAI API key not available')
def test_llm_task_factories_ner():
    """Assembling an llm_ner pipeline from a config string must yield entities
    restricted to the configured PER/ORG/LOC labels."""
    cfg_string = '\n    [nlp]\n    lang = "en"\n    pipeline = ["llm"]\n\n    [components]\n\n    [components.llm]\n    factory = "llm_ner"\n\n    [components.llm.task]\n    labels = PER,ORG,LOC\n\n    [components.llm.model]\n    _models = "spacy.GPT-3-5.v1"\n    '
    config = Config().from_str(cfg_string)
    nlp = assemble_from_config(config)
    text = 'Marc and Bob both live in Ireland.'
    doc = nlp(text)
    assert len(doc.ents) > 0
    for ent in doc.ents:
        assert ent.label_ in ['PER', 'ORG', 'LOC']
def extractRainbowReadsCom(item):
    """Feed-item parser for rainbowreads.com releases.

    Returns None for previews or un-numbered items, a release message when a
    known group tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # (tag to look for, group name to report, translation type)
    known_groups = (('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'))
    for tag, group_name, tl_type in known_groups:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, group_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class Menu(JsPackage):
    """JavaScript-side interface helpers for a menu HTML component."""

    def __init__(self, component, js_code: str=None, set_var: bool=True, is_py_data: bool=True, page: primitives.PageModel=None):
        # NOTE(review): the JsPackage base __init__ is not invoked here;
        # state is initialised directly instead — confirm this is intentional.
        (self.varName, self.varData, self.__var_def) = (js_code, '', None)
        (self.component, self.page) = (component, page)
        (self._js, self._jquery) = ([], None)

    def content(self):
        """Formatter giving read access to the element's innerHTML."""
        return JsHtml.ContentFormatters(self.page, ('%s.innerHTML' % self.varName))

    def set_text(self, value: Union[(str, primitives.JsDataModel)]):
        """Return the JS fragment that sets the element's innerHTML."""
        value = JsUtils.jsConvertData(value, None)
        return JsObjects.JsObjects.get(('%s.innerHTML = %s' % (self.varName, value)))

    def set_url(self, value: str, target: str='_blank'):
        """Return the JS fragment that sets the link href and its target."""
        value = JsUtils.jsConvertData(value, None)
        target = JsUtils.jsConvertData(target, None)
        return JsObjects.JsObjects.get(("%s.href = %s; %s.setAttribute('target', %s)" % (self.varName, value, self.varName, target)))
class OptionPlotoptionsVariwideSonificationTracksMappingTime(Options):
    """Generated option wrapper for a sonification track-mapping `time` entry.

    NOTE(review): each name is defined twice (getter then setter); the second
    `def` overwrites the first as written — property decorators may have been
    lost in extraction; confirm against the generator output.
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def compare_version(version1=None, version2=None, split_flag='.'):
    """Compare two delimited version strings numerically, section by section.

    Returns 1 if version1 > version2, -1 if smaller, 0 if equal. A None/empty
    version compares lower than any non-empty one; two empty versions are
    equal. Sections are compared as integers, so '1.10' > '1.9'.
    Raises ValueError if a section is not an integer literal.
    """
    empty1 = (version1 is None) or (version1 == '')
    empty2 = (version2 is None) or (version2 == '')
    if empty1 or empty2:
        if empty1 and empty2:
            return 0
        # Only one side is empty: the non-empty one is greater.
        return (- 1) if empty1 else 1
    if version1 == version2:
        return 0
    # str.partition replaces the old try/except-around-index idiom (and its
    # bare `except:`): the tail is '' when there is no separator.
    (head1, _, tail1) = version1.partition(split_flag)
    (head2, _, tail2) = version2.partition(split_flag)
    if int(head1) > int(head2):
        return 1
    elif int(head1) < int(head2):
        return (- 1)
    # BUG FIX: the recursive call previously dropped `split_flag`, so any
    # custom separator was ignored after the first section.
    return compare_version(tail1, tail2, split_flag)
class DefaultMetadataInterceptor(grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor):
    """gRPC client interceptor that prepends a default `accept` header to
    every unary-unary and unary-stream call."""

    def _inject_default_metadata(self, call_details: grpc.ClientCallDetails):
        # Default entries come first; caller-supplied metadata is appended.
        combined = [('accept', 'application/grpc')]
        if call_details.metadata:
            combined.extend(list(call_details.metadata))
        new_details = _ClientCallDetails(call_details.method, call_details.timeout, combined, call_details.credentials)
        return new_details

    def intercept_unary_unary(self, continuation: typing.Callable, client_call_details: grpc.ClientCallDetails, request: typing.Any):
        return continuation(self._inject_default_metadata(client_call_details), request)

    def intercept_unary_stream(self, continuation: typing.Callable, client_call_details: grpc.ClientCallDetails, request: typing.Any):
        return continuation(self._inject_default_metadata(client_call_details), request)
class OptionPlotoptionsBubbleSonificationContexttracksMappingHighpassResonance(Options):
    """Generated option wrapper for a highpass-resonance sonification mapping.

    NOTE(review): each name is defined twice (getter then setter); the second
    `def` overwrites the first as written — property decorators may have been
    lost in extraction; confirm against the generator output.
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class FCR(cmd.Cmd):
    """Interactive shell for driving FCR device sessions.

    Commands: cred (set credentials), devices (list/replace target devices),
    login (open a device CLI), exit/EOF (quit).
    """

    def __init__(self, devices):
        super().__init__()
        self.prompt = 'FCR $ '
        self._loop = asyncio.get_event_loop()
        # Connection defaults; user/passwd can be overridden via do_cred().
        self._options = {'fcr_host': 'localhost', 'fcr_port': 5000, 'user': 'netbot', 'passwd': 'bot1234'}
        self._devices = (devices or ['dev-001'])

    def do_cred(self, line):
        """Prompt for a username/password and store them in the options."""
        user = getpass.getuser()
        user = (input(('Username [%s]: ' % user)) or user)
        passwd = getpass.getpass(('%s password: ' % user))
        # BUG FIX: the attribute is `_options` (plural); the previous
        # `self._option[...]` raised AttributeError whenever credentials
        # were entered.
        self._options['user'] = user
        self._options['passwd'] = passwd

    def do_devices(self, line):
        """Replace the device list (space-separated) and print it."""
        if line:
            self._devices = line.split()
        print()
        self.columnize(self._devices)
        print()

    def do_login(self, line):
        """Open an interactive CLI session against the configured devices."""
        with DeviceCli(self._devices, self._options, self._loop) as cli:
            cli.cmdloop()

    def do_EOF(self, line):
        # Ctrl-D behaves like `exit`.
        return self.do_exit()

    def do_exit(self, line=None):
        """Terminate the command loop."""
        print()
        return True
class myHelpCommand(HelpCommand):
    """Custom discord.py help command that renders help pages as one embed.

    Pages accumulate as (category, entries) pairs in ``self.paginator``
    (reset per invocation) and are flushed by ``send_pages``.
    """

    def __init__(self, **options):
        super().__init__(**options)
        self.paginator = None  # replaced with a fresh list in prepare_help_command
        self.spacer = ' '

    async def send_pages(self, header=False, footer=False):
        """Build a single embed from the collected pages and send it."""
        embed = Embed(color=3066993)
        if header:
            embed.set_author(
                name=self.context.bot.description,
                icon_url=self.context.bot.user.display_avatar,
            )
        for field_name, field_value in self.paginator:
            embed.add_field(name=field_name, value=field_value, inline=False)
        if footer:
            embed.set_footer(text='Use felix help <command/category> for more information.')
        await self.get_destination().send(embed=embed)

    async def send_bot_help(self, mapping):
        """Overview page: one field per cog, command names grouped 8 per row."""
        bot = self.context.bot

        def get_category(command):
            cog = command.cog
            return cog.qualified_name + ':' if cog is not None else 'Help:'

        filtered = await self.filter_commands(bot.commands, sort=True, key=get_category)
        for cog_name, grouped in itertools.groupby(filtered, key=get_category):
            cmds = sorted(grouped, key=lambda c: c.name)
            category = f' {cog_name}'
            if len(cmds) == 1:
                entries = f'{self.spacer}{cmds[0].name} {cmds[0].short_doc}'
            else:
                # One row of up to 8 names, joined by ' | ', newline-separated.
                rows = [
                    self.spacer + ' | '.join(c.name for c in cmds[start:start + 8])
                    for start in range(0, len(cmds), 8)
                ]
                entries = '\n'.join(rows)
            self.paginator.append((category, entries))
        await self.send_pages(header=True, footer=True)

    async def send_cog_help(self, cog):
        """Detail page for a single cog: each visible command with its doc."""
        visible = await self.filter_commands(cog.get_commands(), sort=True)
        if not visible:
            await self.context.send('No public commands in this cog. Try again with felix helpall.')
            return
        category = f' {cog.qualified_name}'
        entries = '\n'.join(
            self.spacer + f'**{command.name}** {(command.short_doc or command.description)}'
            for command in visible
        )
        self.paginator.append((category, entries))
        await self.send_pages(footer=True)

    async def send_group_help(self, group):
        """Detail page for a command group and its subcommands."""
        visible = await self.filter_commands(group.commands, sort=True)
        if not visible:
            await self.context.send('No public commands in group. Try again with felix helpall.')
            return
        category = f'**{group.name}** - {(group.description or group.short_doc)}'
        entries = '\n'.join(
            self.spacer + f'**{command.name}** {command.short_doc}'
            for command in visible
        )
        self.paginator.append((category, entries))
        await self.send_pages(footer=True)

    async def send_command_help(self, command):
        """Single-command page: signature plus full help text."""
        helptext = command.help or command.description or 'No help Text'
        self.paginator.append((self.get_command_signature(command), helptext))
        await self.send_pages()

    async def prepare_help_command(self, ctx, command=None):
        """Reset the page list at the start of every help invocation."""
        self.paginator = []
        await super().prepare_help_command(ctx, command)
class PCBase(PCSNESBase):
    """Base class for Python-level PETSc preconditioners.

    Subclasses override :meth:`apply`/:meth:`applyTranspose`; the class flags
    declare whether the operators handed to :meth:`setUp` must be Python mats.
    """

    _asciiname = 'preconditioner'
    _objectname = 'pc'
    # Set to True in subclasses that require the A/P operator to have
    # PETSc type 'python'; checked in setUp.
    needs_python_amat = False
    needs_python_pmat = False

    def apply(self, pc, X, Y):
        """Apply the preconditioner to X, writing into Y (no-op here)."""
        pass

    def applyTranspose(self, pc, X, Y):
        """Apply the transpose of the preconditioner (no-op here)."""
        pass

    def setUp(self, pc):
        """Validate operator types against the class flags, then defer to super."""
        A, P = pc.getOperators()
        pcname = type(self).__module__ + '.' + type(self).__name__
        if self.needs_python_amat and A.getType() != PETSc.Mat.Type.PYTHON:
            raise ValueError("PC '%s' needs amat to have type python, but it is %s" % (pcname, A.getType()))
        if self.needs_python_pmat and P.getType() != PETSc.Mat.Type.PYTHON:
            raise ValueError("PC '%s' needs pmat to have type python, but it is %s" % (pcname, P.getType()))
        super().setUp(pc)
def to_svg(index, shortname, alias, uc, alt, title, category, options, md):
    """Render an emoji as an ``<img>`` element pointing at a CDN-hosted SVG.

    The CDN default depends on the emoji index; ``options`` may override the
    CSS class and the image path. (``add_attriubtes`` is the external helper's
    actual, historically misspelled, name - do not "fix" it.)
    """
    default_cdn = TWEMOJI_SVG_CDN if index == 'twemoji' else EMOJIONE_SVG_CDN
    attributes = {
        'class': options.get('classes', index),
        'alt': alt,
        'src': ('%s%s.svg' % (options.get('image_path', default_cdn), uc)),
    }
    if title:
        attributes['title'] = title
    add_attriubtes(options, attributes)
    return etree.Element('img', attributes)
class MockMessageHandler():
    """Test double recording every renderer callback for later inspection.

    Iterations, progress values, stats, status codes and drawn tile
    coordinates are appended to private lists as they arrive.
    """

    def __init__(self):
        self.__iterations = []
        self.__progresses = []
        self.__stats = []
        self.__statuses = []
        self.__tiles = []

    def iters_changed(self, iteration_number):
        """Record an iteration-count update."""
        self.__iterations.append(iteration_number)

    def progress_changed(self, progress):
        """Record a progress update."""
        self.__progresses.append(progress)

    def stats_changed(self, stat):
        """Record a stats update, converting the raw list via messages.Stats."""
        stat_from_messages = messages.Stats.fromList(stat)
        self.__stats.append(stat_from_messages)

    def status_changed(self, status):
        """Record a status-code update."""
        self.__statuses.append(status)

    def image_changed(self, tile_x1, tile_y1, tile_x2, tile_y2):
        """Record the coordinates of a redrawn image tile."""
        self.__tiles.append((tile_x1, tile_y1, tile_x2, tile_y2))

    def is_interrupted(self):
        # Bug fix: `self` was missing from the signature, so calling this on
        # an instance raised TypeError. The mock handler is never interrupted.
        return False

    def get_statuses_history(self):
        """Return all status codes seen so far, oldest first."""
        return self.__statuses

    def get_last_image_tile_drawn(self):
        """Return the most recently drawn tile as (x1, y1, x2, y2)."""
        return self.__tiles[(- 1)]

    def has_finished(self):
        """True once the most recent status code is 0."""
        return (self.__statuses[(- 1)] == 0)
def test_polaroid(fx_asset):
    """Exercise Image.polaroid(): bare, with caption, with a Font, and the
    TypeError raised when `font` is not a Font instance."""
    # Default polaroid effect on the built-in 'rose:' image.
    with Image(filename='rose:') as img:
        img.polaroid()
    # Caption only.
    with Image(filename='rose:') as img:
        img.polaroid(caption='hello')
    # Caption rendered with an explicit Font (path, size, color, antialias,
    # stroke color, stroke width).
    with Image(filename='rose:') as img:
        font = Font(str(fx_asset.joinpath('League_Gothic.otf')), 12, Color('orange'), True, Color('pink'), 1)
        img.polaroid(caption='hello', font=font)
        # Passing a bare path instead of a Font must be rejected.
        with raises(TypeError):
            img.polaroid(caption='hello', font='League_Gothic.otf')
class OptionSeriesSankeyNodesDatalabels(Options):
    """Generated accessors for Highcharts sankey ``series.nodes.dataLabels``.

    Each option appears as a getter (argument-less, returning the configured
    value with the shown default) followed by a same-named setter writing via
    ``self._config``; sub-option groups return nested Options objects.
    NOTE(review): the getter/setter pairs share one name, so as written the
    setter overrides the getter - this looks like stripped ``@property`` /
    ``@<name>.setter`` decorators from the code generator; confirm upstream.
    """
    def align(self):
        return self._config_get('undefined')
    def align(self, text: str):
        self._config(text, js_type=False)
    def allowOverlap(self):
        return self._config_get(False)
    def allowOverlap(self, flag: bool):
        self._config(flag, js_type=False)
    def animation(self) -> 'OptionSeriesSankeyNodesDatalabelsAnimation':
        return self._config_sub_data('animation', OptionSeriesSankeyNodesDatalabelsAnimation)
    def backgroundColor(self):
        return self._config_get('none')
    def backgroundColor(self, text: str):
        self._config(text, js_type=False)
    def borderColor(self):
        return self._config_get(None)
    def borderColor(self, text: str):
        self._config(text, js_type=False)
    def borderRadius(self):
        return self._config_get(0)
    def borderRadius(self, num: float):
        self._config(num, js_type=False)
    def borderWidth(self):
        return self._config_get(0)
    def borderWidth(self, num: float):
        self._config(num, js_type=False)
    def className(self):
        return self._config_get(None)
    def className(self, text: str):
        self._config(text, js_type=False)
    def color(self):
        return self._config_get(None)
    def color(self, text: str):
        self._config(text, js_type=False)
    def crop(self):
        return self._config_get(False)
    def crop(self, flag: bool):
        self._config(flag, js_type=False)
    def defer(self):
        return self._config_get(True)
    def defer(self, flag: bool):
        self._config(flag, js_type=False)
    def enabled(self):
        return self._config_get(True)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    def filter(self) -> 'OptionSeriesSankeyNodesDatalabelsFilter':
        return self._config_sub_data('filter', OptionSeriesSankeyNodesDatalabelsFilter)
    def format(self):
        return self._config_get('undefined')
    def format(self, text: str):
        self._config(text, js_type=False)
    def formatter(self):
        return self._config_get(None)
    def formatter(self, value: Any):
        self._config(value, js_type=False)
    def inside(self):
        return self._config_get(True)
    def inside(self, flag: bool):
        self._config(flag, js_type=False)
    def nodeFormat(self):
        return self._config_get('undefined')
    def nodeFormat(self, text: str):
        self._config(text, js_type=False)
    def nodeFormatter(self):
        return self._config_get(None)
    def nodeFormatter(self, value: Any):
        self._config(value, js_type=False)
    def nullFormat(self):
        return self._config_get(None)
    def nullFormat(self, flag: bool):
        self._config(flag, js_type=False)
    def nullFormatter(self):
        return self._config_get(None)
    def nullFormatter(self, value: Any):
        self._config(value, js_type=False)
    def overflow(self):
        return self._config_get('justify')
    def overflow(self, text: str):
        self._config(text, js_type=False)
    def padding(self):
        return self._config_get(5)
    def padding(self, num: float):
        self._config(num, js_type=False)
    def position(self):
        return self._config_get('center')
    def position(self, text: str):
        self._config(text, js_type=False)
    def rotation(self):
        return self._config_get(0)
    def rotation(self, num: float):
        self._config(num, js_type=False)
    def shadow(self):
        return self._config_get(False)
    def shadow(self, flag: bool):
        self._config(flag, js_type=False)
    def shape(self):
        return self._config_get('square')
    def shape(self, text: str):
        self._config(text, js_type=False)
    def style(self):
        return self._config_get(None)
    def style(self, value: Any):
        self._config(value, js_type=False)
    def textPath(self) -> 'OptionSeriesSankeyNodesDatalabelsTextpath':
        return self._config_sub_data('textPath', OptionSeriesSankeyNodesDatalabelsTextpath)
    def useHTML(self):
        return self._config_get(False)
    def useHTML(self, flag: bool):
        self._config(flag, js_type=False)
    def verticalAlign(self):
        return self._config_get('undefined')
    def verticalAlign(self, text: str):
        self._config(text, js_type=False)
    def x(self):
        return self._config_get(0)
    def x(self, num: float):
        self._config(num, js_type=False)
    def y(self):
        return self._config_get('undefined')
    def y(self, num: float):
        self._config(num, js_type=False)
    def zIndex(self):
        return self._config_get(6)
    def zIndex(self, num: float):
        self._config(num, js_type=False)
class OptionSeriesDumbbellDatalabelsFilter(Options):
    """Generated accessors for Highcharts dumbbell ``dataLabels.filter``
    (operator/property pair used to conditionally show labels).
    NOTE(review): same-named getter/setter pairs suggest stripped
    ``@property`` decorators from the generator; confirm upstream.
    """
    def operator(self):
        return self._config_get(None)
    def operator(self, value: Any):
        self._config(value, js_type=False)
    def property(self):
        return self._config_get(None)
    def property(self, text: str):
        self._config(text, js_type=False)
class RevisionMixinView(RevisionMixin, View):
    """Revisioned view that lets clients opt out of revision creation."""

    def revision_request_creates_revision(self, request):
        # An 'X-Norevision: true' header suppresses revision creation for
        # this request; otherwise defer to the mixin's decision.
        suppressed = request.headers.get('X-Norevision', 'false') == 'true'
        return super().revision_request_creates_revision(request) and not suppressed

    def dispatch(self, request):
        """Delegate directly to the plain object-saving view."""
        return save_obj_view(request)
@pytest.mark.unit
class TestHandleOktaCredentialsOptions():
    """Tests for utils.handle_okta_credentials_options precedence rules.

    NOTE(review): the bare ``.unit`` line in the original was a stripped
    ``@pytest.mark.unit`` decorator; restored here.
    """

    def test_config_dne_raises(self, test_config: FidesConfig) -> None:
        """An unknown credentials id must raise a click.UsageError."""
        with pytest.raises(click.UsageError):
            input_org_url = ''
            input_token = ''
            input_credentials_id = 'UNKNOWN'
            utils.handle_okta_credentials_options(fides_config=test_config, token=input_token, org_url=input_org_url, credentials_id=input_credentials_id)

    def test_returns_config_dict(self, test_config: FidesConfig) -> None:
        """With no explicit token/url, the stored credentials are returned."""
        input_org_url = ''
        input_token = ''
        input_credentials_id = 'okta_1'
        okta_config = utils.handle_okta_credentials_options(fides_config=test_config, token=input_token, org_url=input_org_url, credentials_id=input_credentials_id)
        # NOTE(review): the original assertion was truncated/garbled
        # ("{'orgUrl': ' 'token': ..."); the org URL below is reconstructed
        # from the test fixtures - confirm against the test config.
        assert (okta_config == {'orgUrl': 'https://dev-78908748.okta.com', 'token': 'redacted_override_in_tests'})

    def test_returns_input_dict(self, test_config: FidesConfig) -> None:
        """Explicit org_url/token take precedence and are returned verbatim."""
        input_org_url = 'hello.com'
        input_token = 'abcd12345'
        input_credentials_id = ''
        okta_config = utils.handle_okta_credentials_options(fides_config=test_config, token=input_token, org_url=input_org_url, credentials_id=input_credentials_id)
        assert (okta_config == {'orgUrl': input_org_url, 'token': input_token})
class OptionPlotoptionsBarSonificationTracksMappingPlaydelay(Options):
    """Generated accessors for Highcharts bar-chart sonification
    ``tracks.mapping.playDelay`` (mapFunction/mapTo/max/min/within).
    NOTE(review): same-named getter/setter pairs suggest stripped
    ``@property`` decorators from the generator; confirm upstream.
    """
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
def test_get_answer(anthropic_llm):
    """_get_answer must build a ChatAnthropic from the config, invoke it with
    the rendered messages, and return the response content."""
    with patch('langchain.chat_models.ChatAnthropic') as mock_chat:
        mock_chat_instance = mock_chat.return_value
        # The chat instance is itself called; make it return an object whose
        # .content carries the answer text.
        mock_chat_instance.return_value = MagicMock(content='Test Response')
        prompt = 'Test Prompt'
        response = anthropic_llm._get_answer(prompt, anthropic_llm.config)
        assert (response == 'Test Response')
        # Constructor must receive key/temperature/model from the config.
        mock_chat.assert_called_once_with(anthropic_api_key='test_api_key', temperature=anthropic_llm.config.temperature, model=anthropic_llm.config.model)
        # The instance must be invoked with the prompt rendered into messages.
        mock_chat_instance.assert_called_once_with(anthropic_llm._get_messages(prompt, system_prompt=anthropic_llm.config.system_prompt))
def filter_file_filter_profile_data(json):
    """Project the raw file-filter-profile payload onto the parameters the
    API accepts, dropping keys that are absent or set to None."""
    option_list = ['comment', 'extended_log', 'feature_set', 'log', 'name', 'replacemsg_group', 'rules', 'scan_archive_contents']
    json = remove_invalid_fields(json)
    return {
        attribute: json[attribute]
        for attribute in option_list
        if attribute in json and json[attribute] is not None
    }
class inner_vlan_vid(bsn_tlv):
    """BSN TLV carrying an inner VLAN id (TLV type 197)."""
    type = 197

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0
        return

    def pack(self):
        """Serialize as type/length/value; the length field is written as a
        placeholder and patched once the total size is known."""
        packed = []
        packed.append(struct.pack('!H', self.type))
        packed.append(struct.pack('!H', 0))  # length placeholder, fixed below
        packed.append(struct.pack('!H', self.value))
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack('!H', length)
        # Bug fix: struct.pack returns bytes on Python 3, so joining with the
        # str ''.join raised TypeError; join with b'' and return bytes.
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        # Restored @staticmethod: the definition takes `reader` as its first
        # argument, so an instance-bound call would have passed `self` here.
        obj = inner_vlan_vid()
        _type = reader.read('!H')[0]
        assert (_type == 197)
        _length = reader.read('!H')[0]
        orig_reader = reader
        # Constrain further reads to this TLV's payload (length includes the
        # 4-byte type/length header).
        reader = orig_reader.slice(_length, 4)
        obj.value = reader.read('!H')[0]
        return obj

    def __eq__(self, other):
        """Equal iff same concrete type and same VLAN id."""
        if type(self) != type(other):
            return False
        if self.value != other.value:
            return False
        return True

    def pretty_print(self, q):
        """Render via the loxi pretty-printer protocol."""
        q.text('inner_vlan_vid {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('value = ')
                q.text(('%#x' % self.value))
            q.breakable()
            q.text('}')
class OneFormAssembler(FormAssembler):
    """Assembles a 1-form (e.g. a residual) into a vector-valued tensor.

    NOTE(review): ``diagonal`` and ``result`` read like accessors that were
    probably decorated with ``@property`` upstream - confirm before relying
    on attribute-style access.
    """
    def __init__(self, form, tensor, bcs=(), diagonal=False, zero_bc_nodes=False, form_compiler_parameters=None, needs_zeroing=True):
        # diagonal: assemble only the diagonal of a rank-2 form into a vector.
        # zero_bc_nodes: zero boundary-condition rows rather than imposing
        # the boundary values.
        super().__init__(form, tensor, bcs, form_compiler_parameters, needs_zeroing)
        self._diagonal = diagonal
        self._zero_bc_nodes = zero_bc_nodes
    def diagonal(self):
        # Whether only the diagonal of the form is being assembled.
        return self._diagonal
    def result(self):
        # The assembled tensor.
        return self._tensor
    def execute_parloops(self):
        # Run all parallel loops with the tensor's halo frozen for INC access,
        # so halo exchanges are deferred until all contributions are in.
        with self._tensor.dat.frozen_halo(op2.INC):
            for parloop in self.parloops:
                parloop()
    def _apply_bc(self, bc):
        # Dispatch on BC type; EquationBCSplit recursively assembles the BC's
        # own form into the (zeroed) BC rows without re-zeroing the tensor.
        if isinstance(bc, DirichletBC):
            self._apply_dirichlet_bc(bc)
        elif isinstance(bc, EquationBCSplit):
            bc.zero(self._tensor)
            type(self)(bc.f, self._tensor, bc.bcs, self._diagonal, self._zero_bc_nodes, self._form_compiler_params, needs_zeroing=False).assemble()
        else:
            raise AssertionError
    def _apply_dirichlet_bc(self, bc):
        # Either impose BC values through the l2 Riesz representation (the
        # tensor is a cofunction, so set/apply act on its primal image), or
        # simply zero the BC rows when zero_bc_nodes is requested.
        if (not self._zero_bc_nodes):
            tensor_func = self._tensor.riesz_representation(riesz_map='l2')
            if self._diagonal:
                bc.set(tensor_func, 1)
            else:
                bc.apply(tensor_func)
            self._tensor.assign(tensor_func.riesz_representation(riesz_map='l2'))
        else:
            bc.zero(self._tensor)
@pytest.mark.gpu
@pytest.mark.skipif((not has_torch_cuda_gpu), reason='needs GPU & CUDA')
def test_invalid_model():
    """Loading the pipeline config with an unknown model name must fail
    validation. (The bare ``.gpu`` / ``.skipif`` lines in the original were
    stripped ``@pytest.mark`` decorators; restored here.)"""
    orig_config = Config().from_str(_NLP_CONFIG)
    config = copy.deepcopy(orig_config)
    config['components']['llm']['model']['name'] = 'x'
    with pytest.raises(ValueError, match='unexpected value; permitted'):
        spacy.util.load_model_from_config(config, auto_fill=True)
    # Release GPU memory so subsequent GPU tests start clean.
    torch.cuda.empty_cache()
def test_remove_stream_rooms_organizer(db, client, user, jwt):
    """PATCHing a video stream with an empty rooms relationship must detach
    all of its rooms."""
    stream = get_stream(db, user=user)
    payload = {
        'data': {
            'id': str(stream.id),
            'type': 'video-stream',
            'relationships': {'rooms': {'data': []}},
        }
    }
    assert len(stream.rooms) == 1
    response = client.patch(
        f'/v1/video-streams/{stream.id}',
        content_type='application/vnd.api+json',
        headers=jwt,
        data=json.dumps(payload),
    )
    assert response.status_code == 200
    db.session.refresh(stream)
    assert stream.rooms == []
class BackwardSliceSwitchVariableDetection(PipelineStage):
    """Recover the original switch variable of each switch block by walking
    the backward slice of the dispatched expression and picking the first
    variable that looks bounds-checked."""
    name = 'backward-slice-switch-variable-detection'
    def __init__(self):
        # Populated per task in _init_map (annotations only, no values yet).
        self._def_map: DefMap
        self._use_map: UseMap
        self._dereferences_used_in_branches: set
    def run(self, task: DecompilerTask):
        """Rewrite the switch expression of every switch block in the CFG."""
        self._init_map(task.graph)
        for switch_block in {edge.source for edge in task.graph.edges if isinstance(edge, SwitchCase)}:
            self._handle_switch_block(switch_block)
    def _init_map(self, cfg: ControlFlowGraph):
        # Build def/use maps for all instructions and remember dereference
        # expressions occurring in conditions of requirement-free branches.
        (self._use_map, self._def_map, self._dereferences_used_in_branches) = (UseMap(), DefMap(), set())
        for instruction in cfg.instructions:
            self._def_map.add(instruction)
            self._use_map.add(instruction)
            if (isinstance(instruction, Branch) and (not instruction.requirements)):
                new_expressions = {expr for expr in instruction.condition if is_dereference(expr)}
                self._dereferences_used_in_branches.update(new_expressions)
    def _handle_switch_block(self, basic_block: BasicBlock):
        # The switch instruction is the block's last instruction.
        switch_instruction = basic_block.instructions[(- 1)]
        switch_expression = self.find_switch_expression(switch_instruction)
        switch_instruction.substitute(switch_instruction.expression, switch_expression)
    def find_switch_expression(self, switch_instruction: Instruction):
        """Return the first bounds-checked variable on the backward slice of
        the switch expression; raise ValueError if none is found."""
        traced_variable = (switch_instruction.expression.requirements[0] if switch_instruction.expression.requirements else switch_instruction.expression)
        for variable in self._backwardslice(traced_variable):
            if self._is_bounds_checked(variable):
                return variable
        raise ValueError('No switch variable candidate found.')
    def _is_used_in_condition_assignment(self, value: Variable):
        """True if `value` is the sole requirement of a condition assignment."""
        for usage in self._use_map.get(value):
            if (isinstance(usage, Assignment) and isinstance(usage.value, Condition) and (usage.requirements == [value])):
                return True
        return False
    def _is_used_in_branch(self, value: Variable):
        """True if `value` is the sole requirement of a branch."""
        for usage in self._use_map.get(value):
            if (isinstance(usage, Branch) and (usage.requirements == [value])):
                return True
        return False
    def _is_predecessor_dereferenced_in_branch(self, value: Variable) -> bool:
        # True if the definition's value (or a sub-expression of it) is one of
        # the dereferences collected from branch conditions.
        if (definition := self._def_map.get(value)):
            return (any(((exp in self._dereferences_used_in_branches) for exp in definition.value)) or (definition.value in self._dereferences_used_in_branches))
        return False
    def _is_copy_assigned(self, value: Variable) -> bool:
        # True if `value` is defined as a plain copy of another variable.
        if (definition := self._def_map.get(value)):
            return isinstance(definition.value, Variable)
        return False
    def _is_bounds_checked(self, value: Variable) -> bool:
        """Heuristic: any of copy-assignment, condition usage, branch usage or
        dereferenced-predecessor counts as evidence of a bounds check."""
        return any([self._is_copy_assigned(value), self._is_used_in_condition_assignment(value), self._is_used_in_branch(value), self._is_predecessor_dereferenced_in_branch(value)])
    def _backwardslice(self, value: Variable):
        """Yield `value` and, transitively, the requirements of each visited
        variable's definition (each variable yielded at most once)."""
        visited = set()
        todo = [value]
        while (todo and (current := todo.pop())):
            (yield current)
            visited.add(current)
            definition = self._def_map.get(current)
            if definition:
                todo.extend([requirement for requirement in definition.requirements if (requirement not in visited)])
class OptionSeriesWindbarbSonificationDefaultspeechoptionsMapping(Options):
    """Generated accessors for Highcharts windbarb sonification
    ``defaultSpeechOptions.mapping``; sub-option groups (pitch, playDelay,
    rate, time, volume) return nested Options objects.
    NOTE(review): the same-named text getter/setter pair suggests stripped
    ``@property`` decorators from the generator; confirm upstream.
    """
    def pitch(self) -> 'OptionSeriesWindbarbSonificationDefaultspeechoptionsMappingPitch':
        return self._config_sub_data('pitch', OptionSeriesWindbarbSonificationDefaultspeechoptionsMappingPitch)
    def playDelay(self) -> 'OptionSeriesWindbarbSonificationDefaultspeechoptionsMappingPlaydelay':
        return self._config_sub_data('playDelay', OptionSeriesWindbarbSonificationDefaultspeechoptionsMappingPlaydelay)
    def rate(self) -> 'OptionSeriesWindbarbSonificationDefaultspeechoptionsMappingRate':
        return self._config_sub_data('rate', OptionSeriesWindbarbSonificationDefaultspeechoptionsMappingRate)
    def text(self):
        return self._config_get(None)
    def text(self, text: str):
        self._config(text, js_type=False)
    def time(self) -> 'OptionSeriesWindbarbSonificationDefaultspeechoptionsMappingTime':
        return self._config_sub_data('time', OptionSeriesWindbarbSonificationDefaultspeechoptionsMappingTime)
    def volume(self) -> 'OptionSeriesWindbarbSonificationDefaultspeechoptionsMappingVolume':
        return self._config_sub_data('volume', OptionSeriesWindbarbSonificationDefaultspeechoptionsMappingVolume)
@Bgp4MpMrtRecord.register_type(Bgp4MpMrtRecord.SUBTYPE_BGP4MP_MESSAGE)
class Bgp4MpMessageMrtMessage(Bgp4MpMrtMessage):
    """MRT BGP4MP message body: peer/local AS numbers, interface index,
    AFI-dependent peer/local IPs, and an embedded BGP message.

    NOTE(review): the orphaned ``.register_type(...)`` line and the
    ``def parse(cls, ...)`` signature indicate stripped decorators; the
    class decorator and @classmethod are restored here - confirm against
    the MRT library this file derives from.
    """
    _HEADER_FMT = '!HHHH'
    HEADER_SIZE = struct.calcsize(_HEADER_FMT)
    _ADDRS_FMT = '!%ds%ds'
    AFI_IPv4 = 1
    AFI_IPv6 = 2

    def __init__(self, peer_as, local_as, if_index, peer_ip, local_ip, bgp_message, afi=None):
        self.peer_as = peer_as
        self.local_as = local_as
        self.if_index = if_index
        self.peer_ip = peer_ip
        self.local_ip = local_ip
        assert isinstance(bgp_message, bgp.BGPMessage)
        self.bgp_message = bgp_message
        # afi may be None here; serialize() derives it from the IP versions.
        self.afi = afi

    @classmethod
    def parse(cls, buf):
        """Parse the fixed header, the AFI-sized address pair, and the
        trailing BGP message; return a new instance."""
        (peer_as, local_as, if_index, afi) = struct.unpack_from(cls._HEADER_FMT, buf)
        offset = cls.HEADER_SIZE
        if (afi == cls.AFI_IPv4):
            addrs_fmt = (cls._ADDRS_FMT % (4, 4))
        elif (afi == cls.AFI_IPv6):
            addrs_fmt = (cls._ADDRS_FMT % (16, 16))
        else:
            raise struct.error(('Unsupported address family: %d' % afi))
        (peer_ip, local_ip) = struct.unpack_from(addrs_fmt, buf, offset)
        peer_ip = ip.bin_to_text(peer_ip)
        local_ip = ip.bin_to_text(local_ip)
        offset += struct.calcsize(addrs_fmt)
        rest = buf[offset:]
        (bgp_message, _, _) = bgp.BGPMessage.parser(rest)
        return cls(peer_as, local_as, if_index, peer_ip, local_ip, bgp_message, afi)

    def serialize(self):
        """Serialize header + addresses + BGP message, deriving self.afi
        from the address family of the configured IPs."""
        if (ip.valid_ipv4(self.peer_ip) and ip.valid_ipv4(self.local_ip)):
            self.afi = self.AFI_IPv4
        elif (ip.valid_ipv6(self.peer_ip) and ip.valid_ipv6(self.local_ip)):
            self.afi = self.AFI_IPv6
        else:
            raise ValueError(('peer_ip and local_ip must be the same address family: peer_ip=%s, local_ip=%s' % (self.peer_ip, self.local_ip)))
        buf = struct.pack(self._HEADER_FMT, self.peer_as, self.local_as, self.if_index, self.afi)
        buf += ip.text_to_bin(self.peer_ip)
        buf += ip.text_to_bin(self.local_ip)
        buf += self.bgp_message.serialize()
        return buf
class serienRecMainScreen(serienRecBaseScreen, Screen, HelpableScreen):
    def __init__(self, session):
        """Main SerienRecorder screen: initializes base screens, key bindings,
        skin, state flags and schedules the splash/startup sequence."""
        serienRecBaseScreen.__init__(self, session)
        Screen.__init__(self, session)
        HelpableScreen.__init__(self)
        self.session = session
        self.picload = ePicLoad()
        self.picloader = None
        self.skin = None
        self.chooseMenuList = None
        self.chooseMenuList_popup = None
        self.popup_list = []
        self.piconLoader = PiconLoader()
        self.database = None
        self.singleTimer_conn = None
        self.displayTimer_conn = None
        # Key bindings; the help strings double as entries on the help screen.
        self['actions'] = HelpableActionMap(self, 'SerienRecorderActions', {'ok': (self.keyOK, 'Marker fur die ausgewahlte Serie hinzufugen'), 'cancel': (self.keyCancel, 'SerienRecorder beenden'), 'left': (self.keyLeft, 'Zur vorherigen Seite blattern'), 'right': (self.keyRight, 'Zur nachsten Seite blattern'), 'up': (self.keyUp, 'Eine Zeile nach oben'), 'down': (self.keyDown, 'Eine Zeile nach unten'), 'red': (self.keyRed, 'Anzeige-Modus wechseln (Serien-Planer / Top 30)'), 'green': (self.keyGreen, 'Ansicht Sender-Zuordnung offnen'), 'yellow': (self.keyYellow, 'Ansicht Serien-Marker offnen'), 'blue': (self.keyBlue, 'Ansicht Timer-Liste offnen'), 'info': (self.keyCheck, 'Suchlauf fur Timer starten'), 'menu': (self.recSetup, 'Menu fur globale Einstellungen offnen'), 'nextBouquet': (self.nextPage, 'Serienplaner des nachsten Tages laden'), 'prevBouquet': (self.backPage, 'Serienplaner des vorherigen Tages laden'), 'startTeletext': (self.wunschliste, 'Informationen zur ausgewahlten Serie auf Wunschliste anzeigen'), '0': (self.readLogFile, 'Log-File des letzten Suchlaufs anzeigen'), '1': (self.searchSeries, 'Serie manuell suchen'), '2': (self.changeTVDBID, 'TVDB-ID andern'), '3': (self.showProposalDB, 'Liste der Serien/Staffel-Starts anzeigen'), '4': (self.serieInfo, 'Informationen zur ausgewahlten Serie anzeigen'), '5': (self.episodeList, 'Episoden der ausgewahlten Serie anzeigen'), '6': (self.showConflicts, 'Liste der Timer-Konflikte anzeigen'), '7': (self.showWishlist, 'Merkzettel (vorgemerkte Folgen) anzeigen'), '8': (self.reloadSerienplaner, 'Serien-Planer neu laden'), '9': (self.showTransmissions, 'Sendetermine fur ausgewahlte Serie anzeigen')}, (- 1))
        self.helpList[0][2].sort()
        self['helpActions'] = ActionMap(['SerienRecorderActions'], {'displayHelp': self.showHelp, 'displayHelp_long': self.showManual}, 0)
        ReadConfigFile()
        # Disable picons when the configured picon directory does not exist.
        if (not os.path.exists(config.plugins.serienRec.piconPath.value)):
            config.plugins.serienRec.showPicons.value = False
        self.setupSkin()
        # Module-level flag: whether this main screen (vs. the marker screen)
        # is shown first.
        global showMainScreen
        if (config.plugins.serienRec.firstscreen.value == '0'):
            showMainScreen = True
        else:
            showMainScreen = False
        self.pRegional = 0
        self.pPaytv = 1
        self.pPrime = 1
        self.page = 0          # day offset for the planner view
        self.modus = 'list'
        self.loading = True    # blocks most key handlers until data arrived
        self.daylist = [[]]
        self.displayTimer = None
        self.displayMode = 1
        self.serviceRefs = None
        self.onLayoutFinish.append(self.setSkinProperties)
        self.onClose.append(self.__onClose)
        self.onFirstExecBegin.append(self.showSplashScreen)
    def showInfoText(self):
        """Show the startup info screen, then continue with startScreen."""
        from .SerienRecorderStartupInfoScreen import ShowStartupInfo
        self.session.openWithCallback(self.startScreen, ShowStartupInfo)
    def showSplashScreen(self):
        """Show the splash screen, then continue with the update check."""
        from .SerienRecorderSplashScreen import ShowSplashScreen
        self.session.openWithCallback(self.checkForUpdate, ShowSplashScreen)
    def checkForUpdate(self):
        """Optionally check GitHub for updates, then show the changelog info
        screen (if a Changelog file exists) or go straight to startScreen."""
        if config.plugins.serienRec.Autoupdate.value:
            from .SerienRecorderUpdateScreen import checkGitHubUpdate
            checkGitHubUpdate(self.session).checkForUpdate()
        if fileExists(('%s/Changelog' % os.path.dirname(__file__))):
            self.showInfoText()
        else:
            self.startScreen()
    def callHelpAction(self, *args):
        """Forward the help action to HelpableScreen."""
        HelpableScreen.callHelpAction(self, *args)
    def setSkinProperties(self):
        """Set the button labels for this screen and start the display timer."""
        super(self.__class__, self).setSkinProperties()
        self['text_red'].setText('Anzeige-Modus')
        self['text_green'].setText('Sender zuordnen')
        self['text_ok'].setText('Marker hinzufugen')
        self['text_yellow'].setText('Serien-Marker')
        self['text_blue'].setText('Timer-Liste')
        # Labels for the numeric-key help rows (indexed [row][column]).
        self.num_bt_text[0][1] = 'Episoden-Liste'
        self.num_bt_text[1][0] = 'Serie suchen'
        self.num_bt_text[2][0] = 'TVDB-ID andern'
        self.num_bt_text[2][2] = 'Timer suchen'
        self.num_bt_text[3][1] = 'Neu laden'
        self.num_bt_text[4][1] = 'Sendetermine'
        super(self.__class__, self).startDisplayTimer()
    def setupSkin(self):
        """Build the skin: main menu list, popup list, cover and buttons."""
        self.skin = None
        InitSkin(self)
        self.chooseMenuList = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self.chooseMenuList.l.setFont(0, gFont('Regular', (20 + int(config.plugins.serienRec.listFontsize.value))))
        self.chooseMenuList.l.setItemHeight(int((56 * skinFactor)))
        self['menu_list'] = self.chooseMenuList
        self['menu_list'].show()
        self.chooseMenuList_popup = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self.chooseMenuList_popup.l.setFont(0, gFont('Regular', (20 + int(config.plugins.serienRec.listFontsize.value))))
        self.chooseMenuList_popup.l.setItemHeight(int((30 * skinFactor)))
        self['popup_list'] = self.chooseMenuList_popup
        self['popup_list'].hide()
        self['title'].setText('Lade infos from Web...')
        if config.plugins.serienRec.showCover.value:
            self['cover'].show()
        # NOTE(review): the button widgets are shown when showAllButtons is
        # False - this reads inverted; confirm the intended semantics.
        if (not config.plugins.serienRec.showAllButtons.value):
            self['bt_red'].show()
            self['bt_green'].show()
            self['bt_ok'].show()
            self['bt_yellow'].show()
            self['bt_blue'].show()
            self['bt_exit'].show()
            self['bt_text'].show()
            self['bt_epg'].show()
            self['bt_info'].show()
            self['bt_menu'].show()
            self['text_red'].show()
            self['text_green'].show()
            self['text_ok'].show()
            self['text_yellow'].show()
            self['text_blue'].show()
            self['text_0'].show()
            self['text_1'].show()
            self['text_2'].show()
            self['text_3'].show()
            self['text_4'].show()
    def updateMenuKeys(self):
        """Delegate to the module-level updateMenuKeys helper."""
        updateMenuKeys(self)
    def changeTVDBID(self):
        """Open the TVDB-ID editor for the currently selected series."""
        from .SerienRecorderScreenHelpers import EditTVDBID
        (serien_name, serien_wlid, serien_fsid, serien_info) = self.getCurrentSelection()
        editTVDBID = EditTVDBID(self, self.session, serien_name, None, serien_wlid, serien_fsid, 0)
        editTVDBID.changeTVDBID()
    def reloadSerienplaner(self):
        """Reload planner data, clearing the cache first."""
        self.readPlanerData(True)
    def readLogFile(self):
        """Open the log screen showing the last search run's log file."""
        from .SerienRecorderLogScreen import serienRecReadLog
        self.session.open(serienRecReadLog)
    def showProposalDB(self):
        """Show season-start proposals; reload planner data when it closes."""
        from .SerienRecorderSeasonBeginsScreen import serienRecShowSeasonBegins
        self.session.openWithCallback(self.readPlanerData, serienRecShowSeasonBegins)
    def searchSeries(self):
        """Open the virtual keyboard to enter a series title (list mode only)."""
        if (self.modus == 'list'):
            self.session.openWithCallback(self.wSearch, NTIVirtualKeyBoard, title='Serien Titel eingeben:')
    def wSearch(self, serien_name):
        """Keyboard callback: open the search-result screen for the entered
        title (no-op when the input was empty/cancelled)."""
        if serien_name:
            from .SerienRecorderSearchResultScreen import serienRecSearchResultScreen
            self.session.openWithCallback(self.handleSeriesSearchEnd, serienRecSearchResultScreen, serien_name)
    def handleSeriesSearchEnd(self, serien_fsid=None):
        """After a search: open the marker screen for the chosen series, or
        simply refresh the planner without clearing the cache."""
        if serien_fsid:
            from .SerienRecorderMarkerScreen import serienRecMarker
            self.session.openWithCallback(self.readPlanerData, serienRecMarker, serien_fsid)
        else:
            self.readPlanerData(False)
    def serieInfo(self):
        """Show the info screen for the selected series (ignored while the
        list is still loading or empty)."""
        if (self.loading or (self['menu_list'].getCurrent() is None)):
            return
        (serien_name, serien_wlid, serien_fsid, serien_info) = self.getCurrentSelection()
        from .SerienRecorderSeriesInfoScreen import serienRecShowInfo
        self.session.open(serienRecShowInfo, serien_name, serien_wlid, serien_fsid)
    def episodeList(self):
        """Show the episode list of the selected series (requires a valid
        wunschliste id; ignored while loading or with no selection)."""
        if (self.loading or (self['menu_list'].getCurrent() is None)):
            return
        (serien_name, serien_wlid, serien_fsid, serien_info) = self.getCurrentSelection()
        if (serien_wlid > 0):
            from .SerienRecorderEpisodesScreen import serienRecEpisodes
            self.session.open(serienRecEpisodes, serien_name, serien_wlid)
    def wunschliste(self):
        """Open the Wunschliste page for the selected series via the base
        screen (ignored while loading or with no selection)."""
        if (self.loading or (self['menu_list'].getCurrent() is None)):
            return
        (serien_name, serien_wlid, serien_fsid, serien_info) = self.getCurrentSelection()
        super(self.__class__, self).wunschliste(serien_fsid)
    def showTransmissions(self):
        """Show the air dates of the selected series (ignored while loading,
        with no selection, or when name/wl-id are missing)."""
        if (self.loading or (self['menu_list'].getCurrent() is None)):
            return
        (serien_name, serien_wlid, serien_fsid, serien_info) = self.getCurrentSelection()
        if (serien_name and serien_wlid):
            from .SerienRecorderTransmissionsScreen import serienRecSendeTermine
            self.session.openWithCallback(None, serienRecSendeTermine, serien_name, serien_wlid, serien_fsid)
    def setHeadline(self):
        """Set the headline and red-button label for the current display mode
        (1 = daily planner, 2 = Top 30)."""
        if (int(config.plugins.serienRec.screenplaner.value) == 1):
            self['headline'].setText('Serien-Planer (Serien Tagesubersicht)')
            self['text_red'].setText('Top 30')
        elif (int(config.plugins.serienRec.screenplaner.value) == 2):
            self['headline'].setText('Top 30 SerienRecorder Serien')
            self['text_red'].setText('Tagesubersicht')
        # Applied unconditionally, i.e. also when neither branch matched.
        self['headline'].instance.setForegroundColor(parseColor('red'))
    def recSetup(self):
        """Open the global settings screen; handle the result in setupClose."""
        from .SerienRecorderSetupScreen import serienRecSetup
        self.session.openWithCallback(self.setupClose, serienRecSetup)
    def setupClose(self, result):
        """Settings-screen callback: let the base class apply the result and
        reload planner data when the settings indicate a refresh is needed."""
        super(self.__class__, self).setupClose(result)
        if result[1]:
            self.readPlanerData()
    def startScreen(self):
        """Post-splash startup: init the refresh timer and database, verify
        the channel list, then switch to the configured start screen."""
        print(('[SerienRecorder] version %s is running...' % config.plugins.serienRec.showversion.value))
        from .SerienRecorderCheckForRecording import checkForRecordingInstance, refreshTimer, initDB
        if (not refreshTimer):
            if config.plugins.serienRec.timeUpdate.value:
                checkForRecordingInstance.initialize(self.session, False, False)
        if (not initDB()):
            # Database could not be initialized - close the plugin entirely.
            print('[SerienRecorder] initDB failed')
            super(self.__class__, self).close()
        else:
            self.database = SRDatabase(serienRecDataBaseFilePath)
            if (not self.database.hasChannels()):
                # First run / empty channel list: force the channel editor.
                print('[SerienRecorder] Channellist is empty !')
                from .SerienRecorderChannelScreen import serienRecMainChannelEdit
                self.session.openWithCallback(self.readPlanerData, serienRecMainChannelEdit)
            else:
                from .SerienRecorderChannelScreen import checkChannelListTimelineness
                self.serviceRefs = self.database.getActiveServiceRefs()
                channelListUpToDate = True
                if (config.plugins.serienRec.channelUpdateNotification.value == '0'):
                    channelListUpToDate = checkChannelListTimelineness(self.database)
                if channelListUpToDate:
                    self.switchStartScreen()
                else:
                    # Ask whether to jump to the channel mapping for an update.
                    self.session.openWithCallback(self.handleChannelListUpdate, MessageBox, 'Die Senderliste wurde auf dem Server aktualisiert.\nSie muss auch im SerienRecorder aktualisiert werden.\nWechseln Sie zur Senderzuordnung und aktualisieren Sie die Senderliste mit der grunen Taste.\n\nZur Senderzuordnung wechseln?', MessageBox.TYPE_YESNO)
    def handleChannelListUpdate(self, showChannelEdit=False):
        """MessageBox callback: open the channel editor when confirmed,
        otherwise continue straight to the start screen."""
        if showChannelEdit:
            from .SerienRecorderChannelScreen import serienRecMainChannelEdit
            self.session.openWithCallback(self.switchStartScreen, serienRecMainChannelEdit)
        else:
            self.switchStartScreen()
    def switchStartScreen(self, unused=None):
        """Show the marker screen when the main screen is not the configured
        entry point; otherwise load planner data without clearing the cache."""
        if (not showMainScreen):
            from .SerienRecorderMarkerScreen import serienRecMarker
            self.session.openWithCallback(self.readPlanerData, serienRecMarker)
        else:
            self.readPlanerData(False)
def readPlanerData(self, clearCache=True):
    """Load the planner or Top-30 data, from the on-disk cache if possible.

    clearCache: when True, discard the cached data first so everything is
    fetched from the web service again.
    """
    print('[SerienRecorder] readPlanerData - Clear cache = %s' % str(clearCache))
    if not showMainScreen:
        # Screen was opened without the main screen: just tear down.
        self.keyCancel()
        self.close()
        return
    self.setHeadline()
    self['title'].instance.setForegroundColor(parseColor('foreground'))
    self['menu_list'].moveToIndex(0)
    self.loading = True
    self['title'].setText('Lade Infos aus dem Speicher...')
    cache = serienRecSeriesPlanner.loadPlannerData(config.plugins.serienRec.screenplaner.value)
    if clearCache:
        cache.clear()
    # Cache entries are keyed by date; in planner mode (value == 1) the
    # current page is an offset in days from today.
    lt = datetime.datetime.now()
    if config.plugins.serienRec.screenplaner.value == 1:
        lt += datetime.timedelta(days=self.page)
    key = time.strftime('%d.%m.%Y', lt.timetuple())
    if key in cache:
        try:
            if config.plugins.serienRec.screenplaner.value == 1:
                self.processPlanerData(cache[key], True)
            else:
                self.processTopThirty(cache[key], True)
        except Exception:
            SRLogger.writeLog('Fehler beim Lesen und Verarbeiten der Serien-Planer bzw. Top30 Daten aus dem Cache.\n', True)
    else:
        self['title'].setText('Lade Infos vom Web...')
        webChannels = self.database.getActiveChannels()

        def cacheData():
            # Blocking web-service call; runs in a worker thread if possible.
            if config.plugins.serienRec.screenplaner.value == 1:
                result = SeriesServer().doGetPlannerData(int(self.page), webChannels)
            else:
                result = SeriesServer().doGetTopThirty()
            return result

        def onCacheDataSuccessful(result):
            if config.plugins.serienRec.screenplaner.value == 1:
                self.processPlanerData(result, False)
            else:
                self.processTopThirty(result, False)

        def onCacheDataFailed(error):
            SRLogger.writeLog('Fehler beim Abrufen und Verarbeiten der Serien-Planer bzw. Top30 Daten vom SerienServer.\n', True)

        import twisted.python.runtime
        if twisted.python.runtime.platform.supportsThreads():
            from twisted.internet.threads import deferToThread
            deferToThread(cacheData).addCallback(onCacheDataSuccessful).addErrback(onCacheDataFailed)
        else:
            try:
                data = cacheData()
                onCacheDataSuccessful(data)
            except Exception as error:
                # BUG FIX: onCacheDataFailed() takes the failure as an
                # argument; the bare call raised a TypeError itself.
                onCacheDataFailed(error)
def processPlanerData(self, data, useCache=False):
    """Fill the list with series-planner entries for the current page.

    data: the cached ``(headDate, daylist)`` tuple when useCache is True,
    otherwise the raw web-service payload that still has to be processed.
    """
    if ((not data) or (len(data) == 0)):
        self['title'].setText('Fehler beim Abrufen der Serien-Planer Daten')
        return
    if useCache:
        (headDate, self.daylist) = data
    else:
        # Raw server data: merge in this box's marker status before display.
        markers = self.database.getAllMarkerStatusForBoxID(config.plugins.serienRec.BoxID.value)
        seriesPlanner = serienRecSeriesPlanner()
        (headDate, self.daylist) = seriesPlanner.processPlannerData(data, markers, self.page)
    self.loading = False
    if (len(self.daylist[0]) != 0):
        if headDate:
            self['title'].setText(('Fur %s werden %s Episode(n) vorgeschlagen' % (headDate[0], len(self.daylist[0]))))
            self['title'].instance.setForegroundColor(parseColor('foreground'))
        else:
            self['title'].setText(('Fur heute werden %s Episode(n) vorgeschlagen' % len(self.daylist[0])))
            self['title'].instance.setForegroundColor(parseColor('foreground'))
        self.chooseMenuList.setList(list(map(self.buildPlanerList, self.daylist[0])))
        self.getCover()
    else:
        # NOTE(review): the condition reads "page < 1 and page != 0",
        # i.e. page is negative, and then decrements it further -- looks
        # intentional (stepping back over empty past days) but confirm.
        if ((int(self.page) < 1) and (not (int(self.page) == 0))):
            self.page -= 1
        self['title'].setText(('Fur heute werden %s Episode(n) vorgeschlagen' % len(self.daylist[0])))
        self['title'].instance.setForegroundColor(parseColor('foreground'))
        print('[SerienRecorder] Wunschliste Serien-Planer -> LISTE IST LEER !!!!')
        self.chooseMenuList.setList(list(map(self.buildPlanerList, self.daylist[0])))
def processTopThirty(self, data, useCache=False):
    """Fill the list with the Top-30 most requested series.

    data: the cached ``(headDate, daylist)`` tuple when useCache is True,
    otherwise the raw web-service payload with 'date' and 'series' keys.
    """
    if ((not data) or (len(data) == 0)):
        self['title'].setText('Fehler beim Abrufen der Serien-Planer Daten')
        return
    if useCache:
        (headDate, self.daylist) = data
    else:
        self.daylist = [[]]
        headDate = [data['date']]
        markers = self.database.getAllMarkerStatusForBoxID(config.plugins.serienRec.BoxID.value)
        rank = 0
        for serie in data['series']:
            serien_name = toStr(serie['name'])
            serien_wlid = int(serie['id'])
            serien_fsid = serie['fs_id']
            serien_info = serie['info']
            average = serie['average']
            # serieAdded: 0 = no marker, 1 = active marker, 2 = inactive marker.
            serieAdded = 0
            if (serien_fsid in markers):
                serieAdded = (1 if markers[serien_fsid] else 2)
            rank += 1
            self.daylist[0].append((serien_name, average, serien_wlid, serieAdded, rank, serien_fsid, serien_info))
        if headDate:
            # Store the processed list in the Top-30 cache (slot 2), keyed
            # by the date part of the header string.
            d = headDate[0].split(',')
            d.reverse()
            key = d[0].strip()
            cache = serienRecSeriesPlanner.loadPlannerData(2)
            cache.update({key: (headDate, self.daylist)})
            serienRecSeriesPlanner.writePlannerData(2, cache)
    self.loading = False
    self['title'].setText('Die Serien mit den meisten Abrufen in den letzten 12 Monaten')
    self.chooseMenuList.setList(list(map(self.buildTopThirtyList, self.daylist[0])))
    self.getCover()
def buildPlanerList(self, entry):
    """Build the eListboxPythonMultiContent row for one planner entry.

    entry is a 16-tuple; the unpacking below documents the field layout.
    Returns [entry, cell, cell, ...] as expected by the list component.
    """
    (regional, paytv, neu, prime, transmissionTime, serien_name, sender, staffel, episode, title, aufnahme, serieAdded, bereits_vorhanden, serien_wlid, serien_fsid, serien_info) = entry
    serienRecMainPath = os.path.dirname(__file__)
    imageNone = ('%s/images/black.png' % serienRecMainPath)
    imageNeu = ('%s/images/neu.png' % serienRecMainPath)
    imageTimer = ('%s/images/timer.png' % serienRecMainPath)
    imageHDD = ('%s/images/hdd_icon.png' % serienRecMainPath)
    # Series name color: green = active marker, red = inactive marker.
    if (serieAdded == 1):
        seriesColor = parseColor('green').argb()
    elif (serieAdded == 2):
        seriesColor = parseColor('red').argb()
    else:
        seriesColor = None
    titleColor = titleColorSelected = timeColor = parseColor('foreground').argb()
    if aufnahme:
        # A timer exists for this episode: show the title in blue.
        titleColor = parseColor('blue').argb()
        titleColorSelected = 39367
    if (int(neu) == 0):
        imageNeu = imageNone
    # Status icon: "already on disk" takes precedence over "timer pending".
    if bereits_vorhanden:
        imageHDDTimer = imageHDD
    elif aufnahme:
        imageHDDTimer = imageTimer
    else:
        imageHDDTimer = imageNone
    if (config.plugins.serienRec.showPicons.value != '0'):
        # Layout variant with a picon column; '1' selects the first of the
        # two stored service references, anything else the second.
        picon = loadPNG(imageNone)
        if (sender and self.serviceRefs.get(sender)):
            piconPath = self.piconLoader.getPicon((self.serviceRefs.get(sender)[0] if (config.plugins.serienRec.showPicons.value == '1') else self.serviceRefs.get(sender)[1]))
            if piconPath:
                self.picloader = PicLoader((80 * skinFactor), (40 * skinFactor))
                picon = self.picloader.load(piconPath)
                self.picloader.destroy()
        return [entry, (eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, (5 * skinFactor), (5 * skinFactor), (80 * skinFactor), (40 * skinFactor), picon), (eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, (330 * skinFactor), (5 * skinFactor), (30 * skinFactor), (22 * skinFactor), loadPNG(imageNeu)), (eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, (330 * skinFactor), (27 * skinFactor), (30 * skinFactor), (22 * skinFactor), loadPNG(imageHDDTimer)), (eListboxPythonMultiContent.TYPE_TEXT, (100 * skinFactor), 3, (230 * skinFactor), (26 * skinFactor), 0, (RT_HALIGN_LEFT | RT_VALIGN_CENTER), sender), (eListboxPythonMultiContent.TYPE_TEXT, (100 * skinFactor), (29 * skinFactor), (150 * skinFactor), (26 * skinFactor), 0, (RT_HALIGN_LEFT | RT_VALIGN_CENTER), transmissionTime, timeColor, timeColor), (eListboxPythonMultiContent.TYPE_TEXT, (365 * skinFactor), 3, (500 * skinFactor), (26 * skinFactor), 0, (RT_HALIGN_LEFT | RT_VALIGN_CENTER), serien_name, seriesColor, seriesColor), (eListboxPythonMultiContent.TYPE_TEXT, (365 * skinFactor), (29 * skinFactor), (500 * skinFactor), (26 * skinFactor), 0, (RT_HALIGN_LEFT | RT_VALIGN_CENTER), title, titleColor, titleColorSelected)]
    else:
        # Layout variant without picons: text columns start further left.
        return [entry, (eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, (5 * skinFactor), (5 * skinFactor), (30 * skinFactor), (22 * skinFactor), loadPNG(imageNeu)), (eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, (5 * skinFactor), (27 * skinFactor), (30 * skinFactor), (22 * skinFactor), loadPNG(imageHDDTimer)), (eListboxPythonMultiContent.TYPE_TEXT, (40 * skinFactor), 3, (280 * skinFactor), (26 * skinFactor), 0, (RT_HALIGN_LEFT | RT_VALIGN_CENTER), sender), (eListboxPythonMultiContent.TYPE_TEXT, (40 * skinFactor), (29 * skinFactor), (150 * skinFactor), (26 * skinFactor), 0, (RT_HALIGN_LEFT | RT_VALIGN_CENTER), transmissionTime, timeColor, timeColor), (eListboxPythonMultiContent.TYPE_TEXT, (340 * skinFactor), 3, (520 * skinFactor), (26 * skinFactor), 0, (RT_HALIGN_LEFT | RT_VALIGN_CENTER), serien_name, seriesColor, seriesColor), (eListboxPythonMultiContent.TYPE_TEXT, (340 * skinFactor), (29 * skinFactor), (520 * skinFactor), (26 * skinFactor), 0, (RT_HALIGN_LEFT | RT_VALIGN_CENTER), title, titleColor, titleColorSelected)]
def buildTopThirtyList(self, entry):
    """Build the eListboxPythonMultiContent row for one Top-30 entry.

    BUG FIX: the method was declared without ``self`` although it is used
    as a bound method (``self.buildTopThirtyList`` in processTopThirty),
    so every call raised a TypeError. ``self`` is unused in the body.
    """
    (serien_name, average, serien_wlid, serieAdded, rank, serien_fsid, serien_info) = entry
    # Green = active marker, red = inactive marker, default color otherwise.
    if serieAdded == 1:
        seriesColor = parseColor('green').argb()
    elif serieAdded == 2:
        seriesColor = parseColor('red').argb()
    else:
        seriesColor = None
    rank = ('%d.' % rank)
    title = ('%s (%s)' % (serien_name, serien_info))
    subTitle = ('%d Abrufe/Tag' % average)
    subTitleColor = parseColor('foreground').argb()
    return [entry, (eListboxPythonMultiContent.TYPE_TEXT, (5 * skinFactor), 3, (40 * skinFactor), (26 * skinFactor), 0, (RT_HALIGN_RIGHT | RT_VALIGN_CENTER), rank, subTitleColor, subTitleColor), (eListboxPythonMultiContent.TYPE_TEXT, (70 * skinFactor), 3, (620 * skinFactor), (26 * skinFactor), 0, (RT_HALIGN_LEFT | RT_VALIGN_CENTER), title, seriesColor, seriesColor), (eListboxPythonMultiContent.TYPE_TEXT, (70 * skinFactor), (29 * skinFactor), (620 * skinFactor), (26 * skinFactor), 0, (RT_HALIGN_LEFT | RT_VALIGN_CENTER), subTitle, subTitleColor, subTitleColor)]
def keyOK(self):
    """Ask whether a marker should be created for the selected series."""
    if self.modus != 'list':
        return
    if self.loading or self['menu_list'].getCurrent() is None:
        return
    serien_name, serien_wlid, serien_fsid, serien_info = self.getCurrentSelection()
    question = "Soll fur die Serie '%s' ein Serien-Marker angelegt werden?" % serien_name
    self.session.openWithCallback(self.addMarker, MessageBox, question, MessageBox.TYPE_YESNO)
def addMarker(self, add):
    """MessageBox callback: create a series marker for the current selection.

    add: True when the user confirmed the question from keyOK().
    """
    if add:
        (serien_name, serien_wlid, serien_fsid, serien_info) = self.getCurrentSelection()
        # Optionally restrict the new marker to this receiver's box ID.
        if config.plugins.serienRec.activateNewOnThisSTBOnly.value:
            boxID = config.plugins.serienRec.BoxID.value
        else:
            boxID = None
        if self.database.addMarker(str(serien_wlid), serien_name, serien_info, serien_fsid, boxID, 0):
            if boxID:
                SRLogger.writeLog(("Ein Serien-Marker fur ' %s ' (%s) wurde auf Box %s angelegt" % (serien_name, serien_info, str(boxID))), True)
            else:
                SRLogger.writeLog(("Ein Serien-Marker fur ' %s ' (%s) wurde angelegt" % (serien_name, serien_info)), True)
            self['title'].setText(("Marker '%s (%s)' wurde angelegt." % (serien_name, serien_info)))
            self['title'].instance.setForegroundColor(parseColor('green'))
            from .SerienRecorder import getCover
            getCover(self, serien_name, serien_fsid, False, True)
            if config.plugins.serienRec.openMarkerScreen.value:
                from .SerienRecorderMarkerScreen import serienRecMarker
                self.session.open(serienRecMarker, serien_fsid)
        else:
            # addMarker() returned falsy: a marker for this series exists.
            self['title'].setText(("Marker fur '%s (%s)' ist bereits vorhanden." % (serien_name, serien_info)))
            self['title'].instance.setForegroundColor(parseColor('red'))
def getCover(self):
    """Show the cover image for the currently selected series."""
    if self.loading:
        return
    if self['menu_list'].getCurrent() is None:
        return
    serien_name, serien_wlid, serien_fsid, serien_info = self.getCurrentSelection()
    from .SerienRecorder import getCover
    getCover(self, serien_name, serien_fsid)
def keyRed(self):
    """Toggle between planner view (1) and Top-30 view (2), persist, reload."""
    if self.modus != 'list':
        return
    current = config.plugins.serienRec.screenplaner.value
    config.plugins.serienRec.screenplaner.value = 2 if current == 1 else 1
    config.plugins.serienRec.screenplaner.save()
    configfile.save()
    self.readPlanerData(False)
def getCurrentSelection(self):
    """Return (name, wlid, fsid, info) for the selected row.

    The tuple layout of a row differs between the planner view and the
    Top-30 view, so the indices are chosen per view.
    """
    row = self['menu_list'].getCurrent()[0]
    if config.plugins.serienRec.screenplaner.value == 1:
        indices = (5, 13, 14, 15)   # planner entry layout
    else:
        indices = (0, 2, 5, 6)      # Top-30 entry layout
    return tuple(row[i] for i in indices)
def keyGreen(self):
    """Open the channel editor; reload the planner data afterwards."""
    from .SerienRecorderChannelScreen import serienRecMainChannelEdit as channel_editor
    self.session.openWithCallback(self.readPlanerData, channel_editor)
def keyYellow(self):
    """Open the marker screen; reload the planner data afterwards."""
    from .SerienRecorderMarkerScreen import serienRecMarker as marker_screen
    self.session.openWithCallback(self.readPlanerData, marker_screen)
def keyBlue(self):
    """Open the timer list screen; reload the planner data afterwards."""
    from .SerienRecorderTimerListScreen import serienRecTimerListScreen as timer_list
    self.session.openWithCallback(self.readPlanerData, timer_list)
def keyCheck(self):
    """Open the auto-check screen (passing False through to it); reload afterwards."""
    from .SerienRecorderAutoCheckScreen import serienRecRunAutoCheckScreen as auto_check
    self.session.openWithCallback(self.readPlanerData, auto_check, False)
def keyLeft(self):
    """Page up in the list and refresh the cover (list mode only)."""
    if self.modus != 'list':
        return
    self['menu_list'].pageUp()
    self.getCover()
def keyRight(self):
    """Page down in the list and refresh the cover (list mode only)."""
    if self.modus != 'list':
        return
    self['menu_list'].pageDown()
    self.getCover()
def keyDown(self):
    """Move the selection down and refresh the cover (list mode only)."""
    if self.modus != 'list':
        return
    self['menu_list'].down()
    self.getCover()
def keyUp(self):
    """Move the selection up and refresh the cover (list mode only)."""
    if self.modus != 'list':
        return
    self['menu_list'].up()
    self.getCover()
def nextPage(self):
    """Advance one day in planner view (page 4 is the maximum) and reload."""
    if config.plugins.serienRec.screenplaner.value == 1 and self.page < 4:
        self.page += 1
        # Empty the list while the next page loads.
        self.chooseMenuList.setList([])
        self.readPlanerData(False)
def backPage(self):
    """Go back one day in planner view (not below page 0... wait, below 1) and reload."""
    if config.plugins.serienRec.screenplaner.value == 1 and self.page >= 1:
        self.page -= 1
        # Empty the list while the previous page loads.
        self.chooseMenuList.setList([])
        self.readPlanerData(False)
def __onClose(self):
    # Screen teardown hook: make sure the display timer is stopped.
    self.stopDisplayTimer()
def keyCancel(self):
    """Stop the display timer and close the screen (list mode only)."""
    if self.modus != 'list':
        return
    self.stopDisplayTimer()
    self.close()
def extractRiingrringWordpressCom(item):
    """Parse a release post from riingrring.wordpress.com.

    Returns a release message for known tags, None for previews or posts
    without a volume/chapter, and False when no tag matched.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def run_apidoc(app, config):
    """Sphinx hook: regenerate the API documentation with sphinx-apidoc."""
    import pathlib
    import sphinx.ext.apidoc

    root = pathlib.Path(__file__).parent.parent.parent
    api_dir = root / 'docs' / 'source' / 'api'
    apidoc_args = [
        '--separate',
        '--no-toc',
        '--output-dir', api_dir,
        '--templatedir', api_dir / 'templates',
        root / 'envisage',
        '*/tests',   # exclude pattern: skip test packages
    ]
    sphinx.ext.apidoc.main([str(arg) for arg in apidoc_args])
def get_gene_intervals(all_probes, ignore=params.IGNORE_GENE_NAMES):
    """Group probe rows into per-chromosome, per-gene intervals.

    all_probes: iterable of rows with .gene, .chromosome, .start, .end.
    ignore: gene names to skip (antitarget aliases are always skipped too).
    Returns {chromosome: [(gene, sorted_starts, max_end), ...]} with each
    chromosome's genes ordered by their probe starts.
    """
    # BUG FIX: the original did `ignore += params.ANTITARGET_ALIASES`,
    # which mutates the shared default argument in place whenever it is a
    # list. Build a fresh set instead -- also O(1) membership per row.
    ignored_names = set(ignore) | set(params.ANTITARGET_ALIASES)
    gene_probes = collections.defaultdict(lambda: collections.defaultdict(list))
    for row in all_probes:
        gname = str(row.gene)
        if gname not in ignored_names:
            gene_probes[row.chromosome][gname].append(row)
    intervals = collections.defaultdict(list)
    for chrom, gp in gene_probes.items():
        for gene, probes in gp.items():
            starts = sorted(row.start for row in probes)
            end = max(row.end for row in probes)
            intervals[chrom].append((gene, starts, end))
        # NOTE: the sort key is the whole list of starts (lexicographic
        # list comparison); effectively orders genes by first probe start.
        intervals[chrom].sort(key=lambda gse: gse[1])
    return intervals
@pytest.mark.router
@pytest.mark.asyncio
class TestVerify:
    """Tests for the POST /verify route.

    NOTE(review): the decompiled source was syntactically invalid -- the
    class decorators had been reduced to bare ``.router`` / ``.asyncio``
    lines and the fixture parameter lists were garbled
    (``test_app_client: user_manager: UserManagerMock``). Reconstructed as
    valid syntax here; the dropped annotation on ``test_app_client`` was
    removed rather than guessed -- confirm its type against conftest.py.
    """

    async def test_empty_body(self, test_app_client, user_manager: UserManagerMock):
        response = await test_app_client.post('/verify', json={})
        assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
        assert user_manager.verify.called is False

    async def test_invalid_verify_token(self, test_app_client, user_manager: UserManagerMock):
        user_manager.verify.side_effect = InvalidVerifyToken()
        response = await test_app_client.post('/verify', json={'token': 'foo'})
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        data = cast(Dict[str, Any], response.json())
        assert data['detail'] == ErrorCode.VERIFY_USER_BAD_TOKEN

    async def test_user_not_exists(self, test_app_client, user_manager: UserManagerMock):
        user_manager.verify.side_effect = UserNotExists()
        response = await test_app_client.post('/verify', json={'token': 'foo'})
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        data = cast(Dict[str, Any], response.json())
        assert data['detail'] == ErrorCode.VERIFY_USER_BAD_TOKEN

    async def test_user_already_verified(self, test_app_client, user_manager: UserManagerMock):
        user_manager.verify.side_effect = UserAlreadyVerified()
        response = await test_app_client.post('/verify', json={'token': 'foo'})
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        data = cast(Dict[str, Any], response.json())
        assert data['detail'] == ErrorCode.VERIFY_USER_ALREADY_VERIFIED

    async def test_success(self, async_method_mocker: AsyncMethodMocker, test_app_client, user_manager: UserManagerMock, user: UserModel):
        async_method_mocker(user_manager, 'verify', return_value=user)
        response = await test_app_client.post('/verify', json={'token': 'foo'})
        assert response.status_code == status.HTTP_200_OK
        data = cast(Dict[str, Any], response.json())
        assert data['id'] == str(user.id)

    async def test_verify_namespace(self, get_user_manager):
        verify_router = get_verify_router(get_user_manager, User)
        app = FastAPI()
        app.include_router(verify_router)
        assert app.url_path_for('verify:verify') == '/verify'
def default_value(field: dataclasses.Field) -> Union[(T, _MISSING_TYPE)]:
    """Return the field's default value, calling its factory if it has one.

    Returns dataclasses.MISSING when the field has neither a default nor
    a default_factory.
    """
    if field.default is not dataclasses.MISSING:
        return field.default
    if field.default_factory is not dataclasses.MISSING:
        return field.default_factory()
    return dataclasses.MISSING
class CTypesBackend(object):
    """cffi backend implemented on top of the stdlib ctypes module."""
    # Mapping of C primitive type names to their ctypes equivalents.
    PRIMITIVE_TYPES = {'char': ctypes.c_char, 'short': ctypes.c_short, 'int': ctypes.c_int, 'long': ctypes.c_long, 'long long': ctypes.c_longlong, 'signed char': ctypes.c_byte, 'unsigned char': ctypes.c_ubyte, 'unsigned short': ctypes.c_ushort, 'unsigned int': ctypes.c_uint, 'unsigned long': ctypes.c_ulong, 'unsigned long long': ctypes.c_ulonglong, 'float': ctypes.c_float, 'double': ctypes.c_double, '_Bool': ctypes.c_bool}
    # Register the unsigned fixed-width aliases (uint8_t ... uintptr_t,
    # size_t) by probing each ctypes type's size on this platform.
    for _name in ['unsigned long long', 'unsigned long', 'unsigned int', 'unsigned short', 'unsigned char']:
        _size = ctypes.sizeof(PRIMITIVE_TYPES[_name])
        PRIMITIVE_TYPES[('uint%d_t' % (8 * _size))] = PRIMITIVE_TYPES[_name]
        if (_size == ctypes.sizeof(ctypes.c_void_p)):
            PRIMITIVE_TYPES['uintptr_t'] = PRIMITIVE_TYPES[_name]
        if (_size == ctypes.sizeof(ctypes.c_size_t)):
            PRIMITIVE_TYPES['size_t'] = PRIMITIVE_TYPES[_name]
    # Same for the signed aliases (int8_t ... intptr_t, ptrdiff_t, ssize_t).
    for _name in ['long long', 'long', 'int', 'short', 'signed char']:
        _size = ctypes.sizeof(PRIMITIVE_TYPES[_name])
        PRIMITIVE_TYPES[('int%d_t' % (8 * _size))] = PRIMITIVE_TYPES[_name]
        if (_size == ctypes.sizeof(ctypes.c_void_p)):
            PRIMITIVE_TYPES['intptr_t'] = PRIMITIVE_TYPES[_name]
            PRIMITIVE_TYPES['ptrdiff_t'] = PRIMITIVE_TYPES[_name]
        if (_size == ctypes.sizeof(ctypes.c_size_t)):
            PRIMITIVE_TYPES['ssize_t'] = PRIMITIVE_TYPES[_name]
def __init__(self):
    # dlopen() flag constants. ctypes offers no lazy/now binding control,
    # so RTLD_LAZY and RTLD_NOW are accepted but map to 0 (no effect).
    self.RTLD_LAZY = 0
    self.RTLD_NOW = 0
    self.RTLD_GLOBAL = ctypes.RTLD_GLOBAL
    self.RTLD_LOCAL = ctypes.RTLD_LOCAL
def set_ffi(self, ffi):
    # Keep a back-reference to the FFI instance using this backend.
    self.ffi = ffi
def _get_types(self):
    # Base classes the ffi core uses for isinstance checks on cdata/ctype.
    return (CTypesData, CTypesType)
def load_library(self, path, flags=0):
    """Open the shared library at *path* via ctypes and wrap it for cffi."""
    handle = ctypes.CDLL(path, flags)
    return CTypesLibrary(self, handle)
def new_void_type(self):
    """Return the wrapper class representing the C ``void`` type."""
    class CTypesVoid(CTypesData):
        __slots__ = []
        _reftypename = 'void &'
        # NOTE: these take no 'self'; _fix_class() post-processes them.
        def _from_ctypes(novalue):
            return None
        def _to_ctypes(novalue):
            # Only None is a valid 'void' value.
            if (novalue is not None):
                raise TypeError(('None expected, got %s object' % (type(novalue).__name__,)))
            return None
    CTypesVoid._fix_class()
    return CTypesVoid
def new_primitive_type(self, name):
    """Build and return the wrapper class for the C primitive type *name*.

    The class body below is constructed conditionally: the 'kind' of the
    primitive (int/byte/bool/char/float) selects which cast/convert
    methods get defined. _fix_class() post-processes the selfless methods.
    """
    # wchar_t is not representable with the plain ctypes primitives here.
    if (name == 'wchar_t'):
        raise NotImplementedError(name)
    ctype = self.PRIMITIVE_TYPES[name]
    if (name == 'char'):
        kind = 'char'
    elif (name in ('float', 'double')):
        kind = 'float'
    else:
        if (name in ('signed char', 'unsigned char')):
            kind = 'byte'
        elif (name == '_Bool'):
            kind = 'bool'
        else:
            kind = 'int'
    # Probe signedness: ctype(-1).value stays -1 only for signed types.
    is_signed = (ctype((- 1)).value == (- 1))
    def _cast_source_to_int(source):
        # Coerce any accepted cast source (number, cdata, bytes, None) to int.
        if isinstance(source, (int, long, float)):
            source = int(source)
        elif isinstance(source, CTypesData):
            source = source._cast_to_integer()
        elif isinstance(source, bytes):
            source = ord(source)
        elif (source is None):
            source = 0
        else:
            raise TypeError(('bad type for cast to %r: %r' % (CTypesPrimitive, type(source).__name__)))
        return source
    kind1 = kind
    class CTypesPrimitive(CTypesGenericPrimitive):
        __slots__ = ['_value']
        _ctype = ctype
        _reftypename = ('%s &' % name)
        kind = kind1
        def __init__(self, value):
            self._value = value
        def _create_ctype_obj(init):
            if (init is None):
                return ctype()
            return ctype(CTypesPrimitive._to_ctypes(init))
        # _cast_from/__int__ variants, chosen by the primitive's kind:
        if ((kind == 'int') or (kind == 'byte')):
            def _cast_from(cls, source):
                source = _cast_source_to_int(source)
                # Wrap the value to the target type's range.
                source = ctype(source).value
                return cls(source)
            def __int__(self):
                return self._value
        if (kind == 'bool'):
            def _cast_from(cls, source):
                if (not isinstance(source, (int, long, float))):
                    source = _cast_source_to_int(source)
                return cls(bool(source))
            def __int__(self):
                return self._value
        if (kind == 'char'):
            def _cast_from(cls, source):
                source = _cast_source_to_int(source)
                source = bytechr((source & 255))
                return cls(source)
            def __int__(self):
                return ord(self._value)
        if (kind == 'float'):
            def _cast_from(cls, source):
                if isinstance(source, float):
                    pass
                elif isinstance(source, CTypesGenericPrimitive):
                    if hasattr(source, '__float__'):
                        source = float(source)
                    else:
                        source = int(source)
                else:
                    source = _cast_source_to_int(source)
                source = ctype(source).value
                return cls(source)
            def __int__(self):
                return int(self._value)
            def __float__(self):
                return self._value
        _cast_to_integer = __int__
        # _to_ctypes variants: Python value -> raw ctypes-compatible value.
        if ((kind == 'int') or (kind == 'byte') or (kind == 'bool')):
            def _to_ctypes(x):
                if (not isinstance(x, (int, long))):
                    if isinstance(x, CTypesData):
                        x = int(x)
                    else:
                        raise TypeError(('integer expected, got %s' % type(x).__name__))
                # Round-trip check: detect out-of-range values.
                if (ctype(x).value != x):
                    if ((not is_signed) and (x < 0)):
                        raise OverflowError(('%s: negative integer' % name))
                    else:
                        raise OverflowError(('%s: integer out of bounds' % name))
                return x
        if (kind == 'char'):
            def _to_ctypes(x):
                if (isinstance(x, bytes) and (len(x) == 1)):
                    return x
                if isinstance(x, CTypesPrimitive):
                    return x._value
                raise TypeError(('character expected, got %s' % type(x).__name__))
        if (kind == 'float'):
            def _to_ctypes(x):
                if (not isinstance(x, (int, long, float, CTypesData))):
                    raise TypeError(('float expected, got %s' % type(x).__name__))
                return ctype(x).value
        def _from_ctypes(value):
            return getattr(value, 'value', value)
        def _initialize(blob, init):
            blob.value = CTypesPrimitive._to_ctypes(init)
        if (kind == 'char'):
            def _to_string(self, maxlen):
                return self._value
        if (kind == 'byte'):
            def _to_string(self, maxlen):
                return chr((self._value & 255))
    CTypesPrimitive._fix_class()
    return CTypesPrimitive
def new_pointer_type(self, BItem):
    """Build and return the wrapper class for a pointer to *BItem*."""
    getbtype = self.ffi._get_cached_btype
    # Specialize the pointer class on the pointed-to type.
    if (BItem is getbtype(model.PrimitiveType('char'))):
        kind = 'charp'
    elif (BItem in (getbtype(model.PrimitiveType('signed char')), getbtype(model.PrimitiveType('unsigned char')))):
        kind = 'bytep'
    elif (BItem is getbtype(model.void_type)):
        kind = 'voidp'
    else:
        kind = 'generic'
    class CTypesPtr(CTypesGenericPtr):
        __slots__ = ['_own']
        if (kind == 'charp'):
            __slots__ += ['__as_strbuf']
        _BItem = BItem
        if hasattr(BItem, '_ctype'):
            _ctype = ctypes.POINTER(BItem._ctype)
            _bitem_size = ctypes.sizeof(BItem._ctype)
        else:
            # Incomplete item type: fall back to an untyped void pointer.
            _ctype = ctypes.c_void_p
        if issubclass(BItem, CTypesGenericArray):
            _reftypename = BItem._get_c_name('(* &)')
        else:
            _reftypename = BItem._get_c_name(' * &')
        def __init__(self, init):
            ctypeobj = BItem._create_ctype_obj(init)
            if (kind == 'charp'):
                # Keep the NUL-terminated buffer alive via the instance.
                self.__as_strbuf = ctypes.create_string_buffer((ctypeobj.value + b'\x00'))
                self._as_ctype_ptr = ctypes.cast(self.__as_strbuf, self._ctype)
            else:
                self._as_ctype_ptr = ctypes.pointer(ctypeobj)
            self._address = ctypes.cast(self._as_ctype_ptr, ctypes.c_void_p).value
            self._own = True
        def __add__(self, other):
            # Pointer arithmetic in units of the item size.
            if isinstance(other, (int, long)):
                return self._new_pointer_at((self._address + (other * self._bitem_size)))
            else:
                return NotImplemented
        def __sub__(self, other):
            if isinstance(other, (int, long)):
                return self._new_pointer_at((self._address - (other * self._bitem_size)))
            elif (type(self) is type(other)):
                # Pointer difference, expressed as a number of items.
                return ((self._address - other._address) // self._bitem_size)
            else:
                return NotImplemented
        def __getitem__(self, index):
            # An owning pointer wraps a single object: only index 0 is valid.
            if (getattr(self, '_own', False) and (index != 0)):
                raise IndexError
            return BItem._from_ctypes(self._as_ctype_ptr[index])
        def __setitem__(self, index, value):
            self._as_ctype_ptr[index] = BItem._to_ctypes(value)
        if ((kind == 'charp') or (kind == 'voidp')):
            def _arg_to_ctypes(cls, *value):
                # Accept plain bytes as arguments for char*/void* parameters.
                if (value and isinstance(value[0], bytes)):
                    return ctypes.c_char_p(value[0])
                else:
                    return super(CTypesPtr, cls)._arg_to_ctypes(*value)
        if ((kind == 'charp') or (kind == 'bytep')):
            def _to_string(self, maxlen):
                # Read up to maxlen bytes, stopping at the first NUL.
                if (maxlen < 0):
                    maxlen = sys.maxsize
                p = ctypes.cast(self._as_ctype_ptr, ctypes.POINTER(ctypes.c_char))
                n = 0
                while ((n < maxlen) and (p[n] != b'\x00')):
                    n += 1
                return b''.join([p[i] for i in range(n)])
        def _get_own_repr(self):
            if getattr(self, '_own', False):
                return ('owning %d bytes' % (ctypes.sizeof(self._as_ctype_ptr.contents),))
            return super(CTypesPtr, self)._get_own_repr()
    # void* and char* accept automatic casts from compatible cdata.
    if ((BItem is self.ffi._get_cached_btype(model.void_type)) or (BItem is self.ffi._get_cached_btype(model.PrimitiveType('char')))):
        CTypesPtr._automatic_casts = True
    CTypesPtr._fix_class()
    return CTypesPtr
def new_array_type(self, CTypesPtr, length):
    """Build and return the wrapper class for an array type.

    CTypesPtr: the wrapper class for a pointer to the item type.
    length: number of items, or None for an open-ended array ``[]``.
    """
    if (length is None):
        brackets = ' &[]'
    else:
        brackets = (' &[%d]' % length)
    BItem = CTypesPtr._BItem
    getbtype = self.ffi._get_cached_btype
    # char/byte arrays additionally support conversion to byte strings.
    if (BItem is getbtype(model.PrimitiveType('char'))):
        kind = 'char'
    elif (BItem in (getbtype(model.PrimitiveType('signed char')), getbtype(model.PrimitiveType('unsigned char')))):
        kind = 'byte'
    else:
        kind = 'generic'
    class CTypesArray(CTypesGenericArray):
        __slots__ = ['_blob', '_own']
        if (length is not None):
            _ctype = (BItem._ctype * length)
        else:
            # Variable-length array: the concrete ctype is set per instance.
            __slots__.append('_ctype')
        _reftypename = BItem._get_c_name(brackets)
        _declared_length = length
        _CTPtr = CTypesPtr
        def __init__(self, init):
            if (length is None):
                # Determine the instance length from the initializer.
                if isinstance(init, (int, long)):
                    len1 = init
                    init = None
                elif ((kind == 'char') and isinstance(init, bytes)):
                    # +1 for the terminating NUL byte.
                    len1 = (len(init) + 1)
                else:
                    init = tuple(init)
                    len1 = len(init)
                self._ctype = (BItem._ctype * len1)
            self._blob = self._ctype()
            self._own = True
            if (init is not None):
                self._initialize(self._blob, init)
        def _initialize(blob, init):
            if isinstance(init, bytes):
                # Split bytes into 1-byte items for per-item initialization.
                init = [init[i:(i + 1)] for i in range(len(init))]
            else:
                init = tuple(init)
            if (len(init) > len(blob)):
                raise IndexError('too many initializers')
            addr = ctypes.cast(blob, ctypes.c_void_p).value
            PTR = ctypes.POINTER(BItem._ctype)
            itemsize = ctypes.sizeof(BItem._ctype)
            for (i, value) in enumerate(init):
                p = ctypes.cast((addr + (i * itemsize)), PTR)
                BItem._initialize(p.contents, value)
        def __len__(self):
            return len(self._blob)
        def __getitem__(self, index):
            if (not (0 <= index < len(self._blob))):
                raise IndexError
            return BItem._from_ctypes(self._blob[index])
        def __setitem__(self, index, value):
            if (not (0 <= index < len(self._blob))):
                raise IndexError
            self._blob[index] = BItem._to_ctypes(value)
        if ((kind == 'char') or (kind == 'byte')):
            def _to_string(self, maxlen):
                # Read up to maxlen bytes, stopping at the first NUL.
                if (maxlen < 0):
                    maxlen = len(self._blob)
                p = ctypes.cast(self._blob, ctypes.POINTER(ctypes.c_char))
                n = 0
                while ((n < maxlen) and (p[n] != b'\x00')):
                    n += 1
                return b''.join([p[i] for i in range(n)])
        def _get_own_repr(self):
            if getattr(self, '_own', False):
                return ('owning %d bytes' % (ctypes.sizeof(self._blob),))
            return super(CTypesArray, self)._get_own_repr()
        def _convert_to_address(self, BClass):
            # Arrays decay to a pointer to their first item.
            if ((BClass in (CTypesPtr, None)) or BClass._automatic_casts):
                return ctypes.addressof(self._blob)
            else:
                return CTypesData._convert_to_address(self, BClass)
        def _from_ctypes(ctypes_array):
            # Wrap an existing ctypes array without copying or owning it.
            self = CTypesArray.__new__(CTypesArray)
            self._blob = ctypes_array
            return self
        def _arg_to_ctypes(value):
            return CTypesPtr._arg_to_ctypes(value)
        def __add__(self, other):
            # array + n gives a pointer to item n.
            if isinstance(other, (int, long)):
                return CTypesPtr._new_pointer_at((ctypes.addressof(self._blob) + (other * ctypes.sizeof(BItem._ctype))))
            else:
                return NotImplemented
        def _cast_from(cls, source):
            raise NotImplementedError(('casting to %r' % (cls._get_c_name(),)))
    CTypesArray._fix_class()
    return CTypesArray
def _new_struct_or_union(self, kind, name, base_ctypes_class):
    """Create an initially-empty wrapper class for a struct or union.

    kind: 'struct' or 'union'; base_ctypes_class: ctypes.Structure or
    ctypes.Union. The fields are attached later, when
    complete_struct_or_union() is called.
    """
    class struct_or_union(base_ctypes_class):
        pass
    struct_or_union.__name__ = ('%s_%s' % (kind, name))
    kind1 = kind
    class CTypesStructOrUnion(CTypesBaseStructOrUnion):
        __slots__ = ['_blob']
        _ctype = struct_or_union
        _reftypename = ('%s &' % (name,))
        # Store the kind under both names in the class namespace.
        _kind = kind = kind1
    CTypesStructOrUnion._fix_class()
    return CTypesStructOrUnion
def new_struct_type(self, name):
    # Delegate to the common struct/union factory.
    return self._new_struct_or_union('struct', name, ctypes.Structure)
def new_union_type(self, name):
    # Delegate to the common struct/union factory.
    return self._new_struct_or_union('union', name, ctypes.Union)
def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp, totalsize=(- 1), totalalignment=(- 1), sflags=0):
    """Attach the given fields to a struct/union wrapper created earlier.

    fields: list of (fname, BField, bitsize) triples; bitsize < 0 means
    "not a bit field". Also installs field properties on the wrapper and
    on the corresponding pointer type.
    """
    # Explicit layout (from verify()) is not supported by this backend.
    if ((totalsize >= 0) or (totalalignment >= 0)):
        raise NotImplementedError('the ctypes backend of CFFI does not support structures completed by verify(); please compile and install the _cffi_backend module.')
    struct_or_union = CTypesStructOrUnion._ctype
    fnames = [fname for (fname, BField, bitsize) in fields]
    btypes = [BField for (fname, BField, bitsize) in fields]
    bitfields = [bitsize for (fname, BField, bitsize) in fields]
    bfield_types = {}
    cfields = []
    for (fname, BField, bitsize) in fields:
        if (bitsize < 0):
            cfields.append((fname, BField._ctype))
            bfield_types[fname] = BField
        else:
            # Bit field: record with Ellipsis as a placeholder type.
            cfields.append((fname, BField._ctype, bitsize))
            bfield_types[fname] = Ellipsis
    if (sflags & 8):
        # Flag bit 8 requests a packed layout (no padding).
        struct_or_union._pack_ = 1
    struct_or_union._fields_ = cfields
    CTypesStructOrUnion._bfield_types = bfield_types
    def _create_ctype_obj(init):
        result = struct_or_union()
        if (init is not None):
            initialize(result, init)
        return result
    CTypesStructOrUnion._create_ctype_obj = _create_ctype_obj
    def initialize(blob, init):
        # Accepts a dict {fieldname: value} or a positional sequence.
        if is_union:
            if (len(init) > 1):
                raise ValueError(('union initializer: %d items given, but only one supported (use a dict if needed)' % (len(init),)))
        if (not isinstance(init, dict)):
            if isinstance(init, (bytes, unicode)):
                raise TypeError('union initializer: got a str')
            init = tuple(init)
            if (len(init) > len(fnames)):
                raise ValueError(('too many values for %s initializer' % CTypesStructOrUnion._get_c_name()))
            init = dict(zip(fnames, init))
        addr = ctypes.addressof(blob)
        for (fname, value) in init.items():
            (BField, bitsize) = name2fieldtype[fname]
            assert (bitsize < 0), 'not implemented: initializer with bit fields'
            offset = CTypesStructOrUnion._offsetof(fname)
            PTR = ctypes.POINTER(BField._ctype)
            p = ctypes.cast((addr + offset), PTR)
            BField._initialize(p.contents, value)
    is_union = (CTypesStructOrUnion._kind == 'union')
    name2fieldtype = dict(zip(fnames, zip(btypes, bitfields)))
    # Install a property per field on the wrapper class. The default
    # arguments below bind the per-field values at definition time.
    for (fname, BField, bitsize) in fields:
        if (fname == ''):
            raise NotImplementedError('nested anonymous structs/unions')
        if hasattr(CTypesStructOrUnion, fname):
            raise ValueError(('the field name %r conflicts in the ctypes backend' % fname))
        if (bitsize < 0):
            # Regular field: access through a typed pointer at the offset.
            def getter(self, fname=fname, BField=BField, offset=CTypesStructOrUnion._offsetof(fname), PTR=ctypes.POINTER(BField._ctype)):
                addr = ctypes.addressof(self._blob)
                p = ctypes.cast((addr + offset), PTR)
                return BField._from_ctypes(p.contents)
            def setter(self, value, fname=fname, BField=BField):
                setattr(self._blob, fname, BField._to_ctypes(value))
            if issubclass(BField, CTypesGenericArray):
                # Array fields cannot be assigned to as a whole.
                setter = None
                if (BField._declared_length == 0):
                    # Zero-length (flexible) array: return a pointer instead.
                    def getter(self, fname=fname, BFieldPtr=BField._CTPtr, offset=CTypesStructOrUnion._offsetof(fname), PTR=ctypes.POINTER(BField._ctype)):
                        addr = ctypes.addressof(self._blob)
                        p = ctypes.cast((addr + offset), PTR)
                        return BFieldPtr._from_ctypes(p)
        else:
            # Bit field: go through ctypes' own attribute access.
            def getter(self, fname=fname, BField=BField):
                return BField._from_ctypes(getattr(self._blob, fname))
            def setter(self, value, fname=fname, BField=BField):
                value = BField._to_ctypes(value)
                oldvalue = getattr(self._blob, fname)
                setattr(self._blob, fname, value)
                # Detect truncation by reading the value back.
                if (value != getattr(self._blob, fname)):
                    setattr(self._blob, fname, oldvalue)
                    raise OverflowError('value too large for bitfield')
        setattr(CTypesStructOrUnion, fname, property(getter, setter))
    # Also expose the fields on the pointer-to-struct type (p.field reads
    # p[0].field, mirroring C's '->' access).
    CTypesPtr = self.ffi._get_cached_btype(model.PointerType(tp))
    for fname in fnames:
        if hasattr(CTypesPtr, fname):
            raise ValueError(('the field name %r conflicts in the ctypes backend' % fname))
        def getter(self, fname=fname):
            return getattr(self[0], fname)
        def setter(self, value, fname=fname):
            setattr(self[0], fname, value)
        setattr(CTypesPtr, fname, property(getter, setter))
def new_function_type(self, BArgs, BResult, has_varargs):
    """Build and return the wrapper class for a function-pointer type.

    BArgs: wrapper classes of the argument types; BResult: wrapper class
    of the result type; has_varargs: True for '...'-style functions.
    """
    nameargs = [BArg._get_c_name() for BArg in BArgs]
    if has_varargs:
        nameargs.append('...')
    nameargs = ', '.join(nameargs)
    class CTypesFunctionPtr(CTypesGenericPtr):
        __slots__ = ['_own_callback', '_name']
        _ctype = ctypes.CFUNCTYPE(getattr(BResult, '_ctype', None), *[BArg._ctype for BArg in BArgs], use_errno=True)
        _reftypename = BResult._get_c_name(('(* &)(%s)' % (nameargs,)))
        def __init__(self, init, error=None):
            # Turn the Python callable 'init' into a C callback; 'error'
            # is the value returned when the callable raises.
            import traceback
            assert (not has_varargs), 'varargs not supported for callbacks'
            if (getattr(BResult, '_ctype', None) is not None):
                error = BResult._from_ctypes(BResult._create_ctype_obj(error))
            else:
                error = None
            def callback(*args):
                args2 = []
                for (arg, BArg) in zip(args, BArgs):
                    args2.append(BArg._from_ctypes(arg))
                try:
                    res2 = init(*args2)
                    res2 = BResult._to_ctypes(res2)
                except:
                    # Never let an exception propagate into C code: print
                    # the traceback and return the 'error' placeholder.
                    traceback.print_exc()
                    res2 = error
                if issubclass(BResult, CTypesGenericPtr):
                    # Pointer results are passed back as raw void* values.
                    if res2:
                        res2 = ctypes.cast(res2, ctypes.c_void_p).value
                    else:
                        res2 = None
                return res2
            if issubclass(BResult, CTypesGenericPtr):
                callback_ctype = ctypes.CFUNCTYPE(ctypes.c_void_p, *[BArg._ctype for BArg in BArgs], use_errno=True)
            else:
                callback_ctype = CTypesFunctionPtr._ctype
            self._as_ctype_ptr = callback_ctype(callback)
            self._address = ctypes.cast(self._as_ctype_ptr, ctypes.c_void_p).value
            self._own_callback = init
        def _initialize(ctypes_ptr, value):
            if value:
                raise NotImplementedError('ctypes backend: not supported: initializers for function pointers')
        def __repr__(self):
            # Splice the function's name (if known) into the type string.
            c_name = getattr(self, '_name', None)
            if c_name:
                i = self._reftypename.index('(* &)')
                if (self._reftypename[(i - 1)] not in ' )*'):
                    c_name = (' ' + c_name)
                c_name = self._reftypename.replace('(* &)', c_name)
            return CTypesData.__repr__(self, c_name)
        def _get_own_repr(self):
            if (getattr(self, '_own_callback', None) is not None):
                return ('calling %r' % (self._own_callback,))
            return super(CTypesFunctionPtr, self)._get_own_repr()
        def __call__(self, *args):
            if has_varargs:
                # Split fixed arguments from the variadic tail.
                assert (len(args) >= len(BArgs))
                extraargs = args[len(BArgs):]
                args = args[:len(BArgs)]
            else:
                assert (len(args) == len(BArgs))
            ctypes_args = []
            for (arg, BArg) in zip(args, BArgs):
                ctypes_args.append(BArg._arg_to_ctypes(arg))
            if has_varargs:
                for (i, arg) in enumerate(extraargs):
                    if (arg is None):
                        ctypes_args.append(ctypes.c_void_p(0))
                        continue
                    # Variadic arguments must already be cdata objects.
                    if (not isinstance(arg, CTypesData)):
                        raise TypeError(('argument %d passed in the variadic part needs to be a cdata object (got %s)' % (((1 + len(BArgs)) + i), type(arg).__name__)))
                    ctypes_args.append(arg._arg_to_ctypes(arg))
            result = self._as_ctype_ptr(*ctypes_args)
            return BResult._from_ctypes(result)
    CTypesFunctionPtr._fix_class()
    return CTypesFunctionPtr
def new_enum_type(self, name, enumerators, enumvalues, CTypesInt):
    """Build the CTypesEnum class for the C enum *name*.

    *enumerators* and *enumvalues* are parallel sequences; the class
    behaves like the integer type *CTypesInt* but renders known values
    by their enumerator name.
    """
    assert isinstance(name, str)
    # Reverse map from value to enumerator name.  setdefault makes the
    # FIRST enumerator win when several share one value (the original
    # achieved this by zipping the reversed sequences into a dict).
    value_to_name = {}
    for enumerator, value in zip(enumerators, enumvalues):
        value_to_name.setdefault(value, enumerator)
    class CTypesEnum(CTypesInt):
        __slots__ = []
        _reftypename = '%s &' % name
        def _get_own_repr(self):
            # e.g. "2: RED"; unknown values fall back to the bare number
            v = self._value
            if v in value_to_name:
                return '%d: %s' % (v, value_to_name[v])
            return str(v)
        def _to_string(self, maxlen):
            # ffi.string() on an enum yields the enumerator's name
            v = self._value
            if v in value_to_name:
                return value_to_name[v]
            return str(v)
    CTypesEnum._fix_class()
    return CTypesEnum
def get_errno(self):
    """Return the errno value saved by the last ctypes call (use_errno)."""
    saved = ctypes.get_errno()
    return saved
def set_errno(self, value):
    """Store *value* into the errno slot that ctypes maintains."""
    ctypes.set_errno(value)
def string(self, b, maxlen=-1):
    """Return the Python string obtained from the cdata *b*, reading at
    most *maxlen* items (-1 means no limit)."""
    # every cdata class knows how to stringify its own kind
    to_string = b._to_string
    return to_string(maxlen)
def buffer(self, bptr, size=-1):
    """ffi.buffer() is not available with the ctypes backend; always raises."""
    raise NotImplementedError('buffer() with ctypes backend')
def sizeof(self, cdata_or_BType):
    """Return the size in bytes of a cdata instance or of a BType class.

    Instances are asked individually (their size may depend on how they
    were allocated, e.g. open-length arrays); classes report their
    static size.
    """
    if isinstance(cdata_or_BType, CTypesData):
        return cdata_or_BType._get_size_of_instance()
    assert issubclass(cdata_or_BType, CTypesData)
    return cdata_or_BType._get_size()
def alignof(self, BType):
    """Return the natural alignment, in bytes, of the given BType class."""
    assert issubclass(BType, CTypesData)
    alignment = BType._alignment()
    return alignment
def newp(self, BType, source):
    """Allocate a new cdata of type *BType*, initialized from *source*."""
    if not issubclass(BType, CTypesData):
        # only BType classes produced by this backend are acceptable
        raise TypeError
    return BType._newp(source)
def cast(self, BType, source):
    """Convert *source* to a cdata of type *BType*, like a C cast."""
    # delegation only: each BType class implements its own casting rules
    return BType._cast_from(source)
def callback(self, BType, source, error):
    """Wrap the Python callable *source* as a function-pointer cdata of
    type *BType*; *error* is the value returned if *source* raises."""
    # instantiating a function-pointer BType builds the C callback
    return BType(source, error)
# ffi.typeof(cdata) in this backend is simply type(): every BType is a
# Python class and every cdata is an instance of its BType.
typeof = type
def getcname(self, BType, replace_with):
    """Return the C type name of *BType*, with the '&' placeholder
    replaced by *replace_with* (e.g. a variable name)."""
    c_name = BType._get_c_name(replace_with)
    return c_name
def typeoffsetof(self, BType, fieldname, num=0):
    """Return (BFieldType, byte_offset) for accessing *fieldname* in *BType*.

    *fieldname* is either a struct/union field name (str) or an integer
    index into an array/pointer type.
    """
    if isinstance(fieldname, str):
        # Field lookup: a pointer (when num == 0) degrades to the
        # struct/union it points to.
        if num == 0 and issubclass(BType, CTypesGenericPtr):
            BType = BType._BItem
        if not issubclass(BType, CTypesBaseStructOrUnion):
            raise TypeError('expected a struct or union ctype')
        BField = BType._bfield_types[fieldname]
        if BField is Ellipsis:
            raise TypeError('not supported for bitfields')
        return (BField, BType._offsetof(fieldname))
    # NOTE(review): 'long' here is presumably the py2/py3 compat alias
    # defined elsewhere in this file — confirm it exists on Python 3.
    if isinstance(fieldname, (int, long)):
        # Index lookup: arrays decay to their pointer type first.
        if issubclass(BType, CTypesGenericArray):
            BType = BType._CTPtr
        if not issubclass(BType, CTypesGenericPtr):
            raise TypeError('expected an array or ptr ctype')
        BItem = BType._BItem
        offset = BItem._get_size() * fieldname
        if offset > sys.maxsize:
            raise OverflowError
        return (BItem, offset)
    raise TypeError(type(fieldname))
def rawaddressof(self, BTypePtr, cdata, offset=None):
    """Return a cdata of type *BTypePtr* pointing at *cdata*, shifted by
    an optional byte *offset*.

    Accepts a struct/union cdata (takes its address), a pointer to a
    struct/union (requires an explicit offset), or an array cdata.
    """
    if isinstance(cdata, CTypesBaseStructOrUnion):
        ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata))
    elif isinstance(cdata, CTypesGenericPtr):
        if offset is None or not issubclass(type(cdata)._BItem, CTypesBaseStructOrUnion):
            raise TypeError('unexpected cdata type')
        ptr = type(cdata)._to_ctypes(cdata)
    elif isinstance(cdata, CTypesGenericArray):
        ptr = type(cdata)._to_ctypes(cdata)
    else:
        raise TypeError("expected a <cdata 'struct-or-union'>")
    if offset:
        # shift the pointer by raw integer arithmetic on its address
        base_address = ctypes.cast(ptr, ctypes.c_void_p).value
        ptr = ctypes.cast(ctypes.c_void_p(base_address + offset), type(ptr))
    return BTypePtr._from_ctypes(ptr)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.