text string | size int64 | token_count int64 |
|---|---|---|
from django.forms import formset_factory
from django.utils import timezone
from django.shortcuts import get_object_or_404
import bleach
import json
from .forms import CreatePowerForm, make_enhancement_form, make_drawback_form, make_parameter_form, \
SystemFieldRollForm, SystemFieldTextForm, MIND_, BODY_, PARRY_
from .models import Enhancement_Instance, Drawback_Instance, Power, DICE_SYSTEM, Enhancement, Drawback, \
Power_Param, SystemFieldText, SystemFieldRoll, SystemFieldTextInstance, SystemFieldRollInstance, \
Parameter_Value, Base_Power_System, Power_Full, CREATION_REASON, PowerTutorial
from characters.models import Roll, Attribute, Ability, NO_PARRY_INFO, REACTION, THROWN
def get_create_power_context_from_base(base_power, character=None):
    """Build the template context for creating a brand-new Power from a Base_Power.

    Assembles the primary power form, one formset per available enhancement,
    drawback, and parameter, the system text/roll field formsets, and a
    JSON-serialized map of modifier prerequisites for the front end.

    :param base_power: the Base_Power the new power derives from.
    :param character: optional Character the power is being made for; when
        given, that character's unspent rewards are serialized into the context.
    :return: dict of context variables for the power-creation template.
    """
    system = base_power.get_system()
    primary_form = CreatePowerForm(base_power, initial={'system': system.system_text})
    enhancement_forms = []
    # One formset (with a single blank extra form) per enhancement on this base.
    for enhancement in Enhancement.objects.filter(pk__in=base_power.enhancements.all()):
        enhancement_forms.append(formset_factory(make_enhancement_form(enhancement), extra = 1)())
    drawback_forms = []
    for drawback in Drawback.objects.filter(pk__in=base_power.drawbacks.all()):
        drawback_forms.append(formset_factory(make_drawback_form(drawback), extra = 1)())
    parameter_forms = []
    for parameter in Power_Param.objects.filter(relevant_base_power=base_power).all():
        parameter_forms.append(formset_factory(make_parameter_form(parameter))())
    roll_fields_formset = _get_system_roll_field_formset(system)
    text_fields_formset = _get_system_text_field_formset(system)
    # NOTE(review): `system` is re-fetched here after the formsets above were
    # built from base_power.get_system(); presumably both resolve to the same
    # Base_Power_System row — confirm, otherwise 'power_system' in the context
    # could disagree with the formsets.
    system = Base_Power_System.objects.filter(dice_system=DICE_SYSTEM[1][0]).get(base_power=base_power.slug)
    requirements = _get_modifier_requirements(Enhancement.objects.filter(pk__in=base_power.enhancements.all()),
                                          Drawback.objects.filter(pk__in=base_power.drawbacks.all()))
    context = {
        'base_power': base_power,
        'power_system': system,
        'form': primary_form,
        'parameters': parameter_forms,
        'enhancements': enhancement_forms,
        'drawbacks': drawback_forms,
        'requirements_json': json.dumps(requirements),
        'character': character,
        'roll_fields': roll_fields_formset,
        'text_fields': text_fields_formset,
    }
    if character:
        unspent_rewards = []
        for reward in character.unspent_rewards().all():
            unspent_rewards.append("{} from {}".format(reward.type_text(), reward.reason_text()))
        context["unspent_rewards_json"] = json.dumps(unspent_rewards)
        # A brand-new power has no rewards spent on it yet.
        spent_rewards = []
        context["spent_rewards_json"] = json.dumps(spent_rewards)
    context = _add_tutorial_to_context(context)
    return context
def get_create_power_context_from_power(power, new=True):
    """Build the power-creation template context pre-filled from an existing Power.

    Used when revising or copying a power: every form/formset is seeded with
    the existing power's values.

    :param power: the Power revision whose values seed the forms.
    :param new: when True and the power belongs to a character, the character
        and its reward lists are added to the context.
    :return: dict of context variables for the power-creation template.
    """
    # NOTE(review): here 'system' is seeded with power.get_system() while the
    # from-base variant seeds it with system.system_text — confirm both produce
    # the same form value.
    initial = {'system': power.get_system(),
               'description': power.description,
               'flavor': power.flavor_text,
               'activation_style': power.activation_style,
               'power_name': power.name}
    if power.parent_power:
        initial['tags'] = power.parent_power.tags.all()
        initial['example_description'] = power.parent_power.example_description
    system = Base_Power_System.objects.filter(dice_system=DICE_SYSTEM[1][0]).get(base_power=power.base.slug)
    text_fields_formset = _get_text_field_formsets_for_edit(power, system)
    roll_fields_formset = _get_roll_field_formsets_for_edit(power, system)
    primary_form = CreatePowerForm(power.base,
                                   initial=initial)
    enhancement_forms = _get_enhancement_formsets_from_power(power)
    drawback_forms = _get_drawback_formsets_from_power(power)
    parameter_forms = []
    # Each parameter formset is seeded with the power's current value.
    for parameter_value in Parameter_Value.objects.filter(relevant_power=power).all():
        init = [{'level_picker': parameter_value.value}]
        parameter_forms.append(formset_factory(make_parameter_form(parameter_value.relevant_power_param), extra = 0)(initial = init))
    requirements = _get_modifier_requirements(Enhancement.objects.filter(pk__in=power.base.enhancements.all()),
                                              Drawback.objects.filter(pk__in=power.base.drawbacks.all()))
    context = {
        'base_power': power.base,
        'power_system': system,
        'form': primary_form,
        'parameters': parameter_forms,
        'enhancements': enhancement_forms,
        'drawbacks': drawback_forms,
        'requirements_json': json.dumps(requirements),
        'roll_fields': roll_fields_formset,
        'text_fields': text_fields_formset,
    }
    if power.parent_power is not None:
        if power.parent_power.character is not None and new:
            context["character"] = power.parent_power.character
            unspent_rewards = []
            for reward in power.parent_power.character.unspent_rewards().all():
                unspent_rewards.append("{} from {}".format(reward.type_text(), reward.reason_text()))
            context["unspent_rewards_json"] = json.dumps(unspent_rewards)
            spent_rewards = []
            for reward in power.parent_power.reward_list():
                spent_rewards.append("{} from {}".format(reward.type_text(), reward.reason_text()))
            context["spent_rewards_json"] = json.dumps(spent_rewards)
    context = _add_tutorial_to_context(context)
    return context
def _get_text_field_formsets_for_edit(power, system):
    """Build the system-text-field formset for editing *power*.

    Each form is seeded with the power's existing SystemFieldTextInstance
    value, or an empty string when the power has no instance for that field.
    """
    TextFieldsFormset = formset_factory(SystemFieldTextForm, extra=0)
    text_system_fields = system.systemfieldtext_set.order_by("id").all()
    instances = power.systemfieldtextinstance_set.all()
    value_by_field_id = {n.relevant_field.id: n.value for n in instances}
    return TextFieldsFormset(
        initial=[{'system_field_id': x.id,
                  'system_field': x,
                  # dict.get replaces the membership-test-then-index pattern.
                  'field_text': value_by_field_id.get(x.id, "")
                  } for x in text_system_fields],
        prefix="system_text_fields")
def _get_roll_field_formsets_for_edit(power, system):
    """Build the system-roll-field formset for editing *power*.

    Seeds each form's ability/attribute pickers from the power's existing
    SystemFieldRollInstance, or None when the power has no roll for the field.
    """
    RollFieldsFormset = formset_factory(SystemFieldRollForm, extra=0)
    roll_system_fields = system.systemfieldroll_set.order_by("id").all()
    instances = power.systemfieldrollinstance_set.all()
    roll_by_field_id = {n.relevant_field.id: n.roll for n in instances}
    initial = []
    for field in roll_system_fields:
        # Single lookup instead of three membership-test-then-index lookups.
        roll = roll_by_field_id.get(field.id)
        initial.append({
            'system_field_id': field.id,
            'system_field': field,
            'ability_roll': _get_roll_initial_ability(roll) if roll is not None else None,
            'attribute_roll': _get_roll_initial_attribute(roll) if roll is not None else None,
        })
    return RollFieldsFormset(initial=initial, prefix="system_roll_fields")
def _get_roll_initial_ability(roll):
if roll.ability:
return roll.ability.id
else:
return None
def _get_roll_initial_attribute(roll):
    """Map a Roll to the initial value for the attribute-roll form field.

    Prefers a concrete attribute id, then the special mind/body/parry
    sentinels; raises ValueError when none of them applies.
    """
    if roll.attribute:
        return roll.attribute.id
    if roll.is_mind:
        return MIND_
    if roll.is_body:
        return BODY_
    if roll.parry_type != NO_PARRY_INFO:
        return PARRY_
    raise ValueError("Unknown roll attribute")
def get_edit_power_context_from_power(og_power):
    """Context for editing an existing power: the creation context plus the
    original power itself and (when known) its owner."""
    context = get_create_power_context_from_power(og_power)
    parent = og_power.parent_power
    if parent is not None and parent.owner is not None:
        context["owner"] = parent.owner
    context["og_power"] = og_power
    return context
def create_power_for_new_edit(base_power, request, power_full):
    """Create a new Power revision on an existing Power_Full from POSTed data.

    Superusers may also update the Power_Full's tags and example description.
    Returns the saved new Power revision; note that when the posted form is
    invalid the function falls through and implicitly returns None.
    """
    power_form = CreatePowerForm(base_power, request.POST)
    if power_form.is_valid():
        old_power = power_full.latest_revision()
        if request.user.is_superuser:
            power_full.tags.set(power_form.cleaned_data["tags"])
            power_full.example_description = power_form.cleaned_data["example_description"]
            power_full.save()
        new_power = _create_power_from_post_and_base(base_power, request, power_full)
        # Classify the edit against the previous revision and record a
        # human-readable explanation of what changed.
        new_power.creation_reason = _get_power_creation_reason(new_power, old_power)
        new_power.creation_reason_expanded_text = _get_power_creation_reason_expanded_text(new_power, old_power)
        new_power.save()
        if hasattr(power_full, "character") and power_full.character:
            power_full.character.reset_attribute_bonuses()
        return new_power
def create_new_power_and_parent(base_power, request, character=None):
    """Create a brand-new Power_Full and its first Power revision from POSTed data.

    :param base_power: the Base_Power the new power derives from.
    :param request: HTTP request carrying the power-creation POST data.
    :param character: optional Character to attach the new power to.
    :return: the saved first Power revision, or None when the form is invalid
        (the form errors are printed to stdout in that case).
    """
    form = CreatePowerForm(base_power, request.POST)
    if form.is_valid():
        power_full = _create_new_full_power(power_form=form, base=base_power)
        if request.user.id:
            power_full.owner = request.user
        if character:
            power_full.character = character
        power_full.save()
        if request.user.is_superuser:
            power_full.tags.set(form.cleaned_data["tags"])
            power_full.example_description = form.cleaned_data["example_description"]
            power_full.save()
        new_power = _create_power_from_post_and_base(base_power, request, power_full)
        # The first revision is always recorded as an initial creation.
        new_power.creation_reason = CREATION_REASON[0][0]
        new_power.creation_reason_expanded_text = "Initial power creation"
        new_power.save()
        if character:
            character.reset_attribute_bonuses()
        return new_power
    else:
        print(form.errors)  # NOTE(review): consider logging instead of print
        return None
def refund_or_assign_rewards(new_power, old_power=None):
    """Reconcile gift rewards after a power edit.

    When the power's point value rose, assign that many of the character's
    unspent rewards to the new revision; when it fell, refund that many of the
    rewards spent on the old revision (keeping their character assignment).
    """
    previous_value = old_power.get_point_value() if old_power else 0
    delta = new_power.get_point_value() - previous_value
    if delta == 0:
        return
    if delta > 0:
        if new_power.parent_power.character is not None:
            unspent_gifts = new_power.parent_power.character.unspent_rewards()
            # Assign at most `delta` rewards, limited by what is available.
            assignable = min(delta, len(unspent_gifts))
            for idx in range(assignable):
                unspent_gifts[idx].assign_to_power(new_power)
    if delta < 0:
        if new_power.parent_power.character is not None and old_power:
            spent_gifts = old_power.parent_power.reward_list()
            refundable = min(-delta, len(spent_gifts))
            for idx in range(refundable):
                spent_gifts[idx].refund_keeping_character_assignment()
def _get_enhancement_formsets_from_power(power):
    """Build one formset per enhancement available on the power's base,
    pre-filled with the power's existing enhancement instances.

    A blank extra form is offered when the enhancement allows multiplicity or
    is not yet taken on this power.
    """
    all_instances = Enhancement_Instance.objects.filter(relevant_power=power).all()
    formsets = []
    for base_enhancement in Enhancement.objects.filter(pk__in=power.base.enhancements.all()):
        matching = {inst for inst in all_instances
                    if inst.relevant_enhancement == base_enhancement}
        initial = [{'is_selected': True, 'detail_text': inst.detail}
                   for inst in matching]
        extra = 1 if (base_enhancement.multiplicity_allowed or not matching) else 0
        factory = formset_factory(make_enhancement_form(base_enhancement), extra=extra, max_num=4)
        formsets.append(factory(initial=initial))
    return formsets
def _get_drawback_formsets_from_power(power):
    """Build one formset per drawback available on the power's base,
    pre-filled with the power's existing drawback instances.

    A blank extra form is offered when the drawback allows multiplicity or is
    not yet taken on this power.
    """
    all_instances = Drawback_Instance.objects.filter(relevant_power=power).all()
    formsets = []
    for base_drawback in Drawback.objects.filter(pk__in=power.base.drawbacks.all()):
        matching = {inst for inst in all_instances
                    if inst.relevant_drawback == base_drawback}
        initial = [{'is_selected': True, 'detail_text': inst.detail}
                   for inst in matching]
        extra = 1 if (base_drawback.multiplicity_allowed or not matching) else 0
        factory = formset_factory(make_drawback_form(base_drawback), extra=extra, max_num=4)
        formsets.append(factory(initial=initial))
    return formsets
def _add_tutorial_to_context(context):
    """Attach the power-tutorial modal content to *context* and return it.

    NOTE(review): get_object_or_404 with no filter assumes exactly one
    PowerTutorial row exists — confirm.
    """
    tutorial = get_object_or_404(PowerTutorial)
    context.update({
        'modal_header': tutorial.modal_edit_header,
        'modal_text': tutorial.modal_edit,
        'modal_art': 'overrides/art/ocean-walking-copy.jpg',
    })
    return context
def _get_modifier_requirements(enhancements, drawbacks):
requirements = {}
for enhancement in enhancements:
if enhancement.required_Enhancements:
required = []
for req_enhancement in enhancement.required_Enhancements.all():
required.append( req_enhancement.form_name() )
requirements[enhancement.form_name()] = required
for drawback in drawbacks:
if drawback.required_drawbacks:
required = []
for req_drawback in drawback.required_drawbacks.all():
required.append(req_drawback.form_name())
requirements[drawback.form_name()] = required
return requirements
def _get_enhancement_instances(post_data, enhancements, new_power):
    """Translate raw POST data into unsaved Enhancement_Instance objects.

    For each enhancement, one instance is created per selected checkbox; the
    matching detail text (if any) is sanitized with bleach before storage.
    """
    instances = []
    for enhancement in enhancements:
        selected_key = enhancement.slug + "-e-is_selected"
        if selected_key not in post_data:
            continue
        detail_key = enhancement.slug + "-e-detail_text"
        detail_texts = post_data.getlist(detail_key) if detail_key in post_data else []
        for _selected in post_data.getlist(selected_key):
            # Consume detail texts in order; fall back to empty when exhausted.
            detail = bleach.clean(detail_texts.pop(0)) if detail_texts else ""
            instances.append(Enhancement_Instance(relevant_enhancement=enhancement,
                                                  relevant_power=new_power,
                                                  detail=detail))
    return instances
def _get_drawback_instances(post_data, drawbacks, new_power):
    """Translate raw POST data into unsaved Drawback_Instance objects.

    For each drawback, one instance is created per selected checkbox; the
    matching detail text (if any) is sanitized with bleach before storage.
    """
    instances = []
    for drawback in drawbacks:
        selected_key = drawback.slug + "-d-is_selected"
        if selected_key not in post_data:
            continue
        detail_key = drawback.slug + "-d-detail_text"
        detail_texts = post_data.getlist(detail_key) if detail_key in post_data else []
        for _selected in post_data.getlist(selected_key):
            # Consume detail texts in order; fall back to empty when exhausted.
            detail = bleach.clean(detail_texts.pop(0)) if detail_texts else ""
            instances.append(Drawback_Instance(relevant_drawback=drawback,
                                               relevant_power=new_power,
                                               detail=detail))
    return instances
def _create_new_full_power(power_form, base):
    """Instantiate (without saving) a Power_Full shell for a new power."""
    attributes = {
        'name': power_form.cleaned_data['power_name'],
        'dice_system': DICE_SYSTEM[1][0],
        'base': base,
        'pub_date': timezone.now(),
    }
    return Power_Full(**attributes)
def _get_power_from_form(power_form, base):
    """Instantiate (without saving) a Power revision from a validated form."""
    data = power_form.cleaned_data
    return Power(name=data['power_name'],
                 flavor_text=data['flavor'],
                 description=data['description'],
                 system=data['system'],
                 activation_style=data['activation_style'],
                 base=base,
                 dice_system=DICE_SYSTEM[1][0],
                 pub_date=timezone.now())
def _get_roll_from_form_and_system(form, system_field):
    """Resolve the roll selected on a system-roll form into a Roll object.

    The attribute field carries either one of the special mind/body/parry
    sentinels or the id of a concrete Attribute (paired with an Ability).
    The system field's difficulty is used when set, otherwise 6.
    """
    attr = form.cleaned_data["attribute_roll"]
    difficulty = system_field.difficulty if system_field.difficulty else 6
    if attr == BODY_[0]:
        return Roll.get_body_roll(difficulty=difficulty)
    if attr == MIND_[0]:
        return Roll.get_mind_roll(difficulty=difficulty)
    if attr == PARRY_[0]:
        return Roll.get_roll(difficulty=difficulty, parry_type=system_field.parry_type, speed=REACTION)
    # Not a sentinel: look up the concrete attribute/ability pair by id.
    attribute = get_object_or_404(Attribute, id=attr)
    ability = get_object_or_404(Ability, id=form.cleaned_data["ability_roll"])
    return Roll.get_roll(attribute=attribute,
                         ability=ability,
                         difficulty=difficulty,
                         speed=system_field.speed)
def _create_power_from_post_and_base(base_power, request, power_full):
    """Create and save a Power revision plus all of its satellite rows
    (modifier instances, parameter values, system field instances) from POST.

    :raises ValueError: when the main power form or either system-field
        formset fails validation.
    :return: the saved Power revision.
    """
    form = CreatePowerForm(base_power, request.POST)
    if form.is_valid():
        system = Base_Power_System.objects.filter(dice_system=DICE_SYSTEM[1][0]).get(base_power=base_power.slug)
        power = _get_power_from_form(power_form=form, base=base_power)
        if request.user.id:
            power.created_by = request.user
        power.parent_power = power_full
        power.save()
        # Persist one instance per selected enhancement / drawback checkbox.
        enhancement_instances = _get_enhancement_instances(post_data=request.POST,
                                                           enhancements=Enhancement.objects.filter(
                                                               pk__in=base_power.enhancements.all()),
                                                           new_power=power)
        for enhancement_instance in enhancement_instances:
            enhancement_instance.save()
        drawback_instances = _get_drawback_instances(post_data=request.POST,
                                                     drawbacks=Drawback.objects.filter(
                                                         pk__in=base_power.drawbacks.all()),
                                                     new_power=power)
        for drawback_instance in drawback_instances:
            drawback_instance.save()
        # Parameter values are read straight from POST, keyed by parameter slug.
        for power_param in Power_Param.objects.filter(relevant_base_power=base_power):
            param_val = Parameter_Value(relevant_power=power,
                                        relevant_power_param=power_param,
                                        value=request.POST[power_param.relevant_parameter.slug])
            param_val.save()
        text_field_formset = _get_system_text_field_formset(system, request.POST)
        if text_field_formset.is_valid():
            # NOTE(review): this loop rebinds `form`, shadowing the outer
            # CreatePowerForm. Harmless today (the outer form is not used
            # again) but fragile.
            for form in text_field_formset:
                system_field = get_object_or_404(SystemFieldText, id=form.cleaned_data["system_field_id"])
                field_instance = SystemFieldTextInstance(relevant_power=power,
                                                         relevant_field=system_field,
                                                         value=form.cleaned_data["field_text"])
                field_instance.save()
        else:
            raise ValueError("Invalid text field formset")
        roll_field_formset = _get_system_roll_field_formset(system, request.POST)
        if roll_field_formset.is_valid():
            for form in roll_field_formset:
                system_field = get_object_or_404(SystemFieldRoll, id=form.cleaned_data["system_field_id"])
                roll = _get_roll_from_form_and_system(form, system_field)
                field_instance = SystemFieldRollInstance(relevant_power=power,
                                                         relevant_field=system_field,
                                                         roll=roll)
                field_instance.save()
        else:
            raise ValueError("Invalid roll field formset")
        return power
    else:
        raise ValueError("Invalid Power Form")
def _get_system_text_field_formset(system, POST = None):
    """Formset over the system's text fields; bound to POST data when given,
    otherwise unbound with one form per field."""
    factory = formset_factory(SystemFieldTextForm, extra=0)
    fields = system.systemfieldtext_set.order_by("id").all()
    seed = [{'system_field_id': field.id, 'system_field': field} for field in fields]
    return factory(POST, initial=seed, prefix="system_text_fields")
def _get_system_roll_field_formset(system, POST=None):
    """Formset over the system's roll fields; bound to POST data when given,
    otherwise unbound with one form per field."""
    factory = formset_factory(SystemFieldRollForm, extra=0)
    fields = system.systemfieldroll_set.order_by("id").all()
    seed = [{'system_field_id': field.id, 'system_field': field} for field in fields]
    return factory(POST, initial=seed, prefix="system_roll_fields")
def _get_power_creation_reason(new_power, old_power):
    """Classify why a new Power revision exists relative to its predecessor.

    :return: one of the CREATION_REASON keys — new (no predecessor),
        improvement (point value rose), revision (points fell or any
        modifier/parameter changed), otherwise adjustment (text-only edit).
    """
    if old_power is None:
        # new
        return CREATION_REASON[0][0]
    new_points = new_power.get_point_value()
    old_points = old_power.get_point_value()
    if new_points > old_points:
        # improvement
        return CREATION_REASON[1][0]
    if new_points < old_points\
            or _get_param_difference_text(new_power, old_power)\
            or _get_added_enhancements(new_power, old_power)\
            or _get_removed_enhancements(new_power, old_power)\
            or _get_added_drawbacks(new_power, old_power)\
            or _get_removed_drawbacks(new_power, old_power):
        # revision
        return CREATION_REASON[2][0]
    # adjustment
    return CREATION_REASON[3][0]
def _describe_modifier_changes(label, modifier_instances, name_of):
    """Render e.g. "Added Enhancement(s): a, b, " for a list of modifier
    instances; returns "" when the list is empty."""
    if not modifier_instances:
        return ""
    text = label
    if len(modifier_instances) > 1:
        text = text + "s"
    text = text + ": "
    for instance in modifier_instances:
        text = text + name_of(instance) + ", "
    return text
def _get_power_creation_reason_expanded_text(new_power, old_power):
    """Build a human-readable summary of why a new Power revision was created.

    For improvements/revisions, lists every enhancement and drawback added or
    removed and every parameter change; for adjustments, notes the text edit.
    The result is capped at 1500 characters.
    """
    edit_text = ""
    if new_power.creation_reason == CREATION_REASON[3][0]:
        # adjustment
        edit_text = "Text field change"
    if new_power.creation_reason == CREATION_REASON[1][0] or new_power.creation_reason == CREATION_REASON[2][0]:
        # improvement or revision: describe each category of modifier change.
        edit_text = edit_text + _describe_modifier_changes(
            "Added Enhancement", _get_added_enhancements(new_power, old_power),
            lambda inst: inst.relevant_enhancement.name)
        edit_text = edit_text + _describe_modifier_changes(
            "Removed Enhancement", _get_removed_enhancements(new_power, old_power),
            lambda inst: inst.relevant_enhancement.name)
        edit_text = edit_text + _describe_modifier_changes(
            "Added Drawback", _get_added_drawbacks(new_power, old_power),
            lambda inst: inst.relevant_drawback.name)
        edit_text = edit_text + _describe_modifier_changes(
            "Removed Drawback", _get_removed_drawbacks(new_power, old_power),
            lambda inst: inst.relevant_drawback.name)
        edit_text = edit_text + _get_param_difference_text(new_power, old_power)
    # stopgap bugfix measure until we fix the _get_added_enhancements method by
    # properly using form fields: guarantee a non-trivial description.
    if len(edit_text) < 3:
        edit_text = "Power Adjustment"
    # Trim the trailing ", " that a modifier list may have left behind.
    if edit_text[-2] == ',':
        edit_text = edit_text[:-2]
    return edit_text[:1500]
def _get_added_enhancements(new_power, old_power):
added_enhancements = []
for new_enhancement in new_power.enhancement_instance_set.all():
in_old = False
for old_enhancement in old_power.enhancement_instance_set.all():
if old_enhancement.relevant_enhancement.slug == new_enhancement.relevant_enhancement.slug:
in_old = True
if not in_old:
added_enhancements.append(new_enhancement)
return added_enhancements
def _get_removed_enhancements(new_power, old_power):
removed_enhancements = []
for old_enhancement in old_power.enhancement_instance_set.all():
in_new = False
for new_enhancement in new_power.enhancement_instance_set.all():
if old_enhancement.relevant_enhancement.slug == new_enhancement.relevant_enhancement.slug:
in_new = True
if not in_new:
removed_enhancements.append(old_enhancement)
return removed_enhancements
def _get_added_drawbacks(new_power, old_power):
added_drawbacks = []
for new_drawback in new_power.drawback_instance_set.all():
in_old = False
for old_drawback in old_power.drawback_instance_set.all():
if old_drawback.relevant_drawback.slug == new_drawback.relevant_drawback.slug:
in_old = True
if not in_old:
added_drawbacks.append(new_drawback)
return added_drawbacks
def _get_removed_drawbacks(new_power, old_power):
removed_drawbacks = []
for old_drawback in old_power.drawback_instance_set.all():
in_new = False
for new_drawback in new_power.drawback_instance_set.all():
if old_drawback.relevant_drawback.slug == new_drawback.relevant_drawback.slug:
in_new = True
if not in_new:
removed_drawbacks.append(old_drawback)
return removed_drawbacks
def _get_param_difference_text(new_power, old_power):
param_text = ""
param_counter = 0
for new_param_value in new_power.parameter_value_set.order_by('relevant_power_param_id').all():
try:
old_param_value = old_power.parameter_value_set.order_by('relevant_power_param_id').all()[param_counter]
if old_param_value.value != new_param_value.value:
param_text = param_text + "Parameter {} changed from {} to {}. "
param_text = param_text.format(new_param_value.relevant_power_param.relevant_parameter.name, old_param_value.value, new_param_value.value)
except:
return "Base Parameters Changed. "
param_counter = param_counter + 1
return param_text | 26,298 | 8,012 |
import torch
import torchcrepe
###############################################################################
# CREPE perceptual loss
###############################################################################
class CREPEPerceptualLoss(torch.nn.Module):
    """Perceptual loss between two audio inputs, measured as the summed L1
    distance between their activations at each stage of a torchcrepe CREPE
    pitch model.

    The CREPE weights are frozen (requires_grad_(False)); the model is used
    purely as a fixed feature extractor.
    """
    def __init__(self):
        super().__init__()
        # Register model
        self.add_module('model', torchcrepe.Crepe())
        # Don't update model weights
        self.requires_grad_(False)
    def forward(self, x, y):
        """Return the total L1 distance between the CREPE activations of x and y.

        NOTE(review): assumes x and y are batches of 1024-sample frames
        (shape (batch, 1024)), per the reshape in activations() — confirm
        against callers.
        """
        # Get feature maps
        x_maps = self.activations(x)
        y_maps = self.activations(y)
        # Compute distance: sum of per-layer L1 losses.
        loss = 0.
        for x_map, y_map in zip(x_maps, y_maps):
            loss += torch.nn.functional.l1_loss(x_map, y_map)
        return loss
    def activations(self, x):
        """Run x through each CREPE conv block and the classifier, returning
        the output of every stage in order."""
        activations = []
        # shape=(batch, 1, 1024, 1)
        x = x[:, None, :, None]
        # Forward pass through model and save activations.
        # NOTE(review): the explicit padding tuple on conv1 presumably
        # reproduces CREPE's asymmetric padding for the first wide kernel —
        # confirm against torchcrepe's own forward pass.
        x = self.model.layer(x, self.model.conv1, self.model.conv1_BN, (0, 0, 254, 254))
        activations.append(x)
        x = self.model.layer(x, self.model.conv2, self.model.conv2_BN)
        activations.append(x)
        x = self.model.layer(x, self.model.conv3, self.model.conv3_BN)
        activations.append(x)
        x = self.model.layer(x, self.model.conv4, self.model.conv4_BN)
        activations.append(x)
        x = self.model.layer(x, self.model.conv5, self.model.conv5_BN)
        activations.append(x)
        x = self.model.layer(x, self.model.conv6, self.model.conv6_BN)
        activations.append(x)
        # shape=(batch, self.in_features)
        x = x.permute(0, 2, 1, 3).reshape(-1, self.model.in_features)
        # Compute unnormalized probability distribution
        x = self.model.classifier(x)
        activations.append(x)
        return activations
| 1,877 | 629 |
from rec_to_nwb.processing.nwb.components.analog.fl_analog import FlAnalog
class OldFlAnalogBuilder:
    """Builder for FlAnalog objects from pre-extracted analog data."""
    @staticmethod
    def build(data, timestamps, description):
        """Wrap the given analog data, timestamps and description in a FlAnalog.

        NOTE(review): positional argument order must match FlAnalog's
        constructor — confirm against fl_analog.FlAnalog.
        """
        return FlAnalog(data, timestamps, description)
| 223 | 74 |
import sys
# Hard-coded, machine-specific location of the pyhome3 package.
# NOTE(review): this path only exists on the original author's machine.
sys.path.append(r"D:\Dupre\_data\program\pyhome")
import pyhome3
from pyhome3 import HalLOG
from pyhome3.srcpyhome.internet.simple_server.simple_server_custom import run_server
# Enable console logging, then start the blocking HTTP server.
HalLOG(OutputPrint=True)
HalLOG("running server")
run_server(None)  # blocks here until the server stops
HalLOG("end running server")
# Example URLs served by the simple server:
# http://localhost:8080/localfile/D:\Dupre\_data\informatique\support\python_td_2013\programme\td9_by_hours.json
# http://localhost:8080/debug_string/
| 438 | 166 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 23 12:07:20 2019
This piece of software is bound by The MIT License (MIT)
Copyright (c) 2019 Prashank Kadam
Code written by : Prashank Kadam
User name - ADM-PKA187
Email ID : prashank.kadam@maersktankers.com
Created on - Tue Jul 30 10:00:14 2019
version : 1.0
"""
# Importing the required libraries
import pandas as pd
import xml.etree.ElementTree as et
# Import the standard semark light format for sea and port reports in order to
# get the column names of all the standard columns into runtime.
df_init_sea = pd.read_excel('semark_light.xlsx', 'Sea')
df_init_port = pd.read_excel('semark_light.xlsx', 'Port')
# Import the xml-converted data into a dataframe.
df_target = pd.read_excel('test_1.xlsx')
# Take a subset of only the required columns from the dataframe.
df_target = df_target[['ImoNumber', 'VesselName', 'ReportTime', 'Longitude', 'Port',
                       'Location', 'Latitude', 'VoyageNo', 'ObservedDistance', 'FWDDraft', 'LOG_DISTANCE',
                       'WindForce', 'SeaDir', 'SwellHeight', 'CurrentDirection', 'WindDirection', 'SeaHeight',
                       'SwellDir', 'SeaState', 'Swell', 'Current', 'FuelType', 'AuxEngineConsumption',
                       'BoilerEngineConsumption', 'Units', 'Received', 'Consumption', 'SeaTemp', 'VesselCondition']]
# Rows list into which the mapped data is appended.
rows = []
# Each report arrives as four successive rows, one per fuel type; the shared
# report fields are simply re-read on every row. The mapped record is emitted
# only once per report, on its final (MGO) row.
# NOTE(review): if a report is missing one of its fuel-type rows, the fuel
# variables for that type carry over from the previous report (or raise
# NameError on the very first report) — confirm the source data always
# contains all four fuel rows per report.
for index, row in df_target.iterrows():
    s_imo = row['ImoNumber']
    s_vesselname = row['VesselName']
    s_time = row['ReportTime']
    s_longitutde = row['Longitude']
    s_port = row['Port']
    s_latitude = row['Latitude']
    s_voyage = row['VoyageNo']
    s_obvdis = row['ObservedDistance']
    s_draught = row['FWDDraft']
    s_dist = row['LOG_DISTANCE']
    s_wind = row['WindForce']
    s_seadir = row['SeaDir']
    s_swellhgt = row['SwellHeight']  # bug fix: previously read 'SeaDir' again
    s_curdir = row['CurrentDirection']
    s_windir = row['WindDirection']
    s_seahgt = row['SeaHeight']
    s_swelldir = row['SwellDir']
    s_seastate = row['SeaState']
    s_swell = row['Swell']
    s_curr = row['Current']
    s_units = row['Units']
    s_seatemp = row['SeaTemp']
    s_vesscon = row['VesselCondition']
    # Fill in the corresponding fields for the respective fuel types.
    if row['FuelType'] == 'IFO':
        s_hshfo_ae = row['AuxEngineConsumption']
        s_hshfo_blr = row['BoilerEngineConsumption']
        s_hshfo_me = row['Consumption']
    elif row['FuelType'] == 'LSF':
        s_lshfo_ae = row['AuxEngineConsumption']
        s_lshfo_blr = row['BoilerEngineConsumption']
        s_lshfo_me = row['Consumption']
    elif row['FuelType'] == 'LSG':
        s_lsmdo_ae = row['AuxEngineConsumption']
        s_lsmdo_blr = row['BoilerEngineConsumption']
        s_lsmdo_me = row['Consumption']
    elif row['FuelType'] == 'MGO':
        s_hsmdo_ae = row['AuxEngineConsumption']
        s_hsmdo_blr = row['BoilerEngineConsumption']
        s_hsmdo_me = row['Consumption']
        # MGO is the last fuel type for a particular report, so the fully
        # populated record is appended here. (Bug fix: the append previously
        # ran on every row, which raised NameError on the first partial
        # report and emitted four near-duplicate rows per report.)
        rows.append({'Vessel_Name': s_vesselname, 'Report_Date': s_time, 'IMO_NO': s_imo,
                     'Main Engine Fuel Consumption (H.S.HFO)': s_hshfo_me,
                     'Main Engine Fuel Consumption (L.S.HFO)': s_lshfo_me,
                     'Main Engine Fuel Consumption (H.S.MDO)': s_hsmdo_me,
                     'Main Engine Fuel Consumption (L.S.MDO)': s_lsmdo_me,
                     'Boiler Consumption (H.S.HFO)': s_hshfo_blr,
                     'Boiler Consumption (L.S.HFO)': s_lshfo_blr,
                     'Boiler Consumption (H.S.MDO)': s_hsmdo_blr,
                     'Boiler Consumption (L.S.MDO)': s_lsmdo_blr,
                     'Auxiliary Engine (Diesel Generator ) (H.S.HFO)': s_hshfo_ae,
                     'Auxiliary Engine (Diesel Generator ) (L.S.HFO)': s_lshfo_ae,
                     'Auxiliary Engine (Diesel Generator ) (H.S.MDO)': s_hsmdo_ae,
                     'Auxiliary Engine (Diesel Generator ) (L.S.MDO)': s_lsmdo_ae,
                     'Vessel State( Loaded\Ballast)': s_vesscon, 'True Wind Direction ': s_windir,
                     'True Wave Direction': s_seadir, 'True Swell Direction': s_swelldir})
# Create the final dataframe with the mapped data, using the column order
# taken from the semark sheet.
df_final = pd.DataFrame(rows, columns=df_init_sea.columns)
# Export the data to an excel sheet.
df_final.to_excel('final.xlsx', index=False)
######################################################################################################
# The below piece of code is for xml to pandas dataframe conversion.
# Kindly note that all the fields have not yet been added to the dictionary
# xtree = et.parse("vess_test.xml")
# xroot = xtree.getroot()
#
# df_cols = ["VesselName", "ReportTime", "Longitude", "Port", "IMO_NUMBER"]
# rows = []
#
# for node in xroot:
# # s_name = node.attrib.get("name")
# s_vess_name = node.find("VesselName").text if node is not None else None
# s_report_time = node.find("ReportTime").text if node is not None else None
# s_longitude = node.find("Longitude").text if node is not None else None
# s_port = node.find("Port").text if node is not None else None
# s_imo = node.find("IMO_NUMBER").text if node is not None else None
# # s_location = node.find("Location").text if node is not None else None
#
# rows.append({"VesselName": s_vess_name, "ReportTime": s_report_time, "Longitude":s_longitude,
# "Port": s_port, "IMO_NUMBER": s_imo})
#
# out_df = pd.DataFrame(rows, columns=df_cols)
#
# print(out_df.head(10))
| 6,230 | 2,191 |
import discord, jishaku
from discord.ext import commands
from time import sleep
# Discord user ids of the bot's owners.
hid=666317117154525185
did=676454199742955530
lid=701254727534510129
owners=[hid,did,lid]
# Presence/status text shown for the bot.
status="— ୨୧ 𝐬𝐧𝐮𝐠𝐠𝐥𝐢𝐧’ 𝐭𝐡𝐞 𝐜𝐮𝐭𝐢𝐞 𝐩𝐢𝐞𝐬! ₓ˚. ୭ ˚○◦"
# Welcome message template; "$USER" is replaced with a member mention.
join="""\U00002601 . . . ⇢ ˗ˏˋ <@&689140834200846374> ࿐ྂ
**welcome sweetheart!! please verify to gain access to the rest of the server!** <:b_powheart:727644834265038918> <:b_teddy:727644836819107860> <:b_powheart:727644834265038918>
<:b_wingies2:727644834806104124> **get some roles in** <a:b_arrow:727644833597882459> <#650563103699763240>
<:b_wingies2:727644834806104124> **make an intro in** <a:b_arrow:727644833597882459> <#650562789546655790>
<:b_wingies2:727644834806104124> **read and react to the triggers and rules list** <a:b_arrow:727644833597882459> <#662158949239226388> + <#668220102482722821>
<:b_wingies2:727644834806104124> **ping staff in** <a:b_arrow:727644833597882459> <#694558376029454386>
<a:b_butterflies:727644835023945778> — **and have loads of fun, $USER!**"""
# Farewell message template; "$USER" is replaced with the member's tag.
leave= """<a:B4562AEA046F4DB6B1892479B9ADA72D:727644835023945778> — **oh no!! an angel named $USER left us :c god speed little angel. god speed.** <:5CD871E9E3E34685A9E579DA3BC0D982:727644834265038918>"""
# Channel id where welcome/farewell messages are posted.
welcomechan=650560380271067148
# Embed accent color (unused in this file; presumably used by extensions).
color=0xf8dfea
def isown(usr):
    """Return True when *usr* is one of the configured bot owners."""
    # Direct membership test replaces the redundant if/else over a boolean.
    return usr.id in owners
# Single bot instance; commands use the "~" prefix. The default help command
# is removed so an extension can supply a custom one.
bot = commands.Bot(command_prefix='~',owner_ids=owners)
bot.remove_command('help')
@bot.event
async def on_ready():
    """Set the bot's playing status once the gateway connection is ready."""
    await bot.change_presence(activity=discord.Game(name=status), status=discord.Status('online'))
@bot.event
async def on_member_join(member):
    """Post the welcome template (with the member mentioned) plus a gif to the
    welcome channel when someone joins."""
    with open('./app/join.gif', 'rb') as fp:
        await bot.get_channel(welcomechan).send(content=join.replace("$USER",member.mention),file=discord.File(fp,"join.gif"))
@bot.event
async def on_member_remove(member):
    """Post the farewell template (with the member's name#discriminator tag)
    plus a gif to the welcome channel when someone leaves."""
    with open('./app/leave.gif', 'rb') as fp:
        await bot.get_channel(welcomechan).send(content=leave.replace("$USER",f"@{member.name}#{member.discriminator}"),file=discord.File(fp,"leave.gif"))
@bot.event
async def on_message(message):
    """Reply "h" whenever the specific user `hid` sends a message containing
    an h-ism ("h " / "hh" / exactly "h"), then dispatch commands as usual.

    Overriding on_message suppresses default command handling, so the
    process_commands call at the end is required for commands to keep working.
    """
    if ("h " in message.content.lower() or "hh" in message.content.lower() or message.content.lower()=="h") and message.author.id==hid:
        await message.channel.send(content="h")
    await bot.process_commands(message)
@bot.command(name='join')
@commands.is_owner()
async def _join(ctx):
    """Owner-only: preview the welcome message using the invoker as the new member."""
    with open('./app/join.gif', 'rb') as fp:
        await bot.get_channel(welcomechan).send(content=join.replace("$USER",ctx.author.mention),file=discord.File(fp,"join.gif"))
    await ctx.send(content="Done!")
@bot.command(name='leave')
@commands.is_owner()
async def _leave(ctx):
    """Owner-only: preview the farewell message using the invoker as the leaver."""
    with open('./app/leave.gif', 'rb') as fp:
        await bot.get_channel(welcomechan).send(content=leave.replace("$USER",f"@{ctx.author.name}#{ctx.author.discriminator}"),file=discord.File(fp,"leave.gif"))
    await ctx.send(content="Done!")
@bot.command(name='say')
@commands.is_owner()
async def _say(ctx, *, arg):
    """Owner-only: echo *arg* into the channel, then delete the invoking message."""
    await ctx.send(content=arg)
    await ctx.message.delete()
@bot.command()
@commands.is_owner()
async def tst(ctx):
    """Owner-only: dump the raw welcome template into the current channel."""
    await ctx.send(content=join)
@bot.command(name='fetchmsg')
@commands.is_owner()
async def _msg(ctx, arg):
arg=int(arg)
m=await ctx.channel.fetch_message(arg)
await ctx.send(content=f"\U00000060\U00000060\U00000060{m.content}\U00000060\U00000060\U00000060")
bot.load_extension('jishaku')
bot.load_extension("utils")
bot.load_extension("misc")
bot.load_extension("voice")
bot.run('BOT_TOKEN_HERE') | 3,506 | 1,778 |
from multiprocessing import Queue, Process, Pool, Manager, Pipe
from time import sleep
def basic_usage():
    """Demonstrate the bounded Queue API: put/get, full/empty checks, timeouts."""
    q = Queue(3)  # fixed capacity; unbounded if the size argument is omitted
    q.put('消息1')
    q.put('消息2')
    q.put('消息3')
    # q.put('消息4')  # would block until a slot frees up
    if not q.full():
        q.put('消息5', block=True, timeout=1)  # wait up to 1s, then raise queue.Full
    print('判断队列是否已满: %s' % q.full())
    print(q.get())  # fetch and remove the oldest item
    print(q.get())
    print(q.get())
    # print(q.get())  # would block until an item arrives
    if not q.empty():
        print(q.get(block=True, timeout=1))  # wait up to 1s, then raise queue.Empty
    print('判断队列是否为空: %s' % q.empty())
    # print('队列大小 %d' % q.qsize())  # qsize raises NotImplementedError on macOS
'''
Inter-process communication via queues:
- processes created through a Pool must use Manager().Queue()
- processes created directly with Process use multiprocessing.Queue
'''
def write(q: Queue):
    """Producer side of the demo: push four letters onto *q*, one per second."""
    for letter in ('a', 'b', 'c', 'd'):
        print('is writing %s' % letter)
        q.put(letter)
        sleep(1)
def read(q: Queue):
    """Consumer side of the demo: pull four items off *q*, one per second."""
    for _ in range(4):
        print('is redding %s' % q.get())
        sleep(1)
def queue_usage():
    """Run the producer/consumer pair twice: with Process, then with a Pool."""
    # Plain processes can share a multiprocessing.Queue directly.
    q = Queue()
    pw = Process(target=write, args=(q,))
    pr = Process(target=read, args=(q,))
    pw.start()
    pr.start()
    pw.join()
    pr.join()
    # Pool workers cannot use multiprocessing.Queue; a Manager queue is needed.
    q = Manager().Queue()
    pool = Pool(3)
    pool.apply(write, (q,))  # apply() blocks until the task completes
    pool.apply(read, (q,))
    pool.close()
'''
Pipe usage
'''
def func_pipe(conn):
    """Child end of the pipe demo: send one message, then echo the reply."""
    conn.send('send by child')
    print('child recv:', conn.recv())
    conn.close()
def pipe_usage():
    """Exchange one message in each direction between parent and child."""
    parent_conn, child_conn = Pipe()  # the two ends of a duplex pipe
    p = Process(target=func_pipe, args=(child_conn, ))
    p.start()
    print('parent recv:', parent_conn.recv())
    parent_conn.send('send by parent')
    p.join()
# Entry point: only the pipe demo is enabled by default.
if __name__ == '__main__':
    # basic_usage()
    pipe_usage()
| 1,611 | 810 |
# -*- coding: utf-8 -*-
"""
app.blueprints.auth.email
~~~~~~~~~~~~~~~~~~~~~~~~~
Email helpers for the auth blueprint of spa-base.
"""
from flask import current_app, render_template
from app.mail import send_mail
def send_email_verification_mail(user, email):
    """Send the address-verification message for *email* to its inbox."""
    verification_token = email.verification_token
    context = {'user': user, 'token': verification_token}
    send_mail(
        '[SPA-Base] Please Verify Your Email',
        sender=current_app.config['SERVER_EMAIL'],
        recipients=[str(email)],
        text_body=render_template('auth/email/email_verification.txt', **context),
        html_body=render_template('auth/email/email_verification.html', **context),
    )
def send_password_reset_mail(user):
    """Email *user* a password-reset message containing their reset token."""
    reset_token = user.password_reset_token
    context = {'user': user, 'token': reset_token}
    send_mail(
        '[SPA-Base] Reset Your Password',
        sender=current_app.config['SERVER_EMAIL'],
        recipients=[str(user.email)],
        text_body=render_template('auth/email/password_reset.txt', **context),
        html_body=render_template('auth/email/password_reset.html', **context),
    )
def send_email_not_found_mail(email):
    """Notify *email* that no account is registered under that address."""
    send_mail(
        '[SPA-Base] Email Not Registered',
        sender=current_app.config['SERVER_EMAIL'],
        recipients=[email],
        text_body=render_template('auth/email/email_not_found.txt'),
        html_body=render_template('auth/email/email_not_found.html'),
    )
| 1,504 | 428 |
#!/usr/bin/python
import redis
import sys
import hmac
import hashlib
import json
import decimal
import urllib2
import time
import argparse
import os
from settings import *
# NOTE: Python 2 code (print statements, urllib2).
r = redis.StrictRedis(host='localhost', port=6379, db=0)
print "using "+os.environ['EVENT_SECRET_KEY']
parser = argparse.ArgumentParser(description='Write sensor value to remote server.')
parser.add_argument('--host', help='remote host to send updates to')
args = parser.parse_args()
# Recovery pass: move all failed_readings back onto the sensor_readings list
# so anything left over from a crashed previous run is retried.
failed_count = 0
while True:
    failed_msg = r.rpoplpush('failed_readings', 'sensor_readings')
    if failed_msg is None:
        break
    else:
        failed_count += 1
if failed_count > 0:
    print("Recovered "+str(failed_count)+" readings")
else:
    print("No cleanup necessary from previous run")
# take all the current items from the redis list
#sensor_readings = r.lrange('sensor_readings',0,-1)
# each of these results is a json string
while True:
    # Atomically move the reading into failed_readings; it is only removed
    # from there (bottom of the loop) after a successful upload, so a crash
    # mid-upload leaves the reading queued for the recovery pass above.
    reading_json = r.rpoplpush('sensor_readings', 'failed_readings')
    if reading_json is None:
        break;
    # Example payload:
    #{"float": "27.0", "monitor": "dajeil.BMP085.temp.1", "time": 1379569873.699045}
    print "Raw JSON is: "+reading_json
    reading = json.loads(reading_json, parse_float=decimal.Decimal)
    # This second parse is temporary, as some early values were loaded as strings, and this serves to strip them
    fv = str(reading["float"])
    monitor = reading["monitor"].strip()
    epoch = reading["time"]
    # create json with new format
    data = json.dumps({"type": "float", "value": fv, "monitor": monitor, "time": float(epoch)})
    print data
    # Sign the body with HMAC-SHA256; the server verifies with the shared secret.
    authz = "HMAC "+hmac.new(os.environ['EVENT_SECRET_KEY'],data,hashlib.sha256).hexdigest()
    req = urllib2.Request(args.host, data, {'Content-Type': 'application/json', 'Authorization': authz})
    f = urllib2.urlopen(req)
    response = f.read()
    print response
    f.close()
    # Commit completed, drop the message from redis
    r.lrem('failed_readings', 1, reading_json)
#print r.lrange('sensor_readings',0,-1)
| 2,008 | 692 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable integer field `median_monthly_debt` to Program."""

    dependencies = [
        ('paying_for_college', '0009_expandable_group_help_text'),
    ]

    operations = [
        migrations.AddField(
            model_name='program',
            name='median_monthly_debt',
            field=models.IntegerField(blank=True, help_text='MEDIAN MONTHLY PAYMENT FOR A 10-YEAR LOAN', null=True),
        ),
    ]
| 458 | 155 |
from grifo.snippets.models import Snippet, SnippetCategory
from rest_framework import viewsets
from grifo.snippets.serializers import SnippetSerializer, SnippetCategorySerializer
class SnippetViewSet(viewsets.ModelViewSet):
    """Full CRUD REST API for Snippet objects, addressed by `id`."""
    serializer_class = SnippetSerializer
    queryset = Snippet.objects.all()
    lookup_field = 'id'


class SnippetCategoryViewSet(viewsets.ModelViewSet):
    """Full CRUD REST API for SnippetCategory objects, addressed by `id`."""
    serializer_class = SnippetCategorySerializer
    queryset = SnippetCategory.objects.all()
    lookup_field = 'id'
| 502 | 156 |
import os
import sys
# List every .png file under the directory given as the single CLI argument.
if len(sys.argv) != 2:
    print("Error: usage --> python3 lstdir.py [DIRNAME]")
    sys.exit(1)  # BUG FIX: misuse previously exited with status 0 (success)

path = sys.argv[1]
png_paths = []
# r = current root, d = its subdirectories, f = files directly inside r
for r, d, f in os.walk(path):
    for fname in f:
        # BUG FIX: the substring test ('.png' in fname) also matched names
        # like 'shot.png.bak'; check the extension explicitly instead.
        if fname.endswith('.png'):
            png_paths.append(os.path.join(r, fname))

for p in png_paths:
    print(p)
| 374 | 145 |
import pandas as pd
import requests
# Ratings produced by an upstream preprocessing step.
ratings = pd.read_csv("processed_ratings.csv")
# fetch movie details of all the unique movieids from the movie API
movie_api_url = "http://128.2.204.215:8080/movie/"
# One blocking HTTP request per unique movie id.
movies = [requests.get(movie_api_url + movie).json() for movie in ratings['movieid'].unique()]
# filter out records for which movie details does not exist
# (the API appears to report missing ids via a "message" key — TODO confirm)
movies = list(filter(lambda x: x.get('message','None') == 'None', movies))
# convert JSON data to dataframe
# NOTE(review): pd.io.json.json_normalize is deprecated in newer pandas
# (pandas.json_normalize); confirm against the pinned pandas version.
movies = list(map(pd.io.json.json_normalize, movies))
movies_data = pd.concat(movies).reset_index(drop=True)
movies_data = movies_data.drop_duplicates(subset=['id'])
# keep only important columns
movies_data = movies_data[['id','imdb_id','title','adult','budget','genres','original_language','release_date','vote_count','vote_average','popularity','overview']]
# preprocess the genres column to make it readable:
# list of {'name': ...} dicts -> comma-separated names
movies_data['genres'] = movies_data['genres'].apply(lambda x: ",".join([y['name'] for y in x]))
movies_data.to_csv("movies.csv", index=False)
import functools
def chaining(func):
    """Decorator that makes a mutator method fluent.

    The wrapped method is executed for its side effects and its own return
    value is discarded; the wrapper returns ``self`` so calls can be chained.
    """
    @functools.wraps(func)
    def fluent(self, *args, **kwargs):
        func(self, *args, **kwargs)
        return self
    return fluent
| 182 | 62 |
from flask_restly._storage import push_metadata
import wrapt
def body(outgoing=None, incoming=None):
    """Decorator factory tagging a handler with body (de)serialization metadata.

    The handler itself is left functionally untouched (the wrapt wrapper is a
    pure pass-through); the framework later reads the pushed metadata.
    """
    def decorator(func):
        @wrapt.decorator
        def passthrough(wrapped, _, args, kwargs):
            return wrapped(*args, **kwargs)

        tagged = passthrough(func)
        push_metadata(tagged, {
            'incoming': incoming,
            'outgoing': outgoing,
        })
        return tagged
    return decorator
| 450 | 131 |
import sys
import os
# Locate the docs root (one level up) and put it on sys.path so the shared
# Sphinx settings in config/all.py can be imported below.
cwd = os.path.dirname(os.path.realpath(__file__))
main_dir = os.path.normpath(cwd + '/../')
sys.path.append(main_dir)
#print sys.path
# Shared Sphinx configuration; names below override per-language values.
from config.all import *

language = 'en'
#html_logo = '../images/logos/logo-en.png'
latex_logo = '../images/logos/logo-en.png'
# (startdocname, targetname, title, author, theme) for the LaTeX builder.
latex_documents = [
    ('index', 'e-cidadania.tex', u'Documentation',
     u'Cidadania S. Coop. Galega', 'manual'),
]
import unittest
from datanator.data_source import ec
import datanator.config.core
import shutil
import tempfile
from pathlib import Path
class TestEC(unittest.TestCase):
    """Tests for datanator.data_source.ec (ENZYME flat-file loader)."""

    @classmethod
    def setUpClass(cls):
        # Credentials/server come from the datanator config; the 'test'
        # database keeps production collections untouched.
        cls.cache_dir = tempfile.mkdtemp()
        db = 'test'
        username = datanator.config.core.get_config()['datanator']['mongodb']['user']
        password = datanator.config.core.get_config()['datanator']['mongodb']['password']
        MongoDB = datanator.config.core.get_config()['datanator']['mongodb']['server']
        cls.src = ec.EC(server=MongoDB, db=db, username=username, password=password, authSource='admin',
                        readPreference='nearest', max_entries=20, cache_dir=cls.cache_dir)

    @classmethod
    def tearDownClass(cls):
        # Remove the download cache and drop the collection created above.
        shutil.rmtree(cls.cache_dir)
        cls.src.db.drop_collection(cls.src.collection_str)
        cls.src.client.close()

    @unittest.skip('IP')
    def test_establish_ftp(self):
        # Requires network access to the ENZYME FTP server.
        ftp = self.src.establish_ftp()
        self.assertTrue('enzyme.dat' in ftp.nlst())

    @unittest.skip('IP')
    def test_retrieve_content(self):
        # Downloads enzyme.dat into the temp cache directory.
        p = Path(self.cache_dir+'/enzyme.dat')
        self.src.retrieve_content()
        self.assertTrue(p.exists())

    @unittest.skip('circle directory error.')
    def test_parse_content(self):
        # Depends on a developer-local copy of enzyme.dat.
        location = str(Path('~/karr_lab/datanator/docs/enzyme.dat').expanduser())
        self.src.parse_content(location)

    def test_make_doc(self):
        # make_doc turns raw ENZYME .dat record lines into one document;
        # trailing periods of DE/AN/CA lines are stripped in the output.
        lines = ["ID 1.1.1.1", "DE Alcohol dehydrogenase.", "AN Aldehyde reductase.",
                 "CA (1) A primary alcohol + NAD(+) = an aldehyde + NADH.", "CA (2) A secondary alcohol + NAD(+) = a ketone + NADH.",
                 "CF Zn(2+) or Fe cation."]
        result = self.src.make_doc(lines)
        self.assertEqual(result, {'ec_number': '1.1.1.1', 'ec_name': 'Alcohol dehydrogenase',
                                  'ec_synonyms': ['Aldehyde reductase'],
                                  'catalytic_activity': ['(1) A primary alcohol + NAD(+) = an aldehyde + NADH', '(2) A secondary alcohol + NAD(+) = a ketone + NADH'],
                                  'cofactor': 'Zn(2+) or Fe cation'})
def partition(A, left, right):
    """Lomuto partition of A[left..right] around the pivot A[right].

    Rearranges the slice in place so everything <= pivot precedes it, and
    returns the pivot's final index. Runs in O(right - left).
    """
    pivot = A[right]
    boundary = left  # first position not yet known to hold a value <= pivot
    for cursor in range(left, right):
        if A[cursor] <= pivot:
            A[boundary], A[cursor] = A[cursor], A[boundary]
            boundary += 1
    # Drop the pivot into its final slot.
    A[boundary], A[right] = A[right], A[boundary]
    return boundary
def quick_sort(A, left, right):
    """Quick-sort A[left..right] in place; average O(N log N)."""
    if left >= right:
        return  # zero or one element: already sorted
    pivot_index = partition(A, left, right)
    quick_sort(A, left, pivot_index - 1)
    quick_sort(A, pivot_index + 1, right)
def solution(A):
    """
    Sort numbers in list A using quick sort.
    >>> solution([5, 2, 2, 4, 1, 3, 7, 9])
    [1, 2, 2, 3, 4, 5, 7, 9]
    >>> solution([2, 4, 6, 2, 0, 8])
    [0, 2, 2, 4, 6, 8]
    >>> solution([1, 3, 5, 7, 3, 9, 1, 5])
    [1, 1, 3, 3, 5, 5, 7, 9]
    """
    # Sorts in place and returns the same list for convenience.
    quick_sort(A, 0, len(A) - 1)
    return A
if __name__ == '__main__':
    # Run the doctests embedded in solution()'s docstring.
    import doctest
    doctest.testmod()
| 1,710 | 542 |
from setuptools import setup, find_packages

# Execute _version.py to obtain __version__ without importing the package
# (importing could pull in runtime dependencies at install time).
# FIX: use context managers so the file handles are closed deterministically.
with open("trio_inspector/_version.py", encoding="utf-8") as fp:
    exec(fp.read())

with open("README.md", encoding="utf-8") as fp:
    LONG_DESC = fp.read()

setup(
    name="trio-inspector",
    version=__version__,
    description="A browser-based monitor for Trio",
    url="https://github.com/syncrypt/trio-inspector/",
    # BUG FIX: README.md was previously read a second time here without an
    # encoding, and the UTF-8 LONG_DESC computed above was left unused.
    long_description=LONG_DESC,
    author="Hannes Gräuler",
    author_email="hannes@syncrypt.space",
    license="MIT -or- Apache License 2.0",
    packages=find_packages(),
    package_data={
        'trio_inspector': [
            'static/main.js',
            'static/index.html',
            'static/style.css'
        ]
    },
    install_requires=[
        "trio",
        "trio-typing",
        "hypercorn",
        "quart-trio",
        "quart",
        "quart-cors"
    ],
    keywords=[
        # COOKIECUTTER-TRIO-TODO: add some keywords
        # "async", "io", "networking", ...
    ],
    python_requires=">=3.7",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Framework :: Trio",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "License :: OSI Approved :: MIT License",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
        "Topic :: Software Development :: Debuggers",
    ],
)
| 1,691 | 529 |
""" Formula and related objects """
from .formulae import (Formula, Term, terms, Factor, make_recarray,
natural_spline)
| 143 | 39 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def generateTrees(self, n: int) -> List[TreeNode]:
        """Return every structurally unique BST containing values 1..n.

        NOTE(review): sub-results are memoized, so the returned trees *share*
        subtree nodes — callers must not mutate the nodes in place.
        """
        if n == 0:
            return []
        self.seen = {}  # (start, end) -> list of root nodes for that range
        return self.dfs(1, n)

    def dfs(self, start, end):
        # All distinct BSTs whose keys are exactly start..end (inclusive).
        if start > end:
            return [None]  # the empty subtree
        if (start, end) in self.seen:
            return self.seen[(start, end)]
        if start == end:
            node = TreeNode(start)
            self.seen[(start, end)] = [node]
            return self.seen[(start, end)]
        cur = []
        # Try every value i as root; combine each left/right subtree pair.
        for i in range(start, end + 1):
            left = self.dfs(start, i - 1)
            right = self.dfs(i + 1, end)
            for l in left:
                for r in right:
                    node = TreeNode(i)
                    node.left = l
                    node.right = r
                    cur.append(node)
        self.seen[(start, end)] = cur
        return cur
| 1,119 | 327 |
import numpy as np
from faps.genotypeArray import genotypeArray
from faps.calculate_geno_probs import calculate_geno_probs
def make_offspring(parents, noffs=None, dam_list=None, sire_list=None, mu=1e-12, family_name='offs'):
    """
    Mate individuals in a base population to create simulated offspring. Lists of
    specific sires and dams can be provided with the options dam_list and
    sire_list. If only the number of offspring are specified parents are mated at
    random from the base population.

    Parameters
    ----------
    parents: genotypeArray
        Genotype information on the parents to be mated.
    noffs: int
        Number of offspring to be produced. If specific dams and sires are
        specified, this is ignored.
    dam_list, sire_list: lists
        Integer lists of positions of sires and dams to be mated.
        Pairs are mated in order (i.e. the first dam with the first sire, and so
        forth). If used these two lists must be of the same length. If no
        arguments are given for either list, parents are mated at random with
        replacement, and the possibility of self-fertilisation.
    mu: float or 1-d array between 0 and 1
        Per locus genotype error rate; the probability that the called
        genotype is incorrect. Alternatively, supply a vector of error rates
        for each locus. Defaults to 1e-12.
    family_name: str, optional
        String denoting the name for this family.

    Returns
    -------
    A genotypeArray object.

    Raises
    ------
    ValueError
        If neither noffs nor parent lists are supplied, if only one of
        dam_list/sire_list is supplied, if their lengths differ, or if noffs
        is not a positive integer.
    """
    if dam_list is None and sire_list is None and noffs is None:
        raise ValueError("Either noffs needs to be a positive integer, or else lists of dams and sires should be given.")
    # BUG FIX: supplying only one of dam_list/sire_list previously crashed
    # below with TypeError (len(None)); fail fast with a clear error instead.
    if (dam_list is None) != (sire_list is None):
        raise ValueError("List of sires must be the same length as the list of dams.")
    # If parents haven't been specified, choose these at random.
    if dam_list is None and sire_list is None:
        if noffs < 1 or not isinstance(noffs, int):
            raise ValueError("noffs should be a positive integer.")
        nparents = parents.geno.shape[0]
        dam_list = np.random.choice(range(nparents), noffs, replace=True).tolist()
        sire_list = np.random.choice(range(nparents), noffs, replace=True).tolist()
    # if parents have been specified, set noffs to the length of sires and dams.
    if dam_list is not None or sire_list is not None:
        noffs = len(dam_list)
        if len(dam_list) != len(sire_list):
            raise ValueError("List of sires must be the same length as the list of dams.")

    nloci = parents.geno.shape[1]  # pull out the number of loci
    offs_genotypes = np.zeros([noffs, nloci, 2])  # empty array to store offspring genotypes.
    # pull out arrays of genotype data for the dams and sires.
    dam_genotypes = parents.subset(dam_list).geno
    sire_genotypes = parents.subset(sire_list).geno
    # draw an array of indices for whether the first or second allele should be drawn.
    dam_alleles = np.random.binomial(1, 0.5, nloci*noffs).reshape([noffs, nloci])
    sire_alleles = np.random.binomial(1, 0.5, nloci*noffs).reshape([noffs, nloci])
    # loop over every mating pair and send the selected alleles to offs_genotypes.
    for o in range(noffs):
        offs_genotypes[o, :, 0] = np.array([dam_genotypes[o, l][dam_alleles[o, l]] for l in range(nloci)])
        offs_genotypes[o, :, 1] = np.array([sire_genotypes[o, l][sire_alleles[o, l]] for l in range(nloci)])
    offs_genotypes = offs_genotypes.astype(float)
    # extra information on names.
    offspring_names = np.array([family_name+'_'+str(a) for a in np.arange(noffs)])
    maternal_names = parents.subset(dam_list).names
    paternal_names = parents.subset(sire_list).names
    geno_probs = calculate_geno_probs(offs_genotypes, mu)
    return genotypeArray(
        geno=offs_genotypes,
        geno_probs=geno_probs,
        names=offspring_names,
        mothers=maternal_names,
        fathers=paternal_names,
        markers=np.arange(nloci)
    )
| 3,922 | 1,277 |
from app.server.models.user import User
def test_get_all_users(test_client, activated_admin_user):
    """
    GIVEN a flask application
    WHEN a GET request sent to '/api/v1/user/' with a valid auth token
    THEN check all users are returned.
    """
    authentication_token = activated_admin_user.encode_auth_token().decode()
    response = test_client.get(
        "/api/v1/user/",
        headers={
            "Authorization": f"Bearer {authentication_token}",
            "Accept": "application/json",
        },
        content_type="application/json",
    )
    assert response.status_code == 200
    # NOTE(review): guard is redundant — the assert above already ensures 200.
    if response.status_code == 200:
        assert len(response.json["data"]["users"]) == len(User.query.all())


def test_get_single_user(test_client, activated_admin_user, activated_client_user):
    """
    GIVEN a flask application
    WHEN a GET request is sent to '/api/v1/user/<int:user_id>/' by a client or admin user.
    THEN check that the specific user matching the id is returned.
    """
    # Both roles may fetch a single user; exercise each in turn.
    for user in [activated_admin_user, activated_client_user]:
        authentication_token = user.encode_auth_token().decode()
        response = test_client.get(
            f"/api/v1/user/{user.id}/",
            headers={
                "Authorization": f"Bearer {authentication_token}",
                "Accept": "application/json",
            },
            content_type="application/json",
        )
        assert response.status_code == 200
        if response.status_code == 200:
            assert response.json["data"]["user"]["given_names"] == user.given_names


# TODO: [Philip] Refactor code to accommodate user edits
def test_delete_user(test_client, activated_admin_user):
    """
    GIVEN a flask application
    WHEN a DELETE request is sent to '/user/<int:user_id>/'
    THEN check that the delete user data is absent from the db.
    """
    authentication_token = activated_admin_user.encode_auth_token().decode()
    response = test_client.delete(
        f"/api/v1/user/{activated_admin_user.id}/",
        headers={
            "Authorization": f"Bearer {authentication_token}",
            "Accept": "application/json",
        },
        content_type="application/json",
    )
    assert response.status_code == 200
    if response.status_code == 200:
        # Deleting the user must remove the row entirely.
        assert (User.query.get(activated_admin_user.id)) is None
| 2,362 | 701 |
from ipaddress import IPv4Address, IPv6Address, IPv6Network
from contextlib import contextmanager
import tempfile
import os
import random
import unittest
import requests
import iprir
from iprir.record import RIRRecord, ip_to_int
from iprir.parser import parse_file, parse_string
from iprir.database import DB
from iprir.ipset import IpSet
import iprir.updater
# Miniature RIR delegation file used as a fixture: a comment line, a version
# line, three summary lines, and asn/ipv4/ipv6 records (test_parse below
# asserts that exactly 5 non-summary records survive parsing).
SAMPLE_TEXT_DB_CONTENT = '''
#
2|apnic|20170120|50186|19830613|20170119|+1000
apnic|*|asn|*|7517|summary
apnic|*|ipv4|*|36581|summary
apnic|*|ipv6|*|6088|summary
apnic|NZ|asn|681|1|20020801|allocated
apnic|AU|ipv4|1.0.0.0|256|20110811|assigned
apnic|CN|ipv4|1.0.1.0|256|20110414|allocated
apnic|CN|ipv6|2001:250::|35|20000426|allocated
apnic|CN|ipv6|2001:250:2000::|35|20020726|allocated
'''

# Filled in once per session by setUpModule with the real downloaded data.
REAL_RECORDS = None


# noinspection PyPep8Naming
def setUpModule():
    """Download/initialize the real RIR databases and parse all their records."""
    global REAL_RECORDS
    iprir.updater.initialize()
    REAL_RECORDS = sum(map(parse_file, iprir.TEXT_DB_PATH.values()), [])
@contextmanager
def patch(obj, key, value):
    """Temporarily set attribute *key* of *obj* to *value*.

    The previous value is restored on exit, even when the body raises.
    """
    previous = getattr(obj, key)
    setattr(obj, key, value)
    try:
        yield
    finally:
        setattr(obj, key, previous)
@contextmanager
def patch_db_path():
    """Point iprir at throw-away temp text/SQL database files.

    Yields (text_db_path, sql_db_path). The temp files are removed only on
    success; on failure they are left on disk to aid debugging.
    """
    fd, text_db_path = tempfile.mkstemp(prefix='iprir_test_', suffix='.txt')
    os.close(fd)
    fd, sql_db_path = tempfile.mkstemp(prefix='iprir_test_', suffix='.sqlite')
    os.close(fd)
    print('text_db_path', text_db_path)
    print('sql_db_path', sql_db_path)
    with patch(iprir, 'TEXT_DB_PATH', dict(test=text_db_path)):
        with patch(iprir, 'TEXT_DB_URLS', dict(test='https://dummy/')):
            with patch(iprir, 'SQL_DB_PATH', sql_db_path):
                try:
                    yield text_db_path, sql_db_path
                except Exception:
                    raise
                else:
                    os.remove(text_db_path)
                    os.remove(sql_db_path)
def write_string_to_file(filename: str, string: str):
    """Overwrite *filename* with *string*.

    FIX: an explicit UTF-8 encoding is now used so fixtures are written
    identically regardless of the platform's default text encoding.
    """
    with open(filename, 'wt', encoding='utf-8') as fp:
        fp.write(string)
def test_record_ipv4():
    """RIRRecord exposes an ipv4 block as address, CIDR network and integer."""
    r = RIRRecord('CN', 'ipv4', '1.0.1.0', '256', 'assigned')
    assert r.length == 256  # for ipv4 the 'value' field is an address count
    assert r.ipv4.exploded == '1.0.1.0'
    assert r.ipv4_network.network_address == r.ipv4
    assert r.ipv4_network.prefixlen == 24  # 256 addresses == /24
    assert r.ipv4 == IPv4Address(r.as_int)


def test_record_ipv6():
    """For ipv6, the 'value' field is a prefix length, not an address count."""
    r = RIRRecord('CN', 'ipv6', '2001:250::', '35', 'allocated')
    assert r.length == 2 ** (128 - 35)
    assert r.ipv6.compressed == '2001:250::'
    assert r.ipv6_network.network_address == r.ipv6
    assert r.ipv6_network.prefixlen == 35
    assert r.ipv6 == IPv6Address(r.as_int)


def test_parse():
    """parse_string keeps data records and drops comment/summary lines."""
    records = parse_string(SAMPLE_TEXT_DB_CONTENT)
    assert len(records) == 5
    r = records[-1]
    assert (r.country, r.ipv6, r.ipv6_network, r.status) == (
        'CN',
        IPv6Address('2001:250:2000::'),
        IPv6Network('2001:250:2000::/35'),
        'allocated'
    )
def test_ip_overlap():
    """Real-data sanity check: stored ipv4/ipv6 ranges must not overlap."""
    def verify(lst):
        # Sort (start, length) pairs by start; each range must end before
        # the next one begins.
        lst.sort(key=lambda x: x[0])
        for i in range(1, len(lst)):
            prev_start, prev_len = lst[i - 1]
            assert prev_start + prev_len <= lst[i][0]

    lst4 = []
    lst6 = []
    for r in REAL_RECORDS:
        if r.country == 'AP':  # asia/pacific
            # XXX: conflicts
            # apnic|AP|ipv4|159.117.192.0|2048|19920409|allocated|A928972C
            # ripencc|NL|ipv4|159.117.192.0|2048|19920409|assigned|
            continue
        if not DB.filter_record(r):
            continue
        if r.type == 'ipv4':
            lst4.append((r.as_int, r.length))
        elif r.type == 'ipv6':
            lst6.append((r.as_int, r.length))
    verify(lst4)
    verify(lst6)
def test_db():
    """End-to-end DB check: reset, load sample records, query by country/ip."""
    with patch_db_path() as pathes:
        text_db_path, sql_db_path = pathes
        write_string_to_file(text_db_path, SAMPLE_TEXT_DB_CONTENT)
        records = parse_file(text_db_path)

        db = DB()
        try:
            ret = db.reset_table()
            assert ret
            ret = db.add_records(records)
            assert ret

            # Country lookups return records in file order.
            cn4 = db.by_country('ipv4', 'CN')
            assert len(cn4) == 1
            assert cn4[0] == records[2]
            cn6 = db.by_country('ipv6', 'CN')
            assert len(cn6) == 2
            assert cn6 == records[3:5]

            # by_ip: first and last address of a block hit; one past misses.
            r = db.by_ip(IPv4Address('1.0.1.0'))
            assert r == records[2]
            r = db.by_ip(IPv4Address('1.0.1.255'))
            assert r == records[2]
            r = db.by_ip(IPv4Address('1.0.2.0'))
            assert r is None

            # The two sample ipv6 blocks are adjacent /35s.
            r = db.by_ip(IPv6Address('2001:250::'))
            assert r == records[3]
            net = records[3].ipv6_network
            r = db.by_ip(net.network_address + net.num_addresses)
            assert r == records[4]
            net = records[4].ipv6_network
            r = db.by_ip(net.network_address + net.num_addresses)
            assert r is None
        finally:
            db.close()
def test_update():
    """update() should fetch (mocked here), parse, and persist ip records."""
    def fake_get(*args, **kwargs):
        # Stand-in for requests.get: returns an object with a .text attribute
        # containing the sample delegation file.
        class Obj:
            pass
        o = Obj()
        o.text = SAMPLE_TEXT_DB_CONTENT
        return o

    with patch(requests, 'get', fake_get):
        with patch_db_path():
            iprir.updater.update()
            db = DB()
            try:
                records = parse_string(SAMPLE_TEXT_DB_CONTENT)
                # Only ipv4/ipv6 records are stored; asn records are excluded.
                records = list(filter(lambda r: r.type in ('ipv4', 'ipv6'), records))
                assert db.all() == records
            finally:
                db.close()
def test_ipset():
    """IpSet merges adjacent blocks into [lo, hi) ranges and supports `in`."""
    def to_int(ips):
        return [ip_to_int(IPv4Address(ip)) for ip in ips]

    text = '''
2|apnic|20170120|50186|19830613|20170119|+1000
apnic|*|ipv6|*|6088|summary
apnic|AU|ipv4|1.0.0.0|256|20110811|assigned
apnic|CN|ipv4|1.0.1.0|256|20110414|allocated
apnic|CN|ipv4|1.0.5.0|256|20110414|allocated
'''
    records = parse_string(text)
    random.shuffle(records)  # input order must not matter to IpSet
    ipset = IpSet(records)
    # 1.0.0.0/24 and 1.0.1.0/24 are adjacent and merge into one range.
    assert ipset.lo == to_int(['1.0.0.0', '1.0.5.0'])
    assert ipset.hi == to_int(['1.0.2.0', '1.0.6.0'])

    # Probe both boundaries of every range.
    assert IPv4Address('0.255.255.255') not in ipset
    assert IPv4Address('1.0.0.0') in ipset
    assert IPv4Address('1.0.1.0') in ipset
    assert IPv4Address('1.0.1.255') in ipset
    assert IPv4Address('1.0.2.0') not in ipset
    assert IPv4Address('1.0.4.255') not in ipset
    assert IPv4Address('1.0.5.0') in ipset
    assert IPv4Address('1.0.5.255') in ipset
    assert IPv4Address('1.0.6.0') not in ipset

    # test IpSet.by_country()
    with patch_db_path() as pathes:
        text_db_path, sql_db_path = pathes
        write_string_to_file(text_db_path, text)
        iprir.updater.update_sql_db()
        ipset = IpSet.by_country('ipv4', 'CN')
        assert ipset.lo == to_int(['1.0.1.0', '1.0.5.0'])
        assert ipset.hi == to_int(['1.0.2.0', '1.0.6.0'])
class TestIpSetOnRealData(unittest.TestCase):
    """Spot-check country lookups against the real downloaded data."""

    # Indirection so the subclass below can swap in the top-level API.
    by_country = staticmethod(IpSet.by_country)

    def test_by_country(self):
        # test on real data
        cn4 = self.by_country('ipv4', 'CN')
        assert IPv4Address('1.2.4.8') in cn4
        assert IPv4Address('111.13.101.208') in cn4
        assert IPv4Address('112.124.47.27') in cn4
        assert IPv4Address('74.125.68.105') not in cn4


class TestRealDataWithApi(TestIpSetOnRealData):
    """Same checks through the public iprir.by_country / iprir.by_ip API."""

    by_country = staticmethod(iprir.by_country)

    def test_by_ip(self):
        assert iprir.by_ip(IPv4Address('8.8.8.8')) == RIRRecord(
            country='US', type='ipv4', start='8.0.0.0', value='16777216', status='allocated',
        )
# noinspection PyPep8Naming
def tearDownModule():
    """Close the module-wide DB handle opened during the tests."""
    iprir.get_db().close()
| 7,576 | 3,148 |
import sys
# Bracket syntax scoring (matches Advent of Code 2021 day 10 — TODO confirm).
with open(sys.argv[1], "r") as file:
    entries = file.read().splitlines()

open_chars = ['(', '[', '{', '<']
close_chars = [')', ']', '}', '>']
# Map each opening bracket to its matching closer.
char_map = {o: c for o, c in zip(open_chars, close_chars)}
# Penalty for the first illegal closing character on a corrupted line.
points = {')': 3, ']': 57, '}': 1197, '>': 25137}

syntax_score = 0
acp_scores = []
for line in entries:
    levels = []  # stack of currently-open brackets
    for c in line:
        if c in open_chars:
            levels.append(c)
        elif c == char_map[levels[-1]]:
            levels.pop()  # correctly closed
        else:
            # Corrupted line: score the bad character and discard the line.
            syntax_score += points[c]
            break
    else:
        # Line is merely incomplete: score its autocompletion. Each remaining
        # opener contributes 1..4 (open_chars index order matches the scoring).
        score = 0
        for l in reversed(levels):
            score = score * 5 + 1 + open_chars.index(l)
        acp_scores.append(score)

# The middle autocomplete score (count is odd per the puzzle statement).
acp_score = sorted(acp_scores)[len(acp_scores)//2]
print('Answer 1:', syntax_score)
print('Answer 2:', acp_score)
| 820 | 327 |
#!/usr/bin/env python3
import sys
import argparse
import os
import os.path
import re
# Summarize duration samples read from stdin, one "H:MM:SS[.ffffff]" per line.
regex = re.compile(r'^(\d+):(\d\d):(\d\d)\.(\d+)$')
alt_regex = re.compile(r'^(\d+):(\d\d):(\d\d)$')  # variant without a fraction

count = 0
total = 0.0
_max = 0.0
_min = 100000
minimum_considered = 1.0  # durations shorter than this are excluded
excluded = 0
for line in sys.stdin:
    if count == 1000000:
        break  # hard cap on the number of samples
    m = regex.match(line.strip())
    if m:
        hour, minute, second, frac = m.groups()
    else:
        m = alt_regex.match(line.strip())
        if m:
            hour, minute, second = m.groups()
            frac = "000000"
        else:
            # BUG FIX: the original interpolated an undefined name `stats_file`
            # here, so any malformed line raised NameError instead of being
            # reported and skipped.
            sys.stderr.write("ERROR: line {} failed regex\n".format(line.strip()))
            continue
    total_seconds = float('0.{}'.format(frac)) + float(second) + (float(minute) * 60.0) + (float(hour) * 60.0 * 60.0)
    if total_seconds < minimum_considered:
        excluded += 1
        continue
    total += total_seconds
    count += 1
    if total_seconds > _max:
        _max = total_seconds
    if total_seconds < _min:
        _min = total_seconds
if count:
    # FIX: "averge" typo in the report corrected to "average".
    print("total {} average {:.2f} max {:.2f} min {:.2f} (excluded {})".format(count, total / float(count), _max, _min, excluded))
| 1,207 | 465 |
from tetris.board import generate_empty_board
from tetris import board, tetromino
import numba
from numba import jitclass, bool_, int64
from tetris.utils import print_tetromino, print_board_to_string
# Needed for numba.@jitclass: explicit member types for the compiled class.
specTetris = [
    ('num_columns', int64),
    ('num_rows', int64),
    ('feature_type', numba.types.string),
    ('num_features', int64),
    ('game_over', bool_),
    ('current_board', board.Board.class_type.instance_type),
    ('current_tetromino', tetromino.Tetromino.class_type.instance_type),
    ('cleared_lines', int64)
]


@jitclass(specTetris)
class Tetris:
    """
    Tetris for reinforcement learning applications.
    Tailored to use with a set of hand-crafted features such as "BCTS" (Thiery & Scherrer 2009)
    The BCTS feature names (and order) are
    ['rows_with_holes', 'column_transitions', 'holes', 'landing_height',
    'cumulative_wells', 'row_transitions', 'eroded', 'hole_depth']
    """
    def __init__(self,
                 num_columns,
                 num_rows,
                 feature_type="bcts",
                 ):
        if feature_type == "bcts":
            self.feature_type = feature_type
            self.num_features = 8  # the eight BCTS features listed above
        else:
            raise AssertionError("Only the BCTS feature set (Thiery & Scherrer 2009) is currently implemented.")
        self.num_columns = num_columns
        self.num_rows = num_rows
        self.current_tetromino = tetromino.Tetromino(self.feature_type, self.num_features, self.num_columns)
        self.current_board = generate_empty_board(self.num_rows, self.num_columns)
        self.cleared_lines = 0
        self.game_over = False

    def reset(self):
        # Fresh empty board, zeroed line count, newly drawn tetromino.
        self.current_board = generate_empty_board(self.num_rows, self.num_columns)
        self.current_board.calc_bcts_features()
        self.cleared_lines = 0
        self.game_over = False
        self.current_tetromino.next_tetromino()

    def step(self, after_state):
        # `after_state` is one of the boards from get_after_states(); the
        # gym-like return is ((board, tetromino), reward, done).
        self.game_over = after_state.is_terminal_state
        if not self.game_over:
            self.cleared_lines += after_state.n_cleared_lines
        self.current_board = after_state
        self.current_tetromino.next_tetromino()
        return (self.current_board, self.current_tetromino), after_state.n_cleared_lines, self.game_over

    def get_after_states(self):
        # All boards reachable by placing the current tetromino.
        return self.current_tetromino.get_after_states(self.current_board)

    def print_current_board(self):
        print(print_board_to_string(self.current_board))

    def print_current_tetromino(self):
        print(print_tetromino(self.current_tetromino.current_tetromino_index))
| 2,622 | 877 |
# Copyright 2020 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Notifier inventory summary export tests"""
import pytest
import re
import subprocess
class TestNotifierInventorySummaryExport:
    """Tests for the notifier inventory summary export feature."""

    @pytest.mark.e2e
    @pytest.mark.notifier
    @pytest.mark.server
    def test_inventory_summary_export_gcs(
            self,
            forseti_notifier_readonly: subprocess.CompletedProcess,
            forseti_server_bucket_name: str):
        """Test that the inventory summary is exported to GCS.

        Args:
            forseti_notifier_readonly (subprocess.CompletedProcess): Notifier
                run process result.
            forseti_server_bucket_name (str): Forseti server bucket name.
        """
        # The notifier logs the GCS destination; pull the gs:// path from stdout.
        match = re.search(
            fr'gs://{forseti_server_bucket_name}/inventory_summary/(.*).csv',
            str(forseti_notifier_readonly.stdout))
        assert match
        gcs_path = match.group(0)
        # Then confirm the object actually exists in the bucket.
        cmd = ['sudo', 'gsutil', 'ls', gcs_path]
        result = subprocess.run(cmd, stderr=subprocess.PIPE,
                                stdout=subprocess.PIPE)
        assert result.returncode == 0
| 1,752 | 512 |
"""
MIT License
Copyright (c) 2022 Texas Tech University
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
This file is part of MonSter.
Author:
Jie Li, jie.li@ttu.edu
"""
import logger
import time
import multiprocessing
log = logger.get_logger(__name__)
def partition(arr: list, cores: int):
    """Partition a list into ``cores`` contiguous, near-equal groups.

    Partition urls/nodes into several groups based on # of cores.  The
    first ``len(arr) % cores`` groups receive one extra element each, so
    no group is more than one element larger than any other.  (The
    previous implementation appended the entire surplus to the last
    group, which could leave one core with up to ``cores - 1`` extra
    items and unbalance the workload.)

    Args:
        arr (list): A list to be partitioned
        cores (int): Number of cores of the compute running MonSter

    Returns:
        list: ``cores`` sub-lists whose concatenation equals ``arr``;
        an empty list if partitioning fails (e.g. ``cores == 0``).
    """
    groups = []
    try:
        base, surplus = divmod(len(arr), cores)
        start = 0
        for i in range(cores):
            # The first `surplus` groups absorb one extra element each.
            size = base + (1 if i < surplus else 0)
            groups.append(arr[start:start + size])
            start += size
    except Exception as err:
        log.error(f"Cannot Partition the list: {err}")
    return groups
| 2,084 | 655 |
"""ParamSklearn can be easily extended with new classification and
preprocessing methods. At import time, ParamSklearn checks the directory
``ParamSklearn/components/classification`` for classification algorithms and
``ParamSklearn/components/preprocessing`` for preprocessing algorithms. To be
found, the algorithm must provide a class implementing one of the given
interfaces.
Coding Guidelines
=================
Please try to adhere to the `scikit-learn coding guidelines <http://scikit-learn.org/stable/developers/index.html#contributing>`_.
Own Implementation of Algorithms
================================
When adding new algorithms, it is possible to implement it directly in the
fit/predict/transform method of a component. We do not recommend this,
but rather recommend to implement an algorithm in a scikit-learn compatible
way (`see here <http://scikit-learn.org/stable/developers/index.html#apis-of-scikit-learn-objects>`_).
Such an implementation should then be put into the `implementation` directory
and can then be easily wrapped to become a component in ParamSklearn.
Classification
==============
The ParamSklearnClassificationAlgorithm provides an interface for
Classification Algorithms inside ParamSklearn. It provides four important
functions. Two of them,
:meth:`get_hyperparameter_search_space() <ParamSklearn.components.classification_base.ParamSklearnClassificationAlgorithm.get_hyperparameter_search_space>`
and
:meth:`get_properties() <ParamSklearn.components.classification_base.ParamSklearnClassificationAlgorithm.get_properties>`
are used to
automatically create a valid configuration space. The other two,
:meth:`fit() <ParamSklearn.components.classification_base.ParamSklearnClassificationAlgorithm.fit>` and
:meth:`predict() <ParamSklearn.components.classification_base.ParamSklearnClassificationAlgorithm.predict>`
are an implementation of the `scikit-learn predictor API <http://scikit-learn.org/stable/developers/index.html#apis-of-scikit-learn-objects>`_.
Preprocessing
============="""
from . import classification as classification_components
from . import regression as regression_components
from . import feature_preprocessing as feature_preprocessing_components
from . import data_preprocessing as data_preprocessing_components
| 2,289 | 560 |
import adv.adv_test
from core.advbase import *
from slot.d import Dreadking_Rathalos
def module():
    # Hook used by the adv framework to locate this file's adventurer class.
    return Yuya
class Yuya(Adv):
    """Simulation config for the adventurer Yuya (adv framework)."""
    # Equipment and action-priority (ACL) configuration read by Adv.
    conf = {}
    conf['slot.d'] = Dreadking_Rathalos()
    conf['acl'] = """
    `s3, fsc and not self.s3_buff
    `s1, fsc
    `fs, seq=2
    """
    # Ability 3 tuple consumed by the framework; presumably
    # (effect name, modifier, condition/threshold) — semantics live in core.advbase.
    a3 = ('primed_crit_chance', 0.5,5)
    def prerun(self):
        # Ability 1: permanent (+/-)20% attack passive depending on the
        # 'hp60' condition toggle.
        if self.condition('hp60'):
            Selfbuff('a1',0.2,-1,'att','passive').on()
        else:
            Selfbuff('a1',-0.2,-1,'att','passive').on()
    def s1_proc(self, e):
        # NOTE(review): this Spdbuff is constructed but never .on()'d,
        # unlike the Selfbuffs in prerun — confirm the framework activates
        # buffs on construction, otherwise this proc has no effect.
        Spdbuff("s2",0.2, 10)
if __name__ == '__main__':
    # Run the adv framework's standalone simulation for this adventurer.
    conf = {}
    adv.adv_test.test(module(), conf)
| 658 | 268 |
"""
Analyzers.
"""
from elasticsearch_dsl import analyzer
from elasticsearch_dsl.analysis import token_filter
__title__ = 'django_elasticsearch_dsl_drf.analyzers'
__author__ = 'Artur Barseghyan <artur.barseghyan@gmail.com>'
__copyright__ = '2017-2019 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = (
    'edge_ngram_completion_filter',
    'edge_ngram_completion',
)

# Token filter emitting front-anchored n-grams (1..20 chars) of each token,
# used to power completion-style prefix matching.
edge_ngram_completion_filter = token_filter(
    'edge_ngram_completion_filter',
    type='edge_ngram',
    min_gram=1,
    max_gram=20,
)

# Analyzer: standard tokenization, lowercased, then edge-n-grammed.
edge_ngram_completion = analyzer(
    'edge_ngram_completion',
    tokenizer='standard',
    filter=['lowercase', edge_ngram_completion_filter],
)
| 669 | 266 |
from glob import glob
from os.path import join
import os
from matplotlib.pyplot import hexbin
from parse import LogAggregator
from plot import Ploter
if __name__ == '__main__':
    # Aggregate benchmark logs and render every paper graph.
    max_latencies = [2_000, 5_000]  # For TPS graphs.
    # Parse the results.
    for system in ['3-chain', '2-chain', 'ditto-async', 'ditto-sync', 'vaba']:
        # Remove stale aggregated outputs with a plain loop (the previous
        # list comprehension was used purely for side effects and built a
        # throwaway list).
        for stale in glob(f'{system}.*.txt'):
            os.remove(stale)
        files = glob(join(system, 'results', '*.txt'))
        LogAggregator(system, files, max_latencies).print()
        LogAggregator(system, files, max_latencies, end_to_end=False).print()
    # Plot 'Happy path' graph.
    ploter = Ploter(width=12.8)
    for system in ['3-chain', '2-chain', 'ditto-sync', 'vaba']:
        ploter.plot_latency(system, [10, 20, 50], [0], 512)
    ploter.finalize('happy-path', legend_cols=4)
    # Plot 'Happy path TPS' graph.
    ploter = Ploter()
    for system in ['3-chain', '2-chain', 'ditto-sync', 'vaba']:
        ploter.plot_tps(system, [0], max_latencies, 512)
    ploter.finalize('happy-path-tps', legend_cols=2)
    # Plot 'Happy path commit latency' graph.
    ploter = Ploter()
    for system in ['3-chain', '2-chain']:
        # NOTE(review): 'plot_commit_lantecy' [sic] matches the Ploter API
        # name; rename here only together with plot.py.
        ploter.plot_commit_lantecy(
            system, [0], [20000], 512, graph_type='commit_latency'
        )
    ploter.finalize('happy-path-commit', legend_cols=2, top_lim=1_500)
    # Plot 'Leader under DoS' graph.
    ploter = Ploter()
    for i, system in enumerate(['3-chain', '2-chain']):
        name = Ploter.legend_name(system)
        ploter.plot_free(
            [i*500],
            [0],
            [f'{name}, {x} nodes' for x in [10, 20, 50]]
        )
    for system in ['ditto-async', 'vaba']:
        ploter.plot_latency(system, [10, 20, 50], [0], 512)
    ploter.finalize('leader-under-dos', legend_cols=2)
    # Plot 'Dead nodes' graph.
    ploter = Ploter(width=12.8)
    for system in ['3-chain', '2-chain', 'ditto-sync', 'vaba']:
        ploter.plot_latency(system, [20], [0, 1, 3], 512)
    ploter.finalize('dead-nodes', legend_cols=4)
    # Plot 'Dead nodes and DoS' graph.
    ploter = Ploter()
    for i, system in enumerate(['3-chain', '2-chain']):
        name = Ploter.legend_name(system)
        ploter.plot_free(
            [i*500],
            [0],
            [
                f'{name}, 20 nodes',
                f'{name}, 20 nodes (1 faulty)',
                f'{name}, 20 nodes (3 faulty)'
            ]
        )
    for system in ['ditto-async', 'vaba']:
        ploter.plot_latency(system, [20], [0, 1, 3], 512)
    ploter.finalize('dead-nodes-and-dos', legend_cols=2)
| 2,594 | 1,018 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
# Public API of this generated module: one entry per tfgen output type below.
__all__ = [
    'ApiExtensionDestination',
    'ApiExtensionTrigger',
    'CartDiscountTarget',
    'CartDiscountValue',
    'CartDiscountValueMoney',
    'ProductTypeAttribute',
    'ProductTypeAttributeType',
    'ProductTypeAttributeTypeElementType2',
    'ProductTypeAttributeTypeElementType2LocalizedValue',
    'ProductTypeAttributeTypeLocalizedValue',
    'ProjectSettingsCarts',
    'ProjectSettingsExternalOauth',
    'ProjectSettingsMessages',
    'ProjectSettingsShippingRateCartClassificationValue',
    'ShippingZoneLocation',
    'ShippingZoneRateFreeAbove',
    'ShippingZoneRatePrice',
    'ShippingZoneRateShippingRatePriceTier',
    'ShippingZoneRateShippingRatePriceTierPrice',
    'SubscriptionChange',
    'SubscriptionDestination',
    'SubscriptionFormat',
    'SubscriptionMessage',
    'TaxCategoryRateSubRate',
    'TypeField',
    'TypeFieldType',
    'TypeFieldTypeElementType2',
    'TypeFieldTypeElementType2LocalizedValue',
    'TypeFieldTypeLocalizedValue',
]
@pulumi.output_type
class ApiExtensionDestination(dict):
    """Generated (tfgen) output type for an API extension destination; camelCase dict keys map to snake_case property getters."""
    @staticmethod
    def __key_warning(key: str):
        # Warn when callers index with the camelCase key instead of using
        # the snake_case property getter.
        suggest = None
        if key == "accessKey":
            suggest = "access_key"
        elif key == "accessSecret":
            suggest = "access_secret"
        elif key == "authorizationHeader":
            suggest = "authorization_header"
        elif key == "azureAuthentication":
            suggest = "azure_authentication"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ApiExtensionDestination. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ApiExtensionDestination.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ApiExtensionDestination.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 type: str,
                 access_key: Optional[str] = None,
                 access_secret: Optional[str] = None,
                 arn: Optional[str] = None,
                 authorization_header: Optional[str] = None,
                 azure_authentication: Optional[str] = None,
                 url: Optional[str] = None):
        # Only explicitly supplied optional fields are stored in the dict.
        pulumi.set(__self__, "type", type)
        if access_key is not None:
            pulumi.set(__self__, "access_key", access_key)
        if access_secret is not None:
            pulumi.set(__self__, "access_secret", access_secret)
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if authorization_header is not None:
            pulumi.set(__self__, "authorization_header", authorization_header)
        if azure_authentication is not None:
            pulumi.set(__self__, "azure_authentication", azure_authentication)
        if url is not None:
            pulumi.set(__self__, "url", url)
    @property
    @pulumi.getter
    def type(self) -> str:
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="accessKey")
    def access_key(self) -> Optional[str]:
        return pulumi.get(self, "access_key")
    @property
    @pulumi.getter(name="accessSecret")
    def access_secret(self) -> Optional[str]:
        return pulumi.get(self, "access_secret")
    @property
    @pulumi.getter
    def arn(self) -> Optional[str]:
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter(name="authorizationHeader")
    def authorization_header(self) -> Optional[str]:
        return pulumi.get(self, "authorization_header")
    @property
    @pulumi.getter(name="azureAuthentication")
    def azure_authentication(self) -> Optional[str]:
        return pulumi.get(self, "azure_authentication")
    @property
    @pulumi.getter
    def url(self) -> Optional[str]:
        return pulumi.get(self, "url")
@pulumi.output_type
class ApiExtensionTrigger(dict):
    """Generated (tfgen) output type for an API extension trigger; camelCase dict keys map to snake_case property getters."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "resourceTypeId":
            suggest = "resource_type_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ApiExtensionTrigger. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ApiExtensionTrigger.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ApiExtensionTrigger.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 actions: Sequence[str],
                 resource_type_id: str):
        pulumi.set(__self__, "actions", actions)
        pulumi.set(__self__, "resource_type_id", resource_type_id)
    @property
    @pulumi.getter
    def actions(self) -> Sequence[str]:
        return pulumi.get(self, "actions")
    @property
    @pulumi.getter(name="resourceTypeId")
    def resource_type_id(self) -> str:
        return pulumi.get(self, "resource_type_id")
@pulumi.output_type
class CartDiscountTarget(dict):
    """Generated (tfgen) output type for a cart discount target."""
    def __init__(__self__, *,
                 type: str,
                 predicate: Optional[str] = None):
        pulumi.set(__self__, "type", type)
        if predicate is not None:
            pulumi.set(__self__, "predicate", predicate)
    @property
    @pulumi.getter
    def type(self) -> str:
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def predicate(self) -> Optional[str]:
        return pulumi.get(self, "predicate")
@pulumi.output_type
class CartDiscountValue(dict):
    """Generated (tfgen) output type for a cart discount value; camelCase dict keys map to snake_case property getters."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "distributionChannelId":
            suggest = "distribution_channel_id"
        elif key == "productId":
            suggest = "product_id"
        elif key == "supplyChannelId":
            suggest = "supply_channel_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in CartDiscountValue. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        CartDiscountValue.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        CartDiscountValue.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 type: str,
                 distribution_channel_id: Optional[str] = None,
                 monies: Optional[Sequence['outputs.CartDiscountValueMoney']] = None,
                 permyriad: Optional[int] = None,
                 product_id: Optional[str] = None,
                 supply_channel_id: Optional[str] = None,
                 variant: Optional[int] = None):
        pulumi.set(__self__, "type", type)
        if distribution_channel_id is not None:
            pulumi.set(__self__, "distribution_channel_id", distribution_channel_id)
        if monies is not None:
            pulumi.set(__self__, "monies", monies)
        if permyriad is not None:
            pulumi.set(__self__, "permyriad", permyriad)
        if product_id is not None:
            pulumi.set(__self__, "product_id", product_id)
        if supply_channel_id is not None:
            pulumi.set(__self__, "supply_channel_id", supply_channel_id)
        if variant is not None:
            pulumi.set(__self__, "variant", variant)
    @property
    @pulumi.getter
    def type(self) -> str:
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="distributionChannelId")
    def distribution_channel_id(self) -> Optional[str]:
        return pulumi.get(self, "distribution_channel_id")
    @property
    @pulumi.getter
    def monies(self) -> Optional[Sequence['outputs.CartDiscountValueMoney']]:
        return pulumi.get(self, "monies")
    @property
    @pulumi.getter
    def permyriad(self) -> Optional[int]:
        return pulumi.get(self, "permyriad")
    @property
    @pulumi.getter(name="productId")
    def product_id(self) -> Optional[str]:
        return pulumi.get(self, "product_id")
    @property
    @pulumi.getter(name="supplyChannelId")
    def supply_channel_id(self) -> Optional[str]:
        return pulumi.get(self, "supply_channel_id")
    @property
    @pulumi.getter
    def variant(self) -> Optional[int]:
        return pulumi.get(self, "variant")
@pulumi.output_type
class CartDiscountValueMoney(dict):
    """Generated (tfgen) output type for a money value (cent amount + currency code)."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "centAmount":
            suggest = "cent_amount"
        elif key == "currencyCode":
            suggest = "currency_code"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in CartDiscountValueMoney. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        CartDiscountValueMoney.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        CartDiscountValueMoney.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 cent_amount: int,
                 currency_code: str):
        pulumi.set(__self__, "cent_amount", cent_amount)
        pulumi.set(__self__, "currency_code", currency_code)
    @property
    @pulumi.getter(name="centAmount")
    def cent_amount(self) -> int:
        return pulumi.get(self, "cent_amount")
    @property
    @pulumi.getter(name="currencyCode")
    def currency_code(self) -> str:
        return pulumi.get(self, "currency_code")
@pulumi.output_type
class ProductTypeAttribute(dict):
    """Generated (tfgen) output type for a product type attribute; camelCase dict keys map to snake_case property getters."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "inputHint":
            suggest = "input_hint"
        elif key == "inputTip":
            suggest = "input_tip"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ProductTypeAttribute. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ProductTypeAttribute.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ProductTypeAttribute.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 label: Mapping[str, Any],
                 name: str,
                 type: 'outputs.ProductTypeAttributeType',
                 constraint: Optional[str] = None,
                 input_hint: Optional[str] = None,
                 input_tip: Optional[Mapping[str, Any]] = None,
                 required: Optional[bool] = None,
                 searchable: Optional[bool] = None):
        pulumi.set(__self__, "label", label)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "type", type)
        if constraint is not None:
            pulumi.set(__self__, "constraint", constraint)
        if input_hint is not None:
            pulumi.set(__self__, "input_hint", input_hint)
        if input_tip is not None:
            pulumi.set(__self__, "input_tip", input_tip)
        if required is not None:
            pulumi.set(__self__, "required", required)
        if searchable is not None:
            pulumi.set(__self__, "searchable", searchable)
    @property
    @pulumi.getter
    def label(self) -> Mapping[str, Any]:
        return pulumi.get(self, "label")
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def type(self) -> 'outputs.ProductTypeAttributeType':
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def constraint(self) -> Optional[str]:
        return pulumi.get(self, "constraint")
    @property
    @pulumi.getter(name="inputHint")
    def input_hint(self) -> Optional[str]:
        return pulumi.get(self, "input_hint")
    @property
    @pulumi.getter(name="inputTip")
    def input_tip(self) -> Optional[Mapping[str, Any]]:
        return pulumi.get(self, "input_tip")
    @property
    @pulumi.getter
    def required(self) -> Optional[bool]:
        return pulumi.get(self, "required")
    @property
    @pulumi.getter
    def searchable(self) -> Optional[bool]:
        return pulumi.get(self, "searchable")
@pulumi.output_type
class ProductTypeAttributeType(dict):
    """Generated (tfgen) output type for a product type attribute's type; camelCase dict keys map to snake_case property getters."""
    @staticmethod
    def __key_warning(key: str):
        # NOTE: "ElementType2" is capitalized by the generator; it matches
        # the @pulumi.getter(name="ElementType2") alias below.
        suggest = None
        if key == "ElementType2":
            suggest = "element_type2"
        elif key == "localizedValues":
            suggest = "localized_values"
        elif key == "referenceTypeId":
            suggest = "reference_type_id"
        elif key == "typeReference":
            suggest = "type_reference"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ProductTypeAttributeType. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ProductTypeAttributeType.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ProductTypeAttributeType.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 name: str,
                 element_type2: Optional['outputs.ProductTypeAttributeTypeElementType2'] = None,
                 localized_values: Optional[Sequence['outputs.ProductTypeAttributeTypeLocalizedValue']] = None,
                 reference_type_id: Optional[str] = None,
                 type_reference: Optional[str] = None,
                 values: Optional[Mapping[str, Any]] = None):
        pulumi.set(__self__, "name", name)
        if element_type2 is not None:
            pulumi.set(__self__, "element_type2", element_type2)
        if localized_values is not None:
            pulumi.set(__self__, "localized_values", localized_values)
        if reference_type_id is not None:
            pulumi.set(__self__, "reference_type_id", reference_type_id)
        if type_reference is not None:
            pulumi.set(__self__, "type_reference", type_reference)
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="ElementType2")
    def element_type2(self) -> Optional['outputs.ProductTypeAttributeTypeElementType2']:
        return pulumi.get(self, "element_type2")
    @property
    @pulumi.getter(name="localizedValues")
    def localized_values(self) -> Optional[Sequence['outputs.ProductTypeAttributeTypeLocalizedValue']]:
        return pulumi.get(self, "localized_values")
    @property
    @pulumi.getter(name="referenceTypeId")
    def reference_type_id(self) -> Optional[str]:
        return pulumi.get(self, "reference_type_id")
    @property
    @pulumi.getter(name="typeReference")
    def type_reference(self) -> Optional[str]:
        return pulumi.get(self, "type_reference")
    @property
    @pulumi.getter
    def values(self) -> Optional[Mapping[str, Any]]:
        return pulumi.get(self, "values")
@pulumi.output_type
class ProductTypeAttributeTypeElementType2(dict):
    """Generated (tfgen) output type for a nested element type; camelCase dict keys map to snake_case property getters."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "localizedValues":
            suggest = "localized_values"
        elif key == "referenceTypeId":
            suggest = "reference_type_id"
        elif key == "typeReference":
            suggest = "type_reference"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ProductTypeAttributeTypeElementType2. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ProductTypeAttributeTypeElementType2.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ProductTypeAttributeTypeElementType2.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 name: str,
                 localized_values: Optional[Sequence['outputs.ProductTypeAttributeTypeElementType2LocalizedValue']] = None,
                 reference_type_id: Optional[str] = None,
                 type_reference: Optional[str] = None,
                 values: Optional[Mapping[str, Any]] = None):
        pulumi.set(__self__, "name", name)
        if localized_values is not None:
            pulumi.set(__self__, "localized_values", localized_values)
        if reference_type_id is not None:
            pulumi.set(__self__, "reference_type_id", reference_type_id)
        if type_reference is not None:
            pulumi.set(__self__, "type_reference", type_reference)
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="localizedValues")
    def localized_values(self) -> Optional[Sequence['outputs.ProductTypeAttributeTypeElementType2LocalizedValue']]:
        return pulumi.get(self, "localized_values")
    @property
    @pulumi.getter(name="referenceTypeId")
    def reference_type_id(self) -> Optional[str]:
        return pulumi.get(self, "reference_type_id")
    @property
    @pulumi.getter(name="typeReference")
    def type_reference(self) -> Optional[str]:
        return pulumi.get(self, "type_reference")
    @property
    @pulumi.getter
    def values(self) -> Optional[Mapping[str, Any]]:
        return pulumi.get(self, "values")
@pulumi.output_type
class ProductTypeAttributeTypeElementType2LocalizedValue(dict):
    """Generated (tfgen) output type: a localized key/label pair."""
    def __init__(__self__, *,
                 key: str,
                 label: Mapping[str, Any]):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "label", label)
    @property
    @pulumi.getter
    def key(self) -> str:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def label(self) -> Mapping[str, Any]:
        return pulumi.get(self, "label")
@pulumi.output_type
class ProductTypeAttributeTypeLocalizedValue(dict):
    """Generated (tfgen) output type: a localized key/label pair."""
    def __init__(__self__, *,
                 key: str,
                 label: Mapping[str, Any]):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "label", label)
    @property
    @pulumi.getter
    def key(self) -> str:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def label(self) -> Mapping[str, Any]:
        return pulumi.get(self, "label")
@pulumi.output_type
class ProjectSettingsCarts(dict):
    """Generated (tfgen) output type for project cart settings; camelCase dict keys map to snake_case property getters."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "countryTaxRateFallbackEnabled":
            suggest = "country_tax_rate_fallback_enabled"
        elif key == "deleteDaysAfterLastModification":
            suggest = "delete_days_after_last_modification"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ProjectSettingsCarts. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ProjectSettingsCarts.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ProjectSettingsCarts.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 country_tax_rate_fallback_enabled: bool,
                 delete_days_after_last_modification: Optional[int] = None):
        pulumi.set(__self__, "country_tax_rate_fallback_enabled", country_tax_rate_fallback_enabled)
        if delete_days_after_last_modification is not None:
            pulumi.set(__self__, "delete_days_after_last_modification", delete_days_after_last_modification)
    @property
    @pulumi.getter(name="countryTaxRateFallbackEnabled")
    def country_tax_rate_fallback_enabled(self) -> bool:
        return pulumi.get(self, "country_tax_rate_fallback_enabled")
    @property
    @pulumi.getter(name="deleteDaysAfterLastModification")
    def delete_days_after_last_modification(self) -> Optional[int]:
        return pulumi.get(self, "delete_days_after_last_modification")
@pulumi.output_type
class ProjectSettingsExternalOauth(dict):
    """Generated (tfgen) output type for external OAuth settings; camelCase dict keys map to snake_case property getters."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "authorizationHeader":
            suggest = "authorization_header"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ProjectSettingsExternalOauth. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ProjectSettingsExternalOauth.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ProjectSettingsExternalOauth.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 authorization_header: str,
                 url: str):
        pulumi.set(__self__, "authorization_header", authorization_header)
        pulumi.set(__self__, "url", url)
    @property
    @pulumi.getter(name="authorizationHeader")
    def authorization_header(self) -> str:
        return pulumi.get(self, "authorization_header")
    @property
    @pulumi.getter
    def url(self) -> str:
        return pulumi.get(self, "url")
@pulumi.output_type
class ProjectSettingsMessages(dict):
    """Generated (tfgen) output type: messages enabled flag."""
    def __init__(__self__, *,
                 enabled: bool):
        pulumi.set(__self__, "enabled", enabled)
    @property
    @pulumi.getter
    def enabled(self) -> bool:
        return pulumi.get(self, "enabled")
@pulumi.output_type
class ProjectSettingsShippingRateCartClassificationValue(dict):
    """Generated (tfgen) output type: a cart classification key with an optional localized label."""
    def __init__(__self__, *,
                 key: str,
                 label: Optional[Mapping[str, Any]] = None):
        pulumi.set(__self__, "key", key)
        if label is not None:
            pulumi.set(__self__, "label", label)
    @property
    @pulumi.getter
    def key(self) -> str:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def label(self) -> Optional[Mapping[str, Any]]:
        return pulumi.get(self, "label")
@pulumi.output_type
class ShippingZoneLocation(dict):
    """Generated (tfgen) output type: a shipping zone location (country + optional state)."""
    def __init__(__self__, *,
                 country: str,
                 state: Optional[str] = None):
        pulumi.set(__self__, "country", country)
        if state is not None:
            pulumi.set(__self__, "state", state)
    @property
    @pulumi.getter
    def country(self) -> str:
        return pulumi.get(self, "country")
    @property
    @pulumi.getter
    def state(self) -> Optional[str]:
        return pulumi.get(self, "state")
@pulumi.output_type
class ShippingZoneRateFreeAbove(dict):
    """Generated (tfgen) output type for a free-above threshold (cent amount + currency code)."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "centAmount":
            suggest = "cent_amount"
        elif key == "currencyCode":
            suggest = "currency_code"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ShippingZoneRateFreeAbove. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ShippingZoneRateFreeAbove.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ShippingZoneRateFreeAbove.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 cent_amount: int,
                 currency_code: str):
        pulumi.set(__self__, "cent_amount", cent_amount)
        pulumi.set(__self__, "currency_code", currency_code)
    @property
    @pulumi.getter(name="centAmount")
    def cent_amount(self) -> int:
        return pulumi.get(self, "cent_amount")
    @property
    @pulumi.getter(name="currencyCode")
    def currency_code(self) -> str:
        return pulumi.get(self, "currency_code")
@pulumi.output_type
class ShippingZoneRatePrice(dict):
    """Generated (tfgen) output type for a shipping rate price (cent amount + currency code)."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "centAmount":
            suggest = "cent_amount"
        elif key == "currencyCode":
            suggest = "currency_code"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ShippingZoneRatePrice. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ShippingZoneRatePrice.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ShippingZoneRatePrice.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 cent_amount: int,
                 currency_code: str):
        pulumi.set(__self__, "cent_amount", cent_amount)
        pulumi.set(__self__, "currency_code", currency_code)
    @property
    @pulumi.getter(name="centAmount")
    def cent_amount(self) -> int:
        return pulumi.get(self, "cent_amount")
    @property
    @pulumi.getter(name="currencyCode")
    def currency_code(self) -> str:
        return pulumi.get(self, "currency_code")
@pulumi.output_type
class ShippingZoneRateShippingRatePriceTier(dict):
    """Generated (tfgen) output type for a shipping rate price tier; camelCase dict keys map to snake_case property getters."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "minimumCentAmount":
            suggest = "minimum_cent_amount"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ShippingZoneRateShippingRatePriceTier. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ShippingZoneRateShippingRatePriceTier.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ShippingZoneRateShippingRatePriceTier.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 price: 'outputs.ShippingZoneRateShippingRatePriceTierPrice',
                 type: str,
                 minimum_cent_amount: Optional[int] = None,
                 score: Optional[float] = None,
                 value: Optional[str] = None):
        pulumi.set(__self__, "price", price)
        pulumi.set(__self__, "type", type)
        if minimum_cent_amount is not None:
            pulumi.set(__self__, "minimum_cent_amount", minimum_cent_amount)
        if score is not None:
            pulumi.set(__self__, "score", score)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def price(self) -> 'outputs.ShippingZoneRateShippingRatePriceTierPrice':
        return pulumi.get(self, "price")
    @property
    @pulumi.getter
    def type(self) -> str:
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="minimumCentAmount")
    def minimum_cent_amount(self) -> Optional[int]:
        return pulumi.get(self, "minimum_cent_amount")
    @property
    @pulumi.getter
    def score(self) -> Optional[float]:
        return pulumi.get(self, "score")
    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        return pulumi.get(self, "value")
@pulumi.output_type
class ShippingZoneRateShippingRatePriceTierPrice(dict):
    """Generated (tfgen) output type for a tier price (cent amount + currency code)."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "centAmount":
            suggest = "cent_amount"
        elif key == "currencyCode":
            suggest = "currency_code"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ShippingZoneRateShippingRatePriceTierPrice. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ShippingZoneRateShippingRatePriceTierPrice.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ShippingZoneRateShippingRatePriceTierPrice.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 cent_amount: int,
                 currency_code: str):
        pulumi.set(__self__, "cent_amount", cent_amount)
        pulumi.set(__self__, "currency_code", currency_code)
    @property
    @pulumi.getter(name="centAmount")
    def cent_amount(self) -> int:
        return pulumi.get(self, "cent_amount")
    @property
    @pulumi.getter(name="currencyCode")
    def currency_code(self) -> str:
        return pulumi.get(self, "currency_code")
@pulumi.output_type
class SubscriptionChange(dict):
    """Generated (tfgen) output type for a subscription change; camelCase dict keys map to snake_case property getters."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "resourceTypeIds":
            suggest = "resource_type_ids"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SubscriptionChange. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        SubscriptionChange.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SubscriptionChange.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 resource_type_ids: Optional[Sequence[str]] = None):
        if resource_type_ids is not None:
            pulumi.set(__self__, "resource_type_ids", resource_type_ids)
    @property
    @pulumi.getter(name="resourceTypeIds")
    def resource_type_ids(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "resource_type_ids")
@pulumi.output_type
class SubscriptionDestination(dict):
    """Output type describing where subscription messages are delivered
    (queue/topic endpoint plus the credentials needed to reach it).

    NOTE(review): auto-generated provider code. pulumi's @output_type decorator
    introspects the properties and __init__, so the code is kept byte-identical
    and only documentation is added.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to the snake_case property accessors.
        suggest = None
        if key == "accessKey":
            suggest = "access_key"
        elif key == "accessSecret":
            suggest = "access_secret"
        elif key == "connectionString":
            suggest = "connection_string"
        elif key == "projectId":
            suggest = "project_id"
        elif key == "queueUrl":
            suggest = "queue_url"
        elif key == "topicArn":
            suggest = "topic_arn"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SubscriptionDestination. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        SubscriptionDestination.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SubscriptionDestination.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 region: str,
                 type: str,
                 uri: str,
                 access_key: Optional[str] = None,
                 access_secret: Optional[str] = None,
                 connection_string: Optional[str] = None,
                 project_id: Optional[str] = None,
                 queue_url: Optional[str] = None,
                 topic: Optional[str] = None,
                 topic_arn: Optional[str] = None):
        # Required fields are always stored; optional ones only when provided.
        pulumi.set(__self__, "region", region)
        pulumi.set(__self__, "type", type)
        pulumi.set(__self__, "uri", uri)
        if access_key is not None:
            pulumi.set(__self__, "access_key", access_key)
        if access_secret is not None:
            pulumi.set(__self__, "access_secret", access_secret)
        if connection_string is not None:
            pulumi.set(__self__, "connection_string", connection_string)
        if project_id is not None:
            pulumi.set(__self__, "project_id", project_id)
        if queue_url is not None:
            pulumi.set(__self__, "queue_url", queue_url)
        if topic is not None:
            pulumi.set(__self__, "topic", topic)
        if topic_arn is not None:
            pulumi.set(__self__, "topic_arn", topic_arn)
    @property
    @pulumi.getter
    def region(self) -> str:
        return pulumi.get(self, "region")
    @property
    @pulumi.getter
    def type(self) -> str:
        # Destination type discriminator (e.g. which queue technology) -- TODO confirm values.
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def uri(self) -> str:
        return pulumi.get(self, "uri")
    @property
    @pulumi.getter(name="accessKey")
    def access_key(self) -> Optional[str]:
        return pulumi.get(self, "access_key")
    @property
    @pulumi.getter(name="accessSecret")
    def access_secret(self) -> Optional[str]:
        return pulumi.get(self, "access_secret")
    @property
    @pulumi.getter(name="connectionString")
    def connection_string(self) -> Optional[str]:
        return pulumi.get(self, "connection_string")
    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> Optional[str]:
        return pulumi.get(self, "project_id")
    @property
    @pulumi.getter(name="queueUrl")
    def queue_url(self) -> Optional[str]:
        return pulumi.get(self, "queue_url")
    @property
    @pulumi.getter
    def topic(self) -> Optional[str]:
        return pulumi.get(self, "topic")
    @property
    @pulumi.getter(name="topicArn")
    def topic_arn(self) -> Optional[str]:
        return pulumi.get(self, "topic_arn")
@pulumi.output_type
class SubscriptionFormat(dict):
    """Output type for the message serialization format of a subscription.

    NOTE(review): auto-generated provider code; kept byte-identical, only
    documentation added (@pulumi.output_type introspects the class).
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to snake_case property accessors.
        suggest = None
        if key == "cloudEventsVersion":
            suggest = "cloud_events_version"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SubscriptionFormat. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        SubscriptionFormat.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SubscriptionFormat.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 type: str,
                 cloud_events_version: Optional[str] = None):
        pulumi.set(__self__, "type", type)
        # Only meaningful for CloudEvents-formatted messages, presumably -- TODO confirm.
        if cloud_events_version is not None:
            pulumi.set(__self__, "cloud_events_version", cloud_events_version)
    @property
    @pulumi.getter
    def type(self) -> str:
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="cloudEventsVersion")
    def cloud_events_version(self) -> Optional[str]:
        return pulumi.get(self, "cloud_events_version")
@pulumi.output_type
class SubscriptionMessage(dict):
    """Output type for a subscription "messages" entry: which message types
    of a resource type the subscription listens to.

    NOTE(review): auto-generated provider code; kept byte-identical, only
    documentation added (@pulumi.output_type introspects the class).
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to snake_case property accessors.
        suggest = None
        if key == "resourceTypeId":
            suggest = "resource_type_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SubscriptionMessage. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        SubscriptionMessage.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SubscriptionMessage.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 resource_type_id: Optional[str] = None,
                 types: Optional[Sequence[str]] = None):
        # Both fields are optional; only stored when explicitly provided.
        if resource_type_id is not None:
            pulumi.set(__self__, "resource_type_id", resource_type_id)
        if types is not None:
            pulumi.set(__self__, "types", types)
    @property
    @pulumi.getter(name="resourceTypeId")
    def resource_type_id(self) -> Optional[str]:
        return pulumi.get(self, "resource_type_id")
    @property
    @pulumi.getter
    def types(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "types")
@pulumi.output_type
class TaxCategoryRateSubRate(dict):
    """Output type for a named sub-rate component of a tax category rate.

    NOTE(review): auto-generated provider code; kept byte-identical, only
    documentation added.
    """
    def __init__(__self__, *,
                 amount: float,
                 name: str):
        pulumi.set(__self__, "amount", amount)
        pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter
    def amount(self) -> float:
        # Fractional rate, presumably in [0, 1] -- TODO confirm against provider docs.
        return pulumi.get(self, "amount")
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
@pulumi.output_type
class TypeField(dict):
    """Output type for one custom field definition of a Type (label, name,
    field type, optional input hint and required flag).

    NOTE(review): auto-generated provider code; kept byte-identical, only
    documentation added (@pulumi.output_type introspects the class).
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to snake_case property accessors.
        suggest = None
        if key == "inputHint":
            suggest = "input_hint"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TypeField. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        TypeField.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        TypeField.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 label: Mapping[str, Any],
                 name: str,
                 type: 'outputs.TypeFieldType',
                 input_hint: Optional[str] = None,
                 required: Optional[bool] = None):
        # label is a localized mapping, presumably locale -> text -- TODO confirm.
        pulumi.set(__self__, "label", label)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "type", type)
        if input_hint is not None:
            pulumi.set(__self__, "input_hint", input_hint)
        if required is not None:
            pulumi.set(__self__, "required", required)
    @property
    @pulumi.getter
    def label(self) -> Mapping[str, Any]:
        return pulumi.get(self, "label")
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def type(self) -> 'outputs.TypeFieldType':
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="inputHint")
    def input_hint(self) -> Optional[str]:
        return pulumi.get(self, "input_hint")
    @property
    @pulumi.getter
    def required(self) -> Optional[bool]:
        return pulumi.get(self, "required")
@pulumi.output_type
class TypeFieldType(dict):
    """Output type describing the data type of a custom Type field, including
    enum values, localized enum values, element type (for sets) and reference
    target.

    NOTE(review): auto-generated provider code; kept byte-identical, only
    documentation added (@pulumi.output_type introspects the class).
    """
    @staticmethod
    def __key_warning(key: str):
        # Map wire keys to snake_case property accessors. "ElementType2" is
        # capitalized in the provider schema -- unusual, but mirrored by the
        # getter below, so it is kept as-is.
        suggest = None
        if key == "ElementType2":
            suggest = "element_type2"
        elif key == "localizedValues":
            suggest = "localized_values"
        elif key == "referenceTypeId":
            suggest = "reference_type_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TypeFieldType. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        TypeFieldType.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        TypeFieldType.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 name: str,
                 element_type2: Optional['outputs.TypeFieldTypeElementType2'] = None,
                 localized_values: Optional[Sequence['outputs.TypeFieldTypeLocalizedValue']] = None,
                 reference_type_id: Optional[str] = None,
                 values: Optional[Mapping[str, Any]] = None):
        pulumi.set(__self__, "name", name)
        if element_type2 is not None:
            pulumi.set(__self__, "element_type2", element_type2)
        if localized_values is not None:
            pulumi.set(__self__, "localized_values", localized_values)
        if reference_type_id is not None:
            pulumi.set(__self__, "reference_type_id", reference_type_id)
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="ElementType2")
    def element_type2(self) -> Optional['outputs.TypeFieldTypeElementType2']:
        return pulumi.get(self, "element_type2")
    @property
    @pulumi.getter(name="localizedValues")
    def localized_values(self) -> Optional[Sequence['outputs.TypeFieldTypeLocalizedValue']]:
        return pulumi.get(self, "localized_values")
    @property
    @pulumi.getter(name="referenceTypeId")
    def reference_type_id(self) -> Optional[str]:
        return pulumi.get(self, "reference_type_id")
    @property
    @pulumi.getter
    def values(self) -> Optional[Mapping[str, Any]]:
        return pulumi.get(self, "values")
@pulumi.output_type
class TypeFieldTypeElementType2(dict):
    """Output type describing the element type of a set-valued custom field
    (same shape as TypeFieldType minus the nested element type).

    NOTE(review): auto-generated provider code; kept byte-identical, only
    documentation added (@pulumi.output_type introspects the class).
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to snake_case property accessors.
        suggest = None
        if key == "localizedValues":
            suggest = "localized_values"
        elif key == "referenceTypeId":
            suggest = "reference_type_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TypeFieldTypeElementType2. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        TypeFieldTypeElementType2.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        TypeFieldTypeElementType2.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 name: str,
                 localized_values: Optional[Sequence['outputs.TypeFieldTypeElementType2LocalizedValue']] = None,
                 reference_type_id: Optional[str] = None,
                 values: Optional[Mapping[str, Any]] = None):
        pulumi.set(__self__, "name", name)
        if localized_values is not None:
            pulumi.set(__self__, "localized_values", localized_values)
        if reference_type_id is not None:
            pulumi.set(__self__, "reference_type_id", reference_type_id)
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="localizedValues")
    def localized_values(self) -> Optional[Sequence['outputs.TypeFieldTypeElementType2LocalizedValue']]:
        return pulumi.get(self, "localized_values")
    @property
    @pulumi.getter(name="referenceTypeId")
    def reference_type_id(self) -> Optional[str]:
        return pulumi.get(self, "reference_type_id")
    @property
    @pulumi.getter
    def values(self) -> Optional[Mapping[str, Any]]:
        return pulumi.get(self, "values")
@pulumi.output_type
class TypeFieldTypeElementType2LocalizedValue(dict):
    """Output type for one localized enum value (key plus localized label) of
    a set element type.

    NOTE(review): auto-generated provider code; kept byte-identical, only
    documentation added.
    """
    def __init__(__self__, *,
                 key: str,
                 label: Mapping[str, Any]):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "label", label)
    @property
    @pulumi.getter
    def key(self) -> str:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def label(self) -> Mapping[str, Any]:
        # Localized mapping, presumably locale -> text -- TODO confirm.
        return pulumi.get(self, "label")
@pulumi.output_type
class TypeFieldTypeLocalizedValue(dict):
    """Output type for one localized enum value (key plus localized label) of
    a custom field type.

    NOTE(review): auto-generated provider code; kept byte-identical, only
    documentation added.
    """
    def __init__(__self__, *,
                 key: str,
                 label: Mapping[str, Any]):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "label", label)
    @property
    @pulumi.getter
    def key(self) -> str:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def label(self) -> Mapping[str, Any]:
        # Localized mapping, presumably locale -> text -- TODO confirm.
        return pulumi.get(self, "label")
| 43,347 | 13,219 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from inspect import getargspec
import json
import os
import sys
from time import sleep
import logging
from .scan import LGTVScan
from .remote import LGTVRemote
from .auth import LGTVAuth
# Candidate config file locations, probed in order by find_config().
# The first existing writable file wins; otherwise the last writable
# directory is used as the place to create a new config.
search_config = [
    "/etc/lgtv/config.json",
    "~/.lgtv/config.json",
    "/opt/venvs/lgtv/config/config.json"
]
def usage(error=None):
    """Print CLI usage information, optionally preceded by an error message.

    The command list is discovered dynamically from LGTVRemote so the help
    text stays in sync with the remote's public API.
    """
    # BUGFIX: inspect.getargspec() was removed in Python 3.11. Prefer
    # getfullargspec (same .args attribute) and fall back on old interpreters.
    try:
        from inspect import getfullargspec as _argspec
    except ImportError:  # pragma: no cover - very old Python only
        from inspect import getargspec as _argspec
    if error:
        print ("Error: " + error)
    print ("LGTV Controller")
    print ("Author: Karl Lattimer <karl@qdh.org.uk>")
    print ("Usage: lgtv <command> [parameter]\n")
    print ("Available Commands:")
    print (" -i interactive mode")
    print (" scan")
    print (" auth <host> <tv_name>")
    commands = LGTVRemote.getCommands()
    for c in commands:
        args = _argspec(LGTVRemote.__dict__[c])
        if len(args.args) > 1:
            # Positional parameters, excluding `self` and the trailing callback.
            a = ' <' + '> <'.join(args.args[1:-1]) + '>'
            print (' <tv_name> ' + c + a)
        else:
            print (' <tv_name> ' + c)
def parseargs(command, argv):
    """Map positional CLI strings onto the named parameters of an LGTVRemote
    command, coercing "true"/"false" to booleans and numeric strings to
    int/float.

    Parameters:
        command -- name of an LGTVRemote method.
        argv    -- positional argument strings (e.g. sys.argv[3:]).

    Returns:
        dict of parameter name -> coerced value.

    Raises:
        IndexError if fewer values than parameters were supplied (matches
        the historical behaviour relied on by main()).
    """
    # getargspec() was removed in Python 3.11; see usage() for the same shim.
    try:
        from inspect import getfullargspec as _argspec
    except ImportError:  # pragma: no cover - very old Python only
        from inspect import getargspec as _argspec
    # Skip `self` (first) and the callback (last) parameter.
    names = _argspec(LGTVRemote.__dict__[command]).args[1:-1]
    output = {}
    for (i, a) in enumerate(names):
        raw = argv[i]
        lowered = raw.lower()
        if lowered == "true":
            # BUGFIX: the old code assigned True and then ran int() over the
            # same slot, turning the boolean back into 1. Booleans now stay
            # booleans; numeric conversion only happens for other strings.
            value = True
        elif lowered == "false":
            value = False
        else:
            try:
                value = int(raw)
            except ValueError:
                try:
                    value = float(raw)
                except ValueError:
                    value = raw
        output[a] = value
    return output
def find_config():
    """Return the path of the config file to use.

    Walks search_config in order. ``w`` remembers the most recent candidate
    whose directory exists and is writable (a place where a new config could
    be created); an existing, writable config file returns immediately. When
    a candidate's directory is missing but its parent is writable, the
    directory is created eagerly. Raises Exception when nothing is usable.
    """
    w = None
    for f in search_config:
        f = os.path.expanduser(f)
        f = os.path.abspath(f)
        d = os.path.dirname(f)
        if os.path.exists(d):
            if os.access(d, os.W_OK):
                # Writable directory: remember this candidate as a fallback.
                w = f
            if os.path.exists(f):
                if os.access(f, os.W_OK):
                    # Existing writable config file wins outright.
                    return f
        elif os.access(os.path.dirname(d), os.W_OK):
            # Config dir missing but creatable: create it now and remember.
            os.makedirs(d)
            w = f
    if w is None:
        print ("Cannot find suitable config path to write, create one in %s" % ' or '.join(search_config))
        raise Exception("No config file")
    return w
def main():
    """Command line entry point.

    Dispatch on sys.argv:
      scan                      -> discover TVs on the network and print JSON
      -i                        -> interactive mode (not implemented)
      auth <host> <tv_name>     -> pair with a TV and persist credentials
      <tv_name> on              -> wake the TV (wake-on-LAN style, no session)
      <tv_name> <command> [...] -> execute an LGTVRemote command
    """
    if len(sys.argv) < 2:
        usage("Too few arguments")
        sys.exit(1)

    logging.basicConfig(level=logging.DEBUG)

    command = None
    config = {}
    filename = find_config()
    if filename is not None:
        try:
            with open(filename) as f:
                config = json.loads(f.read())
        except (IOError, OSError, ValueError):
            # Missing or unparsable config: start from an empty one.
            pass

    if sys.argv[1] == "scan":
        results = LGTVScan()
        if len(results) > 0:
            print (json.dumps({
                "result": "ok",
                "count": len(results),
                "list": results
            }))
            sys.exit(0)
        else:
            print (json.dumps({
                "result": "failed",
                "count": len(results)
            }))
            sys.exit(1)

    if sys.argv[1] == "-i":
        # TODO: interactive mode is not implemented; execution currently
        # falls through to the generic handler below without name/command
        # being bound (pre-existing behaviour, flagged for follow-up).
        pass
    elif sys.argv[1] == "auth":
        if len(sys.argv) < 3:
            usage("Hostname or IP is required for auth")
            sys.exit(1)
        if len(sys.argv) < 4:
            usage("TV name is required for auth")
            sys.exit(1)
        name = sys.argv[3]
        host = sys.argv[2]
        ws = LGTVAuth(name, host)
        ws.connect()
        ws.run_forever()
        sleep(1)
        config[name] = ws.serialise()
        if filename is not None:
            with open(filename, 'w') as f:
                f.write(json.dumps(config))
            print ("Wrote config file: " + filename)
        sys.exit(0)
    elif len(sys.argv) >= 3 and sys.argv[2] == "on":
        # BUGFIX: the guard was `len(sys.argv) >= 2`, so running with exactly
        # two arguments crashed with IndexError on sys.argv[2].
        name = sys.argv[1]
        ws = LGTVRemote(name, **config[name])
        ws.on()
        sleep(1)
        sys.exit(0)
    else:
        try:
            args = parseargs(sys.argv[2], sys.argv[3:])
            name = sys.argv[1]
            command = sys.argv[2]
        except Exception as e:
            usage(str(e))
            sys.exit(1)
    try:
        ws = LGTVRemote(name, **config[name])
        ws.connect()
        if command is not None:
            ws.execute(command, args)
        ws.run_forever()
    except KeyboardInterrupt:
        ws.close()
# Allow the module to be run directly as a script.
if __name__ == '__main__':
    main()
| 4,437 | 1,450 |
# coding: utf-8
from __future__ import unicode_literals, absolute_import
from pytest import mark
from tests.common_tools import template_used # , template_paths
# needs to be marked because Django CMS will interfere
@mark.django_db
def test_if_menu_html_template_is_used_on_404_page(client):
    """A request to a non-existent URL must render both the project 404 page
    and the shared menu template (the menu stays visible on error pages)."""
    missing_url = "/asasd/foo/bar/404/"
    response = client.get(missing_url, follow=True)
    for expected in ('404.html', 'menu/menu.html'):
        assert template_used(response, expected, http_status=404)
| 508 | 177 |
"""Dataset class template
This module provides a template for users to implement custom datasets.
You can specify '--dataset_mode template' to use this dataset.
The class name should be consistent with both the filename and its dataset_mode option.
The filename should be <dataset_mode>_dataset.py
The class name should be <Dataset_mode>Dataset.py
You need to implement the following functions:
-- <modify_commandline_options>: Add dataset-specific options and rewrite default values for existing options.
-- <__init__>: Initialize this dataset class.
-- <__getitem__>: Return a data point and its metadata information.
-- <__len__>: Return the number of images.
"""
from data.base_dataset import BaseDataset, get_transform
# from data.image_folder import make_dataset
# from PIL import Image
import torch
import torchvision.transforms as transforms
import random
from data.MyFunction import my_data_creator
from data.MyFunction import my_transforms
from util import my_util
class MasterCycleGANDataset(BaseDataset):
    """CycleGAN dataset pairing input matrices (domain A) with target matrices
    (domain B), with optional leave-one-out cross-validation (--LOOid) and
    optional residual targets (--diff trains on B - A)."""
    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new dataset-specific options, and rewrite default values for existing options.
        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
        Returns:
            the modified parser.
        """
        # parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option')
        # parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0)  # specify dataset-specific default values
        parser.add_argument('--matrix', type=str, default='Cb', help='input matrix')
        parser.add_argument('--LOOid', type=int, default=-1, help='Leave-one-out cross-validation id')
        # NOTE(review): argparse's type=bool does not parse strings -- any
        # non-empty value ("--diff False" included) yields True. Consider
        # action='store_true' when callers can be migrated safely.
        parser.add_argument('--diff', type=bool, default=False)
        parser.set_defaults(input_nc=1, output_nc=1) # specify dataset-specific default values
        return parser
    def __init__(self, opt):
        """Initialize this dataset class.
        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        Loads the raw matrices via MyDataCreator, pads them to a common size
        that is a multiple of 4, applies the tensor transform, and splits
        train/validation according to --LOOid and my_util.val.
        """
        # save the option and dataset root
        BaseDataset.__init__(self, opt)
        data = my_data_creator.MyDataCreator(opt)
        # Pad the largest matrix dimension up to the next multiple of 4
        # (presumably a network up/down-sampling requirement -- TODO confirm).
        matrix_size = [len(i) for i in data.data_A]
        input_n = max(matrix_size)
        for i in range(4):
            if input_n%4 == 0:
                break
            input_n += 1
        transform = transforms.Compose([
            my_transforms.preprocess(input_n),
            transforms.ToTensor()
        ])
        data_A = data.data_A
        # With --diff the target is the residual B - A. The i//3 indexing
        # assumes data_B holds one target per group of three A samples --
        # TODO confirm against MyDataCreator.
        if opt.diff:
            data_B = [data.data_B[i//3] - data_A[i] for i in range(len(data_A))]
        else:
            data_B = data.data_B
        if opt.LOOid < 0:
            # No cross-validation: the first group serves as validation.
            val_A = [data_A[i] for i in range(3)]
            if opt.diff:
                val_B = [data_B[i] for i in range(3)]
            else:
                val_B = [data_B[0]]
        else:
            # Leave-one-out: hold out group LOOid (three consecutive A
            # samples, and the matching B sample(s)) for validation.
            val_A = [data_A[i] for i in range(opt.LOOid*3, opt.LOOid*3 + 3)]
            data_A = data_A[:opt.LOOid*3] + data_A[opt.LOOid*3 + 3:]
            if opt.diff:
                val_B = data_B[opt.LOOid*3:opt.LOOid*3 + 3]
                data_B = data_B[:opt.LOOid*3] + data_B[opt.LOOid*3 + 3:]
            else:
                val_B = [data_B[opt.LOOid]]
                data_B = data_B[:opt.LOOid] + data_B[opt.LOOid + 1:]
        # my_util.val is a global switch between training and validation mode.
        if not my_util.val:
            self.data_A = [transform(i) for i in data_A]
            self.data_B = [transform(i) for i in data_B]
        else:
            self.data_A = [transform(i) for i in val_A]
            self.data_B = [transform(i) for i in val_B]
    def __getitem__(self, index):
        """Return a data point and its metadata information.
        Parameters:
            index -- a random integer for data indexing
        Returns:
            a dictionary of data with their names. It usually contains the data itself and its metadata information.
        A is taken cyclically by index; B is drawn uniformly at random
        (unaligned pairing, standard for CycleGAN training). You can use
        helper functions such as self.transform to convert data to tensors.
        """
        path = 'temp'    # needs to be a string; no real file path exists for in-memory data
        # data_A = torch.Tensor(self.data.data_A) # needs to be a tensor
        # data_B = torch.Tensor(self.data.data_B) # needs to be a tensor
        A = self.data_A[index % len(self.data_A)]
        index_B = random.randint(0, len(self.data_B) - 1)
        B = self.data_B[index_B]
        return {'A': A, 'B': B, 'A_paths': path, 'B_paths': path}
    def __len__(self):
        """Return the total number of images (the larger of the two domains)."""
        return max(len(self.data_A), len(self.data_B))
| 5,809 | 1,725 |
# Color constants as (R, G, B) tuples with components in 0-255.
# GRAYSCALE
WHITE = (255, 255, 255)
L_GRAY = (190, 190, 190)
GRAY = (127, 127, 127)
D_GRAY = (63, 63, 63 )
BLACK = (0, 0, 0 )
# RGB
RED = (255, 0, 0 )
YELLOW = (255, 255, 0 )
GREEN = (0, 255, 0 )
# NOTE(review): (0, 255, 255) is cyan, not indigo (indigo is roughly
# (75, 0, 130)). The name is kept because other modules may import INDIGO.
INDIGO = (0, 255, 255)
BLUE = (0, 0, 255)
MAGENTA = (255, 0, 255)
from django.conf.urls import url
import os
from gateway.api_manager import views
# URL routes for the api_manager gateway app.
# NOTE(review): django.conf.urls.url is deprecated and removed in Django 4.0;
# migrate to django.urls.re_path (or path) when upgrading Django.
urlpatterns = [
    url(r'^superm$', views.superm)
]
| 134 | 49 |
#!/usr/bin/env python
import sys
from powerline.bindings.wm import DEFAULT_UPDATE_INTERVAL
from powerline.bindings.wm.awesome import run
def main():
    """Start the powerline updater for the awesome window manager.

    The refresh interval is taken from argv[1] when present (an invalid
    number propagates ValueError, as before); otherwise the binding's
    default interval is used.
    """
    cli_args = sys.argv[1:]
    interval = float(cli_args[0]) if cli_args else DEFAULT_UPDATE_INTERVAL
    run(interval=interval)
# Allow the module to be run directly as a script.
if __name__ == '__main__':
    main()
| 332 | 108 |
"""RefreshMapProcessor:
refresh sections in map
"""
# pylint: disable=C0116,R0903,E0401,W0703,W1201,redefined-outer-name,missing-function-docstring,E0401,C0114,W0511,W1203,C0200,C0103,W1203
from configs.config import ConfigMap
from models.map import Map
class RefreshMapProcessor:
    """Rebuilds the section map from the repository directory tree and
    persists the result."""

    def __init__(self, config_map: ConfigMap, persist_fs):
        """Store the configuration and filesystem adapter used by process()."""
        self.config_map = config_map
        self.persist_fs = persist_fs

    def process(self):
        """Scan the repo, build a fresh Map from its directories, and write
        the map file (sorted per the configured ordering)."""
        repo_dirs = self.persist_fs.list_dirs(self.config_map.get_repo_path)
        rebuilt_map = Map(
            self.config_map,
            self.persist_fs,
            Map.build_from_dirs(self.config_map, self.persist_fs, repo_dirs),
        )
        rebuilt_map.write(self.config_map.get_repo_sorted)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Copyright (c) 2013-present SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import pathlib
from django.conf import settings
import app_datasets.models as datasets_models
import app_ctdprofiles.models as ctdprofiles_models
import sharkdata_core
@sharkdata_core.singleton
class DatasetUtils(object):
    """ Singleton class with utilities for dataset archives: header
    translation, extracting data/metadata from zip files, and syncing the
    database with the archives found in the FTP area. """

    def __init__(self):
        """ Resolve data directories from Django settings and set up caches. """
        self._data_header = None
        self._translations = None
        self._data_in_datasets = settings.SHARKDATA_DATA_IN_DATASETS
        self._data_datasets = pathlib.Path(settings.SHARKDATA_DATA, "datasets")
        self._metadata_update_thread = None
        self._generate_archives_thread = None

    def translateDataHeaders(
        self, data_header, resource_name="translate_headers", language="darwin_core"
    ):
        # language = 'english'):
        """ Translate dataset column headers via the shared resources utility. """
        return sharkdata_core.ResourcesUtils().translateHeaders(
            data_header, resource_name, language
        )

    def getDatasetListHeaders(self):
        """ Return the (lazily initialized) column headers for dataset lists. """
        if not self._data_header:
            self._data_header = [
                "dataset_name",
                "datatype",
                "version",
                "dataset_file_name",
            ]
        #
        return self._data_header

    def translateDatasetListHeaders(self, data_header, language=None):
        """ Translate internal dataset-list header names to display labels.
        Unknown headers are passed through unchanged. """
        # if not language:
        #     return data_header
        #
        translated = []
        #
        if not self._translations:
            self._translations = {
                "dataset_name": "Dataset name",
                "datatype": "Datatype",
                "version": "Version",
                "dataset_file_name": "File name",
            }
        #
        for item in data_header:
            if item in self._translations:
                translated.append(self._translations[item])
            else:
                translated.append(item)
        #
        return translated

    def getDataAsText(self, dataset_name):
        """ Data is not stored in database, get from zip file."""
        db_dataset = datasets_models.Datasets.objects.get(dataset_name=dataset_name)
        #
        # Extract data part.
        data_content = ""
        zipreader = sharkdata_core.SharkArchiveFileReader(
            db_dataset.dataset_file_name, self._data_in_datasets
        )
        try:
            zipreader.open()
            data_content = zipreader.getDataAsText().decode(
                "cp1252"
            )  # Default encoding in archive data.
        finally:
            zipreader.close()
        #
        return data_content

    def getDataColumnsAsText(self, dataset_name):
        """ Data is not stored in database, get from zip file."""
        db_dataset = datasets_models.Datasets.objects.get(dataset_name=dataset_name)
        #
        # Extract data part.
        data_content = ""
        zipreader = sharkdata_core.SharkArchiveFileReader(
            db_dataset.dataset_file_name, self._data_in_datasets
        )
        try:
            zipreader.open()
            data_content = zipreader.getDataColumnsAsText().decode(
                "cp1252"
            )  # Default encoding in archive data.
        finally:
            zipreader.close()
        #
        return data_content

    def getMetadataAsText(self, dataset_name):
        """ Return the dataset's metadata (manual + auto) as CRLF-joined
        'key: value' rows; rows without a colon are dropped. """
        db_dataset = datasets_models.Datasets.objects.get(dataset_name=dataset_name)
        # Fix line breaks for windows. Remove rows with no key-value-pairs.
        metadata_list = []
        concat_metadata = (
            db_dataset.content_metadata + "\n" + db_dataset.content_metadata_auto
        )
        for row in concat_metadata.split("\n"):
            if ":" in row:
                parts = row.split(":", 1)  # Split on first occurence.
                key = parts[0].strip()
                value = parts[1].strip()
                metadata_list.append(key + ": " + value)
        #
        return "\r\n".join(metadata_list)

    def writeLatestDatasetsInfoToDb(self, logfile_name=None, user=""):
        """Updates the database from datasets stored in the FTP area.
        If multiple versions of a dataset are in the FTP area only the latest
        will be loaded. Returns the number of files that failed to load.
        """
        # Check dataset in 'data_in/datasets'. Create a list of dataset names.
        dataset_names = []
        for dataset_path in self._data_in_datasets.glob("SHARK_*.zip"):
            print(dataset_path.name)
            parts = dataset_path.name.split("_version")
            if len(parts) >= 1:
                dataset_names.append(parts[0])
        # Remove all datasets from 'data/datasets' not included in 'dataset_names'.
        for dataset_path in self._data_datasets.glob("SHARK_*.zip"):
            print(dataset_path.name)
            parts = dataset_path.name.split("_version")
            if len(parts) >= 1:
                if parts[0] not in dataset_names:
                    # Delete the file.
                    dataset_path.unlink()  # Removes file.
                    # Remove from database.
                    # NOTE(review): deletes by dataset_name=<file name>, while
                    # other code uses the bare dataset name as key -- confirm.
                    datasets_models.Datasets.objects.get(
                        dataset_name=dataset_path.name
                    ).delete()
        error_counter = 0
        # Full reload: remove all db rows, then re-import from the archives.
        datasets_models.Datasets.objects.all().delete()
        # CTD profiles.
        ctdprofiles_models.CtdProfiles.objects.all().delete()
        # Get latest datasets from FTP archive.
        archive = sharkdata_core.SharkArchive(self._data_in_datasets)
        for file_name in sorted(archive.getLatestSharkArchiveFilenames()):
            if logfile_name:
                sharkdata_core.SharkdataAdminUtils().log_write(
                    logfile_name, log_row="Loading file: " + file_name + "..."
                )
            try:
                error_string = self.writeFileInfoToDb(file_name, logfile_name, user)
                if error_string:
                    error_counter += 1
                    sharkdata_core.SharkdataAdminUtils().log_write(
                        logfile_name,
                        log_row="ERROR: Failed to load: "
                        + file_name
                        + ". Error: "
                        + error_string,
                    )
            except Exception as e:
                error_counter += 1
                sharkdata_core.SharkdataAdminUtils().log_write(
                    logfile_name,
                    log_row="ERROR: Failed to load: "
                    + file_name
                    + ". Error: "
                    + str(e),
                )
        #
        return error_counter

    def writeFileInfoToDb(self, file_name, logfile_name=None, user=""):
        """ Extracts info from the dataset filename and from the zip file content and adds to database.
        Returns None on success, otherwise an error string. """
        try:
            #
            ftp_file_path = pathlib.Path(self._data_in_datasets, file_name)
            # Extract info from file name.
            dataset_name, datatype, version = self.splitFilename(file_name)
            # Extract metadata parts.
            metadata = ""
            metadata_auto = ""
            columndata_available = False
            #
            zipreader = sharkdata_core.SharkArchiveFileReader(
                file_name, self._data_in_datasets
            )
            try:
                zipreader.open()
                #
                try:
                    metadata = zipreader.getMetadataAsText()
                    encoding = "cp1252"
                    metadata = str(metadata, encoding, "strict")
                except Exception as e:
                    sharkdata_core.SharkdataAdminUtils().log_write(
                        logfile_name, log_row="WARNING: " + str(e)
                    )
                #
                try:
                    metadata_auto = zipreader.getMetadataAutoAsText()
                    encoding = "cp1252"
                    metadata_auto = str(metadata_auto, encoding, "strict")
                except Exception as e:
                    sharkdata_core.SharkdataAdminUtils().log_write(
                        logfile_name, log_row="WARNING: " + str(e)
                    )
                #
                columndata_available = zipreader.isDataColumnsAvailable()
                # CTD profiles.
                ctd_profiles_table = None
                # if datatype == 'CTDprofile':
                if datatype == "Profile":
                    ctd_profiles_table = zipreader.getDataAsText()
            finally:
                zipreader.close()
            # Remove from database.
            try:
                db_dataset = datasets_models.Datasets.objects.get(
                    dataset_name=dataset_name
                )
                db_dataset.delete()
            except datasets_models.Datasets.DoesNotExist:
                pass  # Not found.
            # Save to db.
            dataset = datasets_models.Datasets(
                dataset_name=dataset_name,
                datatype=datatype,
                version=version,
                dataset_file_name=file_name,
                ftp_file_path=ftp_file_path,
                content_data="NOT USED",
                content_metadata=metadata,
                content_metadata_auto=metadata_auto,
                #
                column_data_available=columndata_available,
            )
            dataset.save()
            if ctd_profiles_table:
                # Parse the tab-separated profile table: first row is the
                # header, remaining rows become CtdProfiles records.
                data_header = []
                ctd_profiles_table = ctd_profiles_table.decode("cp1252")
                for index, row in enumerate(ctd_profiles_table.split("\n")):
                    rowitems = row.strip().split("\t")
                    if index == 0:
                        data_header = rowitems
                    else:
                        if len(rowitems) > 1:
                            row_dict = dict(zip(data_header, rowitems))
                            water_depth_m = 0.0
                            try:
                                water_depth_m = float(
                                    row_dict.get("water_depth_m", -99)
                                )
                            except (TypeError, ValueError):
                                # BUGFIX: was a bare except; only conversion
                                # errors should fall back to 0.0.
                                pass
                            db_profiles = ctdprofiles_models.CtdProfiles(
                                visit_year=row_dict.get("visit_year", ""),  # '2002',
                                platform_code=row_dict.get(
                                    "platform_code", ""
                                ),  # 'Svea',
                                expedition_id=row_dict.get(
                                    "expedition_id", ""
                                ),  # 'aa-bb-11',
                                visit_id=row_dict.get("visit_id", ""),  # '123456',
                                station_name=row_dict.get(
                                    "station_name", ""
                                ),  # 'Station1A',
                                latitude=float(
                                    row_dict.get("sample_latitude_dd", -99)
                                ),  # 70.00,
                                longitude=float(
                                    row_dict.get("sample_longitude_dd", -99)
                                ),  # 10.00,
                                water_depth_m=water_depth_m,  # '80.0',
                                sampler_type_code=row_dict.get(
                                    "sampler_type_code", ""
                                ),  # 'CTD',
                                sample_date=row_dict.get(
                                    "visit_date", ""
                                ),  # '2000-01-01',
                                sample_project_code=row_dict.get(
                                    "sample_project_code", ""
                                ),  # 'Proj',
                                # sample_project_code = row_dict.get('sample_project_name_sv', ''), # 'Proj',
                                sample_orderer_code=row_dict.get(
                                    "sample_orderer_code", ""
                                ),  # 'Orderer',
                                # sample_orderer_code = row_dict.get('sample_orderer_name_sv', ''), # 'Orderer',
                                sampling_laboratory_code=row_dict.get(
                                    "sampling_laboratory_code", ""
                                ),  # 'Slabo',
                                # sampling_laboratory_code = row_dict.get('sampling_laboratory_name_sv', ''), # 'Slabo',
                                revision_date=row_dict.get(
                                    "revision_date", ""
                                ),  # '2010-10-10',
                                ctd_profile_name=row_dict.get(
                                    "profile_file_name_db", ""
                                ),  # 'ctd.profile',
                                dataset_file_name=file_name,
                                ftp_file_path=ftp_file_path,
                            )
                            db_profiles.save()
            #
            return None  # No error message.
        #
        except Exception as e:
            return str(e)

    def splitFilename(self, file_name):
        """ Split an archive file name into (dataset_name, datatype, version).

        Expected pattern: SHARK_<datatype>_..._version_<version>.zip
        """
        filename = pathlib.Path(file_name).stem
        parts = filename.split("version")
        name = parts[0].strip("_").strip()
        # BUGFIX: the guard was `len(parts) > 0`, which is always true and
        # made parts[1] raise IndexError for names without a "version" marker.
        version = parts[1].strip("_").strip() if len(parts) > 1 else ""
        #
        parts = filename.split("_")
        datatype = parts[1].strip("_").strip()
        #
        return name, datatype, version
| 14,047 | 3,652 |
import time
import yaml
def read_config_file(file_name):
    """Read benchmark configuration parameters from a YAML config file.

    Parameters:
        file_name -- path to the YAML configuration file.

    Returns:
        dict mapping internal parameter names to values from the file.

    Raises:
        KeyError if a required key is missing from the YAML document.
    """
    with open(file_name) as config_file:
        config = yaml.safe_load(config_file)

    # Flatten the nested YAML structure into a single lookup dict.
    # BUGFIX: the original assigned "cluster_ip" three times; the duplicate
    # assignments were redundant and are removed.
    return {
        "benchmark_name": config["Name"],
        "cluster_name": config["Cluster"]["Name"],
        "cluster_ip": config["Cluster"]["IP"],
        "servicegraphs": config["Servicegraphs"],
        "result_location": config["Result_location"],
        "qps_from": config["QPS"]["From"],
        "qps_to": config["QPS"]["To"],
        "qps_granularity": config["QPS"]["Granularity"],
        "load_preheat": config["Load"]["Preheat"],
        "load_time": config["Load"]["Time"],
        "load_users": config["Load"]["Users"],
        "load_ip": config["Load"]["ServiceIP"],
        "load_timeout": config["Load"]["TimeOut"],
        "load_port": config["Load"]["ServicePort"],
        "load_path": config["Load"]["ServicePath"],
        "load_query": config["Load"]["ServiceQuery"],
        "prometheus_ip": config["Prometheus"]["IP"],
        "prometheus_port": config["Prometheus"]["Port"],
    }
| 1,660 | 501 |
"""
WSGI Middleware apps that haven't gotten around to being extracted to
their own modules.
"""
import logging
import time
import urllib
from tiddlyweb.model.policy import UserRequiredError, ForbiddenError
from tiddlyweb.store import Store
from tiddlyweb.web.http import HTTP403, HTTP302
from tiddlyweb.web.util import server_base_url
from tiddlyweb import __version__ as VERSION
class Header(object):
    """
    WSGI middleware implementing HEAD semantics: a HEAD request is rewritten
    to GET before reaching the wrapped app, and the response body is
    discarded (headers still flow through start_response).
    """

    def __init__(self, application):
        self.application = application

    def __call__(self, environ, start_response):
        if environ['REQUEST_METHOD'] != 'HEAD':
            # Anything but HEAD passes straight through.
            return self.application(environ, start_response)
        environ['REQUEST_METHOD'] = 'GET'
        # Run the app for its headers/side effects, then drop the body.
        self.application(environ, start_response)
        return []
class HTMLPresenter(object):
    """
    Take the core app output, if tiddlyweb.title is set
    in environ and we appear to be using a browser,
    add some HTML framework (header before and footer after the body).
    Subclasses/plugins can extend via header_extra()/footer_extra().
    """
    def __init__(self, application):
        # Wrapped downstream WSGI application.
        self.application = application
    def __call__(self, environ, start_response):
        output = self.application(environ, start_response)
        if self._needs_title(environ):
            # Wrap lazily in a generator so the body is still streamed.
            def wrapped_output(output):
                yield self._header(environ)
                for item in output:
                    yield item
                yield self._footer(environ)
                return
            return wrapped_output(output)
        return output
    def _needs_title(self, environ):
        """
        Determine if we are outputting html to a browser: a page title was
        set and the User-Agent looks like a browser ('Mozilla' heuristic).
        """
        return ('tiddlyweb.title' in environ and 'Mozilla'
                in environ.get('HTTP_USER_AGENT', ''))
    def _header(self, environ):
        """
        Wrap the HTML in an HTML header (page title, optional CSS link,
        any extra link tags collected in environ['tiddlyweb.links']).
        """
        css = ''
        if environ['tiddlyweb.config'].get('css_uri', ''):
            css = '<link rel="stylesheet" href="%s" type="text/css" />' % \
                    environ['tiddlyweb.config']['css_uri']
        try:
            links = '\n'.join(environ['tiddlyweb.links'])
        except KeyError:
            links = ''
        header_extra = self.header_extra(environ)
        return """
<html>
<head>
<title>TiddlyWeb - %s</title>
%s
%s
</head>
<body>
<div id="header">
<h1>%s</h1>
%s
</div>
<div id="content">
""" % (environ['tiddlyweb.title'], css, links,
        environ['tiddlyweb.title'], header_extra)
    def _footer(self, environ):
        """
        Wrap the HTML with an HTML footer (version badge and user greeting).
        """
        footer_extra = self.footer_extra(environ)
        return """
</div>
<div id="footer">
%s
<div id="badge">This is <a href="http://tiddlyweb.com/">TiddlyWeb</a> %s</div>
<div id="usergreet">User %s.</div>
</div>
</body>
</html>
""" % (footer_extra, VERSION, environ['tiddlyweb.usersign']['name'])
    def header_extra(self, environ):
        """
        Override this in plugins to add to the header.
        """
        return ''
    def footer_extra(self, environ):
        """
        Override this in plugins to add to the footer.
        """
        return ''
class SimpleLog(object):
    """
    WSGI Middleware to write a very simple log to stdout.
    Borrowed from Paste Translogger
    """

    # Apache combined-log style format string, filled from the environ.
    format = ('%(REMOTE_ADDR)s - %(REMOTE_USER)s [%(time)s] '
            '"%(REQUEST_METHOD)s %(REQUEST_URI)s %(HTTP_VERSION)s" '
            '%(status)s %(bytes)s "%(HTTP_REFERER)s" "%(HTTP_USER_AGENT)s"')

    def __init__(self, application):
        self.application = application

    def __call__(self, environ, start_response):
        logger = logging.getLogger()
        # Skip all of the logging work when INFO is disabled.
        if logger.isEnabledFor(logging.INFO):
            return self._log_app(environ, start_response)
        else:
            return self.application(environ, start_response)

    def _log_app(self, environ, start_response):
        # Reconstruct the quoted request URI (Python 2 urllib API).
        req_uri = urllib.quote(environ.get('SCRIPT_NAME', '')
                + environ.get('PATH_INFO', ''))
        if environ.get('QUERY_STRING'):
            req_uri += '?' + environ['QUERY_STRING']

        def replacement_start_response(status, headers, exc_info=None):
            """
            We need to gaze at the content-length, if set, to
            write log info.
            """
            size = None
            for name, value in headers:
                if name.lower() == 'content-length':
                    size = value
            self.write_log(environ, req_uri, status, size)
            return start_response(status, headers, exc_info)

        return self.application(environ, replacement_start_response)

    def write_log(self, environ, req_uri, status, size):
        """
        Print the log info out in a formatted form to logging.info.
        This is rather more complex than desirable because there is
        a mix of str and unicode in the gathered data and we need to
        make it acceptable for output.
        """
        # Prefer the tiddlyweb usersign over whatever the server put in
        # REMOTE_USER; fall back to None (rendered as '-') when absent.
        environ['REMOTE_USER'] = None
        try:
            environ['REMOTE_USER'] = environ['tiddlyweb.usersign']['name']
        except KeyError:
            pass
        if size is None:
            size = '-'
        log_format = {
            'REMOTE_ADDR': environ.get('REMOTE_ADDR') or '-',
            'REMOTE_USER': environ.get('REMOTE_USER') or '-',
            'REQUEST_METHOD': environ['REQUEST_METHOD'],
            'REQUEST_URI': req_uri,
            'HTTP_VERSION': environ.get('SERVER_PROTOCOL'),
            'time': time.strftime('%d/%b/%Y:%H:%M:%S ', time.localtime()),
            # Keep only the numeric part of e.g. '200 OK'.
            'status': status.split(None, 1)[0],
            'bytes': size,
            'HTTP_REFERER': environ.get('HTTP_REFERER', '-'),
            'HTTP_USER_AGENT': environ.get('HTTP_USER_AGENT', '-'),
        }
        # Best-effort normalisation to utf-8 bytes. Python 2 semantics:
        # encoding a byte str implicitly decodes it as ascii first, which
        # can raise UnicodeDecodeError -- keep the original value then.
        for key, value in log_format.items():
            try:
                log_format[key] = value.encode('utf-8', 'replace')
            except UnicodeDecodeError:
                log_format[key] = value
        message = self.format % log_format
        logging.info(message)
class StoreSet(object):
    """
    WSGI middleware that instantiates the configured Store and
    exposes it to downstream apps as environ['tiddlyweb.store'].
    """

    def __init__(self, application):
        self.application = application

    def __call__(self, environ, start_response):
        # server_store config is a (module_name, store_config) pair.
        store_module, store_config = environ['tiddlyweb.config']['server_store']
        environ['tiddlyweb.store'] = Store(store_module, store_config, environ)
        return self.application(environ, start_response)
class EncodeUTF8(object):
    """
    WSGI middleware ensuring every chunk sent out the pipe is encoded
    as UTF-8. Within the application, content is unicode (not encoded).
    """

    def __init__(self, application):
        self.application = application

    def __call__(self, environ, start_response):
        # Call the app eagerly (so start_response fires now), but
        # encode the body chunks lazily as they are consumed.
        body = self.application(environ, start_response)
        return (_encoder(chunk) for chunk in body)
def _encoder(string):
    """
    Return *string* encoded as UTF-8 when it is a unicode object;
    byte strings pass through untouched.
    """
    # Exact type check kept (Python 2 unicode vs str distinction).
    if type(string) == unicode:
        return string.encode('utf-8')
    return string
class PermissionsExceptor(object):
"""
Trap permissions exceptions and turn them into HTTP
exceptions so the errors are propagated to client
code.
"""
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
try:
output = self.application(environ, start_response)
return output
except ForbiddenError, exc:
raise HTTP403(exc)
except UserRequiredError, exc:
# We only send to the challenger on a GET
# request. Otherwise we're in for major confusion
# on dealing with redirects and the like in
# scripts and javascript, where follow
# behavior is inconsistent.
if environ['REQUEST_METHOD'] == 'GET':
url = _challenge_url(environ)
raise HTTP302(url)
raise HTTP403(exc)
def _challenge_url(environ):
    """
    Build the URL of the challenge system so that GET requests can
    be redirected to the right place.
    """
    redirect = environ.get('SCRIPT_NAME', '')
    query = environ.get('QUERY_STRING', None)
    if query:
        redirect += '?%s' % query
    # Fully quote the redirect target so it survives as a query value.
    return '%s/challenge?tiddlyweb_redirect=%s' % (
            server_base_url(environ), urllib.quote(redirect, safe=''))
| 8,789 | 2,534 |
import unittest
class TestParentDataParams(unittest.TestCase):
    """Verify that parent jobs' transformed data is passed into dependent
    jobs' transform() keyword arguments by the JobRunner."""

    def setUp(self):
        from treetl import Job

        # Expected transformed_data per job: A=1, B=2, C=A+B=3, D=A+C=4.
        self.expected_results = { jn: i+1 for i, jn in enumerate([ 'JobA', 'JobB', 'JobC', 'JobD' ]) }
        self.actual_results = { }

        def update_actual_results(job):
            # Record each job's transformed_data at load() time.
            self.actual_results[job.__class__.__name__] = job.transformed_data

        class LoadToDict(Job):
            def load(self, **kwargs):
                update_actual_results(self)

        class JobA(LoadToDict):
            def transform(self, **kwargs):
                self.transformed_data = 1

        class JobB(LoadToDict):
            def transform(self, **kwargs):
                self.transformed_data = 2

        # Parent outputs arrive under the keyword names given to the decorator.
        @Job.dependency(b_data=JobB, a_data=JobA)
        class JobC(LoadToDict):
            def transform(self, a_data=None, b_data=None, **kwargs):
                self.transformed_data = a_data + b_data

        @Job.dependency(a_data=JobA, c_data=JobC)
        class JobD(LoadToDict):
            def transform(self, a_data=None, c_data=None, **kwargs):
                self.transformed_data = a_data + c_data

        # Deliberately out of dependency order; the runner must schedule.
        self.jobs = [ JobD(), JobA(), JobC(), JobB() ]

    def test_parent_data_params(self):
        from treetl import JobRunner
        JobRunner(self.jobs).run()
        self.assertDictEqual(
            d1=self.expected_results,
            d2=self.actual_results,
            msg='Error in transformed data loaded to dict'
        )
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 1,534 | 484 |
# -*- coding: utf-8 -*-
# Python code for computing the adjusted cosine similarity
from math import sqrt
# Sample ratings: user -> {movie title (Chinese): rating}; values in the
# sample are on a 1-5 scale. Not every user has rated every movie.
users3 = {
    "David": {"爱乐之城": 4, "荒野猎人": 5,"银河护卫队2": 4, "长城": 1},
    "Matt": {"爱乐之城": 3, "荒野猎人": 4,"银河护卫队2": 4, "长城": 1},
    "Ben": {"美国队长3": 4, "爱乐之城": 3,"银河护卫队2": 3, "长城": 1},
    "Chris": {"美国队长3": 3, "爱乐之城": 4,"荒野猎人": 4, "银河护卫队2": 3},
    "Tori": {"美国队长3": 5, "爱乐之城": 4,"荒野猎人": 5, "长城": 3}
}
def computeSimilarity(band1, band2, userRatings):
    """
    Return the adjusted cosine similarity between two items.

    Each user's mean rating is subtracted from their ratings so that
    different personal rating scales become comparable.

    :param band1: name of the first item
    :param band2: name of the second item
    :param userRatings: mapping of user -> {item: rating}
    :return: similarity in [-1, 1]; 0.0 when undefined (no user rated
        both items, or all co-ratings equal the users' means)
    """
    # Per-user mean rating.
    averages = {
        user: float(sum(ratings.values())) / len(ratings)
        for user, ratings in userRatings.items()
    }
    num = 0   # numerator
    dem1 = 0  # first factor of the denominator
    dem2 = 0  # second factor of the denominator
    for (user, ratings) in userRatings.items():
        # Only users who rated both items contribute.
        if band1 in ratings and band2 in ratings:
            avg = averages[user]
            num += (ratings[band1] - avg) * (ratings[band2] - avg)
            dem1 += (ratings[band1] - avg) ** 2
            dem2 += (ratings[band2] - avg) ** 2
    denominator = sqrt(dem1) * sqrt(dem2)
    if denominator == 0:
        # Guard against ZeroDivisionError: no co-raters, or zero variance.
        return 0.0
    return num / denominator
# Report the adjusted cosine similarity of each movie against 银河护卫队2.
for _movie in ("美国队长3", "爱乐之城", "荒野猎人"):
    print("%s 和 银河护卫队2 相似度: %f " % (_movie, computeSimilarity(_movie, '银河护卫队2', users3)))
| 1,243 | 762 |
import importlib
from .cv2 import *
from .data import *
# wildcard import above does not import "private" variables like __version__
# this makes them available
# NOTE(review): 'cv2.cv2' is presumably the binary extension module inside
# the cv2 package; copying its whole __dict__ also re-binds public names.
globals().update(importlib.import_module('cv2.cv2').__dict__)
def imshow_image(title, image):
    """
    Display *image* in a matplotlib window (stand-in for cv2.imshow).

    :param title: window title; currently unused -- TODO(review): pass
        to plt.title() if captioning is intended
    :param image: array-like image accepted by plt.imshow
    """
    # Imported lazily so matplotlib is only required when displaying.
    import matplotlib.pyplot as plt
    plt.imshow(image)
    plt.show()

# Expose under the cv2-style name.
imshow = imshow_image
| 355 | 112 |
from PyQt5 import QtWidgets, QtGui, QtCore, Qt, uic
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from RayTracing.gui.mplwidget import *
from RayTracing.RayTracing import *
from tabulate import tabulate
from pathlib import Path
from matplotlib.lines import lineStyles
from matplotlib.colors import to_hex, to_rgb
import sys
import time
import argparse
class Error(Exception):
    """Base class for exceptions raised by this module."""
class OperatorModelError(Error):
    """Raised when an OpticalOperatorModel is given an invalid value."""
class SourceModelError(Error):
    """Raised when a SourceModel is given an invalid value."""
class ScreenModelError(Error):
    """Raised when a ScreenModel is given an invalid value."""
class OpticalOperatorModel(QtCore.QObject):
    """
    Model for controlling an OpticalOperator

    The model should ensure that proper signals are sent whenever the data of the OpticalOperator has been changed.
    The model emits the following signals:

    :param valueChanged: Signal ([], [float]) emitted whenever the value of the OpticalOperator has changed.
    :param zChanged: Signal ([], [float]) emitted whenever the z-value of the OpticalOperator has changed.
    :param offsetChanged: Signal ([], [float]) emitted whenever the offset-value of the OpticalOperator has changed.
    :param labelChanged: Signal ([], [str]) emitted whenever the label of the OpticalOperator has changed.
    :param operatorChanged: Signal emitted whenever any change has been made to the OpticalOperator, including the above.
    :param styleChanged: Signal ([dict]) emitted whenever the plot style of the OpticalOperator has changed.
    """
    valueChanged = pyqtSignal([], [float], name='valueChanged')
    zChanged = pyqtSignal([], [float], name='zChanged')
    offsetChanged = pyqtSignal([], [float], name='offsetChanged')
    labelChanged = pyqtSignal([], [str], name='labelChanged')
    operatorChanged = pyqtSignal(name='operatorChanged')
    styleChanged = pyqtSignal([dict], name='styleChanged')

    @property
    def z(self):
        """Z-position of the wrapped operator."""
        return self._operator.z

    @z.setter
    def z(self, value):
        # Only plain floats are accepted; callers must convert ints.
        if isinstance(value, float):
            self._operator.z = value
            self.zChanged.emit()
            self.zChanged[float].emit(value)
            self.operatorChanged.emit()
        else:
            raise OperatorModelError(
                f'Cannot set Z-value of {self.__class__.__name__} of {self._operator!r}.') from TypeError(
                f'Value {value!r} must be `float`')

    @property
    def offset(self):
        """Offset-value of the wrapped operator."""
        return self._operator.offset

    @offset.setter
    def offset(self, value):
        if isinstance(value, float):
            self._operator.offset = value
            self.offsetChanged.emit()
            self.offsetChanged[float].emit(value)
            self.operatorChanged.emit()
        else:
            raise OperatorModelError(
                f'Cannot set offset-value of {self.__class__.__name__} of {self._operator!r}.') from TypeError(
                f'Value {value!r} must be `float`')

    @property
    def value(self):
        """Primary value of the wrapped operator (e.g. focal length)."""
        return self._operator.value

    @value.setter
    def value(self, value):
        if isinstance(value, float):
            self._operator.value = value
            self.valueChanged.emit()
            self.valueChanged[float].emit(value)
            self.operatorChanged.emit()
        else:
            raise OperatorModelError(
                f'Cannot set operator-value of {self.__class__.__name__} of {self._operator!r}.') from TypeError(
                f'Value {value!r} must be `float`')

    @property
    def label(self):
        """Human-readable label of the wrapped operator."""
        return self._operator.label

    @label.setter
    def label(self, value):
        if isinstance(value, str):
            self._operator.label = value
            self.labelChanged.emit()
            self.labelChanged[str].emit(value)
            self.operatorChanged.emit()
        else:
            raise OperatorModelError(
                f'Cannot set label-value of {self.__class__.__name__} of {self._operator!r}.') from TypeError(
                f'Value {value!r} must be `str`')

    @property
    def silent(self):
        """Whether the model's signals are currently suppressed."""
        return self._silent

    @silent.setter
    def silent(self, value):
        self._silent = bool(value)
        self.blockSignals(self._silent)

    @property
    def operator_type(self):
        """Concrete type of the wrapped operator."""
        return type(self._operator)

    @property
    def operator_classname(self):
        """Class name of the wrapped operator."""
        return self._operator.__class__.__name__

    @property
    def is_deflector(self):
        """True when the wrapped operator is a Deflector."""
        return isinstance(self._operator, Deflector)

    @property
    def is_lens(self):
        """True when the wrapped operator is a Lens."""
        return isinstance(self._operator, Lens)

    @property
    def is_propagator(self):
        """True when the wrapped operator is a Propagator."""
        return isinstance(self._operator, Propagator)

    @property
    def style(self):
        """Copy of the line-style dict used when plotting the operator."""
        return dict(self._style)

    @property
    def focal_style(self):
        """Copy of the focal-plane style dict; empty dict if not a lens."""
        if self.is_lens:
            return dict(self._focal_style)
        else:
            return dict()

    def __init__(self, operator, *args, **kwargs):
        """
        Create a model for an OpticalOperator

        :param operator: The OpticalOperator to model
        :param args: Optional positional arguments passed to QtCore.QObject constructor
        :param kwargs: Optional keyword arguments passed to QtCore.QObject constructor
        :type operator: OpticalOperator
        """
        super(OpticalOperatorModel, self).__init__(*args, **kwargs)
        if not isinstance(operator, OpticalOperator):
            raise TypeError(
                f'Cannot create {self.__class__.__name__} for {operator!r}. Invalid type {type(operator)}. Accepted types are OpticalOperator and subclasses.')
        self._operator = operator
        self._silent = False
        # Default matplotlib styles for the operator line and (for lenses)
        # its focal-plane lines.
        self._style = dict([['ls', '-'], ['alpha', 1.], ['color', 'k'], ['lw', 1.]])
        self._focal_style = dict([['ls', '--'], ['alpha', 0.5], ['color', 'k'], ['lw', 0.5]])

    def __repr__(self):
        return f'{self.__class__.__name__}({self._operator!r}, {self.parent()})'

    def __str__(self):
        return f'{self._operator}'

    def show(self, *args, **kwargs):
        """
        Shows the operator

        :param args: Optional positional arguments passed to OpticalOperator.show()
        :param kwargs: Optional keyword arguments passed to OpticalOperator.show()
        :return:
        """
        kwargs.update(self.style)
        # NOTE: a leftover debug `print(kwargs)` was removed here.
        if self.is_lens:
            return self._operator.show(*args, focal_plane_kwargs=self._focal_style, **kwargs)
        else:
            return self._operator.show(*args, **kwargs)

    def set_style(self, key, value, focal=False):
        """
        Sets one of the style fields to the given value.

        (The original used an f-string here, which is evaluated on every
        call and is *not* a docstring -- replaced with a plain literal.)

        :param focal: Whether to set the style for focal planes or not. Only applicable if the optical operator is a Lens.
        :param key: The key to set. Should be one of 'ls', 'alpha', 'color' or 'lw'.
        :param value: The value to set the field to.
        :type key: str
        :type value: Union[float, int, str]
        :return:
        """
        if focal:
            if key in self.focal_style:
                self._focal_style[key] = value
            else:
                raise ValueError(f'Cannot set focal style {key} to {value} for {self}: Key {key!r} not recognized')
        else:
            if key in self.style:
                self._style[key] = value
            else:
                raise ValueError(f'Cannot set style {key} to {value} for {self}: Key {key!r} not recognized')
        self.styleChanged.emit()
class OpticalOperatorController(QtCore.QObject):
    """
    Controller for controlling an OpticalOperatorModel

    The controller has a series of preset values that can be used to store certain values in a dictionary with integer keys.
    """

    # Emitted whenever the preset-value dictionary changes.
    presetsChanged = pyqtSignal([], name='presetsChanged')

    @property
    def value_presets(self):
        """Dictionary of preset operator values keyed by integer preset-keys."""
        return self._value_presets

    @property
    def model_name(self):
        """Label of the controlled model, as a plain string."""
        return str(self._model.label)

    @property
    def model(self):
        """The controlled OpticalOperatorModel."""
        return self._model

    def __init__(self, model, *args, **kwargs):
        """
        Create a controller for an OpticalOperatorModel

        :param model: The model to control
        :param args: Optional positional arguments passed to QtCore.QObject constructor
        :param kwargs: Optional keyword arguments passed to QtCore.QObject constructor
        :type model: OpticalOperatorModel
        """
        super(OpticalOperatorController, self).__init__(*args, **kwargs)
        if not isinstance(model, OpticalOperatorModel):
            raise TypeError(
                f'Cannot create {self.__class__.__name__} for {model!r}. Invalid type {type(model)}. Accepted types are `OpticalOperatorModel` and subclasses')
        self._model = model
        self._value_presets = dict()

    @pyqtSlot(int, float, name='setValuePreset')
    def setValuePreset(self, preset, value):
        """
        Sets/adds a preset value

        :param preset: Preset-key
        :param value: Preset-value
        :type preset: int
        :type value: float
        """
        self._value_presets[preset] = value
        self.presetsChanged.emit()

    @pyqtSlot(int, name='setSilent')
    @pyqtSlot(bool, name='setSilent')
    @pyqtSlot(float, name='setSilent')
    def setSilent(self, value):
        """
        Disable signals from the model

        :param value: whether to disable or enable signals
        :type value: Union[int, float, bool]
        :return:
        """
        self._model.silent = value

    @pyqtSlot(float, name='setZ')
    def setZ(self, value):
        """
        Set the z-position of the model

        :param value: z-value
        :type value: float
        """
        self._model.z = value

    @pyqtSlot(float, name='setOffset')
    def setOffset(self, value):
        """
        Set the offset-value of the model

        :param value: offset-value
        :type value: float
        :return:
        """
        self._model.offset = value

    @pyqtSlot(float, name='setValue')
    def setFloatValue(self, value):
        """
        Set the value of the model

        :param value: the value
        :type value: float
        :return:
        """
        self._model.value = value

    @pyqtSlot(int, name='setValue')
    def setIntValue(self, value):
        """
        Set the value of the model based on preset values

        :param value: The preset-key to use
        :type value: int
        :return:
        """
        # Fall back to the key itself (as a float) when no preset exists.
        operator_value = self._value_presets.get(value, float(value))
        self._model.value = operator_value

    @pyqtSlot(str, float)
    def setParameter(self, parameter, value):
        """
        Sets a given parameter to a given value

        :param parameter: The parameter to set. Should be either "z", "offset", "value-float" or "value-int"
        :param value: The value to set
        :type parameter: str
        :type value: float
        :return:
        """
        if parameter.lower() == 'z':
            self.setZ(value)
        elif parameter.lower() == 'offset':
            self.setOffset(value)
        elif parameter.lower() == 'value-float':
            self.setFloatValue(value)
        elif parameter.lower() == 'value-int':
            self.setIntValue(int(value))
        else:
            raise ValueError(f'Could not set parameter {parameter} to {value} for {self!r}: Parameter not recognized.')

    @pyqtSlot(str, float, bool)
    @pyqtSlot(str, int, bool)
    @pyqtSlot(str, str, bool)
    def setStyle(self, field, value, focal):
        """
        Set a single style field on the model.

        :param field: style key to set
        :param value: the value to assign
        :param focal: apply to the focal-plane style; forced to False for non-lens operators
        """
        if self._model.is_lens:
            self._model.set_style(field, value, focal)
        else:
            self._model.set_style(field, value, False)

    @pyqtSlot(dict, bool)
    def setStyleDict(self, styles, focal):
        """
        Set several style fields at once, emitting styleChanged only once.

        :param styles: mapping of style keys to values
        :param focal: apply to the focal-plane style
        """
        # Suppress per-key styleChanged emissions, then emit a single
        # batched notification (unless the model was already blocked).
        blocked = self._model.signalsBlocked()
        if not blocked:
            self._model.blockSignals(True)
        for key in styles:
            self._model.set_style(key, styles[key], focal)
        if not blocked:
            self._model.blockSignals(False)
        self._model.styleChanged[dict].emit(styles)
class StyleWidget(QtWidgets.QWidget):
    """
    Widget for editing a matplotlib line style: linestyle, linewidth,
    RGB color and alpha.

    Emits styleChanged([dict]) with a copy of the style dictionary
    ('lw', 'ls', 'color', 'alpha') whenever any field changes.
    """
    styleChanged = pyqtSignal([dict])

    @property
    def styleDict(self):
        """Copy of the current style dictionary."""
        return dict(self._styledict)

    @property
    def widgets(self):
        """Mapping of logical field names to their editor widgets."""
        return {'style': self._linestyleCombobox, 'width': self._linewidthSpinbox, 'alpha': self._aSpinbox,
                'color': self._colorWidget}

    @staticmethod
    def _init_spinbox(spinbox, minimum, maximum, decimals, step, value):
        """Configure a QDoubleSpinBox and set its value without emitting signals."""
        spinbox.setMinimum(minimum)
        spinbox.setMaximum(maximum)
        spinbox.setDecimals(decimals)
        spinbox.setSingleStep(step)
        spinbox.blockSignals(True)
        spinbox.setValue(value)
        spinbox.blockSignals(False)

    def __init__(self, *args, **kwargs):
        super(StyleWidget, self).__init__(*args, **kwargs)
        self._styledict = dict()
        self._linewidthSpinbox = QtWidgets.QDoubleSpinBox(self)
        self._linestyleCombobox = QtWidgets.QComboBox(self)
        self._colorWidget = QtWidgets.QWidget(self)
        self._rSpinbox = QtWidgets.QDoubleSpinBox(self._colorWidget)
        self._gSpinbox = QtWidgets.QDoubleSpinBox(self._colorWidget)
        self._bSpinbox = QtWidgets.QDoubleSpinBox(self._colorWidget)
        self._aSpinbox = QtWidgets.QDoubleSpinBox(self._colorWidget)

        self._init_spinbox(self._linewidthSpinbox, 0, 10, 2, 0.1, 1)

        self._linestyleCombobox.addItems(lineStyles.keys())
        self._linestyleCombobox.blockSignals(True)
        self._linestyleCombobox.setCurrentText('-')
        self._linestyleCombobox.blockSignals(False)

        # Unit-interval channel spinboxes. Using the shared helper also
        # fixes the original copy-paste bug where the B spinbox's decimals
        # were set on the G spinbox instead.
        for channel_spinbox in (self._rSpinbox, self._gSpinbox, self._bSpinbox, self._aSpinbox):
            self._init_spinbox(channel_spinbox, 0, 1, 2, 0.1, 1.)

        gridlayout = QtWidgets.QGridLayout()
        for column, label in enumerate(('R', 'G', 'B', 'A')):
            gridlayout.addWidget(QtWidgets.QLabel(label), 0, column)
        gridlayout.addWidget(self._rSpinbox, 1, 0)
        gridlayout.addWidget(self._gSpinbox, 1, 1)
        gridlayout.addWidget(self._bSpinbox, 1, 2)
        gridlayout.addWidget(self._aSpinbox, 1, 3)
        self._colorWidget.setLayout(gridlayout)

        self._styledict['lw'] = self._linewidthSpinbox.value()
        self._styledict['ls'] = self._linestyleCombobox.currentText()
        self._styledict['color'] = self._current_color_hex()
        self._styledict['alpha'] = self._aSpinbox.value()

    def _current_color_hex(self):
        """Hex string for the color currently held by the R/G/B spinboxes."""
        return to_hex([self._rSpinbox.value(), self._gSpinbox.value(), self._bSpinbox.value()])

    def _set_channel(self, spinbox, value):
        """Set one color channel silently, refresh 'color' and emit styleChanged."""
        spinbox.blockSignals(True)
        spinbox.setValue(value)
        spinbox.blockSignals(False)
        self._styledict['color'] = self._current_color_hex()
        self.styleChanged[dict].emit(self.styleDict)

    @pyqtSlot(float, float, float)
    def setColorRGB(self, r, g, b):
        """Set all three color channels; emits styleChanged once at the end."""
        blocked = self.signalsBlocked()
        if not blocked:
            self.blockSignals(True)
        self.setRValue(r)
        self.setGValue(g)
        self.setBValue(b)
        if not blocked:
            self.blockSignals(False)
        self.styleChanged[dict].emit(self.styleDict)

    @pyqtSlot(str)
    def setColorHex(self, hex):
        """Set the color from a hex string such as '#rrggbb'."""
        try:
            color = to_rgb(hex)
        except ValueError as e:
            raise ValueError(f'Cannot set color for {self!r} for hex-string {hex!r}') from e
        else:
            self.setColorRGB(*color)

    @pyqtSlot(float)
    def setRValue(self, value):
        """Set the red channel."""
        self._set_channel(self._rSpinbox, value)

    @pyqtSlot(float)
    def setGValue(self, value):
        """Set the green channel."""
        self._set_channel(self._gSpinbox, value)

    @pyqtSlot(float)
    def setBValue(self, value):
        """Set the blue channel."""
        self._set_channel(self._bSpinbox, value)

    @pyqtSlot(float)
    def setAValue(self, value):
        """Set the alpha (opacity) value."""
        self._aSpinbox.blockSignals(True)
        self._aSpinbox.setValue(value)
        self._aSpinbox.blockSignals(False)
        self._styledict['alpha'] = self._aSpinbox.value()
        self.styleChanged[dict].emit(self.styleDict)

    @pyqtSlot(float)
    def setLinewidth(self, value):
        """Set the line width."""
        self._linewidthSpinbox.blockSignals(True)
        self._linewidthSpinbox.setValue(value)
        self._linewidthSpinbox.blockSignals(False)
        self._styledict['lw'] = self._linewidthSpinbox.value()
        self.styleChanged[dict].emit(self.styleDict)

    @pyqtSlot(str)
    def setLinestyle(self, value):
        """Set the line style (a key of matplotlib's lineStyles)."""
        self._linestyleCombobox.blockSignals(True)
        self._linestyleCombobox.setCurrentText(value)
        self._linestyleCombobox.blockSignals(False)
        self._styledict['ls'] = self._linestyleCombobox.currentText()
        # Emit a copy (styleDict), not the internal dict -- the original
        # emitted self._styledict here, inconsistent with every other
        # setter and letting receivers mutate internal state.
        self.styleChanged[dict].emit(self.styleDict)

    @pyqtSlot(dict)
    def setStyles(self, styles):
        """Set all fields from a dict with keys 'ls', 'lw', 'alpha', 'color'."""
        blocked = self.signalsBlocked()
        if not blocked:
            self.blockSignals(True)
        self.setLinestyle(styles['ls'])
        self.setLinewidth(styles['lw'])
        self.setAValue(styles['alpha'])
        self.setColorHex(styles['color'])
        if not blocked:
            self.blockSignals(False)
        self.styleChanged[dict].emit(self.styleDict)
class OpticalOperatorView(QtWidgets.QWidget):
    """
    Create a view for an OpticalOperator.
    This object provides a series of widgets and setup-tools for the widgets. The widgets are connected to a controller that controls the model, and changes in the model are reflected in the view - as long as the underlying data object (i.e. the OpticalOperator) is changed directly (not through the corresponding OpticalOperatorModel)
    """

    # Spinbox ranges/steps shared by all views.
    value_min = -999
    value_max = 999
    value_step = 0.1
    value_decimals = 2
    z_min = -999
    z_max = 999
    z_step = 0.5
    z_decimals = 2
    offset_min = -999
    offset_max = 999
    offset_step = 0.05
    offset_decimals = 2

    # Emitted after the plot has been redrawn in on_model_changed().
    plotUpdated = pyqtSignal(name='plotUpdated')

    @property
    def model(self):
        """The OpticalOperatorModel shown by this view."""
        return self._model

    def __init__(self, controller, *args, plot_widget=None, **kwargs):
        """
        Create a view for a controller.

        The following widgets will be created:
        -typeLabel: A QLabel to show the type of the operator
        -nameLabel: A QLabel to show the name of the operator
        -zSpinbox: A QDoubleSpinBox to control/show the z-position of the operator
        -offsetSpinbox: A QDoubleSpinBox to control/show the offset of the operator
        -valueSpinbox: A QDoubleSpinBox to control/show the value of the operator
        -valueDial: A QDial to control/show the value of the operator through preset values
        -valueIndicator: A QLabel to show the current value of the operator below the valueDial.
        -zStepSpinbox: A QDoubleSpinBox to control/show the singleStep of the zSpinbox.
        -offsetStepSpinbox: A QDoubleSpinBox to control/show the singleStep of the offsetSpinbox.
        -valueStepSpinbox: A QDoubleSpinBox to control/show the singleStep of the valueSpinbox.
        -plotWidget: A MplWidget to show the operator graphically in a plot area.

        :param controller: The controller to connect to. The model will be extracted from this controller.
        :param args: Optional positional arguments passed to QtWidgets.QWidget
        :param plot_widget: The plot-widget to use to show the optical operator on
        :param kwargs: Optional keyword arguments passed to QtWidgets.QWidget
        :type controller: OpticalOperatorController
        :type plot_widget: MplWidget
        """
        super(OpticalOperatorView, self).__init__(*args, **kwargs)
        if not isinstance(controller, OpticalOperatorController):
            raise TypeError()
        self._controller = controller
        self._model = self._controller.model
        self.typeLabel = QtWidgets.QLabel(self._model.operator_classname, self)
        self.nameLabel = QtWidgets.QLabel(self._model.label, self)
        self.zSpinbox = QtWidgets.QDoubleSpinBox(self)
        self.offsetSpinbox = QtWidgets.QDoubleSpinBox(self)
        self.valueSpinbox = QtWidgets.QDoubleSpinBox(self)
        self.valueDial = QtWidgets.QDial(self)
        self.valueIndicator = QtWidgets.QLabel(self)
        self.styleWidget = StyleWidget(self)
        # Only lenses get a second style editor for their focal planes.
        if self._model.is_lens:
            self.focalStyleWidget = StyleWidget(self)
        else:
            self.focalStyleWidget = None
        # self.zStepSpinbox = QtWidgets.QDoubleSpinBox(self)
        # self.offsetStepSpinbox = QtWidgets.QDoubleSpinBox(self)
        # self.valueStepSpinbox = QtWidgets.QDoubleSpinBox(self)
        # self.zDecimalsSpinbox = QtWidgets.QSpinBox(self)
        # self.offsetDecimalsSpinbox = QtWidgets.QSpinBox(self)
        # self.valueDecimalsSpinbox = QtWidgets.QSpinBox(self)
        # self.zMinimumLineEdit = QtWidgets.QLineEdit(self)
        # self.offsetMinimumLineEdit = QtWidgets.QLineEdit(self)
        # self.valueMinimumLineEdit = QtWidgets.QLineEdit(self)
        # self.zMaximumLineEdit = QtWidgets.QLineEdit(self)
        # self.offsetMaximumLineEdit = QtWidgets.QLineEdit(self)
        # self.valueMaximumLineEdit = QtWidgets.QLineEdit(self)
        if plot_widget is None:
            self.plotWidget = MplWidget(self)
        else:
            if isinstance(plot_widget, MplWidget):
                self.plotWidget = plot_widget
            else:
                raise TypeError(
                    f'Cannot create {self.__class__.__name__} for controller {self._controller!r} with model {self._model!r}. Provided plotWidget is not a MplWidget but a {type(plot_widget)}')
        # Cached artists returned by the model's show(); None until drawn.
        self._plot_data = None
        self.setupZSpinbox()
        self.setupValueDial()
        self.setupValueSpinbox()
        self.setupOffsetSpinbox()
        self.setupValueIndicator()
        self.styleWidget.setStyles(self._model.style)  # Simple setup for the stylewidgets
        # Listeners
        self._model.valueChanged[float].connect(self.on_value_changed)
        self._model.zChanged[float].connect(self.on_z_changed)
        self._model.offsetChanged[float].connect(self.on_offset_changed)
        self._model.labelChanged[str].connect(self.on_label_changed)
        self._model.operatorChanged.connect(lambda: self.on_model_changed())
        self._model.styleChanged[dict].connect(self.on_style_changed)
        # Signals
        self.zSpinbox.valueChanged[float].connect(self._controller.setZ)
        self.offsetSpinbox.valueChanged[float].connect(self._controller.setOffset)
        self.valueSpinbox.valueChanged[float].connect(self._controller.setFloatValue)
        self.valueDial.valueChanged[int].connect(self._controller.setIntValue)
        self.styleWidget.styleChanged[dict].connect(lambda x: self._controller.setStyleDict(x, False))

    def setupValueSpinbox(self):
        """
        Sets up the value spinbox

        :return:
        """
        self.valueSpinbox.setMinimum(self.value_min)
        self.valueSpinbox.setMaximum(self.value_max)
        self.valueSpinbox.setDecimals(self.value_decimals)
        self.valueSpinbox.setSingleStep(self.value_step)
        self.valueSpinbox.blockSignals(True)
        self.valueSpinbox.setValue(self._model.value)
        self.valueSpinbox.blockSignals(False)

    def setupZSpinbox(self):
        """Set up the z-position spinbox from the class-level limits."""
        self.zSpinbox.setMinimum(self.z_min)
        self.zSpinbox.setMaximum(self.z_max)
        self.zSpinbox.setDecimals(self.z_decimals)
        self.zSpinbox.setSingleStep(self.z_step)
        self.zSpinbox.blockSignals(True)
        self.zSpinbox.setValue(self._model.z)
        self.zSpinbox.blockSignals(False)

    def setupOffsetSpinbox(self):
        """Set up the offset spinbox; disabled for deflectors/propagators."""
        if self._model.is_deflector or self._model.is_propagator:
            self.offsetSpinbox.setEnabled(False)
        else:
            self.offsetSpinbox.setMinimum(self.offset_min)
            self.offsetSpinbox.setMaximum(self.offset_max)
            self.offsetSpinbox.setDecimals(self.offset_decimals)
            self.offsetSpinbox.setSingleStep(self.offset_step)
            self.offsetSpinbox.blockSignals(True)
            self.offsetSpinbox.setValue(self._model.offset)
            self.offsetSpinbox.blockSignals(False)

    def setupValueDial(self):
        """
        Configure the value dial from the controller's presets.

        The dial is disabled when fewer than two presets exist. Its
        background is green when the current model value matches a
        preset, light blue otherwise.
        """
        if len(self._controller.value_presets) < 2:
            self.valueDial.setEnabled(False)
            dial_value = None
        else:
            self.valueDial.setMinimum(min(self._controller.value_presets.keys()))
            self.valueDial.setMaximum(max(self._controller.value_presets.keys()))
            preset_matches = [key for key in self._controller.value_presets if
                              self._controller.value_presets[key] == self._model.value]
            if len(preset_matches) > 0:
                dial_value = min(preset_matches)
            else:
                dial_value = None
        self.valueDial.setTracking(True)
        self.valueDial.setNotchesVisible(True)
        if dial_value is None:
            if self.valueDial.isEnabled():
                self.valueDial.setStyleSheet('background-color : lightblue')
            else:
                pass
        else:
            self.valueDial.setStyleSheet('background-color : lightgreen')
            # NOTE(review): nesting reconstructed from flattened source --
            # the dial value is only written when a matching preset was
            # found (QDial.setValue(None) would raise TypeError); confirm
            # against upstream history.
            self.valueDial.blockSignals(True)
            self.valueDial.setValue(dial_value)
            self.valueDial.blockSignals(False)

    def setupValueIndicator(self):
        """Show the model's current value in the indicator label."""
        self.valueIndicator.setText(f'{self._model.value}')

    @pyqtSlot(float)
    def on_z_changed(self, value):
        """Sync the z spinbox when the model's z changes, widening limits as needed."""
        if self.zSpinbox.minimum() > value:
            self.zSpinbox.setMinimum(value)
        if self.zSpinbox.maximum() < value:
            self.zSpinbox.setMaximum(value)
        self.zSpinbox.blockSignals(True)
        self.zSpinbox.setValue(value)
        self.zSpinbox.blockSignals(False)

    @pyqtSlot(float)
    def on_offset_changed(self, value):
        """Sync the offset spinbox when the model's offset changes."""
        if self.offsetSpinbox.minimum() > value:
            self.offsetSpinbox.setMinimum(value)
        if self.offsetSpinbox.maximum() < value:
            self.offsetSpinbox.setMaximum(value)
        self.offsetSpinbox.blockSignals(True)
        self.offsetSpinbox.setValue(value)
        self.offsetSpinbox.blockSignals(False)

    @pyqtSlot(float)
    def on_value_changed(self, value):
        """Sync spinbox, dial and indicator when the model value changes."""
        if self.valueSpinbox.minimum() > value:
            self.valueSpinbox.setMinimum(value)
        if self.valueSpinbox.maximum() < value:
            self.valueSpinbox.setMaximum(value)
        self.valueSpinbox.blockSignals(True)
        self.valueSpinbox.setValue(value)
        self.valueSpinbox.blockSignals(False)
        preset_values = [key for key in self._controller.value_presets if self._controller.value_presets[key] == value]
        if len(preset_values) == 0:
            self.valueDial.setStyleSheet('background-color : lightblue')
        else:
            self.valueDial.setStyleSheet('background-color : lightgreen')
            # NOTE(review): nesting reconstructed -- only update the dial
            # when the new value corresponds to a preset (preset_values[0]
            # would raise IndexError on the empty branch); confirm.
            self.valueDial.blockSignals(True)
            self.valueDial.setValue(preset_values[0])
            self.valueDial.blockSignals(False)
        self.valueIndicator.setText(f'{value}')

    @pyqtSlot(str)
    def on_label_changed(self, value):
        """Sync the name label when the model's label changes."""
        self.nameLabel.setText(value)

    def on_model_changed(self, *args, **kwargs):
        """
        Redraw the operator on the plot widget.

        On the first call the operator is drawn from scratch and the
        resulting artists are cached; subsequent calls only update the
        cached lines' y-data.
        """
        kwargs.update({'ax': self.plotWidget.canvas.ax})
        if self._plot_data is None:
            # show() is assumed to return a 3-tuple whose last element is
            # the plotted line artists -- TODO confirm against RayTracing.
            _, _, self._plot_data = self._model.show(*args, **kwargs)
        else:
            if self._model.is_deflector:
                self._plot_data[0].set_ydata([self._model.z, self._model.z])
            elif self._model.is_lens:
                # A lens appears to draw three lines: the lens plane and
                # the two focal planes at z +/- value.
                [line.set_ydata([z, z]) for z, line in
                 zip([self._model.z, self._model.z + self._model.value, self._model.z - self._model.value],
                     self._plot_data)]
        self.plotUpdated.emit()

    @pyqtSlot(dict)
    def on_style_changed(self, style):
        """Push a model-side style change into the style widget and redraw."""
        self.styleWidget.blockSignals(True)
        self.styleWidget.setStyles(style)
        self.styleWidget.blockSignals(False)
        self.on_model_changed()
class SourceModel(QtCore.QObject):
    """
    Model for controlling a Source
    The model should ensure that proper signals are sent whenever the data of the Source has been changed.
    The model emits the following signals:
    :param zChanged: Signal ([], [float]) emitted whenever the z-value of the Source has changed.
    :param offsetChanged: Signal ([], [float]) emitted whenever the offset-value of the Source has changed.
    :param sizeChanged: Signal ([], [float]) emitted whenever the size-value of the Source has changed.
    :param anglesChanged: Signal ([], [np.ndarray]) emitted whenever the angles of the Source has changed.
    :param pointsChanged: Signal ([], [int]) emitted whenever the points-value of the Source has changed.
    :param sourceChanged: Signal emitted whenever any change has been made to the Source, including the above.
    """
    zChanged = pyqtSignal([], [float], name='zChanged')
    offsetChanged = pyqtSignal([], [float], name='offsetChanged')
    sizeChanged = pyqtSignal([], [float], name='sizeChanged')
    anglesChanged = pyqtSignal([], [np.ndarray], name='anglesChanged')
    pointsChanged = pyqtSignal([], [int], name='pointsChanged')
    # NOTE(review): the meta-object name 'operatorChanged' looks copy-pasted from
    # the operator model; kept unchanged in case something connects by that name.
    sourceChanged = pyqtSignal(name='operatorChanged')
    @property
    def z(self):
        """Z-position of the underlying Source."""
        return self._source.z
    @z.setter
    def z(self, value):
        if isinstance(value, float):
            self._source.z = value
            self.zChanged.emit()
            self.zChanged[float].emit(value)
            self.sourceChanged.emit()
        else:
            raise SourceModelError(
                f'Cannot set Z-value of {self.__class__.__name__} of {self._source!r}.') from TypeError(
                f'Value {value!r} must be `float`')
    @property
    def offset(self):
        """Lateral offset of the underlying Source."""
        return self._source.offset
    @offset.setter
    def offset(self, value):
        if isinstance(value, float):
            self._source.offset = value
            self.offsetChanged.emit()
            self.offsetChanged[float].emit(value)
            self.sourceChanged.emit()
        else:
            raise SourceModelError(
                f'Cannot set offset-value of {self.__class__.__name__} of {self._source!r}.') from TypeError(
                f'Value {value!r} must be `float`')
    @property
    def angles(self):
        """Angles emitted by the underlying Source."""
        return self._source.angles
    @angles.setter
    def angles(self, value):
        if isinstance(value, (list, tuple, np.ndarray)):
            if len(np.shape(value)) == 1:
                self._source.angles = np.array(value)
                self.anglesChanged.emit()
                # Fixed typo: was `self.angelesChanged`, which raised AttributeError.
                self.anglesChanged[np.ndarray].emit(np.array(value))
                # Fixed: was `self.operatorChanged` -- the Python attribute is
                # `sourceChanged` (only its meta-object name is 'operatorChanged').
                self.sourceChanged.emit()
            else:
                raise SourceModelError(
                    f'Cannot set angles of {self.__class__.__name__} of {self._source!r}.') from ValueError(
                    f'Argument {value!r} has invalid shape {np.shape(value)} != (1,).')
        else:
            raise SourceModelError(
                f'Cannot set angles of {self.__class__.__name__} of {self._source!r}.') from TypeError(
                f'Value {value!r} must be `tuple`, `list`, or `np.ndarray`.')
    @property
    def size(self):
        """Size of the underlying Source."""
        return self._source.size
    @size.setter
    def size(self, value):
        if isinstance(value, float):
            self._source.size = value
            self.sizeChanged.emit()
            self.sizeChanged[float].emit(value)
            self.sourceChanged.emit()
        else:
            raise SourceModelError(
                f'Cannot set size-value of {self.__class__.__name__} of {self._source!r}.') from TypeError(
                f'Value {value!r} must be `float`')
    @property
    def points(self):
        """Number of points the Source emits rays from."""
        return self._source.points
    @points.setter
    def points(self, value):
        if isinstance(value, int):
            self._source.points = value
            self.pointsChanged.emit()
            self.pointsChanged[int].emit(value)
            self.sourceChanged.emit()
        else:
            raise SourceModelError(
                f'Cannot set points-value of {self.__class__.__name__} of {self._source!r}.') from TypeError(
                f'Value {value!r} must be `int`')
    @property
    def silent(self):
        """Whether the model's signals are blocked."""
        return self._silent
    @silent.setter
    def silent(self, value):
        self._silent = bool(value)
        self.blockSignals(self._silent)
    def __init__(self, source, *args, **kwargs):
        """
        Create a model for a Source
        :param source: The Source to model
        :param args: Optional positional arguments passed to QtCore.QObject constructor
        :param kwargs: Optional keyword arguments passed to QtCore.QObject constructor
        :type source: Source
        :raises TypeError: if source is not a Source
        """
        super(SourceModel, self).__init__(*args, **kwargs)
        if not isinstance(source, Source):
            # Fixed misleading message: this model accepts Source, not OpticalOperator.
            raise TypeError(
                f'Cannot create {self.__class__.__name__} for {source!r}. Invalid type {type(source)}. Accepted type is Source.')
        self._source = source
        self._silent = False
    def __repr__(self):
        return f'{self.__class__.__name__}({self._source!r}, {self.parent()})'
    def __str__(self):
        return f'{self._source}'
class SourceController(QtCore.QObject):
    """
    Controller for controlling a SourceModel
    """
    @property
    def model(self):
        """The SourceModel driven by this controller."""
        return self._model
    def __init__(self, model, *args, **kwargs):
        """
        Create a controller for a SourceModel
        :param model: The model to control
        :param args: Optional positional arguments passed to QtCore.QObject constructor
        :param kwargs: Optional keyword arguments passed to QtCore.QObject constructor
        :type model: SourceModel
        :raises TypeError: if model is not a SourceModel
        """
        super(SourceController, self).__init__(*args, **kwargs)
        if not isinstance(model, SourceModel):
            raise TypeError(
                f'Cannot create {self.__class__.__name__} for {model!r}. Invalid type {type(model)}. Accepted type is `SourceModel`')
        self._model = model
    @pyqtSlot(int, name='setSilent')
    @pyqtSlot(bool, name='setSilent')
    @pyqtSlot(float, name='setSilent')
    def setSilent(self, value):
        """
        Block or unblock signals from the model.
        :param value: whether to disable or enable signals
        :type value: Union[int, float, bool]
        :return:
        """
        self._model.silent = value
    @pyqtSlot(float, name='setZ')
    def setZ(self, value):
        """
        Move the source to a new z-position.
        :param value: z-value
        :type value: float
        """
        self._model.z = value
    @pyqtSlot(float, name='setOffset')
    def setOffset(self, value):
        """
        Set the lateral offset of the source.
        :param value: offset-value
        :type value: float
        :return:
        """
        self._model.offset = value
    @pyqtSlot(float, name='setAngleMin')
    def setAngleMin(self, value):
        """
        Set the smallest emitted angle, keeping the maximum and the angle count.
        :param value: minimum angle
        :type value: float
        :return:
        """
        current = self._model.angles
        self._model.angles = np.linspace(value, np.max(current), num=len(current))
    @pyqtSlot(float, name='setAngleMax')
    def setAngleMax(self, value):
        """
        Set the largest emitted angle, keeping the minimum and the angle count.
        :param value: maximum angle
        :type value: float
        :return:
        """
        current = self._model.angles
        self._model.angles = np.linspace(np.min(current), value, num=len(current))
    @pyqtSlot(int, name='setAngleNumber')
    def setAngleNumber(self, value):
        """
        Set how many angles are emitted, keeping the angular range.
        :param value: the number of angles
        :type value: int
        :return:
        """
        current = self._model.angles
        self._model.angles = np.linspace(np.min(current), np.max(current), num=value)
    @pyqtSlot(list, name='setAngles')
    @pyqtSlot(tuple, name='setAngles')
    @pyqtSlot(np.ndarray, name='setAngles')
    def setAngles(self, value):
        """
        Replace the emitted angles wholesale.
        :param value: the angles
        :type value: Union[list, tuple, np.ndarray]
        :return:
        """
        self._model.angles = np.array(value)
    @pyqtSlot(float, name='addAngle')
    def addAngle(self, value):
        """
        Append a single angle to the source's angles.
        :param value: The angle to add
        :type value: float
        :return:
        """
        angles = list(self._model.angles)
        angles.append(value)
        self._model.angles = np.array(angles)
    @pyqtSlot(float, name='setSize')
    def setSize(self, value):
        """
        Set the size of the source.
        :param value: size-value
        :type value: float
        :return:
        """
        self._model.size = value
    @pyqtSlot(int, name='setPoints')
    def setPoints(self, value):
        """
        Set the number of points the source emits rays from.
        :param value: The number of points
        :type value: int
        :return:
        """
        self._model.points = value
    @pyqtSlot(str, float)
    def setParameter(self, parameter, value):
        """
        Set a named parameter ("z", "offset", "size", or "angle") to a value.
        :param parameter: The parameter to set
        :param value: The value to set
        :type parameter: str
        :type value: float
        :raises ValueError: if the parameter name is not recognized
        :return:
        """
        key = parameter.lower()
        if key == 'z':
            self.setZ(value)
        elif key == 'offset':
            self.setOffset(value)
        elif key == 'size':
            self.setSize(value)
        elif key == 'angle':
            self.addAngle(value)
        else:
            raise ValueError(f'Could not set parameter {parameter} to {value} for {self!r}: Parameter not recognized.')
class SourceView(QtWidgets.QWidget):
    """
    Create a view for a SourceModel.
    This object provides a series of widgets and setup-tools for the widgets. The widgets are connected to a controller that controls the model, and changes in the model are reflected in the view - as long as the underlying data object (i.e. the Source) is changed directly (not through the corresponding SourceModel)
    """
    size_min = -999
    size_max = 999
    size_step = 0.01
    # Precision of the size spinbox. Added because setupSizeSpinbox referenced a
    # missing `size_decimals`; `value_decimals` is kept for backward compatibility.
    size_decimals = 2
    value_decimals = 2
    size_points_min = 1
    size_points_max = 50
    size_points_step = 1
    z_min = -999
    z_max = 999
    z_step = 0.5
    z_decimals = 2
    offset_min = -999
    offset_max = 999
    offset_step = 0.05
    offset_decimals = 2
    angles_min = -90
    angles_max = 90
    angles_step = 0.01
    angles_decimals = 2
    angles_points_min = 1
    angles_points_max = 50
    angles_points_step = 1
    @property
    def model(self):
        """The SourceModel shown by this view."""
        return self._model
    def __init__(self, controller, *args, **kwargs):
        """
        Create a view for a controller.
        The following widgets will be created:
        -zSpinbox: A QDoubleSpinBox to control/show the z-position of the source
        -offsetSpinbox: A QDoubleSpinBox to control/show the offset of the source
        -sizeSpinbox: A QDoubleSpinBox to control/show the size of the source
        -pointsSpinBox: A QSpinBox to control/show the number of points to emit rays from the source for
        -anglesMinSpinBox: A QDoubleSpinBox to control/show the minimum angle to emit
        -anglesMaxSpinBox: A QDoubleSpinBox to control/show the maximum angle to emit
        -anglesNumberSpinBox: A QSpinBox to control/show the number of angles to emit from each point.
        :param controller: The controller to connect to. The model will be extracted from this controller.
        :param args: Optional positional arguments passed to QtWidgets.QWidget
        :param kwargs: Optional keyword arguments passed to QtWidgets.QWidget
        :type controller: SourceController
        :raises TypeError: if controller is not a SourceController
        """
        super(SourceView, self).__init__(*args, **kwargs)
        if not isinstance(controller, SourceController):
            raise TypeError()
        self._controller = controller
        self._model = self._controller.model
        self.zSpinbox = QtWidgets.QDoubleSpinBox(self)
        self.offsetSpinbox = QtWidgets.QDoubleSpinBox(self)
        self.sizeSpinbox = QtWidgets.QDoubleSpinBox(self)
        self.pointsSpinBox = QtWidgets.QSpinBox(self)
        self.anglesMinSpinBox = QtWidgets.QDoubleSpinBox(self)
        self.anglesMaxSpinBox = QtWidgets.QDoubleSpinBox(self)
        self.anglesNumberSpinBox = QtWidgets.QSpinBox(self)
        self.setupZSpinbox()
        self.setupOffsetSpinbox()
        self.setupSizeSpinbox()
        # Fixed: the call previously used the non-existent name `setupAnglesSpinBox`.
        self.setupAnglesSpinbox()
        # Listeners
        self._model.zChanged[float].connect(self.on_z_changed)
        self._model.offsetChanged[float].connect(self.on_offset_changed)
        self._model.sizeChanged[float].connect(self.on_size_changed)
        # Fixed typo: np.ndarra -> np.ndarray
        self._model.anglesChanged[np.ndarray].connect(self.on_angles_changed)
        self._model.pointsChanged[int].connect(self.on_points_changed)
        # Signals
        self.zSpinbox.valueChanged[float].connect(self._controller.setZ)
        self.offsetSpinbox.valueChanged[float].connect(self._controller.setOffset)
        self.sizeSpinbox.valueChanged[float].connect(self._controller.setSize)
        self.pointsSpinBox.valueChanged[int].connect(self._controller.setPoints)
        self.anglesMinSpinBox.valueChanged[float].connect(self._controller.setAngleMin)
        self.anglesMaxSpinBox.valueChanged[float].connect(self._controller.setAngleMax)
        self.anglesNumberSpinBox.valueChanged[int].connect(self._controller.setAngleNumber)
    def setupZSpinbox(self):
        """Configure range/step of the z spinbox and show the model's z silently."""
        self.zSpinbox.setMinimum(self.z_min)
        self.zSpinbox.setMaximum(self.z_max)
        self.zSpinbox.setDecimals(self.z_decimals)
        self.zSpinbox.setSingleStep(self.z_step)
        self.zSpinbox.blockSignals(True)
        self.zSpinbox.setValue(self._model.z)
        self.zSpinbox.blockSignals(False)
    def setupOffsetSpinbox(self):
        """Configure range/step of the offset spinbox and show the model's offset silently."""
        self.offsetSpinbox.setMinimum(self.offset_min)
        self.offsetSpinbox.setMaximum(self.offset_max)
        self.offsetSpinbox.setDecimals(self.offset_decimals)
        self.offsetSpinbox.setSingleStep(self.offset_step)
        self.offsetSpinbox.blockSignals(True)
        self.offsetSpinbox.setValue(self._model.offset)
        self.offsetSpinbox.blockSignals(False)
    def setupSizeSpinbox(self):
        """Configure the size and points spinboxes and show the model's values silently."""
        self.sizeSpinbox.setMinimum(self.size_min)
        self.sizeSpinbox.setMaximum(self.size_max)
        # Fixed: `size_decimals` did not previously exist as a class attribute.
        self.sizeSpinbox.setDecimals(self.size_decimals)
        self.sizeSpinbox.setSingleStep(self.size_step)
        self.sizeSpinbox.blockSignals(True)
        self.sizeSpinbox.setValue(self._model.size)
        self.sizeSpinbox.blockSignals(False)
        # Fixed attribute casing throughout: the widget is `pointsSpinBox`.
        self.pointsSpinBox.setMinimum(self.size_points_min)
        self.pointsSpinBox.setMaximum(self.size_points_max)
        self.pointsSpinBox.setSingleStep(self.size_points_step)
        self.pointsSpinBox.blockSignals(True)
        self.pointsSpinBox.setValue(self._model.points)
        self.pointsSpinBox.blockSignals(False)
    def setupAnglesSpinbox(self):
        """Configure the angle min/max/count spinboxes and show the model's angles silently."""
        # Fixed: the original referenced non-existent names (anglesMinSpinbox,
        # anglesMin_min, self._model.anglesMin, ...). The shared angle limits
        # (angles_min/angles_max/...) apply to both the min and the max spinbox.
        self.anglesMinSpinBox.setMinimum(self.angles_min)
        self.anglesMinSpinBox.setMaximum(self.angles_max)
        self.anglesMinSpinBox.setDecimals(self.angles_decimals)
        self.anglesMinSpinBox.setSingleStep(self.angles_step)
        self.anglesMinSpinBox.blockSignals(True)
        self.anglesMinSpinBox.setValue(float(np.min(self._model.angles)))
        self.anglesMinSpinBox.blockSignals(False)
        self.anglesMaxSpinBox.setMinimum(self.angles_min)
        self.anglesMaxSpinBox.setMaximum(self.angles_max)
        self.anglesMaxSpinBox.setDecimals(self.angles_decimals)
        self.anglesMaxSpinBox.setSingleStep(self.angles_step)
        self.anglesMaxSpinBox.blockSignals(True)
        self.anglesMaxSpinBox.setValue(float(np.max(self._model.angles)))
        self.anglesMaxSpinBox.blockSignals(False)
        self.anglesNumberSpinBox.setMinimum(self.angles_points_min)
        self.anglesNumberSpinBox.setMaximum(self.angles_points_max)
        self.anglesNumberSpinBox.setSingleStep(self.angles_points_step)
        self.anglesNumberSpinBox.blockSignals(True)
        self.anglesNumberSpinBox.setValue(len(self._model.angles))
        self.anglesNumberSpinBox.blockSignals(False)
    @pyqtSlot(float)
    def on_z_changed(self, value):
        """Widen the z spinbox range if needed and display the new z silently."""
        if self.zSpinbox.minimum() > value:
            self.zSpinbox.setMinimum(value)
        if self.zSpinbox.maximum() < value:
            self.zSpinbox.setMaximum(value)
        self.zSpinbox.blockSignals(True)
        self.zSpinbox.setValue(value)
        self.zSpinbox.blockSignals(False)
    @pyqtSlot(float)
    def on_offset_changed(self, value):
        """Widen the offset spinbox range if needed and display the new offset silently."""
        if self.offsetSpinbox.minimum() > value:
            self.offsetSpinbox.setMinimum(value)
        if self.offsetSpinbox.maximum() < value:
            self.offsetSpinbox.setMaximum(value)
        self.offsetSpinbox.blockSignals(True)
        self.offsetSpinbox.setValue(value)
        self.offsetSpinbox.blockSignals(False)
    @pyqtSlot(float)
    def on_size_changed(self, value):
        """Widen the size spinbox range if needed and display the new size silently."""
        if self.sizeSpinbox.minimum() > value:
            self.sizeSpinbox.setMinimum(value)
        if self.sizeSpinbox.maximum() < value:
            self.sizeSpinbox.setMaximum(value)
        self.sizeSpinbox.blockSignals(True)
        self.sizeSpinbox.setValue(value)
        self.sizeSpinbox.blockSignals(False)
    @pyqtSlot(float)
    def on_points_changed(self, value):
        """Widen the points spinbox range if needed and display the new count silently."""
        # Fixed attribute casing: pointsSpinbox -> pointsSpinBox.
        if self.pointsSpinBox.minimum() > value:
            self.pointsSpinBox.setMinimum(value)
        if self.pointsSpinBox.maximum() < value:
            self.pointsSpinBox.setMaximum(value)
        self.pointsSpinBox.blockSignals(True)
        self.pointsSpinBox.setValue(value)
        self.pointsSpinBox.blockSignals(False)
    @pyqtSlot(np.ndarray)
    def on_angles_changed(self, value):
        """Sync the angle min/max/count spinboxes with the model's new angles."""
        minimum = np.min(value)
        # Fixed: np.maximum is the elementwise binary ufunc; np.max is the reduction.
        maximum = np.max(value)
        n = len(value)
        if self.anglesMinSpinBox.minimum() > minimum:
            self.anglesMinSpinBox.setMinimum(minimum)
        if self.anglesMinSpinBox.maximum() < minimum:
            self.anglesMinSpinBox.setMaximum(minimum)
        # Fixed: previously compared the max spinbox's maximum() when deciding
        # whether to lower its minimum.
        if self.anglesMaxSpinBox.minimum() > maximum:
            self.anglesMaxSpinBox.setMinimum(maximum)
        if self.anglesMaxSpinBox.maximum() < maximum:
            self.anglesMaxSpinBox.setMaximum(maximum)
        if self.anglesNumberSpinBox.minimum() > n:
            self.anglesNumberSpinBox.setMinimum(n)
        if self.anglesNumberSpinBox.maximum() < n:
            self.anglesNumberSpinBox.setMaximum(n)
        self.anglesMinSpinBox.blockSignals(True)
        self.anglesMaxSpinBox.blockSignals(True)
        self.anglesNumberSpinBox.blockSignals(True)
        self.anglesMinSpinBox.setValue(minimum)
        # Fixed: the maximum was previously written into the Min spinbox.
        self.anglesMaxSpinBox.setValue(maximum)
        self.anglesNumberSpinBox.setValue(n)
        self.anglesMinSpinBox.blockSignals(False)
        self.anglesMaxSpinBox.blockSignals(False)
        self.anglesNumberSpinBox.blockSignals(False)
class ScreenModel(QtCore.QObject):
    """
    Model for controlling a Screen
    The model should ensure that proper signals are sent whenever the data of the Screen has been changed.
    The model emits the following signals:
    :param zChanged: Signal ([], [float]) emitted whenever the z-value of the Screen has changed.
    :param screenChanged: Signal emitted whenever any change has been made to the Screen, including the above.
    """
    zChanged = pyqtSignal([], [float], name='zChanged')
    # NOTE(review): the meta-object name 'operatorChanged' looks copy-pasted from
    # the operator model; kept unchanged in case something connects by that name.
    screenChanged = pyqtSignal(name='operatorChanged')
    @property
    def z(self):
        """Z-position of the underlying Screen."""
        return self._screen.z
    @z.setter
    def z(self, value):
        if isinstance(value, float):
            # Fixed: the original wrote to `self.screen.z`, but no `screen`
            # attribute exists -- the Screen is stored as `self._screen`.
            self._screen.z = value
            self.zChanged.emit()
            self.zChanged[float].emit(value)
            self.screenChanged.emit()
        else:
            raise ScreenModelError(
                f'Cannot set Z-value of {self.__class__.__name__} of {self._screen!r}.') from TypeError(
                f'Value {value!r} must be `float`')
    @property
    def silent(self):
        """Whether the model's signals are blocked."""
        return self._silent
    @silent.setter
    def silent(self, value):
        self._silent = bool(value)
        self.blockSignals(self._silent)
    def __init__(self, screen, *args, **kwargs):
        """
        Create a model for a Screen
        :param screen: The Screen to model
        :param args: Optional positional arguments passed to QtCore.QObject constructor
        :param kwargs: Optional keyword arguments passed to QtCore.QObject constructor
        :type screen: Screen
        :raises TypeError: if screen is not a Screen
        """
        super(ScreenModel, self).__init__(*args, **kwargs)
        if not isinstance(screen, Screen):
            raise TypeError(
                f'Cannot create {self.__class__.__name__} for {screen!r}. Invalid type {type(screen)}. Accepted type is Screen.')
        self._screen = screen
        self._silent = False
    def __repr__(self):
        return f'{self.__class__.__name__}({self._screen!r}, {self.parent()})'
    def __str__(self):
        return f'{self._screen}'
class ScreenController(QtCore.QObject):
    """
    Controller for controlling a ScreenModel
    """
    @property
    def model(self):
        """The ScreenModel driven by this controller."""
        return self._model
    def __init__(self, model, *args, **kwargs):
        """
        Create a controller for a ScreenModel
        :param model: The model to control
        :param args: Optional positional arguments passed to QtCore.QObject constructor
        :param kwargs: Optional keyword arguments passed to QtCore.QObject constructor
        :type model: ScreenModel
        :raises TypeError: if model is not a ScreenModel
        """
        super(ScreenController, self).__init__(*args, **kwargs)
        if not isinstance(model, ScreenModel):
            raise TypeError(
                f'Cannot create {self.__class__.__name__} for {model!r}. Invalid type {type(model)}. Accepted type is `ScreenModel`')
        self._model = model
    @pyqtSlot(int, name='setSilent')
    @pyqtSlot(bool, name='setSilent')
    @pyqtSlot(float, name='setSilent')
    def setSilent(self, value):
        """
        Block or unblock signals from the model.
        :param value: whether to disable or enable signals
        :type value: Union[int, float, bool]
        :return:
        """
        self._model.silent = value
    @pyqtSlot(float, name='setZ')
    def setZ(self, value):
        """
        Move the screen to a new z-position.
        :param value: z-value
        :type value: float
        """
        self._model.z = value
    @pyqtSlot(str, float)
    def setParameter(self, parameter, value):
        """
        Set a named parameter to a value; only "z" is supported.
        :param parameter: The parameter to set
        :param value: The value to set
        :type parameter: str
        :type value: float
        :raises ValueError: if the parameter name is not recognized
        :return:
        """
        if parameter.lower() != 'z':
            raise ValueError(f'Could not set parameter {parameter} to {value} for {self!r}: Parameter not recognized.')
        self.setZ(value)
#WIP: Make ScreenView
class MicroscopeModel(QtCore.QObject):
    """
    Model for a whole optical column: wraps an OpticalSystem and exposes
    per-element models (source, operators, screen) plus trace/print slots.
    """
    # modelChanged: emitted (by collaborating controllers/views) when a sub-model changes.
    modelChanged = pyqtSignal([], name='modelChanged')
    # systemFilled: emitted after fillSystem() has filled the optical system.
    systemFilled = pyqtSignal([], name='systemFilled')
    # systemTraced: emitted with the list of ray traces after trace() runs.
    systemTraced = pyqtSignal([list], name='systemTraced')
    @property
    def operatorModels(self):
        # Shallow copy so callers cannot mutate the internal list.
        return [model for model in self._operatorModels]
    @property
    def sourceModel(self):
        """Model wrapping the system's source."""
        return self._sourceModel
    @property
    def screenModel(self):
        """Model wrapping the system's screen."""
        return self._screenModel
    def __init__(self, optical_system, *args, **kwargs):
        """
        Create a model for an OpticalSystem.
        :param optical_system: the optical system to model
        :type optical_system: OpticalSystem
        :raises TypeError: if optical_system is not an OpticalSystem
        """
        super(MicroscopeModel, self).__init__(*args, **kwargs)
        if not isinstance(optical_system, OpticalSystem):
            raise TypeError(
                f'Cannot create {self.__class__.__name__} for source: {optical_system!r}. Expected type OpticalSystem not {type(optical_system)}')
        self._optical_system = optical_system
        self._sourceModel = SourceModel(optical_system.source)
        self._screenModel = ScreenModel(optical_system.screen)
        # One operator model per element; iterating the system yields its operators.
        self._operatorModels = [OpticalOperatorModel(operator, self.parent()) for operator in self._optical_system]
    def __iter__(self):
        # Yields the source model, then the operator models, then the screen model.
        for obj in [self.sourceModel] + self.operatorModels + [self.screenModel]:
            yield obj
    @pyqtSlot()
    def fillSystem(self):
        """Fill the optical system (delegated) and announce it via systemFilled."""
        self._optical_system.fill()
        self.systemFilled.emit()
    @pyqtSlot(name='trace', result=list)
    def trace(self):
        """Fill the system silently, run a ray trace, emit systemTraced, and return the traces."""
        self.blockSignals(True)
        self.fillSystem()
        self.blockSignals(False)
        # NOTE(review): `trace` is accessed as an attribute, not called --
        # presumably an OpticalSystem property that performs the trace; confirm.
        traces = self._optical_system.trace
        self.systemTraced[list].emit(traces)
        return traces
    @pyqtSlot(name='printSystem')
    def printSystem(self):
        """Print the optical system to stdout."""
        print(self._optical_system)
    @pyqtSlot(name='printTraces')
    def printTraces(self):
        """Print every ray trace as a table of (index, x, angle, z) rows."""
        traces = self._optical_system.trace
        for trace in traces:
            print(f'Trace {trace.label}:')
            t = tabulate([[i, ray.x, ray.angle_deg, ray.z] for i, ray in enumerate(trace)],
                         headers=['#', 'X', 'Angle [deg]', 'Z'])
            print(t)
class MicroscopeController(QtCore.QObject):
    """
    Controller for a MicroscopeModel: exposes per-element controllers and
    forwards named parameter changes to the matching operator controller(s).
    """
    @property
    def model(self):
        """The controlled MicroscopeModel."""
        return self._model
    @property
    def sourceController(self):
        # NOTE(review): set to None in __init__ and never assigned elsewhere
        # in this class -- presumably awaiting a SourceController hookup.
        return self._sourceController
    @property
    def screenController(self):
        # NOTE(review): likewise stays None until a ScreenController is wired up.
        return self._screenController
    @property
    def operatorControllers(self):
        # Shallow copy so callers cannot mutate the internal list.
        return [controller for controller in self._operatorControllers]
    def __init__(self, model, *args, **kwargs):
        """
        Create a controller for a MicroscopeModel.
        :param model: the model to control
        :type model: MicroscopeModel
        :raises TypeError: if model is not a MicroscopeModel
        """
        super(MicroscopeController, self).__init__(*args, **kwargs)
        if not isinstance(model, MicroscopeModel):
            raise TypeError(
                f'Cannot create {self.__class__.__name__} for model: {model!r}. Expected type MicroscopeModel not {type(model)}')
        self._model = model
        self._sourceController = None
        self._screenController = None
        # Only lenses and deflectors get interactive controllers.
        self._operatorControllers = [OpticalOperatorController(model) for model in self._model.operatorModels if
                                     (model.is_lens or model.is_deflector)]
    def __iter__(self):
        # NOTE(review): source/screen controllers are currently None, so
        # iteration yields None at both ends.
        for obj in [self.sourceController] + self.operatorControllers + [self.screenController]:
            yield obj
    @pyqtSlot(str, str, float)
    def setOperatorParameterByName(self, name, parameter, value):
        """
        Set `parameter` to `value` on every operator controller whose model
        name matches `name`, emitting modelChanged if anything was updated.
        :param name: the model name to match
        :param parameter: the parameter to set (forwarded to setParameter)
        :param value: the value to set
        """
        print(f'Setting {name} {parameter}={value}')
        # Replaced a side-effect list comprehension with an explicit loop.
        changes = 0
        for controller in self._operatorControllers:
            if controller.model_name == name:
                controller.setParameter(parameter, value)
                changes += 1
        if changes > 0:
            self._model.modelChanged.emit()
    @pyqtSlot(name='trace', result=list)
    def trace(self):
        """Run a ray trace via the model and return the traces."""
        return self._model.trace()
class MicroscopeView(QtWidgets.QMainWindow):
    """
    Main window of the microscope GUI: a ray-trace plot, a grid of operator
    control widgets, plot/print buttons, and menus opening style and control
    sub-windows.
    """
    # colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    # colors = plt.get_cmap('inferno', 10)
    colors = plt.get_cmap('tab20', 10)  # colormap used to colour ray traces
    @property
    def screenView(self):
        # NOTE(review): _screenView is never assigned (stays None), so
        # openScreenControl currently fails -- see the "WIP: Make ScreenView" note.
        return self._screenView
    @property
    def sourceView(self):
        # NOTE(review): _sourceView also stays None; source control goes through
        # the sourceControlWindow widgets instead.
        return self._sourceView
    @property
    def operatorViews(self):
        # Shallow copy so callers cannot mutate the internal list.
        return [view for view in self._operatorViews]
    def __init__(self, controller, *args, **kwargs):
        """
        Build the window for a MicroscopeController: create operator views, lay
        out the plot and widget grids, build the lens-style and source-control
        sub-windows and the menus, wire signals, and finish with an initial trace.
        :param controller: the controller whose model this window shows
        :type controller: MicroscopeController
        :raises TypeError: if controller is not a MicroscopeController
        """
        super(MicroscopeView, self).__init__(*args, **kwargs)
        if not isinstance(controller, MicroscopeController):
            raise TypeError()
        self._controller = controller
        self._model = self._controller.model
        self.plot_widget = MplWidget(self)
        self.lens_widgets = QtWidgets.QWidget(self)
        self.lens_widgets.setLayout(QtWidgets.QGridLayout())
        self.plot_button = QtWidgets.QPushButton('Plot')
        self.print_system_button = QtWidgets.QPushButton('Print system')
        self.print_traces_button = QtWidgets.QPushButton('Print rays')
        self._screenView = None
        self._sourceView = None
        self._operatorViews = [OpticalOperatorView(controller, self, plot_widget=self.plot_widget) for controller in
                               self._controller.operatorControllers]
        self._trace_lines = None
        # Central layout: plot on the left, lens widget grid on the right, buttons below.
        self.setCentralWidget(QtWidgets.QWidget(self))
        self.centralWidget().setLayout(QtWidgets.QGridLayout())
        self.centralWidget().layout().addWidget(self.plot_widget, 0, 0)
        self.centralWidget().layout().addWidget(self.lens_widgets, 0, 1)
        self.centralWidget().layout().addWidget(self.plot_button, 1, 0)
        self.centralWidget().layout().addWidget(self.print_system_button, 2, 0)
        self.centralWidget().layout().addWidget(self.print_traces_button, 3, 0)
        # Sub-window with per-lens style controls (opened from the Styles menu).
        self.lensStyleWindow = QtWidgets.QMainWindow()
        self.lensStyleWindow.setCentralWidget(QtWidgets.QWidget())
        self.lensStyleWindow.centralWidget().setLayout(QtWidgets.QGridLayout())
        self.lensStyleWindow.centralWidget().layout().addWidget(QtWidgets.QLabel('Name'), 0, 0)
        self.lensStyleWindow.centralWidget().layout().addWidget(QtWidgets.QLabel('Style'), 0, 1)
        self.lensStyleWindow.centralWidget().layout().addWidget(QtWidgets.QLabel('Width'), 0, 2)
        self.lensStyleWindow.centralWidget().layout().addWidget(QtWidgets.QLabel('Color'), 0, 4)
        # One style row per lens view (deflectors are skipped).
        [self.lensStyleWindow.centralWidget().layout().addWidget(QtWidgets.QLabel(f'{view.nameLabel.text()}'), i + 1, 0)
         for
         i, view in enumerate(self.operatorViews) if view.model.is_lens]
        [self.lensStyleWindow.centralWidget().layout().addWidget(view.styleWidget.widgets['style'], i + 1, 1) for
         i, view in enumerate(self.operatorViews) if view.model.is_lens]
        [self.lensStyleWindow.centralWidget().layout().addWidget(view.styleWidget.widgets['width'], i + 1, 2) for
         i, view in enumerate(self.operatorViews) if view.model.is_lens]
        [self.lensStyleWindow.centralWidget().layout().addWidget(view.styleWidget.widgets['color'], i + 1, 4) for
         i, view in enumerate(self.operatorViews) if view.model.is_lens]
        # [v for view in self.operatorViews]
        menubar = self.menuBar()
        self.controlMenu = menubar.addMenu('Controls')
        self.operatorAction = QtWidgets.QAction('&Operators', self)
        self.sourceAction = QtWidgets.QAction('&Source', self)
        self.screenAction = QtWidgets.QAction('&Screen', self)
        self.controlMenu.addAction(self.operatorAction)
        self.controlMenu.addAction(self.sourceAction)
        self.controlMenu.addAction(self.screenAction)
        self.styleMenu = menubar.addMenu('Styles')
        self.lensStyleAction = QtWidgets.QAction('&Lenses', self)
        self.deflectorStyleAction = QtWidgets.QAction('&Deflectors', self)
        self.rayStyleAction = QtWidgets.QAction('&Rays', self)
        self.styleMenu.addAction(self.lensStyleAction)
        self.styleMenu.addAction(self.deflectorStyleAction)
        self.styleMenu.addAction(self.rayStyleAction)
        self.lensStyleAction.triggered.connect(self.openLensStyle)
        # Source control
        self.sourceControlWindow = QtWidgets.QMainWindow()
        self.sourceControlWindow.setCentralWidget(QtWidgets.QWidget())
        self.sourceControlWindow.centralWidget().setLayout(QtWidgets.QGridLayout())
        self.sourceAngleMinimumSpinBox = QtWidgets.QDoubleSpinBox()
        self.sourceAngleMinimumSpinBox.setMinimum(-90)
        self.sourceAngleMinimumSpinBox.setMaximum(0)
        self.sourceAngleMinimumSpinBox.setDecimals(2)
        self.sourceAngleMinimumSpinBox.setSingleStep(0.01)
        self.sourceAngleMinimumSpinBox.setValue(-0.10)
        self.sourceAngleMaximumSpinBox = QtWidgets.QDoubleSpinBox()
        self.sourceAngleMaximumSpinBox.setMinimum(0)
        self.sourceAngleMaximumSpinBox.setMaximum(90)
        self.sourceAngleMaximumSpinBox.setDecimals(2)
        self.sourceAngleMaximumSpinBox.setSingleStep(0.01)
        self.sourceAngleMaximumSpinBox.setValue(0.10)
        self.sourceAngles = QtWidgets.QSpinBox()
        self.sourceAngles.setMinimum(1)
        self.sourceAngles.setMaximum(500)
        self.sourceAngles.setSingleStep(1)
        self.sourceAngles.setValue(3)
        self.sourceControlWindow.centralWidget().layout().addWidget(QtWidgets.QLabel('Angular range from'))
        self.sourceControlWindow.centralWidget().layout().addWidget(self.sourceAngleMinimumSpinBox)
        self.sourceControlWindow.centralWidget().layout().addWidget(QtWidgets.QLabel('to'))
        self.sourceControlWindow.centralWidget().layout().addWidget(self.sourceAngleMaximumSpinBox)
        self.sourceControlWindow.centralWidget().layout().addWidget(QtWidgets.QLabel('in'))
        self.sourceControlWindow.centralWidget().layout().addWidget(self.sourceAngles)
        self.sourceControlWindow.centralWidget().layout().addWidget(QtWidgets.QLabel('steps'))
        self.sourceAction.triggered.connect(self.openSourceControl)
        self.operatorAction.triggered.connect(self.openOperatorControl)
        self.screenAction.triggered.connect(self.openScreenControl)
        # Signals
        self.plot_button.clicked.connect(self.on_model_changed)
        self.print_system_button.clicked.connect(self._model.printSystem)
        [view.plotUpdated.connect(self._model.modelChanged) for view in self._operatorViews]
        self.print_traces_button.clicked.connect(self._model.printTraces)
        # Listeners
        self._model.modelChanged.connect(self.on_model_changed)
        self._model.systemTraced[list].connect(self.on_retraced)
        self.setup_lens_widgets()
        # show lenses
        [operator_view.on_model_changed(annotate=False) for operator_view in self._operatorViews]
        # Run raytracing and update the plot for an initial inspection
        self.on_model_changed()
    def setup_lens_widgets(self):
        """Populate the right-hand grid with a header row plus one row per operator view."""
        self.lens_widgets.layout().addWidget(QtWidgets.QLabel('Type', self.lens_widgets), 0, 0)
        self.lens_widgets.layout().addWidget(QtWidgets.QLabel('Name', self.lens_widgets), 0, 1)
        self.lens_widgets.layout().addWidget(QtWidgets.QLabel('Z', self.lens_widgets), 0, 2)
        self.lens_widgets.layout().addWidget(QtWidgets.QLabel('Offset', self.lens_widgets), 0, 3)
        self.lens_widgets.layout().addWidget(QtWidgets.QLabel('Value', self.lens_widgets), 0, 4)
        for i, view in enumerate(self.operatorViews):
            self.lens_widgets.layout().addWidget(view.typeLabel, i + 1, 0)
            self.lens_widgets.layout().addWidget(view.nameLabel, i + 1, 1)
            self.lens_widgets.layout().addWidget(view.zSpinbox, i + 1, 2)
            self.lens_widgets.layout().addWidget(view.offsetSpinbox, i + 1, 3)
            self.lens_widgets.layout().addWidget(view.valueSpinbox, i + 1, 4)
    @pyqtSlot(list, name='on_retraced')
    def on_retraced(self, traces):
        """
        Redraw the ray traces: remove old lines, colour traces by their starting
        x-position, re-plot, and rescale axes/ticks to the current system.
        :param traces: the list of ray traces from the model
        """
        # Grow the colormap if there are more traces than colours.
        if len(traces) > self.colors.N:
            self.colors = plt.get_cmap(self.colors.name, len(traces))
        if self._trace_lines is not None:
            [line[0].remove() for line in self._trace_lines]
        self.plot_widget.canvas.ax.set_prop_cycle(None)
        # One colour per distinct starting x, so rays from the same point share a colour.
        colors = {}
        for trace in traces:
            if trace[0].x in colors:
                pass
            else:
                # colors[trace[0].x] = self.colors[len(colors)]
                colors[trace[0].x] = self.colors(len(colors) / len(traces))
        self._trace_lines = [trace.show(ax=self.plot_widget.canvas.ax, annotate=False, color=colors[trace[0].x])[2] for
                             i, trace in enumerate(traces)]
        xs = [[ray.x for ray in raytrace] for raytrace in traces]
        minimum_x = min([min(x) for x in xs])
        maximum_x = max([max(x) for x in xs])
        ys = [[ray.z for ray in raytrace] for raytrace in traces]
        minimum_y = min([min(y) for y in ys])
        maximum_y = max([max(y) for y in ys])
        # Y-ticks: one per lens/deflector, plus FFP/BFP planes at z +/- value for lenses.
        ticks = [(operator.z, operator.label) for operator in self._model.operatorModels if
                 (operator.is_deflector or operator.is_lens)]
        additional_ticks = [(operator.z + operator.value, f'{operator.label} FFP') for operator in
                            self._model.operatorModels if operator.is_lens]
        additional_ticks.extend(
            [(operator.z - operator.value, f'{operator.label} BFP') for operator in self._model.operatorModels if
             operator.is_lens])
        ticks.extend(additional_ticks)
        self.plot_widget.canvas.ax.set_yticks([tick[0] for tick in ticks])
        self.plot_widget.canvas.ax.set_yticklabels([tick[1] for tick in ticks])
        self.plot_widget.canvas.ax.set_xlim(minimum_x, maximum_x)
        self.plot_widget.canvas.ax.set_ylim(minimum_y, maximum_y)
        print('Plot updated')
        self.plot_widget.canvas.draw()
    @pyqtSlot()
    def on_model_changed(self):
        """Re-run the ray trace; on_retraced redraws via the systemTraced signal."""
        self._model.trace()
    @pyqtSlot()
    def openLensStyle(self):
        """Show the lens style sub-window."""
        self.lensStyleWindow.show()
    @pyqtSlot()
    def openSourceControl(self):
        """Show the source control sub-window."""
        self.sourceControlWindow.show()
    @pyqtSlot()
    def openScreenControl(self):
        # NOTE(review): screenView is None (never constructed), so this raises
        # AttributeError when triggered -- needs the WIP ScreenView.
        self.screenView.show()
    @pyqtSlot()
    def openOperatorControl(self):
        # Not implemented yet; operators are controlled via the main-window grid.
        pass
        # self.operatorViews.show()
def full_column(angles=(-1, 0, 1), size=0, n_points=1):
    """
    Launch a GUI showing the full microscope column.
    Builds the complete optical system (gun deflectors, condenser lenses and
    aligners, objective, intermediate and projector stages), wraps it in the
    model/controller/view stack, and starts the Qt event loop (blocks until
    the window is closed).
    :param angles: The angles emitted from the source
    :param size: The size of the source
    :param n_points: The number of points to emit rays from
    """
    mygui = QtWidgets.QApplication(sys.argv)
    source = Source(150, angles, size=size, points=n_points)
    screen = Screen(-100)
    GUN1 = Deflector(0, label='GUN1', z=95)
    GUN2 = Deflector(0, label='GUN2', z=85)
    CL1 = Lens(10, label='CL1', z=80)
    CL2 = Lens(10, label='CL2', z=70)
    CL3 = Lens(10, label='CL3', z=60)
    CLA1 = Deflector(0, label='CLA1', z=50)
    CLA2 = Deflector(0, label='CLA2', z=40)
    CM = Lens(10, label='CM', z=30)
    OLPre = Lens(10, label='OLPre', z=5)
    OLPost = Lens(10, label='OLPost', z=-5)
    OM = Lens(10, label='OM', z=-15)
    ILA1 = Deflector(0, label='ILA1', z=-25)
    ILA2 = Deflector(0, label='ILA2', z=-30)
    IL1 = Lens(10, label='IL1', z=-40)
    IL2 = Lens(10, label='IL2', z=-50)
    IL3 = Lens(10, label='IL3', z=-60)
    PLA = Deflector(0, label='PLA', z=-70)
    # Fixed copy-paste label: the projector lens was labelled 'PLA', colliding
    # with the deflector above (labels are used as plot ticks and name lookups).
    PL = Lens(10, label='PL', z=-80)
    optical_system = OpticalSystem(source,
                                   [GUN1, GUN2, CL1, CL2, CL3, CLA1, CLA2, CM, OLPre, OLPost, OM, ILA1, ILA2, IL1, IL2,
                                    IL3, PLA, PL], screen)
    microscope_model = MicroscopeModel(optical_system)
    microscope_controller = MicroscopeController(microscope_model)
    microscope_view = MicroscopeView(microscope_controller)
    microscope_view.show()
    sys.exit(mygui.exec_())
def condenser_system(angles=(-1, 0, 1), size=0, n_points=1):
    """Build and show the condenser-only part of the optical column."""
    app = QtWidgets.QApplication(sys.argv)
    beam_source = Source(100, angles, size=size, points=n_points)
    target_screen = Screen(0)
    # Condenser elements, ordered from source (high z) toward the screen.
    elements = [
        Lens(6.3, label='CL1', z=82),
        Lens(8, label='CL3', z=60),
        Deflector(0, label='CLA1', z=49),
        Deflector(0, label='CLA2', z=42.5),
        Lens(10, label='CM', z=27),
        Lens(8.5, label='OLPre', z=8.5),
    ]
    system = OpticalSystem(beam_source, elements, target_screen)
    view = MicroscopeView(MicroscopeController(MicroscopeModel(system)))
    view.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
    # CLI entry point: pick which optical subsystem to visualize and how the
    # source emits beams.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--system', type=str, default='full', choices=['full', 'condenser', 'imaging'],
                        help='The system to show, i.e. the condenser, imaging, or full system.')
    parser.add_argument('--min_angle', dest='min_angle', type=float, default=-1,
                        help='The minimum angle to emit from the source')
    parser.add_argument('--max_angle', dest='max_angle', type=float, default=1,
                        help='The maximum angle to emit from the source')
    parser.add_argument('--n_angles', dest='n_angles', type=int, default=3,
                        help='The number of angles to emit from the source')
    parser.add_argument('--source_size', dest='source_size', type=float, default=0.0, help='The size of the source')
    parser.add_argument('--source_points', dest='source_points', type=int, default=1,
                        help='The number of points to emit beams from the source')
    arguments = parser.parse_args()
    # Evenly spaced emission angles between the requested bounds.
    angles = np.linspace(arguments.min_angle, arguments.max_angle, num=arguments.n_angles)
    if arguments.system == 'full':
        full_column(angles, size=arguments.source_size, n_points=arguments.source_points)
    elif arguments.system == 'condenser':
        condenser_system(angles, size=arguments.source_size, n_points=arguments.source_points)
    elif arguments.system == 'imaging':
        # Accepted by the parser but not implemented yet.
        raise NotImplementedError(f'System {arguments.system} is not supported yet.')
    else:
        raise ValueError(f'System {arguments.system} not recognized')
| 70,466 | 21,739 |
import json
import base64
import os
def lambda_handler(event, context):
    """AWS Lambda entry point: decode a base64 chessboard image, run the
    recognition script on it, and return the resulting FEN string.

    Args:
        event: Lambda proxy event; event['body'] holds the base64 image.
        context: Lambda context object (unused).

    Returns:
        dict: API-Gateway-style response with the FEN text in 'body'.
    """
    image_bytes = base64.b64decode(event['body'])
    # 'with' guarantees the handles are closed even if a later step fails
    # (the original leaked the handles on any exception).
    with open("/tmp/image.png", "wb") as image_file:
        image_file.write(image_bytes)
    # NOTE(review): the os.system return code is ignored; if the script
    # fails, /tmp/fen.txt may be stale or missing.
    os.system("./tensorflow_chessbot.py --filepath /tmp/image.png")
    with open("/tmp/fen.txt", "r") as fen_file:
        fen_line = fen_file.readline()
    print("Final FEN " + fen_line)
    return {
        'statusCode': 200,
        'body': str(fen_line),
        "headers": {
            "Access-Control-Allow-Origin": "*",
        }
    }
| 532 | 211 |
import komand
from .schema import DoProxiedDatasourceCallInput, DoProxiedDatasourceCallOutput
# Custom imports below
class DoProxiedDatasourceCall(komand.Action):
    """Komand action that forwards a GET request to a Grafana datasource
    through the /datasources/proxy endpoint."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name="do_proxied_datasource_call",
            description="Proxies all calls to the actual datasource",
            input=DoProxiedDatasourceCallInput(),
            output=DoProxiedDatasourceCallOutput(),
        )

    def run(self, params={}):
        # Build ["datasources", "proxy", <datasource id>, <path segments...>].
        path_segments = params.get("path").strip("/").split("/")
        url_parts = ["datasources", "proxy", params.get("datasource_id")] + path_segments
        response = self.connection.request("GET", url_parts, params=params.get("parameters"))
        if response.ok:
            return {"response": response.json()}
        # Surface the Grafana error message before raising the HTTP error.
        self.logger.error("Grafana API: " + response.json().get("message", ""))
        response.raise_for_status()

    def test(self):
        return self.connection.test()
| 976 | 279 |
import random,math
import numpy as np
import jittor as jt
from jittor.nn import Sequential,LeakyReLU
class LR:
    """Forward pre-hook that rescales a stored weight on every call.

    Looks like the equalized-learning-rate trick (weight stored as
    '<name>_o', rescaled by sqrt(2/fan_in) before each forward) --
    TODO confirm intent.
    """
    def __init__(self,n):
        # n: attribute name of the managed weight (e.g. 'weight').
        self.n=n
    def get_w(self,m):
        # Read the raw stored weight and scale by sqrt(2 / per-unit elements).
        w=getattr(m,self.n+'_o')
        return w*math.sqrt(2/w[0].numel())
    def __call__(self,m,_):
        # Pre-forward hook: materialize the scaled weight under the original name.
        setattr(m,self.n,self.get_w(m))
    @staticmethod
    def apply(m,n):
        # Move attribute n aside to n+'_o' and install the rescaling hook.
        w=getattr(m,n)
        delattr(m,n)
        setattr(m,n+'_o',w)
        m.register_pre_forward_hook(LR(n))
def lr(m,n='weight'):
    """Apply the LR weight-rescaling hook to module m's attribute n; return m."""
    LR.apply(m,n)
    return m
class _conv(jt.Module):
    """Conv2d with N(0,1) weight init, zero bias, wrapped by the lr() hook."""
    def __init__(self,*args,**kwargs):
        c=jt.nn.Conv2d(*args,**kwargs)
        jt.init.gauss_(c.weight,0,1)
        jt.init.constant_(c.bias,0)
        self.conv=lr(c)
    def execute(self,input):
        return self.conv(input)
class _linear(jt.Module):
    """Linear layer with N(0,1) weight init, zero bias, wrapped by the lr() hook."""
    def __init__(self,i,o):
        l=jt.nn.Linear(i,o)
        jt.init.gauss_(l.weight,0,1)
        jt.init.constant_(l.bias,0)
        self.linear=lr(l)
    def execute(self,input):
        return self.linear(input)
class nfrulb(jt.Function):
    """Helper Function used for blurfn's backward pass.

    Forward: depthwise 3x3 conv of o with kernel b (padding 1).
    Backward: convolves the incoming gradient with kernel a instead.
    Only meaningful when invoked from blurfn.grad.
    """
    def execute(self,o,a,b):
        self.x=a,b
        return jt.nn.conv2d(o,b,padding=1,groups=o.shape[1])
    def grad(self,o):
        a,_=self.x
        return jt.nn.conv2d(o,a,padding=1,groups=o.shape[1]),None,None
class blurfn(jt.Function):
    """Depthwise blur with a custom gradient.

    Forward convolves the input with kernel a; the gradient is computed by
    convolving with kernel b (the spatially flipped kernel, see Blur).
    """
    def execute(self,i,a,b):
        self.x=a,b
        return jt.nn.conv2d(i,a,padding=1,groups=i.shape[1])
    def grad(self,o):
        a,b=self.x
        return nfrulb().execute(o,a,b),None,None
class Blur(jt.Module):
    """Depthwise 3x3 binomial blur with a custom (flipped-kernel) gradient."""
    def __init__(self,c):
        # Normalized [1,2,1] x [1,2,1] binomial kernel, one copy per channel;
        # w_ is the spatially flipped kernel used in the backward pass.
        w=jt.array([[1,2,1],[2,4,2],[1,2,1]],dtype='float32').reshape(1,1,3,3)
        w=w/w.sum()
        self._w,self.w_=w.repeat(c,1,1,1),jt.flip(w,[2,3]).repeat(c,1,1,1)
    def execute(self,i):
        return blurfn().apply(i,self._w,self.w_)
class UpDn(jt.Module):
    """Fused resampling conv: stride-2 conv2d when ud=1 (downsample), or
    stride-2 conv_transpose2d when ud=0 (upsample).

    NOTE(review): self.mult is math.sqrt(2/i*s*s), i.e. sqrt(2*s*s/i) by
    operator precedence -- if fan-in scaling sqrt(2/(i*s*s)) was intended,
    parentheses are missing; confirm before changing.
    """
    def __init__(self,i,o,s,p=0,ud=0):
        # Weight layout depends on direction (conv vs conv_transpose).
        self.w,self.b=jt.randn(o if ud else i,i if ud else o,s,s),jt.zeros(o)
        self.mult,self.pad,self.ud=math.sqrt(2/i*s*s),p,ud
    def execute(self,i):
        # Pad the scaled kernel and average its four 1-pixel shifts, folding a
        # small smoothing filter into the resampling convolution itself.
        w,fn=jt.nn.pad(self.w*self.mult,[1,1,1,1]),jt.nn.conv2d if self.ud else jt.nn.conv_transpose2d
        return fn(i,(w[:,:,1:,1:]+w[:,:,:-1,1:]+w[:,:,1:,:-1]+w[:,:,:-1,:-1])/4,self.b,stride=2,padding=self.pad)
class Conv(jt.Module):
    """Two-convolution block: conv1 (+LeakyReLU), then conv2.

    With dn=True the second stage also downsamples -- either via a fused
    blurred stride-2 conv (fuse=True) or Blur + conv + AvgPool2d.
    """
    def __init__(self,i,o,s,p,s2=None,_p2=None,dn=False,fuse=False):
        # Second conv defaults to the first conv's kernel size / padding.
        p1,p2,k1,k2=p,_p2 if _p2 is not None else p,s,s2 if s2 is not None else s
        self.conv1=Sequential(_conv(i,o,k1,padding=p1),LeakyReLU(0.2),)
        self.conv2=(Sequential(Blur(o),UpDn(o,o,k2,p2,1),LeakyReLU(0.2),) if fuse else Sequential(Blur(o),_conv(o,o,k2,padding=p2),jt.nn.AvgPool2d(2),LeakyReLU(0.2),)) if dn else Sequential(_conv(o,o,k2,padding=p2),LeakyReLU(0.2),)
    def execute(self,input):
        return self.conv2(self.conv1(input))
class D(jt.Module):
    """Progressive-growing discriminator (StyleGAN-style -- TODO confirm).

    `pro` is the chain of downsampling blocks ending at 4x4; `rgb` holds the
    per-resolution fromRGB projections. execute() enters at step `stp` and,
    while 0<=a<1, fades the newest block in against the previous-resolution
    path (progressive growing).
    """
    def __init__(self,fuse=True,rgb=False):
        self.pro=jt.nn.ModuleList([Conv(16,32,3,1,dn=True,fuse=fuse),Conv(32,64,3,1,dn=True,fuse=fuse),Conv(64,128,3,1,dn=True,fuse=fuse),Conv(128,256,3,1,dn=True,fuse=fuse),Conv(256,512,3,1,dn=True),Conv(512,512,3,1,dn=True),Conv(512,512,3,1,dn=True),Conv(512,512,3,1,dn=True),Conv(513,512,3,1,4,0),])
        _rgb=lambda o: Sequential(_conv(3,o,1),LeakyReLU(0.2)) if rgb else _conv(3,o,1)
        self.rgb=jt.nn.ModuleList([_rgb(16),_rgb(32),_rgb(64),_rgb(128),_rgb(256),_rgb(512),_rgb(512),_rgb(512),_rgb(512),])
        self.linear=_linear(512,1)
    def execute(self,input,stp=0,a=-1):
        for i in range(stp,-1,-1):
            idx=len(self.pro)-i-1
            # fromRGB only at the entry resolution.
            out=self.rgb[idx](input) if i==stp else out
            # At the final 4x4 block, append a batch-statistics channel
            # (mean of per-element std across the batch).
            out=jt.concat([out,jt.array(np.std(out.data,axis=0).mean()).expand((out.size(0),1,4,4))],1) if i==0 else out
            out=self.pro[idx](out)
            # Fade-in blend against the downsampled previous-resolution path.
            out=(1-a)*self.rgb[idx+1](jt.nn.avg_pool2d(input,2))+a*out if i>0 and i==stp and 0<=a<1 else out
        return self.linear(out.squeeze(2).squeeze(2))
class const(jt.Module):
    """Learned constant s x s feature map, broadcast over the batch."""
    def __init__(self,c,s=4):
        self.x=jt.randn(1,c,s,s)
    def execute(self,i):
        # Only i's batch size is used; the input content is ignored.
        return self.x.repeat(i.shape[0],1,1,1)
class noise(jt.Module):
    """Adds noise map n to the input, scaled by a learned per-channel weight."""
    def __init__(self,c):
        # Initialized to zero so noise injection starts disabled.
        self.weight=jt.zeros((1,c,1,1))
    def execute(self,i,n):
        return i+self.weight*n
class Norm(jt.nn.Module):
    """AdaIN-style normalization: instance-norm the input, then apply a
    style-derived per-channel scale (gamma) and shift (beta)."""
    def __init__(self,i,d):
        self.norm,self.style=jt.nn.InstanceNorm2d(i,affine=False),_linear(d,i*2)
        # Bias init: gamma half starts at 1 (identity scale), beta half at 0.
        self.style.linear.bias.data[:i]=1
        self.style.linear.bias.data[i:]=0
    def execute(self,i,s):
        gamma,beta=self.style(s).unsqueeze(2).unsqueeze(3).chunk(2,1)
        return gamma*self.norm(i)+beta
class sConv(jt.Module):
    """Styled synthesis block: (const | upsample+conv | conv) followed by two
    rounds of noise injection, LeakyReLU, and style normalization."""
    def __init__(self,i,o,s=3,p=1,d=512,init=False,up=False,fuse=False,):
        # init=True: learned constant input; up=True: 2x upsample first,
        # either fused (UpDn) or nearest-upsample + conv + blur.
        self.conv1=const(i) if init else ((Sequential(UpDn(i,o,s,p,0),Blur(o),) if fuse else Sequential(jt.nn.Upsample(scale_factor=2,mode='nearest'),_conv(i,o,s,padding=p),Blur(o),)) if up else _conv(i,o,s,padding=p))
        self.noise1,self.adain1,self.lrelu1=lr(noise(o)),Norm(o,d),LeakyReLU(0.2)
        self.conv2,self.noise2,self.adain2,self.lrelu2=_conv(o,o,s,padding=p),lr(noise(o)),Norm(o,d),LeakyReLU(0.2)
    def execute(self,i,s,n):
        # conv -> +noise -> lrelu -> adain, twice.
        return self.adain2(self.lrelu2(self.noise2(self.conv2(self.adain1(self.lrelu1(self.noise1(self.conv1(i),n)),s)),n)),s)
class G(jt.Module):
    """Progressive synthesis network: constant 4x4 block up to full resolution.

    execute(s, n, stp, a, mix): s is a list of per-layer style vectors and n
    the per-resolution noise maps. Random crossover indices implement style
    mixing unless an explicit `mix` layer range is given; `a` is the fade-in
    blend factor while growing to step `stp`.
    """
    def __init__(self,fuse=True):
        self.pro=jt.nn.ModuleList([sConv(512,512,3,1,init=True),sConv(512,512,3,1,up=True),sConv(512,512,3,1,up=True),sConv(512,512,3,1,up=True),sConv(512,256,3,1,up=True),sConv(256,128,3,1,up=True,fuse=fuse),sConv(128,64,3,1,up=True,fuse=fuse),sConv(64,32,3,1,up=True,fuse=fuse),sConv(32,16,3,1,up=True,fuse=fuse),])
        self.rgb=jt.nn.ModuleList([_conv(512,3,1),_conv(512,3,1),_conv(512,3,1),_conv(512,3,1),_conv(256,3,1),_conv(128,3,1),_conv(64,3,1),_conv(32,3,1),_conv(16,3,1),])
    def execute(self,s,n,stp=0,a=-1,mix=(-1,-1)):
        # idx: random crossover layer indices when several styles are given.
        out,cros,idx=n[0],0,[len(self.pro)+1] if len(s)<2 else sorted(random.sample(list(range(stp)),len(s)-1))
        for i,(conv,rgb) in enumerate(zip(self.pro,self.rgb)):
            # Pick which style vector feeds this layer (mixing regularization).
            sstp=s[min(cros+1,len(s)) if (cros<len(idx) and i>idx[cros]) else cros] if mix==(-1,-1) else (s[1] if mix[0]<=i<=mix[1] else s[0])
            _out=out if i>0 and stp>0 else None
            out=conv(out,sstp,n[i])
            if i-stp==0:
                # toRGB at the target step, blended with the upsampled
                # previous-resolution RGB output while fading in.
                out=(1-a)*jt.nn.interpolate(self.rgb[i-1](_out),scale_factor=2,mode='nearest')+a*rgb(out) if i>0 and 0<=a<1 else rgb(out)
                break
        return out
class pNorm(jt.Module):
    """Pixel norm: divide by the RMS across the channel dimension (eps 1e-8)."""
    def execute(self,i):
        return i/jt.sqrt(jt.mean(i**2,dim=1,keepdims=True)+1e-8)
class sG(jt.Module):
    """Style-based generator: a mapping MLP (self.s) feeding the synthesis
    network (self.g). Supports style mixing (multiple latents in `i`) and the
    truncation trick via (ms, sw)."""
    def __init__(self,d=512,n=8):
        # Mapping network: pixel norm followed by n Linear+LeakyReLU layers.
        self.g,ls=G(),[pNorm()]
        for _ in range(n):
            ls.extend([_linear(d,d),LeakyReLU(0.2)])
        self.s=Sequential(*ls)
    def execute(self,i,n=None,stp=0,a=-1,ms=None,sw=0,mix=(-1,-1)):
        i=[i] if type(i) not in (list,tuple) else i
        ss=[self.s(j) for j in i]
        # Default noise: one map per resolution, 4x4 doubling up to step stp.
        n=[jt.randn(i[0].shape[0],1,4*2**j,4*2**j) for j in range(stp+1)] if n is None else n
        if ms is not None:
            # Truncation trick: pull each style toward the mean style ms.
            norm=[ms+sw*(s-ms) for s in ss]
            ss=norm
        return self.g(ss,n,stp,a,mix=mix)
    def ms(self,i):
        """Mean style over a batch of latents (input to the truncation trick)."""
        return self.s(i).mean(0,keepdims=True)
| 7,220 | 3,454 |
#! /usr/bin/env python3
import random
# acc. to assignment we only need two suits (half of deck)
#spades = 1..13
#hearts = 1..13 * 2
suits = {"spades": 1, "hearts": 2}
# Spelled-out card values -> numeric rank. The misspelled key "elseven" is
# kept for backward compatibility with existing callers; the corrected
# spelling "eleven" maps to the same rank.
values = {"one": 1, "two": 2, "three": 3, "four": 4, "five": 5, "six": 6, "seven": 7,
          "eight": 8, "nine": 9, "ten": 10, "eleven": 11, "elseven": 11, "twelve": 12, "thirteen": 13}
# Jokers are represented as [name, 27] pairs.
joker_a = ["joker_a", 27]
joker_b = ["joker_b", 27]
def create_deck():
    """Creates a deck of 26 cards (-2 jokers)"""
    # Suits 1-2 crossed with values 1-13, suit varying slowest.
    return [[suit, value] for suit in range(1, 3) for value in range(1, 14)]
def shuffle_deck(deck_to_shuffle):
    """Shuffle the given deck IN PLACE and return the same list.

    The RNG is re-seeded with the fixed value 10 on every call so the
    resulting order is reproducible (needed for a stable passkey).
    """
    random.seed(10)
    random.shuffle(deck_to_shuffle)
    return deck_to_shuffle
def pick_card(deck_to_pick_from):
    """Return a uniformly random card from the deck (the card is NOT removed)."""
    return random.choice(deck_to_pick_from)
def insert_jokers(deck_to_insert_into):
    """Append joker_a and joker_b (module-level constants) to the deck, in place."""
    deck_to_insert_into.extend([joker_a, joker_b])
def insert_card_by_name(card_in_text, deck_to_insert_into):
    """Append a card described in text (e.g. "five of Hearts") to the deck.

    Args:
        card_in_text: String of the form "<value> of <suit>".
        deck_to_insert_into: Deck list, mutated in place.

    Raises:
        KeyError: If the value word is not in the `values` table.
        ValueError: If the suit is neither spades nor hearts (the original
            silently appended the raw suit string instead).

    NOTE(review): suits are encoded 0 (spades) / 1 (hearts) here, matching
    get_suit_of_card but not create_deck's 1/2 encoding -- confirm which is
    canonical.
    """
    splitted_string = card_in_text.split()
    value = values[splitted_string[0]]
    # Case-insensitive suit match (accepts "spades", "Spades", "SPADES", ...).
    suit_name = splitted_string[2].lower()
    if suit_name == "spades":
        suit = 0
    elif suit_name == "hearts":
        suit = 1
    else:
        raise ValueError("unknown suit: " + splitted_string[2])
    deck_to_insert_into.append([suit, value])
def insert_card_by_dict(card, deck_to_insert_into):
    """Append a [suit, value] card to the last position of the deck, in place."""
    deck_to_insert_into.append(card)
def get_value_of_card(position_of_card, deck):
    """Return the numeric value (second element) of the card at that position."""
    card = deck[position_of_card]
    return card[1]
def get_suit_of_card(position_of_card, deck):
    """Return "Spades"/"Hearts" for suit code 0/1 of the card at that
    position, or None for any other code."""
    return {0: "Spades", 1: "Hearts"}.get(deck[position_of_card][0])
def display_card(position_of_card, deck):
    """Return the card at the given position as text, e.g. "5 of Hearts"."""
    suit = get_suit_of_card(position_of_card, deck)
    value = get_value_of_card(position_of_card, deck)
    return f"{value} of {suit}"
| 2,748 | 1,027 |
# Program: Old Car or New Car (intentionally using `if` twice, per the assignment)?
idade = int(input('Digite a idade do seu carro: '))
if idade <= 3:
    print('O seu carro é novo!')
if idade > 3:
    print('O seu carro é velho!')
# Note: no single number makes both conditions true at the same time, so the
# two independent `if` statements are safe; the second condition alone decides
# whether the "old car" message is printed.
import RPi.GPIO as GPIO
from socket import AF_INET, SOCK_DGRAM
import socket
import struct, time
GPIO.setmode(GPIO.BOARD)
__author__ = 'Sergey'
class NixieClock(object):
    """Drives a six-tube nixie clock over Raspberry Pi GPIO (BOARD numbering).

    The tubes are multiplexed: each tube's select channel is pulsed high
    while the 4-bit code of its digit is put on the shared digit pins.
    NOTE: this is Python 2 code (see the long-integer literal below).
    """
    # Per-tube select pins.
    CHANNEL_HOUR_0 = 3
    CHANNEL_HOUR_1 = 5
    CHANNEL_MINUTE_0 = 7
    CHANNEL_MINUTE_1 = 11
    CHANNEL_SECOND_0 = 13
    CHANNEL_SECOND_1 = 15
    CHANNEL_SECONDS = [CHANNEL_SECOND_0, CHANNEL_SECOND_1]
    CHANNEL_HOURS = [CHANNEL_HOUR_0, CHANNEL_HOUR_1]
    CHANNEL_MINUTES = [CHANNEL_MINUTE_0, CHANNEL_MINUTE_1]
    # Shared BCD digit pins, most significant bit first.
    channels_digits = [40, 38, 36, 32] # 8 - 4 - 2 - 1
    TUBE_TYPE_SECONDS = "TUBE_TYPE_SECONDS"
    TUBE_TYPE_HOURS_MINUTES = "TUBE_TYPE_HOURS_MINUTES"
    def __init__(self):
        # Configure every select pin and the shared digit bus as outputs.
        GPIO.setup(self.CHANNEL_HOURS + self.CHANNEL_MINUTES + self.CHANNEL_SECONDS, GPIO.OUT)
        GPIO.setup(self.channels_digits, GPIO.OUT)
    def _show(self, d_digit, tube_type):
        """Put the 4-bit code for d_digit (0-9) on the shared digit pins."""
        if d_digit > 9 or d_digit < 0:
            raise Exception("invalid passed parameter")
        digit_to_show = d_digit
        if tube_type == self.TUBE_TYPE_HOURS_MINUTES:
            # Hours/minutes tubes use a remapped cathode order -- presumably a
            # wiring difference from the seconds tubes; confirm on hardware.
            convert_digit = {0:2, 1:1, 2:0, 3:9, 4:8, 5:7, 6:6, 7:5, 8:4, 9:3}
            digit_to_show = convert_digit[d_digit]
        # Zero-padded 4-bit binary string -> list of 0/1 pin levels.
        b_array = [abs(int(i)) for i in '{0:0>4b}'.format(digit_to_show)]
        GPIO.output(self.channels_digits, b_array)
    def _convertToDigitArray(self, number):
        """Split a non-negative int into decimal digits, most significant first
        (always returns at least two digits, e.g. 5 -> [0, 5])."""
        digits = []
        while True:
            d = number%10
            digits.append(d)
            number = (number - d)/10
            if number<=9:
                digits.append(number)
                break
        return list(reversed(digits))
    def _getNTPTime(self, host="pool.ntp.org"):
        """Query an NTP server and return the time as a UTC struct_time.

        NOTE(review): not called by run(), which uses time.localtime().
        """
        port = 123
        buf = 1024
        address = (host, port)
        msg = '\x1b' + 47 * '\0'
        # reference time (in seconds since 1900-01-01 00:00:00)
        TIME1970 = 2208988800L # 1970-01-01 00:00:00
        # connect to server
        client = socket.socket(AF_INET, SOCK_DGRAM)
        client.sendto(msg, address)
        msg, address = client.recvfrom(buf)
        # Transmit timestamp is word 10 of the NTP response.
        t = struct.unpack("!12I", msg)[10]
        t -= TIME1970# - 3600 * 3
        return time.gmtime(t)
    def run(self):
        """Multiplex the current local time onto the tubes forever."""
        while True:
            struct_time = time.localtime() #(tm_year,tm_mon,tm_mday,tm_hour,tm_min, tm_sec,tm_wday,tm_yday,tm_isdst)
            hours = self._convertToDigitArray(struct_time[3])
            minutes = self._convertToDigitArray(struct_time[4])
            seconds = self._convertToDigitArray(struct_time[5])
            # Light each tube briefly with its digit, then move on.
            for channel, digit in zip(self.CHANNEL_HOURS + self.CHANNEL_MINUTES, hours + minutes):
                GPIO.output(channel, GPIO.HIGH)
                self._show(digit, self.TUBE_TYPE_HOURS_MINUTES)
                time.sleep(0.005)
                GPIO.output(channel, GPIO.LOW)
            for channel, digit in zip(self.CHANNEL_SECONDS, seconds):
                GPIO.output(channel, GPIO.HIGH)
                self._show(digit, self.TUBE_TYPE_SECONDS)
                time.sleep(0.002)
                GPIO.output(channel, GPIO.LOW)
# Runs the clock loop immediately on import (no __main__ guard) --
# NOTE(review): add a guard if this module is ever imported elsewhere.
nc = NixieClock()
nc.run()
import django
import os
from unittest.mock import patch
from django.apps import apps
from django.conf import settings
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.state import ProjectState
from django.db.migrations.recorder import MigrationRecorder
from django.db import connections
from django.core.management import call_command
from django.test import TransactionTestCase, override_settings
# Point Django at the test settings and bootstrap the app registry before the
# test classes below are defined.
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_settings'
django.setup()
class MigrationTests(TransactionTestCase):
    """Checks that applying / rolling back the 'migrations' app migrations
    creates and removes the expected database view."""

    available_apps = ['migrations']

    def tearDown(self):
        # Wipe recorded 'migrations'-app rows on every database so each test
        # starts from an unmigrated state.
        for alias in self.databases:
            recorder = MigrationRecorder(connections[alias])
            recorder.migration_qs.filter(app='migrations').delete()

    @staticmethod
    def _view_names(using):
        # Names of all database VIEWs visible on the given connection.
        with connections[using].cursor() as cursor:
            table_list = connections[using].introspection.get_table_list(cursor)
        return [entry.name for entry in table_list if entry.type == 'v']

    def assertTableNotExists(self, table, using='default'):
        with connections[using].cursor() as cursor:
            self.assertNotIn(table, connections[using].introspection.table_names(cursor))

    def assertViewExists(self, view, using='default'):
        self.assertIn(view, self._view_names(using))

    def assertViewNotExists(self, view, using='default'):
        self.assertNotIn(view, self._view_names(using))

    @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_basic_view_creation'})
    def test_migrate_successfully_creates_view(self):
        call_command('migrate')
        self.assertViewExists('question_stat')

    @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_basic_view_creation'})
    def test_roll_back_successfully_removes_view(self):
        call_command('migrate')
        call_command('migrate', 'migrations', 'zero')
        self.assertViewNotExists('question_stat')
| 2,106 | 598 |
"""
Helper function for create directories
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
def create_dirs(dirs):
    """Create each directory in *dirs*, warning about ones that already exist.

    Args:
        dirs (list of string): Directory paths to create (intermediate
            directories are created as needed).
    """
    logger = logging.getLogger('Create Directories')
    for path in dirs:
        try:
            os.makedirs(path)
        except FileExistsError:
            # Already present -- not an error, just note it.
            logger.warning('Directories already exist: %s', path)
# Generated by Django 3.0.8 on 2020-07-22 07:34
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated schema for the 'reman' app (ECU repair tracking).

    Auto-generated by Django 3.0.8 -- do not hand-edit operations; create a
    follow-up migration for any schema change.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Production batch of units to repair.
        migrations.CreateModel(
            name='Batch',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('year', models.CharField(max_length=1, verbose_name='années')),
                ('number', models.IntegerField(validators=[django.core.validators.MaxValueValidator(999), django.core.validators.MinValueValidator(1)], verbose_name='numéro de lot')),
                ('quantity', models.IntegerField(validators=[django.core.validators.MaxValueValidator(999), django.core.validators.MinValueValidator(1)], verbose_name='quantité')),
                ('batch_number', models.CharField(blank=True, max_length=10, unique=True, verbose_name='numéro de lot')),
                ('active', models.BooleanField(default=True)),
                ('start_date', models.DateField(null=True, verbose_name='date de début')),
                ('end_date', models.DateField(null=True, verbose_name='date de fin')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('created_by', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Catalogue of defect codes.
        migrations.CreateModel(
            name='Default',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=10, unique=True, verbose_name='code defaut')),
                ('description', models.CharField(max_length=200, verbose_name='libellé')),
            ],
        ),
        # One repair job on one unit within a batch.
        migrations.CreateModel(
            name='Repair',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('identify_number', models.CharField(max_length=10, unique=True, verbose_name="n° d'identification")),
                ('product_number', models.CharField(blank=True, max_length=50, verbose_name='référence')),
                ('remark', models.CharField(blank=True, max_length=1000, verbose_name='remarques')),
                ('quality_control', models.BooleanField(default=False, verbose_name='contrôle qualité')),
                ('checkout', models.BooleanField(default=False, verbose_name='contrôle de sortie')),
                ('closing_date', models.DateTimeField(blank=True, null=True, verbose_name='date de cloture')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='ajouté le')),
                ('modified_at', models.DateTimeField(auto_now=True, verbose_name='modifié le')),
                ('batch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='repairs', to='reman.Batch')),
                ('created_by', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='repairs_created', to=settings.AUTH_USER_MODEL)),
                ('default', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='repairs', to='reman.Default')),
                ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='repairs_modified', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Spare-part stock record (warehouse location + availability).
        migrations.CreateModel(
            name='SparePart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code_produit', models.CharField(max_length=100, verbose_name='code Produit')),
                ('code_magasin', models.CharField(blank=True, max_length=20, verbose_name='code Magasin')),
                ('code_zone', models.CharField(blank=True, max_length=20, verbose_name='code Zone')),
                ('code_site', models.IntegerField(blank=True, null=True, verbose_name='code Site')),
                ('code_emplacement', models.CharField(blank=True, max_length=10, verbose_name='code Emplacement')),
                ('cumul_dispo', models.IntegerField(blank=True, null=True, verbose_name='cumul Dispo')),
                ('repairs', models.ManyToManyField(blank=True, related_name='spare_part', to='reman.Repair')),
            ],
        ),
        # ECU hardware type, keyed by hardware reference.
        migrations.CreateModel(
            name='EcuType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hw_reference', models.CharField(max_length=10, unique=True, verbose_name='hardware')),
                ('technical_data', models.CharField(blank=True, max_length=50, verbose_name='modèle produit')),
                ('supplier_oe', models.CharField(blank=True, max_length=50, verbose_name='fabriquant')),
                ('spare_part', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='reman.SparePart')),
            ],
        ),
        # Mapping of an ECU type to its REMAN reference.
        migrations.CreateModel(
            name='EcuRefBase',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('reman_reference', models.CharField(max_length=10, unique=True, verbose_name='référence REMAN')),
                ('ecu_type', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='reman.EcuType')),
            ],
        ),
        # Concrete ECU model identified by its PSA barcode.
        migrations.CreateModel(
            name='EcuModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('psa_barcode', models.CharField(max_length=10, unique=True, verbose_name='code barre PSA')),
                ('oe_raw_reference', models.CharField(max_length=10, verbose_name='réference OEM brute')),
                ('oe_reference', models.CharField(blank=True, max_length=10, verbose_name='référence OEM')),
                ('sw_reference', models.CharField(blank=True, max_length=10, verbose_name='software')),
                ('former_oe_reference', models.CharField(blank=True, max_length=50, verbose_name='ancienne référence OEM')),
                ('supplier_es', models.CharField(blank=True, max_length=50, verbose_name='service après vente')),
                ('ecu_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='reman.EcuType')),
            ],
        ),
        # Late-added M2M: which ECU types a defect applies to.
        migrations.AddField(
            model_name='default',
            name='ecu_type',
            field=models.ManyToManyField(blank=True, related_name='defaults', to='reman.EcuType'),
        ),
        # Free-text comments attached to a repair.
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.CharField(max_length=500, verbose_name='commentaire')),
                ('repair', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='reman.Repair')),
            ],
        ),
        # Link each batch to the ECU reference base it produces.
        migrations.AddField(
            model_name='batch',
            name='ecu_ref_base',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reman.EcuRefBase'),
        ),
    ]
| 7,751 | 2,287 |
from .core import Control, AssemblyContext, Trigger
from .utils import to_name
import numpy as np
from typing import List, Callable
class ContextRetriever:
    """Base callable for pulling a value out of an AssemblyContext.

    The base implementation returns None; subclasses override __call__.
    """
    def __call__(self, assembly_context: AssemblyContext):
        return None
class CopyFromParameter(ContextRetriever):
    """Retrieve a named parameter of another component, optionally transformed.

    Args:
        src_component: Component (or name) to read from; normalized via to_name.
        src_parameter: Name of the parameter to read.
        apply: Optional callable applied to the retrieved value.
    """
    def __init__(self, src_component, src_parameter, apply: Callable = None):
        self._src_component = to_name(src_component)
        self._src_parameter = src_parameter
        if apply is not None:
            assert callable(apply)
        # Always assign: the original only set _apply when apply was given,
        # so __call__ raised AttributeError whenever apply was omitted.
        self._apply = apply

    def __call__(self, assembly_context: AssemblyContext):
        x = assembly_context.get_parameter(self._src_component, self._src_parameter)
        if self._apply is not None:
            x = self._apply(x)
        return x
class CopyFromHistory(ContextRetriever):
    """Retrieve a past value of another component, optionally transformed.

    Args:
        src_component: Component (or name) to query; normalized via to_name.
        lag: How far back in the component's history to read.
        apply: Optional callable applied to the retrieved value.
    """
    def __init__(self, src_component, lag, apply: Callable = None):
        self._src_component = to_name(src_component)
        self._lag = lag
        if apply is not None:
            assert callable(apply)
        # Always assign: the original only set _apply when apply was given,
        # so __call__ raised AttributeError whenever apply was omitted.
        self._apply = apply

    def __call__(self, assembly_context: AssemblyContext):
        x = assembly_context.query(self._src_component, self._lag)
        if self._apply is not None:
            x = self._apply(x)
        return x
class CopyFromCounter(ContextRetriever):
    """Retrieve a named counter of another component, optionally transformed.

    Args:
        src_component: Component (or name) to read from; normalized via to_name.
        src_counter: Name of the counter to read.
        apply: Optional callable applied to the retrieved value.
    """
    def __init__(self, src_component, src_counter, apply: Callable = None):
        self._src_component = to_name(src_component)
        self._src_counter = src_counter
        if apply is not None:
            assert callable(apply)
        # Always assign: the original only set _apply when apply was given,
        # so __call__ raised AttributeError whenever apply was omitted.
        self._apply = apply

    def __call__(self, assembly_context: AssemblyContext):
        x = assembly_context.read_counter(self._src_component, self._src_counter)
        if self._apply is not None:
            x = self._apply(x)
        return x
class UpdateParametersControl(Control):
    """Control that, when triggered, randomly picks one entry from
    update_choices (with probabilities p) and applies its updates.

    Each choice is either None (no-op) or a list of
    (component, parameter, value) tuples; a ContextRetriever value is
    resolved against the assembly context at apply time.
    """
    def __init__(self, name, behavior, when, trigger: Trigger,
                 update_choices: List, p: List = None,
                 priority=0
                 ):
        def choose_and_update(assembly_context: AssemblyContext,
                              update_choices: List, p=None):
            # Sample one choice index (uniform when p is None).
            choice_idx = np.random.choice(np.arange(len(update_choices)), p=p)
            choice = update_choices[choice_idx]
            if choice is not None:
                for param_tuple in choice:
                    component, parameter, value = param_tuple
                    if isinstance(value, ContextRetriever):
                        value = value(assembly_context)
                    assembly_context.set_parameter(component, parameter, value)
        super().__init__(name, behavior, when, trigger,
                         action=choose_and_update,
                         action_parameters=dict(update_choices=update_choices, p=p),
                         priority=priority)
class ResetCounterControl(Control):
    """Control that resets a named counter on a component when triggered."""
    def __init__(self, name, behavior, when, trigger: Trigger,
                 component, counter,
                 priority=0
                 ):
        def reset_counter(assembly_context: AssemblyContext,
                          component, counter):
            assembly_context.reset_counter(component, counter)
        super().__init__(name, behavior, when, trigger,
                         action=reset_counter,
                         action_parameters=dict(component=component,
                                                counter=counter),
                         priority=priority)
class IncrementCounterControl(Control):
    """Control that increments a named counter on a component (by `increment`,
    default 1) when triggered."""
    def __init__(self, name, behavior, when, trigger: Trigger,
                 component, counter, increment=1,
                 priority=0
                 ):
        def increment_counter(assembly_context: AssemblyContext,
                              component, counter, increment):
            assembly_context.increment_counter(component, counter, increment)
        super().__init__(name, behavior, when, trigger,
                         action=increment_counter,
                         action_parameters=dict(component=component,
                                                counter=counter,
                                                increment=increment),
                         priority=priority)
| 4,236 | 1,047 |
import importlib
from django.conf import settings # noqa
from django.core.exceptions import ImproperlyConfigured
from appconf import AppConf
def load_path_attr(path):
    """Import the module part of a dotted *path* and return its final attribute.

    Args:
        path: Dotted path such as "pkg.module.attr".

    Returns:
        The attribute named by the last path segment.

    Raises:
        ImproperlyConfigured: If the module cannot be imported or does not
            define the requested attribute.
    """
    i = path.rfind(".")
    module, attr_name = path[:i], path[i + 1:]
    try:
        mod = importlib.import_module(module)
    except ImportError as e:
        # Chain the original error so the real import failure stays visible.
        raise ImproperlyConfigured(f"Error importing {module}: '{e}'") from e
    try:
        return getattr(mod, attr_name)
    except AttributeError as e:
        raise ImproperlyConfigured(f"Module '{module}' does not define a '{attr_name}'") from e
class WikiAppConf(AppConf):
    """Settings for pinax-wiki (prefix PINAX_WIKI_*): dotted paths for the
    binders, hookset, and parser are resolved to objects at configure time."""

    BINDERS = [
        "pinax.wiki.binders.DefaultBinder"
    ]
    IP_ADDRESS_META_FIELD = "HTTP_X_FORWARDED_FOR"
    HOOKSET = "pinax.wiki.hooks.WikiDefaultHookset"
    PARSE = "pinax.wiki.parsers.creole_parse"

    class Meta:
        prefix = "pinax_wiki"

    def configure_binders(self, value):
        # Instantiate each binder class named by its dotted path.
        return [load_path_attr(path)() for path in value]

    def configure_hookset(self, value):
        return load_path_attr(value)()

    def configure_parse(self, value):
        return load_path_attr(value)
| 1,172 | 396 |
from django.db.models import Q
from django.views.generic.edit import CreateView
from django.views.generic.list import ListView
from carbon.compounds.page.views import PageDetail as BasePageDetail
from carbon.compounds.page.views import PageTagView as BasePageTagView
from carbon.compounds.page.views import PageBlockView as BasePageBlockView
from carbon.atoms.views.abstract import *
from carbon.atoms.views.content import *
from .models import *
from .forms import ProductCreateForm
from kathisattic.apps.media.models import Image
class ProductCreateView(ObjectTemplateResponseMixin, CreateView):
    """Product creation view: wraps the uploaded raw image in an Image
    record before saving the product."""
    model = Product
    success_message = "%(name)s was created successfully"
    form_class = ProductCreateForm
    def get_template_names(self):
        return ['product-add']
    def form_valid(self, form):
        # Persist the uploaded file as an Image and attach it to the product
        # instance before the normal save.
        img = Image(image=form.cleaned_data['raw_image'])
        img.save()
        form.instance.image = img
        return super(ProductCreateView, self).form_valid(form)
class ProductDetail(BasePageBlockView, BasePageDetail):
    """Product detail page, reusing the base page-detail/block machinery."""
    model = Product
    def get_template_names(self):
        return ['product-detail']
    def get_template(self):
        # Per-object template override used by the base views.
        return self.object.template
class ProductListView(ObjectTemplateResponseMixin, ListView):
    """List of for-sale products, optionally filtered by ?tag= or ?q=."""
    model = Product
    tag = None
    search_filter = None

    def get_template_names(self):
        return ['product-list']

    def get_queryset(self):
        """Return for-sale products filtered by tag or free-text search.

        Returns None when the requested tag does not exist (templates are
        expected to handle an empty object list).
        """
        tag_filter = self.request.GET.get('tag', None)
        search_filter = self.request.GET.get('q', None)
        products = Product.objects.filter(sale_status=Product.FOR_SALE)
        if tag_filter:
            try:
                tag = ProductTag.objects.get(slug=tag_filter)
            except ProductTag.DoesNotExist:
                # Narrowed from a bare `except:` so programming errors are no
                # longer silently swallowed; unknown tag -> no results.
                return None
            self.tag = tag
            return products.filter(tags__in=[tag])
        elif search_filter:
            self.search_filter = search_filter
            # Match any of the text-bearing fields.
            return products.filter(
                Q(content__icontains=search_filter) |
                Q(title__icontains=search_filter) |
                Q(key__icontains=search_filter) |
                Q(synopsis__icontains=search_filter)
            )
        else:
            return products

    def get_context_data(self, **kwargs):
        context = super(ProductListView, self).get_context_data(**kwargs)
        context['tag'] = self.tag
        context['search_filter'] = self.search_filter
        return context
class ProductTagView(BasePageTagView):
    # Tag landing page: base page-tag view bound to the product tag model.
    model = ProductTag
| 2,471 | 693 |
#!/usr/bin/python
# Filename: datastruct_print_tuple.py
# Demonstrates %-formatting with a tuple and with a single value.

age = 25
name = 'Airgis'

# Parenthesized print calls are valid under both Python 2 and Python 3
# (the original `print '...'` statements only parsed under Python 2).
print('%s is %d years old.' % (name, age))
print('Why is %s playing with the python?' % name)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 18 09:08:20 2022
A script to plot mean daily cores for intercomparison of features as a function of time through a season.
@author: michaeltown
"""
#libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import datetime as dt
import pickle as pkl
from scipy.signal import find_peaks
import figureMagic as fm
# useful stuff
# symbols used in plot labels (LaTeX)
d18Osym = '$\delta^{18}$O'
dDsym = '$\delta$D'
pptsym = 'ppt' # '\textperthousand'
# read data from files
fileLoc = '/home/michaeltown/work/projects/snowiso/data/EastGRIP/isotopes/';
figureLoc ='/home/michaeltown/work/projects/snowiso/figures/EastGRIP/'
fileNameIso = 'eastGRIP_SCisoData_2016-2019_acc_peaks.pkl'
df_iso = pd.read_pickle(fileLoc+fileNameIso);
# plot average of each day (1-5 positions) as separate subplots, with std, num
# this groupby gives a multiindex data frame.
columnsToProcess = ['d18O','dD','dexcess','dxsln']
yearUnique = df_iso.year.unique();
# Per (regularized depth, date) group: mean, plus std/max/min/count columns
# for each isotope quantity.
df_iso_pos = df_iso.groupby(['depthAcc_reg','date'])[columnsToProcess].mean()
df_iso_pos[['d18O_std','dD_std','dexcess_std','dxsln_std']] = df_iso.groupby(['depthAcc_reg','date'])[columnsToProcess].std()
df_iso_pos[['d18O_max','dD_max','dexcess_max','dxsln_max']] = df_iso.groupby(['depthAcc_reg','date'])[columnsToProcess].max()
df_iso_pos[['d18O_min','dD_min','dexcess_min','dxsln_min']] = df_iso.groupby(['depthAcc_reg','date'])[columnsToProcess].min()
df_iso_pos[['d18O_num','dD_num','dexcess_num','dxsln_num']] = df_iso.groupby(['depthAcc_reg','date'])[columnsToProcess].count()
df_iso_pos = df_iso_pos.reset_index(level = 0); # should reset the index to date and leave depthAcc_reg as a column
df_iso_pos['year'] = df_iso_pos.index.year
df_iso_pos['dates'] = df_iso_pos.index.date
# NOTE(review): fileLoc is cleared right after loading -- looks like a
# leftover; confirm nothing below still needs the data path.
fileLoc = None;
for y in yearUnique[1:]:
dates = df_iso_pos[df_iso_pos.year == y].dates.unique()
dates.sort()
for d in dates:
figureLocTemp = figureLoc + str(y) +'/'
num = df_iso_pos[df_iso_pos.dates == d].d18O_num
iso = df_iso_pos[df_iso_pos.dates == d].d18O
lbstd = df_iso_pos[df_iso_pos.dates == d].d18O-df_iso_pos[df_iso_pos.dates == d].d18O_std;
ubstd = df_iso_pos[df_iso_pos.dates == d].d18O+df_iso_pos[df_iso_pos.dates == d].d18O_std;
lbmin = df_iso_pos[df_iso_pos.dates == d].d18O_min;
ubmax = df_iso_pos[df_iso_pos.dates == d].d18O_max;
depth = df_iso_pos[df_iso_pos.dates == d].depthAcc_reg;
fm.myDepthFunc(iso,-depth,num,'black',lbstd,ubstd,lbmin,ubmax,'EastGRIP ' + str(d) + ' '+d18Osym+' profile',
'd18O','depth (cm)',[-50,-20],[-100,15],figureLocTemp,'prof_d18O_dailyMean_'+str(d));
num = df_iso_pos[df_iso_pos.dates == d].dD_num
iso = df_iso_pos[df_iso_pos.dates == d].dD
lbstd = df_iso_pos[df_iso_pos.dates == d].dD-df_iso_pos[df_iso_pos.dates == d].dD_std;
ubstd = df_iso_pos[df_iso_pos.dates == d].dD+df_iso_pos[df_iso_pos.dates == d].dD_std;
lbmin = df_iso_pos[df_iso_pos.dates == d].dD_min;
ubmax = df_iso_pos[df_iso_pos.dates == d].dD_max;
depth = df_iso_pos[df_iso_pos.dates == d].depthAcc_reg;
fm.myDepthFunc(iso,-depth,num,'blue',lbstd,ubstd,lbmin,ubmax,'EastGRIP ' + str(d) + ' '+dDsym+' profile',
'dD','depth (cm)',[-380,-150],[-100,15],figureLocTemp,'prof_dD_dailyMean_'+str(d));
num = df_iso_pos[df_iso_pos.dates == d].dexcess_num
iso = df_iso_pos[df_iso_pos.dates == d].dexcess
lbstd = df_iso_pos[df_iso_pos.dates == d].dexcess-df_iso_pos[df_iso_pos.dates == d].dexcess_std;
ubstd = df_iso_pos[df_iso_pos.dates == d].dexcess+df_iso_pos[df_iso_pos.dates == d].dexcess_std;
lbmin = df_iso_pos[df_iso_pos.dates == d].dexcess_min;
ubmax = df_iso_pos[df_iso_pos.dates == d].dexcess_max;
depth = df_iso_pos[df_iso_pos.dates == d].depthAcc_reg;
fm.myDepthFunc(iso,-depth,num,'lightblue',lbstd,ubstd,lbmin,ubmax,'EastGRIP ' + str(d) + ' dxs profile',
'dexcess','depth (cm)',[-10,30],[-100,15],figureLocTemp,'prof_dxs_dailyMean_'+str(d));
num = df_iso_pos[df_iso_pos.dates == d].dxsln_num
iso = df_iso_pos[df_iso_pos.dates == d].dxsln
lbstd = df_iso_pos[df_iso_pos.dates == d].dxsln-df_iso_pos[df_iso_pos.dates == d].dxsln_std;
ubstd = df_iso_pos[df_iso_pos.dates == d].dxsln+df_iso_pos[df_iso_pos.dates == d].dxsln_std;
lbmin = df_iso_pos[df_iso_pos.dates == d].dxsln_min;
ubmax = df_iso_pos[df_iso_pos.dates == d].dxsln_max;
depth = df_iso_pos[df_iso_pos.dates == d].depthAcc_reg;
fm.myDepthFunc(iso,-depth,num,'deepskyblue',lbstd,ubstd,lbmin,ubmax,'EastGRIP ' + str(d) + ' dxsln profile',
'dxsln','depth (cm)',[-5,35],[-100,15],figureLocTemp,'prof_dxsln_dailyMean_'+str(d));
# plot the difference of from the mean as an evolution in time, contour plot of changes...
# need to develop a df of the mean iso values with column names as the date, and the index as the depth.
# iso18Omean = | 5,124 | 2,131 |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 17 23:27:28 2019
@author: Winham
Network test: evaluate the trained sleep-staging model on the held-out set.
"""
import os
import numpy as np
from keras.models import load_model
from keras.utils import to_categorical
from data_preprocess import *
import mit_utils as utils
import time
import matplotlib.pyplot as plt
import tensorflow_addons as tfa

target_class = ['W', 'N1', 'N2', 'N3', 'REM']  # sleep-stage labels
target_sig_length = 3072

tic = time.time()
trainX, trainY, TestX, TestY = dataload('channel0.npz')
toc = time.time()

# Empirically counted stage-transition matrix (row = previous stage, in
# W/N1/N2/N3/REM order — assumed from target_class; TODO confirm).
markov_matrix = [[66927., 3996., 179., 6., 86.],
                 [2252., 17891., 4269., 9., 753.],
                 [1271., 2262., 80861., 3546., 1043.],
                 [179., 113., 3247., 15892., 23.],
                 [565., 912., 427., 1., 32279.]]
markov_matrix = np.array(markov_matrix)
# Compress the dynamic range (log2 then cube), then normalize each row by its
# maximum so the most likely transition per row has weight 1.0.
# Fix: the original loop bound a local named `max`, shadowing the builtin;
# the vectorized form below is equivalent and avoids that.
markov_matrix = np.log2(markov_matrix) ** 3
markov_matrix /= markov_matrix.max(axis=1, keepdims=True)
print('Time for data processing--- '+str(toc-tic)+' seconds---')
model_name = 'myNet.h5'
model = load_model(model_name)
pred_vt = model.predict(TestX, batch_size=256, verbose=1)  # class probabilities
pred_v = np.argmax(pred_vt, axis=1)  # predicted stage index per epoch
true_v = np.argmax(TestY, axis=1)    # ground-truth stage index per epoch
def weight_decay(order):
    """Return ``order`` geometrically decaying look-ahead weights.

    Element ``i`` is ``4**-i`` — i.e. ``[1, 0.25, 0.0625, ...]`` — so nearer
    future frames count more than distant ones.

    :param order: number of weights to produce (0 gives an empty list)
    :return: list of numbers, first element 1 when order > 0
    """
    return [4 ** -i for i in range(order)]
# Post-process the framewise predictions with a Markov-prior smoothing pass.
# NOTE(review): indentation reconstructed from a whitespace-mangled source —
# as written, the re-scoring runs for every frame (factor stays 1 when no
# transition was predicted); confirm against the original file.
order = 6  # number of future frames consulted when judging a transition
weight = weight_decay(order)  # geometric weights 4**-j for the look-ahead
for i in range(1,len(pred_vt)-order):
    # `factor` scales the prior for *staying* in the previous stage.
    factor = 1
    if pred_v[i-1] != pred_v[i]:
        # A transition was predicted: poll the next `order` frames.
        for j in range(1,order+1):
            if pred_v[i+j] == pred_v[i-1]:
                # Future agrees with the previous stage -> boost staying put.
                factor += weight[j-1]*2.1
            elif pred_v[i+j] == pred_v[i]:
                # Future agrees with the new stage -> weaken the stay prior.
                factor -= 0.55 * weight[j-1]
    if factor < 0.1:
        factor = 0.1  # clamp so the stay prior never collapses entirely
    # Re-score frame i: network probabilities times the (scaled) transition row.
    vector = markov_matrix[pred_v[i - 1]].copy()
    vector[pred_v[i-1]] *= factor
    re_pred = pred_vt[i] * vector
    # print(re_pred)
    pred_v[i] = np.argmax(re_pred)
# f1 = 3.1
# f2 = 0.45
# for i in range(1,len(pred_vt)-1):
#     if pred_v[i-1] != pred_v[i]:
#         if pred_v[i-1] == pred_v[i+1]:
#             factor = f1
#         elif pred_v[i] == pred_v[i+1]:
#             factor = f2
#         else:
#             factor = 1
#         # print(pred_vt[i])
#         vector = markov_matrix[pred_v[i - 1]].copy()
#         vector[pred_v[i-1]] *= factor
#         re_pred = pred_vt[i] * vector
#         # print(re_pred)
#         pred_v[i] = np.argmax(re_pred)
# Report results: confusion matrix figure plus printed metrics.
utils.plot_confusion_matrix(true_v, pred_v, np.array(target_class))
utils.print_results(true_v, pred_v, target_class)
plt.savefig('cm.png')
# pred_v = pred_v[:10000]
# pred_v.resize((100,100))
# plt.subplot(121)
# plt.matshow(pred_v, cmap = plt.cm.Blues)
# plt.savefig('cm_pred.png')
#
# true_v = true_v[:10000]
# true_v.resize((100,100))
# plt.subplot(122)
# plt.matshow(true_v, cmap = plt.cm.Blues)
# plt.savefig('cm_true.png')
# content of ./test_smtpsimple.py
import pytest
@pytest.fixture
def smtp():
    """Provide a live SMTP connection to Gmail (network-dependent, 5 s timeout)."""
    import smtplib
    server = smtplib.SMTP(host="smtp.gmail.com", port=587, timeout=5)
    return server
def test_ehlo(smtp):
response, msg = smtp.ehlo()
assert response == 250
| 235 | 96 |
'''
check
'''
groups = [0, 0, 0, 0]
n = int(input())
data = input()
carsnum = 0
for i in range(4):
groups[i] = data.count(str(i+1))
# deal with 4 people group
carsnum += groups[3]
groups[3] = 0
# deal with 2 people group
carsnum += groups[1] // 2
groups[1] %= 2
# deal with 1 and 3 people group
if groups[0] <= groups[2]:
carsnum += groups[0]
groups[2] -= groups[0]
groups[0] = 0
# deal with the 3 people group left
carsnum += groups[2]
if groups[1] != 0:
carsnum += 1
else:
carsnum += groups[2]
groups[0] -= groups[2]
groups[2] = 0
# deal with the 1 people group left
carsnum += groups[0] // 4
groups[0] %= 4
if groups[1] == 0:
if groups[0] != 0:
carsnum += 1
else:
# 2 people group has 1 group
if groups[0] == 3:
carsnum += 2
else:
carsnum += 1
print(carsnum) | 902 | 358 |
import abc
import importlib
import logging
log = logging.getLogger("MPP-Solar")

# Port-type discriminators returned by AbstractDevice.get_port_type().
SERIAL_TYPE_TEST = 1
SERIAL_TYPE_USB = 2
SERIAL_TYPE_ESP32 = 4
SERIAL_TYPE_SERIAL = 8


class AbstractDevice(metaclass=abc.ABCMeta):
    """
    Abstract device class: resolves the IO transport and protocol for a
    device from string identifiers; subclasses implement the command API.
    """

    def __init__(self, *args, **kwargs):
        self._protocol = None        # instantiated protocol object
        self._protocol_class = None  # protocol class resolved from its module
        self._port = None            # IO transport chosen by set_port()

    def is_test_device(self, serial_device):
        """True if the device path requests the dummy test transport."""
        return "test" in serial_device.lower()

    def is_directusb_device(self, serial_device):
        """
        Determine if this instance is using direct USB connection
        (instead of a serial connection)
        """
        if not serial_device:
            return False
        if "hidraw" in serial_device:
            log.debug("Device matches hidraw")
            return True
        if "mppsolar" in serial_device:
            log.debug("Device matches mppsolar")
            return True
        return False

    def is_ESP32_device(self, serial_device):
        """True if the device path looks like an ESP32 bridge."""
        return "esp" in serial_device.lower()

    def get_port_type(self, port):
        """Classify `port` into one of the SERIAL_TYPE_* constants."""
        if self.is_test_device(port):
            return SERIAL_TYPE_TEST
        elif self.is_directusb_device(port):
            return SERIAL_TYPE_USB
        elif self.is_ESP32_device(port):
            return SERIAL_TYPE_ESP32
        else:
            return SERIAL_TYPE_SERIAL

    def _clear_protocol(self):
        """Reset protocol state (shared by the three failure paths below)."""
        self._protocol = None
        self._protocol_class = None

    def set_protocol(self, protocol=None):
        """
        Set the protocol for this device
        """
        # Lazy %-style args: the message is only formatted if DEBUG is enabled.
        log.debug("device.set_protocol with protocol %s", protocol)
        if protocol is None:
            self._clear_protocol()
            return
        protocol_id = protocol.lower()
        # Try to import the protocol module with the supplied name (may not exist)
        try:
            proto_module = importlib.import_module(
                "mppsolar.protocols." + protocol_id, "."
            )
        except ModuleNotFoundError:
            log.error("No module found for protocol %s", protocol_id)
            self._clear_protocol()
            return
        # Find the protocol class - classname must be the same as the protocol_id
        try:
            self._protocol_class = getattr(proto_module, protocol_id)
        except AttributeError:
            log.error("Module %s has no attribute %s", proto_module, protocol_id)
            self._clear_protocol()
            return
        # Instantiate the class
        # TODO: fix protocol instantiate
        self._protocol = self._protocol_class(
            "init_var", proto_keyword="value", second_keyword=123
        )

    def set_port(self, port=None):
        """Build and store the IO transport matching the port path.

        Transport modules are imported lazily so only the selected backend's
        dependencies are required.
        """
        port_type = self.get_port_type(port)
        if port_type == SERIAL_TYPE_TEST:
            log.info("Using testio for communications")
            from mppsolar.io.testio import TestIO
            self._port = TestIO()
        elif port_type == SERIAL_TYPE_USB:
            log.info("Using hidrawio for communications")
            from mppsolar.io.hidrawio import HIDRawIO
            self._port = HIDRawIO(device_path=port)
        elif port_type == SERIAL_TYPE_ESP32:
            log.info("Using esp32io for communications")
            from mppsolar.io.esp32io import ESP32IO
            self._port = ESP32IO(device_path=port)
        elif port_type == SERIAL_TYPE_SERIAL:
            log.info("Using serialio for communications")
            from mppsolar.io.serialio import SerialIO
            self._port = SerialIO(serial_port=port, serial_baud=2400)
        else:
            self._port = None

    @abc.abstractmethod
    def run_command(self, command=None, show_raw=False):
        raise NotImplementedError

    @abc.abstractmethod
    def get_status(self, show_raw):
        raise NotImplementedError

    @abc.abstractmethod
    def get_settings(self, show_raw):
        raise NotImplementedError

    def run_default_command(self, show_raw):
        """Run the protocol's DEFAULT_COMMAND (requires set_protocol first)."""
        return self.run_command(
            command=self._protocol.DEFAULT_COMMAND, show_raw=show_raw
        )
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponseRedirect
from .forms import Predict
from .ML_ALGORITHM import you
import numpy
def index(request):
    """Render the landing page."""
    template = 'autism/home.html'
    return render(request, template)
def predict(request):
    """Render the prediction questionnaire page."""
    template = 'autism/predict.html'
    return render(request, template)
def predicted(request):
    """Run the autism prediction on posted questionnaire answers.

    POST: builds the 8-value feature vector (type1-5 and type8 are integer
    coded, type6-7 are continuous), calls the ML model, and renders the result
    chart context. GET: renders the empty form.

    Fixes vs. the original: the local named ``list`` no longer shadows the
    builtin, the eight manual appends are collapsed, and the debug ``print``
    is removed.
    """
    if request.method == "POST":
        form = Predict(request.POST)  # kept for parity; NOTE(review): never validated/used
        features = [int(request.POST['type%d' % i]) for i in range(1, 6)]
        features.append(float(request.POST['type6']))
        features.append(float(request.POST['type7']))
        features.append(int(request.POST['type8']))
        prediction = you.getPrediction(features)
        yes = prediction[0]
        chart_values = [yes, 100 - yes]
        context = {
            'zipped_list': zip(prediction),
            'list': chart_values,
            'label': ['yes', 'no'],
        }
        return render(request, 'autism/predicted.html', context)
    form = Predict()
    return render(request, 'autism/predicted.html', {'form': form})
def restapi(request):
    """GET variant of :func:`predicted` taking value1..value8 query params.

    Fix: the original passed the raw query-string values (strings) straight to
    the model, unlike ``predicted`` which casts them; values are now converted
    with the same int/float scheme (value6/value7 are continuous). Missing
    parameters default to -1, as before.
    """
    features = []
    for i in range(1, 9):
        raw = request.GET.get('value%d' % i, -1)
        cast = float if i in (6, 7) else int
        features.append(cast(raw))
    prediction = you.getPrediction(features)  # avoid shadowing builtin `list`
    yes = prediction[0]
    chart_values = [yes, 100 - yes]
    context = {
        'zipped_list': zip(prediction),
        'list': chart_values,
        'label': ['yes', 'no'],
    }
    return render(request, 'autism/predicted.html', context)
import pytest
from model_wrappers.util.datatypes import ImmutableDict
# Shared module-level fixtures: a plain dict and the immutable wrapper under test.
d = {
    'a': 0,
    'b': 'x',
}
im = ImmutableDict(d)
def test_str():
    """repr/str of the wrapper must match the wrapped dict exactly."""
    for convert in (repr, str):
        assert convert(im) == convert(d)
def test_items():
    """Key/value/item views and iteration mirror the wrapped dict."""
    expected = {'a': 0, 'b': 'x'}
    assert set(im.keys()) == set(expected)
    assert set(im.values()) == set(expected.values())
    assert set(im.items()) == set(expected.items())
    assert set(im) == set(expected)
def test_collection():
    """len() and membership behave like the wrapped dict."""
    assert len(im) == 2
    assert 'a' in im
    assert ('c' in im) is False
def test_dict():
    """Indexing and .get() match dict semantics; missing keys raise KeyError."""
    assert im['a'] == 0 and im.get('a') == 0
    with pytest.raises(KeyError):
        _ = im['c']
    assert im.get('c') is None
def test_hashable():
    """Plain dicts are unhashable, the immutable wrapper must be hashable."""
    with pytest.raises(TypeError):
        hash(d)
    hash(im)
| 724 | 296 |
import time
import Generation_Stage
import Evaluate_lle
import Evaluate_pca
import Evaluation_Stage
print("**********************************************************************")
print("Hello. This is CSE569 Project Demo, produced by Haisi Yi and Zheng Xia")
print("**********************************************************************\n\n")
# Interactive menu loop; each choice dispatches to one pipeline stage.
while True:
    option = input("\nPlease specify the task to perform:\n"
                   "1: Generate five artificial dataset and read MNIST_images dataset\n"
                   "2: Perform PCA to all artificial dataset and MNIST_images dataset\n"
                   "3: Perform LLE 11 * 6 times, using parameter k = 5, 6, ..., 15, to all artificial dataset and MNIST_images dataset\n"
                   "4: Do task 1, task 2 and task 3. This task will take about 20 min\n"
                   "5: Evaluate the data produced by PCA. This task will take about 40 min.\n"
                   "6: Evaluate the data produced by LLE. This task will take about 40 min.\n"
                   "7: Run everything. This will take about 8 hours.\n"
                   "0: Exit this Demo\n")
    # Robustness fix: non-numeric input used to crash the loop with ValueError.
    try:
        option = int(option)
    except ValueError:
        print("Invalid option, try again")
        continue
    if option == 1:
        Generation_Stage.generate_original_datasets()
    elif option == 2:
        Generation_Stage.perform_pca_to_original_datasets()
    elif option == 3:
        Generation_Stage.perform_lle_to_orginal_datasets()
    elif option == 4:
        Generation_Stage.run()
    elif option == 5:
        Evaluate_pca.run()
    elif option == 6:
        Evaluate_lle.run()
    elif option == 7:
        Evaluation_Stage.run()
        # NOTE(review): only option 7 exits after running — confirm intended.
        break
    elif option == 0:
        break
    else:
        print("Invalid option, try again")
import logging
from django.core.handlers.wsgi import WSGIRequest
from django.http import JsonResponse
from django.views.defaults import server_error, page_not_found
from rest_framework import status
from medical_peek_core.model.j_send import JSend, JSendSerializer
from medical_peek_core.utility.exception_utility import ExceptionUtility
logger = logging.getLogger(__name__)
def rest_exception_handler(exception, context):
    """
    Exception handler utilized by the Django Rest Framework
    The exception handler will override the default implementation of the Django Rest Framework Exception handler if
    the "Accept" header of the request in the current context has "application/json" in its value. If this is true,
    a JSonResponse View will be returned to the user containing a JSend object that represents the exception.
    Note: when the Accept header does not request JSON this function implicitly
    returns None — presumably so DRF falls back to its default handling; confirm
    against the DRF EXCEPTION_HANDLER contract.
    :param exception: Exception that occurred
    :type exception: object
    :param context: Context of the exception (i.e. request)
    :type context: dict
    :return: JSonResponse View with JSend error if the Accept header of the request has a value of "application/json"
    :rtype: JsonResponse
    """
    if context.get('request', None) is not None \
            and 'application/json' in context.get('request').META.get('HTTP_ACCEPT', ''):
        logger.error("Unhandled exception!")
        logger.exception(exception)
        # Serialize the exception into a JSend envelope; is_valid(True) raises
        # if the JSend payload itself is malformed.
        j_send = ExceptionUtility.get_jsend_from_exception(exception)
        j_send_serializer = JSendSerializer(data = j_send.__dict__)
        j_send_serializer.is_valid(True)
        return JsonResponse(j_send_serializer.data, status = j_send.code)
def handler500(request, template_name = '500.html'):
    """
    Overrides the default Django implementation of a 500 error so that a JSon response will be provided if the accept
    header of the request has a value of "application/json". Otherwise the default server error implementation is
    called.
    To enable this handler, the DEBUG setting in the Django settings must be set to False
    :param request: Current Request
    :type request: WSGIRequest
    :param template_name: Template of the error page
    :type template_name: str
    :return: Response
    :rtype: object
    """
    wants_json = request is not None \
        and 'application/json' in request.META.get('HTTP_ACCEPT', '')
    if not wants_json:
        return server_error(request = request, template_name = template_name)
    logger.error("Unhandled exception!")
    payload = JSend()
    payload.status = JSend.Status.error
    payload.code = status.HTTP_500_INTERNAL_SERVER_ERROR
    payload.message = 'Unexpected API Server Error'
    serializer = JSendSerializer(data = payload.__dict__)
    serializer.is_valid(True)
    return JsonResponse(serializer.data, status = status.HTTP_500_INTERNAL_SERVER_ERROR)
def handler404(request, template_name = '404.html'):
    """
    Overrides the default Django implementation of a 404 error so that a JSon response will be provided if the accept
    header of the request has a value of "application/json". Otherwise the default server error implementation is
    called.
    To enable this handler, the DEBUG setting in the Django settings must be set to False
    :param request: Current Request
    :type request: WSGIRequest
    :param template_name: Template of the error page
    :type template_name: str
    :return: Response
    :rtype: object
    """
    # Consistency fix: guard against a None request exactly as handler500 does;
    # previously request.META would raise AttributeError for a None request.
    if request is not None and 'application/json' in request.META.get('HTTP_ACCEPT', ''):
        j_send = JSend()
        j_send.status = JSend.Status.error
        j_send.code = status.HTTP_404_NOT_FOUND
        j_send.message = 'Not found'
        j_send_serializer = JSendSerializer(data = j_send.__dict__)
        j_send_serializer.is_valid(True)
        return JsonResponse(j_send_serializer.data, status = j_send.code)
    return page_not_found(request, template_name)
| 3,868 | 1,085 |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
# Select the import strategy for the compiled _smg2s extension based on the
# running Python version. SWIG-generated; do not hand-edit (regenerate from
# the SWIG interface file instead).
if _swig_python_version_info >= (2, 7, 0):
    def swig_import_helper():
        # Prefer a package-relative import; fall back to a top-level module.
        import importlib
        pkg = __name__.rpartition('.')[0]
        mname = '.'.join((pkg, '_smg2s')).lstrip('.')
        try:
            return importlib.import_module(mname)
        except ImportError:
            return importlib.import_module('_smg2s')
    _smg2s = swig_import_helper()
    del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
    def swig_import_helper():
        # Legacy imp-based loading path for Python 2.6.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_smg2s', [dirname(__file__)])
        except ImportError:
            import _smg2s
            return _smg2s
        try:
            _mod = imp.load_module('_smg2s', fp, pathname, description)
        finally:
            if fp is not None:
                fp.close()
        return _mod
    _smg2s = swig_import_helper()
    del swig_import_helper
else:
    import _smg2s
del _swig_python_version_info
# Python 2/3 compatibility shims emitted by SWIG.
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
try:
    import builtins as __builtin__
except ImportError:
    import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    # SWIG attribute setter: routes writes through the generated
    # __swig_setmethods__ table; 'thisown'/'this' manage ownership of the
    # underlying C++ object. With static=1, unknown attributes are rejected.
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Dynamic variant: allows adding new Python-side attributes (static=0).
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
    # SWIG attribute getter: resolves reads via the generated
    # __swig_getmethods__ table; 'thisown' queries C++ object ownership.
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
    # Shared __repr__ for all proxy classes; shows the wrapped C++ pointer
    # when available.
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Old-style vs new-style class detection (relevant only on ancient Python 2).
try:
    _object = object
    _newclass = 1
except __builtin__.Exception:
    class _object:
        pass
    _newclass = 0
class SwigPyIterator(_object):
    """Abstract base for SWIG-generated C++ iterator proxies.

    Auto-generated; do not hand-edit — regenerate from the SWIG interface
    file. All methods delegate to the compiled _smg2s extension.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _smg2s.delete_SwigPyIterator
    __del__ = lambda self: None
    def value(self):
        return _smg2s.SwigPyIterator_value(self)
    def incr(self, n=1):
        return _smg2s.SwigPyIterator_incr(self, n)
    def decr(self, n=1):
        return _smg2s.SwigPyIterator_decr(self, n)
    def distance(self, x):
        return _smg2s.SwigPyIterator_distance(self, x)
    def equal(self, x):
        return _smg2s.SwigPyIterator_equal(self, x)
    def copy(self):
        return _smg2s.SwigPyIterator_copy(self)
    def next(self):
        return _smg2s.SwigPyIterator_next(self)
    def __next__(self):
        return _smg2s.SwigPyIterator___next__(self)
    def previous(self):
        return _smg2s.SwigPyIterator_previous(self)
    def advance(self, n):
        return _smg2s.SwigPyIterator_advance(self, n)
    def __eq__(self, x):
        return _smg2s.SwigPyIterator___eq__(self, x)
    def __ne__(self, x):
        return _smg2s.SwigPyIterator___ne__(self, x)
    def __iadd__(self, n):
        return _smg2s.SwigPyIterator___iadd__(self, n)
    def __isub__(self, n):
        return _smg2s.SwigPyIterator___isub__(self, n)
    def __add__(self, n):
        return _smg2s.SwigPyIterator___add__(self, n)
    def __sub__(self, *args):
        return _smg2s.SwigPyIterator___sub__(self, *args)
    def __iter__(self):
        return self
# Register the proxy class with the C++ runtime type system.
SwigPyIterator_swigregister = _smg2s.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
def factorial(start, end):
    # Thin wrapper over the C routine; immediately rebound below.
    return _smg2s.factorial(start, end)
factorial = _smg2s.factorial  # rebind directly to the C function
def import_mpi4py():
    return _smg2s.import_mpi4py()
import_mpi4py = _smg2s.import_mpi4py  # rebind directly to the C function
class NilpotencyInt(_object):
    """SWIG proxy for the C++ NilpotencyInt type (int indices).

    Auto-generated; do not hand-edit — regenerate from the SWIG interface
    file. Attribute access is routed through the generated get/set tables.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, NilpotencyInt, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, NilpotencyInt, name)
    __repr__ = _swig_repr
    # C++-backed attributes exposed via the generated accessor tables.
    __swig_setmethods__["diagPosition"] = _smg2s.NilpotencyInt_diagPosition_set
    __swig_getmethods__["diagPosition"] = _smg2s.NilpotencyInt_diagPosition_get
    if _newclass:
        diagPosition = _swig_property(_smg2s.NilpotencyInt_diagPosition_get, _smg2s.NilpotencyInt_diagPosition_set)
    __swig_setmethods__["nbOne"] = _smg2s.NilpotencyInt_nbOne_set
    __swig_getmethods__["nbOne"] = _smg2s.NilpotencyInt_nbOne_get
    if _newclass:
        nbOne = _swig_property(_smg2s.NilpotencyInt_nbOne_get, _smg2s.NilpotencyInt_nbOne_set)
    __swig_setmethods__["matrix_size"] = _smg2s.NilpotencyInt_matrix_size_set
    __swig_getmethods__["matrix_size"] = _smg2s.NilpotencyInt_matrix_size_get
    if _newclass:
        matrix_size = _swig_property(_smg2s.NilpotencyInt_matrix_size_get, _smg2s.NilpotencyInt_matrix_size_set)
    __swig_setmethods__["nilpotency"] = _smg2s.NilpotencyInt_nilpotency_set
    __swig_getmethods__["nilpotency"] = _smg2s.NilpotencyInt_nilpotency_get
    if _newclass:
        nilpotency = _swig_property(_smg2s.NilpotencyInt_nilpotency_get, _smg2s.NilpotencyInt_nilpotency_set)
    __swig_setmethods__["setup"] = _smg2s.NilpotencyInt_setup_set
    __swig_getmethods__["setup"] = _smg2s.NilpotencyInt_setup_get
    if _newclass:
        setup = _swig_property(_smg2s.NilpotencyInt_setup_get, _smg2s.NilpotencyInt_setup_set)
    def __init__(self, *args):
        this = _smg2s.new_NilpotencyInt(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def NilpType1(self, num, size):
        return _smg2s.NilpotencyInt_NilpType1(self, num, size)
    def NilpType2(self, num, size):
        return _smg2s.NilpotencyInt_NilpType2(self, num, size)
    def NilpType3(self, diagP, num, size):
        return _smg2s.NilpotencyInt_NilpType3(self, diagP, num, size)
    __swig_destroy__ = _smg2s.delete_NilpotencyInt
    __del__ = lambda self: None
# Register the proxy class with the C++ runtime type system.
NilpotencyInt_swigregister = _smg2s.NilpotencyInt_swigregister
NilpotencyInt_swigregister(NilpotencyInt)
class parMatrixSparseRealDoubleInt(_object):
    """SWIG proxy for the C++ parMatrixSparse<double, int> type.

    Auto-generated; do not hand-edit — regenerate from the SWIG interface
    file. Every method delegates to the compiled _smg2s extension.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, parMatrixSparseRealDoubleInt, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, parMatrixSparseRealDoubleInt, name)
    __repr__ = _swig_repr
    # C++-backed CSR storage attributes exposed via the accessor tables.
    __swig_setmethods__["CSR_lloc"] = _smg2s.parMatrixSparseRealDoubleInt_CSR_lloc_set
    __swig_getmethods__["CSR_lloc"] = _smg2s.parMatrixSparseRealDoubleInt_CSR_lloc_get
    if _newclass:
        CSR_lloc = _swig_property(_smg2s.parMatrixSparseRealDoubleInt_CSR_lloc_get, _smg2s.parMatrixSparseRealDoubleInt_CSR_lloc_set)
    __swig_setmethods__["CSR_gloc"] = _smg2s.parMatrixSparseRealDoubleInt_CSR_gloc_set
    __swig_getmethods__["CSR_gloc"] = _smg2s.parMatrixSparseRealDoubleInt_CSR_gloc_get
    if _newclass:
        CSR_gloc = _swig_property(_smg2s.parMatrixSparseRealDoubleInt_CSR_gloc_get, _smg2s.parMatrixSparseRealDoubleInt_CSR_gloc_set)
    __swig_setmethods__["CSR_loc"] = _smg2s.parMatrixSparseRealDoubleInt_CSR_loc_set
    __swig_getmethods__["CSR_loc"] = _smg2s.parMatrixSparseRealDoubleInt_CSR_loc_get
    if _newclass:
        CSR_loc = _swig_property(_smg2s.parMatrixSparseRealDoubleInt_CSR_loc_get, _smg2s.parMatrixSparseRealDoubleInt_CSR_loc_set)
    __swig_setmethods__["dynmat_loc"] = _smg2s.parMatrixSparseRealDoubleInt_dynmat_loc_set
    __swig_getmethods__["dynmat_loc"] = _smg2s.parMatrixSparseRealDoubleInt_dynmat_loc_get
    if _newclass:
        dynmat_loc = _swig_property(_smg2s.parMatrixSparseRealDoubleInt_dynmat_loc_get, _smg2s.parMatrixSparseRealDoubleInt_dynmat_loc_set)
    def __init__(self, *args):
        this = _smg2s.new_parMatrixSparseRealDoubleInt(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _smg2s.delete_parMatrixSparseRealDoubleInt
    __del__ = lambda self: None
    # Map / layout queries.
    def GetXMap(self):
        return _smg2s.parMatrixSparseRealDoubleInt_GetXMap(self)
    def GetYMap(self):
        return _smg2s.parMatrixSparseRealDoubleInt_GetYMap(self)
    def GetComm(self):
        return _smg2s.parMatrixSparseRealDoubleInt_GetComm(self)
    def GetXLowerBound(self):
        return _smg2s.parMatrixSparseRealDoubleInt_GetXLowerBound(self)
    def GetYLowerBound(self):
        return _smg2s.parMatrixSparseRealDoubleInt_GetYLowerBound(self)
    def GetXUpperBound(self):
        return _smg2s.parMatrixSparseRealDoubleInt_GetXUpperBound(self)
    def GetYUpperBound(self):
        return _smg2s.parMatrixSparseRealDoubleInt_GetYUpperBound(self)
    def GetTrueLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseRealDoubleInt_GetTrueLocalSize(self, rs, cs)
    def GetLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseRealDoubleInt_GetLocalSize(self, rs, cs)
    def GetDynMatGLobLoc(self):
        return _smg2s.parMatrixSparseRealDoubleInt_GetDynMatGLobLoc(self)
    def GetDynMatGlobLoc(self):
        return _smg2s.parMatrixSparseRealDoubleInt_GetDynMatGlobLoc(self)
    def GetDynMatLoc(self):
        return _smg2s.parMatrixSparseRealDoubleInt_GetDynMatLoc(self)
    def GetCSRLocLoc(self):
        return _smg2s.parMatrixSparseRealDoubleInt_GetCSRLocLoc(self)
    def GetCSRGlobLoc(self):
        return _smg2s.parMatrixSparseRealDoubleInt_GetCSRGlobLoc(self)
    # Element insertion / access (global and local index variants).
    def AddValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealDoubleInt_AddValueLocal(self, row, col, value)
    def AddValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealDoubleInt_AddValuesLocal(self, nindex, rows, cols, values)
    def AddValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealDoubleInt_AddValue(self, row, col, value)
    def SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealDoubleInt_SetValueLocal(self, row, col, value)
    def SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealDoubleInt_SetValuesLocal(self, nindex, rows, cols, values)
    def SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealDoubleInt_SetValue(self, row, col, value)
    def GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseRealDoubleInt_GetLocalValue(self, row, col)
    def GetValue(self, row, col):
        return _smg2s.parMatrixSparseRealDoubleInt_GetValue(self, row, col)
    def glocPlusLloc(self):
        return _smg2s.parMatrixSparseRealDoubleInt_glocPlusLloc(self)
    def llocToGlocLoc(self):
        return _smg2s.parMatrixSparseRealDoubleInt_llocToGlocLoc(self)
    def MatView(self):
        return _smg2s.parMatrixSparseRealDoubleInt_MatView(self)
    def LOC_MatView(self):
        return _smg2s.parMatrixSparseRealDoubleInt_LOC_MatView(self)
    def Loc_SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealDoubleInt_Loc_SetValueLocal(self, row, col, value)
    def Loc_SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealDoubleInt_Loc_SetValuesLocal(self, nindex, rows, cols, values)
    def Loc_SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealDoubleInt_Loc_SetValue(self, row, col, value)
    def Loc_GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseRealDoubleInt_Loc_GetLocalValue(self, row, col)
    def Loc_GetValue(self, row, col):
        return _smg2s.parMatrixSparseRealDoubleInt_Loc_GetValue(self, row, col)
    # Whole-matrix operations.
    def SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseRealDoubleInt_SetDiagonal(self, diag)
    def Loc_SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseRealDoubleInt_Loc_SetDiagonal(self, diag)
    def MatScale(self, scale):
        return _smg2s.parMatrixSparseRealDoubleInt_MatScale(self, scale)
    def Loc_MatScale(self, scale):
        return _smg2s.parMatrixSparseRealDoubleInt_Loc_MatScale(self, scale)
    def Loc_MatAXPY(self, X, scale):
        return _smg2s.parMatrixSparseRealDoubleInt_Loc_MatAXPY(self, X, scale)
    def Loc_MatAYPX(self, X, scale):
        return _smg2s.parMatrixSparseRealDoubleInt_Loc_MatAYPX(self, X, scale)
    def ConvertToCSR(self):
        return _smg2s.parMatrixSparseRealDoubleInt_ConvertToCSR(self)
    def Loc_ConvertToCSR(self):
        return _smg2s.parMatrixSparseRealDoubleInt_Loc_ConvertToCSR(self)
    def ZeroEntries(self):
        return _smg2s.parMatrixSparseRealDoubleInt_ZeroEntries(self)
    def Loc_ZeroEntries(self):
        return _smg2s.parMatrixSparseRealDoubleInt_Loc_ZeroEntries(self)
    def MA(self, nilp, prod):
        return _smg2s.parMatrixSparseRealDoubleInt_MA(self, nilp, prod)
    def AM(self, nilp, prod):
        return _smg2s.parMatrixSparseRealDoubleInt_AM(self, nilp, prod)
# Register the proxy class with the C++ runtime type system.
parMatrixSparseRealDoubleInt_swigregister = _smg2s.parMatrixSparseRealDoubleInt_swigregister
parMatrixSparseRealDoubleInt_swigregister(parMatrixSparseRealDoubleInt)
def smg2sRealDoubleInt(probSize, nilp, lbandwidth, spectrum, comm):
    # Thin wrapper over the C routine; immediately rebound below.
    return _smg2s.smg2sRealDoubleInt(probSize, nilp, lbandwidth, spectrum, comm)
smg2sRealDoubleInt = _smg2s.smg2sRealDoubleInt  # rebind directly to the C function
class parMatrixSparseRealDoubleLongInt(_object):
    """SWIG proxy for the C++ parMatrixSparse instantiation with real
    double values and long int indices (inferred from the class name).

    Machine-generated wrapper: every method delegates to the matching
    function in the compiled ``_smg2s`` extension. Regenerate with SWIG
    rather than editing by hand.
    """
    # SWIG dispatch tables mapping attribute names to C-level accessors.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, parMatrixSparseRealDoubleLongInt, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, parMatrixSparseRealDoubleLongInt, name)
    __repr__ = _swig_repr
    # Wrapped data members; also exposed as properties when _newclass is set.
    __swig_setmethods__["CSR_lloc"] = _smg2s.parMatrixSparseRealDoubleLongInt_CSR_lloc_set
    __swig_getmethods__["CSR_lloc"] = _smg2s.parMatrixSparseRealDoubleLongInt_CSR_lloc_get
    if _newclass:
        CSR_lloc = _swig_property(_smg2s.parMatrixSparseRealDoubleLongInt_CSR_lloc_get, _smg2s.parMatrixSparseRealDoubleLongInt_CSR_lloc_set)
    __swig_setmethods__["CSR_gloc"] = _smg2s.parMatrixSparseRealDoubleLongInt_CSR_gloc_set
    __swig_getmethods__["CSR_gloc"] = _smg2s.parMatrixSparseRealDoubleLongInt_CSR_gloc_get
    if _newclass:
        CSR_gloc = _swig_property(_smg2s.parMatrixSparseRealDoubleLongInt_CSR_gloc_get, _smg2s.parMatrixSparseRealDoubleLongInt_CSR_gloc_set)
    __swig_setmethods__["CSR_loc"] = _smg2s.parMatrixSparseRealDoubleLongInt_CSR_loc_set
    __swig_getmethods__["CSR_loc"] = _smg2s.parMatrixSparseRealDoubleLongInt_CSR_loc_get
    if _newclass:
        CSR_loc = _swig_property(_smg2s.parMatrixSparseRealDoubleLongInt_CSR_loc_get, _smg2s.parMatrixSparseRealDoubleLongInt_CSR_loc_set)
    __swig_setmethods__["dynmat_loc"] = _smg2s.parMatrixSparseRealDoubleLongInt_dynmat_loc_set
    __swig_getmethods__["dynmat_loc"] = _smg2s.parMatrixSparseRealDoubleLongInt_dynmat_loc_get
    if _newclass:
        dynmat_loc = _swig_property(_smg2s.parMatrixSparseRealDoubleLongInt_dynmat_loc_get, _smg2s.parMatrixSparseRealDoubleLongInt_dynmat_loc_set)
    def __init__(self, *args):
        # Create the underlying C++ object; the try/except is the SWIG idiom
        # for attaching the new pointer to this proxy instance.
        this = _smg2s.new_parMatrixSparseRealDoubleLongInt(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _smg2s.delete_parMatrixSparseRealDoubleLongInt
    __del__ = lambda self: None
    # Distribution / layout queries.
    def GetXMap(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_GetXMap(self)
    def GetYMap(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_GetYMap(self)
    def GetComm(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_GetComm(self)
    def GetXLowerBound(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_GetXLowerBound(self)
    def GetYLowerBound(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_GetYLowerBound(self)
    def GetXUpperBound(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_GetXUpperBound(self)
    def GetYUpperBound(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_GetYUpperBound(self)
    def GetTrueLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseRealDoubleLongInt_GetTrueLocalSize(self, rs, cs)
    def GetLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseRealDoubleLongInt_GetLocalSize(self, rs, cs)
    # NOTE: GetDynMatGLobLoc (capital "L") and GetDynMatGlobLoc are two
    # distinct generated entry points mirroring the C++ API; both kept.
    def GetDynMatGLobLoc(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_GetDynMatGLobLoc(self)
    def GetDynMatGlobLoc(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_GetDynMatGlobLoc(self)
    def GetDynMatLoc(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_GetDynMatLoc(self)
    def GetCSRLocLoc(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_GetCSRLocLoc(self)
    def GetCSRGlobLoc(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_GetCSRGlobLoc(self)
    # Element insertion / lookup.
    def AddValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealDoubleLongInt_AddValueLocal(self, row, col, value)
    def AddValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealDoubleLongInt_AddValuesLocal(self, nindex, rows, cols, values)
    def AddValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealDoubleLongInt_AddValue(self, row, col, value)
    def SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealDoubleLongInt_SetValueLocal(self, row, col, value)
    def SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealDoubleLongInt_SetValuesLocal(self, nindex, rows, cols, values)
    def SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealDoubleLongInt_SetValue(self, row, col, value)
    def GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseRealDoubleLongInt_GetLocalValue(self, row, col)
    def GetValue(self, row, col):
        return _smg2s.parMatrixSparseRealDoubleLongInt_GetValue(self, row, col)
    def glocPlusLloc(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_glocPlusLloc(self)
    def llocToGlocLoc(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_llocToGlocLoc(self)
    # Debug printing.
    def MatView(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_MatView(self)
    def LOC_MatView(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_LOC_MatView(self)
    # "Loc_" variants — presumably operate on the node-local matrix copy;
    # TODO confirm against the C++ smg2s sources.
    def Loc_SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealDoubleLongInt_Loc_SetValueLocal(self, row, col, value)
    def Loc_SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealDoubleLongInt_Loc_SetValuesLocal(self, nindex, rows, cols, values)
    def Loc_SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealDoubleLongInt_Loc_SetValue(self, row, col, value)
    def Loc_GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseRealDoubleLongInt_Loc_GetLocalValue(self, row, col)
    def Loc_GetValue(self, row, col):
        return _smg2s.parMatrixSparseRealDoubleLongInt_Loc_GetValue(self, row, col)
    # Whole-matrix operations.
    def SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseRealDoubleLongInt_SetDiagonal(self, diag)
    def Loc_SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseRealDoubleLongInt_Loc_SetDiagonal(self, diag)
    def MatScale(self, scale):
        return _smg2s.parMatrixSparseRealDoubleLongInt_MatScale(self, scale)
    def Loc_MatScale(self, scale):
        return _smg2s.parMatrixSparseRealDoubleLongInt_Loc_MatScale(self, scale)
    def Loc_MatAXPY(self, X, scale):
        return _smg2s.parMatrixSparseRealDoubleLongInt_Loc_MatAXPY(self, X, scale)
    def Loc_MatAYPX(self, X, scale):
        return _smg2s.parMatrixSparseRealDoubleLongInt_Loc_MatAYPX(self, X, scale)
    def ConvertToCSR(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_ConvertToCSR(self)
    def Loc_ConvertToCSR(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_Loc_ConvertToCSR(self)
    def ZeroEntries(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_ZeroEntries(self)
    def Loc_ZeroEntries(self):
        return _smg2s.parMatrixSparseRealDoubleLongInt_Loc_ZeroEntries(self)
    # MA/AM: matrix products with the nilpotent operator used by the
    # spectrum-generation recurrence (semantics defined in the C++ library).
    def MA(self, nilp, prod):
        return _smg2s.parMatrixSparseRealDoubleLongInt_MA(self, nilp, prod)
    def AM(self, nilp, prod):
        return _smg2s.parMatrixSparseRealDoubleLongInt_AM(self, nilp, prod)
# Register the proxy class with the SWIG runtime so C-side objects of this
# type come back wrapped as parMatrixSparseRealDoubleLongInt instances.
parMatrixSparseRealDoubleLongInt_swigregister = _smg2s.parMatrixSparseRealDoubleLongInt_swigregister
parMatrixSparseRealDoubleLongInt_swigregister(parMatrixSparseRealDoubleLongInt)
def smg2sRealDoubleLongInt(probSize, nilp, lbandwidth, spectrum, comm):
    # NOTE: this Python wrapper is immediately shadowed by the rebinding
    # below (SWIG fastproxy pattern); callers get the raw C function.
    return _smg2s.smg2sRealDoubleLongInt(probSize, nilp, lbandwidth, spectrum, comm)
smg2sRealDoubleLongInt = _smg2s.smg2sRealDoubleLongInt
class parMatrixSparseRealSingleInt(_object):
    """SWIG proxy for the C++ parMatrixSparse instantiation with real
    single-precision values and int indices (inferred from the class name).

    Machine-generated wrapper: every method delegates to the matching
    function in the compiled ``_smg2s`` extension. Regenerate with SWIG
    rather than editing by hand.
    """
    # SWIG dispatch tables mapping attribute names to C-level accessors.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, parMatrixSparseRealSingleInt, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, parMatrixSparseRealSingleInt, name)
    __repr__ = _swig_repr
    # Wrapped data members; also exposed as properties when _newclass is set.
    __swig_setmethods__["CSR_lloc"] = _smg2s.parMatrixSparseRealSingleInt_CSR_lloc_set
    __swig_getmethods__["CSR_lloc"] = _smg2s.parMatrixSparseRealSingleInt_CSR_lloc_get
    if _newclass:
        CSR_lloc = _swig_property(_smg2s.parMatrixSparseRealSingleInt_CSR_lloc_get, _smg2s.parMatrixSparseRealSingleInt_CSR_lloc_set)
    __swig_setmethods__["CSR_gloc"] = _smg2s.parMatrixSparseRealSingleInt_CSR_gloc_set
    __swig_getmethods__["CSR_gloc"] = _smg2s.parMatrixSparseRealSingleInt_CSR_gloc_get
    if _newclass:
        CSR_gloc = _swig_property(_smg2s.parMatrixSparseRealSingleInt_CSR_gloc_get, _smg2s.parMatrixSparseRealSingleInt_CSR_gloc_set)
    __swig_setmethods__["CSR_loc"] = _smg2s.parMatrixSparseRealSingleInt_CSR_loc_set
    __swig_getmethods__["CSR_loc"] = _smg2s.parMatrixSparseRealSingleInt_CSR_loc_get
    if _newclass:
        CSR_loc = _swig_property(_smg2s.parMatrixSparseRealSingleInt_CSR_loc_get, _smg2s.parMatrixSparseRealSingleInt_CSR_loc_set)
    __swig_setmethods__["dynmat_loc"] = _smg2s.parMatrixSparseRealSingleInt_dynmat_loc_set
    __swig_getmethods__["dynmat_loc"] = _smg2s.parMatrixSparseRealSingleInt_dynmat_loc_get
    if _newclass:
        dynmat_loc = _swig_property(_smg2s.parMatrixSparseRealSingleInt_dynmat_loc_get, _smg2s.parMatrixSparseRealSingleInt_dynmat_loc_set)
    def __init__(self, *args):
        # Create the underlying C++ object; the try/except is the SWIG idiom
        # for attaching the new pointer to this proxy instance.
        this = _smg2s.new_parMatrixSparseRealSingleInt(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _smg2s.delete_parMatrixSparseRealSingleInt
    __del__ = lambda self: None
    # Distribution / layout queries.
    def GetXMap(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetXMap(self)
    def GetYMap(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetYMap(self)
    def GetComm(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetComm(self)
    def GetXLowerBound(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetXLowerBound(self)
    def GetYLowerBound(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetYLowerBound(self)
    def GetXUpperBound(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetXUpperBound(self)
    def GetYUpperBound(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetYUpperBound(self)
    def GetTrueLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseRealSingleInt_GetTrueLocalSize(self, rs, cs)
    def GetLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseRealSingleInt_GetLocalSize(self, rs, cs)
    # NOTE: GetDynMatGLobLoc (capital "L") and GetDynMatGlobLoc are two
    # distinct generated entry points mirroring the C++ API; both kept.
    def GetDynMatGLobLoc(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetDynMatGLobLoc(self)
    def GetDynMatGlobLoc(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetDynMatGlobLoc(self)
    def GetDynMatLoc(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetDynMatLoc(self)
    def GetCSRLocLoc(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetCSRLocLoc(self)
    def GetCSRGlobLoc(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetCSRGlobLoc(self)
    # Element insertion / lookup.
    def AddValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleInt_AddValueLocal(self, row, col, value)
    def AddValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealSingleInt_AddValuesLocal(self, nindex, rows, cols, values)
    def AddValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleInt_AddValue(self, row, col, value)
    def SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleInt_SetValueLocal(self, row, col, value)
    def SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealSingleInt_SetValuesLocal(self, nindex, rows, cols, values)
    def SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleInt_SetValue(self, row, col, value)
    def GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseRealSingleInt_GetLocalValue(self, row, col)
    def GetValue(self, row, col):
        return _smg2s.parMatrixSparseRealSingleInt_GetValue(self, row, col)
    def glocPlusLloc(self):
        return _smg2s.parMatrixSparseRealSingleInt_glocPlusLloc(self)
    def llocToGlocLoc(self):
        return _smg2s.parMatrixSparseRealSingleInt_llocToGlocLoc(self)
    # Debug printing.
    def MatView(self):
        return _smg2s.parMatrixSparseRealSingleInt_MatView(self)
    def LOC_MatView(self):
        return _smg2s.parMatrixSparseRealSingleInt_LOC_MatView(self)
    # "Loc_" variants — presumably operate on the node-local matrix copy;
    # TODO confirm against the C++ smg2s sources.
    def Loc_SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_SetValueLocal(self, row, col, value)
    def Loc_SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_SetValuesLocal(self, nindex, rows, cols, values)
    def Loc_SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_SetValue(self, row, col, value)
    def Loc_GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_GetLocalValue(self, row, col)
    def Loc_GetValue(self, row, col):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_GetValue(self, row, col)
    # Whole-matrix operations.
    def SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseRealSingleInt_SetDiagonal(self, diag)
    def Loc_SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_SetDiagonal(self, diag)
    def MatScale(self, scale):
        return _smg2s.parMatrixSparseRealSingleInt_MatScale(self, scale)
    def Loc_MatScale(self, scale):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_MatScale(self, scale)
    def Loc_MatAXPY(self, X, scale):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_MatAXPY(self, X, scale)
    def Loc_MatAYPX(self, X, scale):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_MatAYPX(self, X, scale)
    def ConvertToCSR(self):
        return _smg2s.parMatrixSparseRealSingleInt_ConvertToCSR(self)
    def Loc_ConvertToCSR(self):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_ConvertToCSR(self)
    def ZeroEntries(self):
        return _smg2s.parMatrixSparseRealSingleInt_ZeroEntries(self)
    def Loc_ZeroEntries(self):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_ZeroEntries(self)
    # MA/AM: matrix products with the nilpotent operator used by the
    # spectrum-generation recurrence (semantics defined in the C++ library).
    def MA(self, nilp, prod):
        return _smg2s.parMatrixSparseRealSingleInt_MA(self, nilp, prod)
    def AM(self, nilp, prod):
        return _smg2s.parMatrixSparseRealSingleInt_AM(self, nilp, prod)
# Register the proxy class with the SWIG runtime so C-side objects of this
# type come back wrapped as parMatrixSparseRealSingleInt instances.
parMatrixSparseRealSingleInt_swigregister = _smg2s.parMatrixSparseRealSingleInt_swigregister
parMatrixSparseRealSingleInt_swigregister(parMatrixSparseRealSingleInt)
def smg2sRealSingleInt(probSize, nilp, lbandwidth, spectrum, comm):
    # NOTE: this Python wrapper is immediately shadowed by the rebinding
    # below (SWIG fastproxy pattern); callers get the raw C function.
    return _smg2s.smg2sRealSingleInt(probSize, nilp, lbandwidth, spectrum, comm)
smg2sRealSingleInt = _smg2s.smg2sRealSingleInt
class parMatrixSparseRealSingleLongInt(_object):
    """SWIG proxy for the C++ parMatrixSparse instantiation with real
    single-precision values and long int indices (inferred from the name).

    Machine-generated wrapper: every method delegates to the matching
    function in the compiled ``_smg2s`` extension. Regenerate with SWIG
    rather than editing by hand.
    """
    # SWIG dispatch tables mapping attribute names to C-level accessors.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, parMatrixSparseRealSingleLongInt, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, parMatrixSparseRealSingleLongInt, name)
    __repr__ = _swig_repr
    # Wrapped data members; also exposed as properties when _newclass is set.
    __swig_setmethods__["CSR_lloc"] = _smg2s.parMatrixSparseRealSingleLongInt_CSR_lloc_set
    __swig_getmethods__["CSR_lloc"] = _smg2s.parMatrixSparseRealSingleLongInt_CSR_lloc_get
    if _newclass:
        CSR_lloc = _swig_property(_smg2s.parMatrixSparseRealSingleLongInt_CSR_lloc_get, _smg2s.parMatrixSparseRealSingleLongInt_CSR_lloc_set)
    __swig_setmethods__["CSR_gloc"] = _smg2s.parMatrixSparseRealSingleLongInt_CSR_gloc_set
    __swig_getmethods__["CSR_gloc"] = _smg2s.parMatrixSparseRealSingleLongInt_CSR_gloc_get
    if _newclass:
        CSR_gloc = _swig_property(_smg2s.parMatrixSparseRealSingleLongInt_CSR_gloc_get, _smg2s.parMatrixSparseRealSingleLongInt_CSR_gloc_set)
    __swig_setmethods__["CSR_loc"] = _smg2s.parMatrixSparseRealSingleLongInt_CSR_loc_set
    __swig_getmethods__["CSR_loc"] = _smg2s.parMatrixSparseRealSingleLongInt_CSR_loc_get
    if _newclass:
        CSR_loc = _swig_property(_smg2s.parMatrixSparseRealSingleLongInt_CSR_loc_get, _smg2s.parMatrixSparseRealSingleLongInt_CSR_loc_set)
    __swig_setmethods__["dynmat_loc"] = _smg2s.parMatrixSparseRealSingleLongInt_dynmat_loc_set
    __swig_getmethods__["dynmat_loc"] = _smg2s.parMatrixSparseRealSingleLongInt_dynmat_loc_get
    if _newclass:
        dynmat_loc = _swig_property(_smg2s.parMatrixSparseRealSingleLongInt_dynmat_loc_get, _smg2s.parMatrixSparseRealSingleLongInt_dynmat_loc_set)
    def __init__(self, *args):
        # Create the underlying C++ object; the try/except is the SWIG idiom
        # for attaching the new pointer to this proxy instance.
        this = _smg2s.new_parMatrixSparseRealSingleLongInt(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _smg2s.delete_parMatrixSparseRealSingleLongInt
    __del__ = lambda self: None
    # Distribution / layout queries.
    def GetXMap(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetXMap(self)
    def GetYMap(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetYMap(self)
    def GetComm(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetComm(self)
    def GetXLowerBound(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetXLowerBound(self)
    def GetYLowerBound(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetYLowerBound(self)
    def GetXUpperBound(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetXUpperBound(self)
    def GetYUpperBound(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetYUpperBound(self)
    def GetTrueLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetTrueLocalSize(self, rs, cs)
    def GetLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetLocalSize(self, rs, cs)
    # NOTE: GetDynMatGLobLoc (capital "L") and GetDynMatGlobLoc are two
    # distinct generated entry points mirroring the C++ API; both kept.
    def GetDynMatGLobLoc(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetDynMatGLobLoc(self)
    def GetDynMatGlobLoc(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetDynMatGlobLoc(self)
    def GetDynMatLoc(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetDynMatLoc(self)
    def GetCSRLocLoc(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetCSRLocLoc(self)
    def GetCSRGlobLoc(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetCSRGlobLoc(self)
    # Element insertion / lookup.
    def AddValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleLongInt_AddValueLocal(self, row, col, value)
    def AddValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealSingleLongInt_AddValuesLocal(self, nindex, rows, cols, values)
    def AddValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleLongInt_AddValue(self, row, col, value)
    def SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleLongInt_SetValueLocal(self, row, col, value)
    def SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealSingleLongInt_SetValuesLocal(self, nindex, rows, cols, values)
    def SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleLongInt_SetValue(self, row, col, value)
    def GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetLocalValue(self, row, col)
    def GetValue(self, row, col):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetValue(self, row, col)
    def glocPlusLloc(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_glocPlusLloc(self)
    def llocToGlocLoc(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_llocToGlocLoc(self)
    # Debug printing.
    def MatView(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_MatView(self)
    def LOC_MatView(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_LOC_MatView(self)
    # "Loc_" variants — presumably operate on the node-local matrix copy;
    # TODO confirm against the C++ smg2s sources.
    def Loc_SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_SetValueLocal(self, row, col, value)
    def Loc_SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_SetValuesLocal(self, nindex, rows, cols, values)
    def Loc_SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_SetValue(self, row, col, value)
    def Loc_GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_GetLocalValue(self, row, col)
    def Loc_GetValue(self, row, col):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_GetValue(self, row, col)
    # Whole-matrix operations.
    def SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseRealSingleLongInt_SetDiagonal(self, diag)
    def Loc_SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_SetDiagonal(self, diag)
    def MatScale(self, scale):
        return _smg2s.parMatrixSparseRealSingleLongInt_MatScale(self, scale)
    def Loc_MatScale(self, scale):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_MatScale(self, scale)
    def Loc_MatAXPY(self, X, scale):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_MatAXPY(self, X, scale)
    def Loc_MatAYPX(self, X, scale):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_MatAYPX(self, X, scale)
    def ConvertToCSR(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_ConvertToCSR(self)
    def Loc_ConvertToCSR(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_ConvertToCSR(self)
    def ZeroEntries(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_ZeroEntries(self)
    def Loc_ZeroEntries(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_ZeroEntries(self)
    # MA/AM: matrix products with the nilpotent operator used by the
    # spectrum-generation recurrence (semantics defined in the C++ library).
    def MA(self, nilp, prod):
        return _smg2s.parMatrixSparseRealSingleLongInt_MA(self, nilp, prod)
    def AM(self, nilp, prod):
        return _smg2s.parMatrixSparseRealSingleLongInt_AM(self, nilp, prod)
# Register the proxy class with the SWIG runtime so C-side objects of this
# type come back wrapped as parMatrixSparseRealSingleLongInt instances.
parMatrixSparseRealSingleLongInt_swigregister = _smg2s.parMatrixSparseRealSingleLongInt_swigregister
parMatrixSparseRealSingleLongInt_swigregister(parMatrixSparseRealSingleLongInt)
def smg2sRealSingleLongInt(probSize, nilp, lbandwidth, spectrum, comm):
    # NOTE: this Python wrapper is immediately shadowed by the rebinding
    # below (SWIG fastproxy pattern); callers get the raw C function.
    return _smg2s.smg2sRealSingleLongInt(probSize, nilp, lbandwidth, spectrum, comm)
smg2sRealSingleLongInt = _smg2s.smg2sRealSingleLongInt
class parMatrixSparseComplexDoubleInt(_object):
    """SWIG proxy for the C++ parMatrixSparse instantiation with complex
    double-precision values and int indices (inferred from the class name).

    Machine-generated wrapper: every method delegates to the matching
    function in the compiled ``_smg2s`` extension. Regenerate with SWIG
    rather than editing by hand.
    """
    # SWIG dispatch tables mapping attribute names to C-level accessors.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, parMatrixSparseComplexDoubleInt, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, parMatrixSparseComplexDoubleInt, name)
    __repr__ = _swig_repr
    # Wrapped data members; also exposed as properties when _newclass is set.
    __swig_setmethods__["CSR_lloc"] = _smg2s.parMatrixSparseComplexDoubleInt_CSR_lloc_set
    __swig_getmethods__["CSR_lloc"] = _smg2s.parMatrixSparseComplexDoubleInt_CSR_lloc_get
    if _newclass:
        CSR_lloc = _swig_property(_smg2s.parMatrixSparseComplexDoubleInt_CSR_lloc_get, _smg2s.parMatrixSparseComplexDoubleInt_CSR_lloc_set)
    __swig_setmethods__["CSR_gloc"] = _smg2s.parMatrixSparseComplexDoubleInt_CSR_gloc_set
    __swig_getmethods__["CSR_gloc"] = _smg2s.parMatrixSparseComplexDoubleInt_CSR_gloc_get
    if _newclass:
        CSR_gloc = _swig_property(_smg2s.parMatrixSparseComplexDoubleInt_CSR_gloc_get, _smg2s.parMatrixSparseComplexDoubleInt_CSR_gloc_set)
    __swig_setmethods__["CSR_loc"] = _smg2s.parMatrixSparseComplexDoubleInt_CSR_loc_set
    __swig_getmethods__["CSR_loc"] = _smg2s.parMatrixSparseComplexDoubleInt_CSR_loc_get
    if _newclass:
        CSR_loc = _swig_property(_smg2s.parMatrixSparseComplexDoubleInt_CSR_loc_get, _smg2s.parMatrixSparseComplexDoubleInt_CSR_loc_set)
    __swig_setmethods__["dynmat_loc"] = _smg2s.parMatrixSparseComplexDoubleInt_dynmat_loc_set
    __swig_getmethods__["dynmat_loc"] = _smg2s.parMatrixSparseComplexDoubleInt_dynmat_loc_get
    if _newclass:
        dynmat_loc = _swig_property(_smg2s.parMatrixSparseComplexDoubleInt_dynmat_loc_get, _smg2s.parMatrixSparseComplexDoubleInt_dynmat_loc_set)
    def __init__(self, *args):
        # Create the underlying C++ object; the try/except is the SWIG idiom
        # for attaching the new pointer to this proxy instance.
        this = _smg2s.new_parMatrixSparseComplexDoubleInt(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _smg2s.delete_parMatrixSparseComplexDoubleInt
    __del__ = lambda self: None
    # Distribution / layout queries.
    def GetXMap(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetXMap(self)
    def GetYMap(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetYMap(self)
    def GetComm(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetComm(self)
    def GetXLowerBound(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetXLowerBound(self)
    def GetYLowerBound(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetYLowerBound(self)
    def GetXUpperBound(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetXUpperBound(self)
    def GetYUpperBound(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetYUpperBound(self)
    def GetTrueLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetTrueLocalSize(self, rs, cs)
    def GetLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetLocalSize(self, rs, cs)
    # NOTE: GetDynMatGLobLoc (capital "L") and GetDynMatGlobLoc are two
    # distinct generated entry points mirroring the C++ API; both kept.
    def GetDynMatGLobLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetDynMatGLobLoc(self)
    def GetDynMatGlobLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetDynMatGlobLoc(self)
    def GetDynMatLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetDynMatLoc(self)
    def GetCSRLocLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetCSRLocLoc(self)
    def GetCSRGlobLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetCSRGlobLoc(self)
    # Element insertion / lookup.
    def AddValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleInt_AddValueLocal(self, row, col, value)
    def AddValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseComplexDoubleInt_AddValuesLocal(self, nindex, rows, cols, values)
    def AddValue(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleInt_AddValue(self, row, col, value)
    def SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleInt_SetValueLocal(self, row, col, value)
    def SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseComplexDoubleInt_SetValuesLocal(self, nindex, rows, cols, values)
    def SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleInt_SetValue(self, row, col, value)
    def GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetLocalValue(self, row, col)
    def GetValue(self, row, col):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetValue(self, row, col)
    def glocPlusLloc(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_glocPlusLloc(self)
    def llocToGlocLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_llocToGlocLoc(self)
    # Debug printing.
    def MatView(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_MatView(self)
    def LOC_MatView(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_LOC_MatView(self)
    # "Loc_" variants — presumably operate on the node-local matrix copy;
    # TODO confirm against the C++ smg2s sources.
    def Loc_SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_SetValueLocal(self, row, col, value)
    def Loc_SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_SetValuesLocal(self, nindex, rows, cols, values)
    def Loc_SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_SetValue(self, row, col, value)
    def Loc_GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_GetLocalValue(self, row, col)
    def Loc_GetValue(self, row, col):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_GetValue(self, row, col)
    # Whole-matrix operations.
    def SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseComplexDoubleInt_SetDiagonal(self, diag)
    def Loc_SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_SetDiagonal(self, diag)
    def MatScale(self, scale):
        return _smg2s.parMatrixSparseComplexDoubleInt_MatScale(self, scale)
    def Loc_MatScale(self, scale):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_MatScale(self, scale)
    def Loc_MatAXPY(self, X, scale):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_MatAXPY(self, X, scale)
    def Loc_MatAYPX(self, X, scale):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_MatAYPX(self, X, scale)
    def ConvertToCSR(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_ConvertToCSR(self)
    def Loc_ConvertToCSR(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_ConvertToCSR(self)
    def ZeroEntries(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_ZeroEntries(self)
    def Loc_ZeroEntries(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_ZeroEntries(self)
    # MA/AM: matrix products with the nilpotent operator used by the
    # spectrum-generation recurrence (semantics defined in the C++ library).
    def MA(self, nilp, prod):
        return _smg2s.parMatrixSparseComplexDoubleInt_MA(self, nilp, prod)
    def AM(self, nilp, prod):
        return _smg2s.parMatrixSparseComplexDoubleInt_AM(self, nilp, prod)
# Register the proxy class with the SWIG runtime so C-side objects of this
# type come back wrapped as parMatrixSparseComplexDoubleInt instances.
parMatrixSparseComplexDoubleInt_swigregister = _smg2s.parMatrixSparseComplexDoubleInt_swigregister
parMatrixSparseComplexDoubleInt_swigregister(parMatrixSparseComplexDoubleInt)
def smg2sComplexDoubleInt(probSize, nilp, lbandwidth, spectrum, comm):
    # NOTE: this Python wrapper is immediately shadowed by the rebinding
    # below (SWIG fastproxy pattern); callers get the raw C function.
    return _smg2s.smg2sComplexDoubleInt(probSize, nilp, lbandwidth, spectrum, comm)
smg2sComplexDoubleInt = _smg2s.smg2sComplexDoubleInt
class parMatrixSparseComplexDoubleLongInt(_object):
    """SWIG proxy for the C++ parMatrixSparse instantiation with complex
    double-precision values and long int indices (inferred from the name).

    Machine-generated wrapper: every method delegates to the matching
    function in the compiled ``_smg2s`` extension. Regenerate with SWIG
    rather than editing by hand.
    """
    # SWIG dispatch tables mapping attribute names to C-level accessors.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, parMatrixSparseComplexDoubleLongInt, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, parMatrixSparseComplexDoubleLongInt, name)
    __repr__ = _swig_repr
    # Wrapped data members; also exposed as properties when _newclass is set.
    __swig_setmethods__["CSR_lloc"] = _smg2s.parMatrixSparseComplexDoubleLongInt_CSR_lloc_set
    __swig_getmethods__["CSR_lloc"] = _smg2s.parMatrixSparseComplexDoubleLongInt_CSR_lloc_get
    if _newclass:
        CSR_lloc = _swig_property(_smg2s.parMatrixSparseComplexDoubleLongInt_CSR_lloc_get, _smg2s.parMatrixSparseComplexDoubleLongInt_CSR_lloc_set)
    __swig_setmethods__["CSR_gloc"] = _smg2s.parMatrixSparseComplexDoubleLongInt_CSR_gloc_set
    __swig_getmethods__["CSR_gloc"] = _smg2s.parMatrixSparseComplexDoubleLongInt_CSR_gloc_get
    if _newclass:
        CSR_gloc = _swig_property(_smg2s.parMatrixSparseComplexDoubleLongInt_CSR_gloc_get, _smg2s.parMatrixSparseComplexDoubleLongInt_CSR_gloc_set)
    __swig_setmethods__["CSR_loc"] = _smg2s.parMatrixSparseComplexDoubleLongInt_CSR_loc_set
    __swig_getmethods__["CSR_loc"] = _smg2s.parMatrixSparseComplexDoubleLongInt_CSR_loc_get
    if _newclass:
        CSR_loc = _swig_property(_smg2s.parMatrixSparseComplexDoubleLongInt_CSR_loc_get, _smg2s.parMatrixSparseComplexDoubleLongInt_CSR_loc_set)
    __swig_setmethods__["dynmat_loc"] = _smg2s.parMatrixSparseComplexDoubleLongInt_dynmat_loc_set
    __swig_getmethods__["dynmat_loc"] = _smg2s.parMatrixSparseComplexDoubleLongInt_dynmat_loc_get
    if _newclass:
        dynmat_loc = _swig_property(_smg2s.parMatrixSparseComplexDoubleLongInt_dynmat_loc_get, _smg2s.parMatrixSparseComplexDoubleLongInt_dynmat_loc_set)
    def __init__(self, *args):
        # Create the underlying C++ object; the try/except is the SWIG idiom
        # for attaching the new pointer to this proxy instance.
        this = _smg2s.new_parMatrixSparseComplexDoubleLongInt(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _smg2s.delete_parMatrixSparseComplexDoubleLongInt
    __del__ = lambda self: None
    # Distribution / layout queries.
    def GetXMap(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_GetXMap(self)
    def GetYMap(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_GetYMap(self)
    def GetComm(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_GetComm(self)
    def GetXLowerBound(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_GetXLowerBound(self)
    def GetYLowerBound(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_GetYLowerBound(self)
    def GetXUpperBound(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_GetXUpperBound(self)
    def GetYUpperBound(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_GetYUpperBound(self)
    def GetTrueLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_GetTrueLocalSize(self, rs, cs)
    def GetLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_GetLocalSize(self, rs, cs)
    # NOTE: GetDynMatGLobLoc (capital "L") and GetDynMatGlobLoc are two
    # distinct generated entry points mirroring the C++ API; both kept.
    def GetDynMatGLobLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_GetDynMatGLobLoc(self)
    def GetDynMatGlobLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_GetDynMatGlobLoc(self)
    def GetDynMatLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_GetDynMatLoc(self)
    def GetCSRLocLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_GetCSRLocLoc(self)
    def GetCSRGlobLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_GetCSRGlobLoc(self)
    # Element insertion / lookup.
    def AddValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_AddValueLocal(self, row, col, value)
    def AddValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_AddValuesLocal(self, nindex, rows, cols, values)
    def AddValue(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_AddValue(self, row, col, value)
    def SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_SetValueLocal(self, row, col, value)
    def SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_SetValuesLocal(self, nindex, rows, cols, values)
    def SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_SetValue(self, row, col, value)
    def GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_GetLocalValue(self, row, col)
    def GetValue(self, row, col):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_GetValue(self, row, col)
    def glocPlusLloc(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_glocPlusLloc(self)
    def llocToGlocLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_llocToGlocLoc(self)
    # Debug printing.
    def MatView(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_MatView(self)
    def LOC_MatView(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_LOC_MatView(self)
    # "Loc_" variants — presumably operate on the node-local matrix copy;
    # TODO confirm against the C++ smg2s sources.
    def Loc_SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_Loc_SetValueLocal(self, row, col, value)
    def Loc_SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_Loc_SetValuesLocal(self, nindex, rows, cols, values)
    def Loc_SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_Loc_SetValue(self, row, col, value)
    def Loc_GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_Loc_GetLocalValue(self, row, col)
    def Loc_GetValue(self, row, col):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_Loc_GetValue(self, row, col)
    # Whole-matrix operations.
    def SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_SetDiagonal(self, diag)
    def Loc_SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_Loc_SetDiagonal(self, diag)
    def MatScale(self, scale):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_MatScale(self, scale)
    def Loc_MatScale(self, scale):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_Loc_MatScale(self, scale)
    def Loc_MatAXPY(self, X, scale):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_Loc_MatAXPY(self, X, scale)
    def Loc_MatAYPX(self, X, scale):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_Loc_MatAYPX(self, X, scale)
    def ConvertToCSR(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_ConvertToCSR(self)
    def Loc_ConvertToCSR(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_Loc_ConvertToCSR(self)
    def ZeroEntries(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_ZeroEntries(self)
    def Loc_ZeroEntries(self):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_Loc_ZeroEntries(self)
    # MA/AM: matrix products with the nilpotent operator used by the
    # spectrum-generation recurrence (semantics defined in the C++ library).
    def MA(self, nilp, prod):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_MA(self, nilp, prod)
    def AM(self, nilp, prod):
        return _smg2s.parMatrixSparseComplexDoubleLongInt_AM(self, nilp, prod)
# Register the proxy class with the SWIG runtime so native objects returned
# by the extension are wrapped as parMatrixSparseComplexDoubleLongInt.
parMatrixSparseComplexDoubleLongInt_swigregister = _smg2s.parMatrixSparseComplexDoubleLongInt_swigregister
parMatrixSparseComplexDoubleLongInt_swigregister(parMatrixSparseComplexDoubleLongInt)

def smg2sComplexDoubleLongInt(probSize, nilp, lbandwidth, spectrum, comm):
    return _smg2s.smg2sComplexDoubleLongInt(probSize, nilp, lbandwidth, spectrum, comm)
# NOTE: the def above is immediately shadowed by the raw extension function
# (standard SWIG fast-proxy pattern); callers get the C implementation.
smg2sComplexDoubleLongInt = _smg2s.smg2sComplexDoubleLongInt
class parMatrixSparseComplexSingleInt(_object):
    """SWIG proxy for the native parallel sparse matrix specialised for
    complex single-precision values with int indices.

    NOTE(review): auto-generated SWIG wrapper -- every method delegates to
    the matching function in the `_smg2s` extension module. Regenerate from
    the SWIG interface file rather than editing by hand.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, parMatrixSparseComplexSingleInt, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, parMatrixSparseComplexSingleInt, name)
    __repr__ = _swig_repr

    # Wrapped C++ data members (exposed as properties on new-style classes).
    __swig_setmethods__["CSR_lloc"] = _smg2s.parMatrixSparseComplexSingleInt_CSR_lloc_set
    __swig_getmethods__["CSR_lloc"] = _smg2s.parMatrixSparseComplexSingleInt_CSR_lloc_get
    if _newclass:
        CSR_lloc = _swig_property(_smg2s.parMatrixSparseComplexSingleInt_CSR_lloc_get, _smg2s.parMatrixSparseComplexSingleInt_CSR_lloc_set)
    __swig_setmethods__["CSR_gloc"] = _smg2s.parMatrixSparseComplexSingleInt_CSR_gloc_set
    __swig_getmethods__["CSR_gloc"] = _smg2s.parMatrixSparseComplexSingleInt_CSR_gloc_get
    if _newclass:
        CSR_gloc = _swig_property(_smg2s.parMatrixSparseComplexSingleInt_CSR_gloc_get, _smg2s.parMatrixSparseComplexSingleInt_CSR_gloc_set)
    __swig_setmethods__["CSR_loc"] = _smg2s.parMatrixSparseComplexSingleInt_CSR_loc_set
    __swig_getmethods__["CSR_loc"] = _smg2s.parMatrixSparseComplexSingleInt_CSR_loc_get
    if _newclass:
        CSR_loc = _swig_property(_smg2s.parMatrixSparseComplexSingleInt_CSR_loc_get, _smg2s.parMatrixSparseComplexSingleInt_CSR_loc_set)
    __swig_setmethods__["dynmat_loc"] = _smg2s.parMatrixSparseComplexSingleInt_dynmat_loc_set
    __swig_getmethods__["dynmat_loc"] = _smg2s.parMatrixSparseComplexSingleInt_dynmat_loc_get
    if _newclass:
        dynmat_loc = _swig_property(_smg2s.parMatrixSparseComplexSingleInt_dynmat_loc_get, _smg2s.parMatrixSparseComplexSingleInt_dynmat_loc_set)

    def __init__(self, *args):
        # SWIG constructor: keeps the native pointer alive on `self.this`.
        this = _smg2s.new_parMatrixSparseComplexSingleInt(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _smg2s.delete_parMatrixSparseComplexSingleInt
    __del__ = lambda self: None

    # --- Layout / distribution queries -------------------------------------
    def GetXMap(self):
        return _smg2s.parMatrixSparseComplexSingleInt_GetXMap(self)
    def GetYMap(self):
        return _smg2s.parMatrixSparseComplexSingleInt_GetYMap(self)
    def GetComm(self):
        return _smg2s.parMatrixSparseComplexSingleInt_GetComm(self)
    def GetXLowerBound(self):
        return _smg2s.parMatrixSparseComplexSingleInt_GetXLowerBound(self)
    def GetYLowerBound(self):
        return _smg2s.parMatrixSparseComplexSingleInt_GetYLowerBound(self)
    def GetXUpperBound(self):
        return _smg2s.parMatrixSparseComplexSingleInt_GetXUpperBound(self)
    def GetYUpperBound(self):
        return _smg2s.parMatrixSparseComplexSingleInt_GetYUpperBound(self)
    def GetTrueLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseComplexSingleInt_GetTrueLocalSize(self, rs, cs)
    def GetLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseComplexSingleInt_GetLocalSize(self, rs, cs)
    # NOTE(review): both GetDynMatGLobLoc (typo'd capital L) and
    # GetDynMatGlobLoc exist in the generated interface; keep both.
    def GetDynMatGLobLoc(self):
        return _smg2s.parMatrixSparseComplexSingleInt_GetDynMatGLobLoc(self)
    def GetDynMatGlobLoc(self):
        return _smg2s.parMatrixSparseComplexSingleInt_GetDynMatGlobLoc(self)
    def GetDynMatLoc(self):
        return _smg2s.parMatrixSparseComplexSingleInt_GetDynMatLoc(self)
    def GetCSRLocLoc(self):
        return _smg2s.parMatrixSparseComplexSingleInt_GetCSRLocLoc(self)
    def GetCSRGlobLoc(self):
        return _smg2s.parMatrixSparseComplexSingleInt_GetCSRGlobLoc(self)

    # --- Element insertion / lookup ----------------------------------------
    def AddValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseComplexSingleInt_AddValueLocal(self, row, col, value)
    def AddValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseComplexSingleInt_AddValuesLocal(self, nindex, rows, cols, values)
    def AddValue(self, row, col, value):
        return _smg2s.parMatrixSparseComplexSingleInt_AddValue(self, row, col, value)
    def SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseComplexSingleInt_SetValueLocal(self, row, col, value)
    def SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseComplexSingleInt_SetValuesLocal(self, nindex, rows, cols, values)
    def SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseComplexSingleInt_SetValue(self, row, col, value)
    def GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseComplexSingleInt_GetLocalValue(self, row, col)
    def GetValue(self, row, col):
        return _smg2s.parMatrixSparseComplexSingleInt_GetValue(self, row, col)

    # --- Global/local conversion and views ---------------------------------
    def glocPlusLloc(self):
        return _smg2s.parMatrixSparseComplexSingleInt_glocPlusLloc(self)
    def llocToGlocLoc(self):
        return _smg2s.parMatrixSparseComplexSingleInt_llocToGlocLoc(self)
    def MatView(self):
        return _smg2s.parMatrixSparseComplexSingleInt_MatView(self)
    def LOC_MatView(self):
        return _smg2s.parMatrixSparseComplexSingleInt_LOC_MatView(self)

    # --- Loc_* variants operating on the purely-local matrix ---------------
    def Loc_SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseComplexSingleInt_Loc_SetValueLocal(self, row, col, value)
    def Loc_SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseComplexSingleInt_Loc_SetValuesLocal(self, nindex, rows, cols, values)
    def Loc_SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseComplexSingleInt_Loc_SetValue(self, row, col, value)
    def Loc_GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseComplexSingleInt_Loc_GetLocalValue(self, row, col)
    def Loc_GetValue(self, row, col):
        return _smg2s.parMatrixSparseComplexSingleInt_Loc_GetValue(self, row, col)

    # --- Diagonal, scaling and AXPY/AYPX-style updates ----------------------
    def SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseComplexSingleInt_SetDiagonal(self, diag)
    def Loc_SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseComplexSingleInt_Loc_SetDiagonal(self, diag)
    def MatScale(self, scale):
        return _smg2s.parMatrixSparseComplexSingleInt_MatScale(self, scale)
    def Loc_MatScale(self, scale):
        return _smg2s.parMatrixSparseComplexSingleInt_Loc_MatScale(self, scale)
    def Loc_MatAXPY(self, X, scale):
        return _smg2s.parMatrixSparseComplexSingleInt_Loc_MatAXPY(self, X, scale)
    def Loc_MatAYPX(self, X, scale):
        return _smg2s.parMatrixSparseComplexSingleInt_Loc_MatAYPX(self, X, scale)

    # --- CSR conversion, reset, and nilpotent products ----------------------
    def ConvertToCSR(self):
        return _smg2s.parMatrixSparseComplexSingleInt_ConvertToCSR(self)
    def Loc_ConvertToCSR(self):
        return _smg2s.parMatrixSparseComplexSingleInt_Loc_ConvertToCSR(self)
    def ZeroEntries(self):
        return _smg2s.parMatrixSparseComplexSingleInt_ZeroEntries(self)
    def Loc_ZeroEntries(self):
        return _smg2s.parMatrixSparseComplexSingleInt_Loc_ZeroEntries(self)
    def MA(self, nilp, prod):
        return _smg2s.parMatrixSparseComplexSingleInt_MA(self, nilp, prod)
    def AM(self, nilp, prod):
        return _smg2s.parMatrixSparseComplexSingleInt_AM(self, nilp, prod)
# Register the proxy class with the SWIG runtime so native objects returned
# by the extension are wrapped as parMatrixSparseComplexSingleInt.
parMatrixSparseComplexSingleInt_swigregister = _smg2s.parMatrixSparseComplexSingleInt_swigregister
parMatrixSparseComplexSingleInt_swigregister(parMatrixSparseComplexSingleInt)

def smg2sComplexSingleInt(probSize, nilp, lbandwidth, spectrum, comm):
    return _smg2s.smg2sComplexSingleInt(probSize, nilp, lbandwidth, spectrum, comm)
# NOTE: the def above is immediately shadowed by the raw extension function
# (standard SWIG fast-proxy pattern); callers get the C implementation.
smg2sComplexSingleInt = _smg2s.smg2sComplexSingleInt
class parMatrixSparseComplexSingleLongInt(_object):
    """SWIG proxy for the native parallel sparse matrix specialised for
    complex single-precision values with long int indices.

    NOTE(review): auto-generated SWIG wrapper -- every method delegates to
    the matching function in the `_smg2s` extension module. Regenerate from
    the SWIG interface file rather than editing by hand.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, parMatrixSparseComplexSingleLongInt, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, parMatrixSparseComplexSingleLongInt, name)
    __repr__ = _swig_repr

    # Wrapped C++ data members (exposed as properties on new-style classes).
    __swig_setmethods__["CSR_lloc"] = _smg2s.parMatrixSparseComplexSingleLongInt_CSR_lloc_set
    __swig_getmethods__["CSR_lloc"] = _smg2s.parMatrixSparseComplexSingleLongInt_CSR_lloc_get
    if _newclass:
        CSR_lloc = _swig_property(_smg2s.parMatrixSparseComplexSingleLongInt_CSR_lloc_get, _smg2s.parMatrixSparseComplexSingleLongInt_CSR_lloc_set)
    __swig_setmethods__["CSR_gloc"] = _smg2s.parMatrixSparseComplexSingleLongInt_CSR_gloc_set
    __swig_getmethods__["CSR_gloc"] = _smg2s.parMatrixSparseComplexSingleLongInt_CSR_gloc_get
    if _newclass:
        CSR_gloc = _swig_property(_smg2s.parMatrixSparseComplexSingleLongInt_CSR_gloc_get, _smg2s.parMatrixSparseComplexSingleLongInt_CSR_gloc_set)
    __swig_setmethods__["CSR_loc"] = _smg2s.parMatrixSparseComplexSingleLongInt_CSR_loc_set
    __swig_getmethods__["CSR_loc"] = _smg2s.parMatrixSparseComplexSingleLongInt_CSR_loc_get
    if _newclass:
        CSR_loc = _swig_property(_smg2s.parMatrixSparseComplexSingleLongInt_CSR_loc_get, _smg2s.parMatrixSparseComplexSingleLongInt_CSR_loc_set)
    __swig_setmethods__["dynmat_loc"] = _smg2s.parMatrixSparseComplexSingleLongInt_dynmat_loc_set
    __swig_getmethods__["dynmat_loc"] = _smg2s.parMatrixSparseComplexSingleLongInt_dynmat_loc_get
    if _newclass:
        dynmat_loc = _swig_property(_smg2s.parMatrixSparseComplexSingleLongInt_dynmat_loc_get, _smg2s.parMatrixSparseComplexSingleLongInt_dynmat_loc_set)

    def __init__(self, *args):
        # SWIG constructor: keeps the native pointer alive on `self.this`.
        this = _smg2s.new_parMatrixSparseComplexSingleLongInt(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _smg2s.delete_parMatrixSparseComplexSingleLongInt
    __del__ = lambda self: None

    # --- Layout / distribution queries -------------------------------------
    def GetXMap(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_GetXMap(self)
    def GetYMap(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_GetYMap(self)
    def GetComm(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_GetComm(self)
    def GetXLowerBound(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_GetXLowerBound(self)
    def GetYLowerBound(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_GetYLowerBound(self)
    def GetXUpperBound(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_GetXUpperBound(self)
    def GetYUpperBound(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_GetYUpperBound(self)
    def GetTrueLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseComplexSingleLongInt_GetTrueLocalSize(self, rs, cs)
    def GetLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseComplexSingleLongInt_GetLocalSize(self, rs, cs)
    # NOTE(review): both GetDynMatGLobLoc (typo'd capital L) and
    # GetDynMatGlobLoc exist in the generated interface; keep both.
    def GetDynMatGLobLoc(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_GetDynMatGLobLoc(self)
    def GetDynMatGlobLoc(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_GetDynMatGlobLoc(self)
    def GetDynMatLoc(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_GetDynMatLoc(self)
    def GetCSRLocLoc(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_GetCSRLocLoc(self)
    def GetCSRGlobLoc(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_GetCSRGlobLoc(self)

    # --- Element insertion / lookup ----------------------------------------
    def AddValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseComplexSingleLongInt_AddValueLocal(self, row, col, value)
    def AddValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseComplexSingleLongInt_AddValuesLocal(self, nindex, rows, cols, values)
    def AddValue(self, row, col, value):
        return _smg2s.parMatrixSparseComplexSingleLongInt_AddValue(self, row, col, value)
    def SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseComplexSingleLongInt_SetValueLocal(self, row, col, value)
    def SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseComplexSingleLongInt_SetValuesLocal(self, nindex, rows, cols, values)
    def SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseComplexSingleLongInt_SetValue(self, row, col, value)
    def GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseComplexSingleLongInt_GetLocalValue(self, row, col)
    def GetValue(self, row, col):
        return _smg2s.parMatrixSparseComplexSingleLongInt_GetValue(self, row, col)

    # --- Global/local conversion and views ---------------------------------
    def glocPlusLloc(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_glocPlusLloc(self)
    def llocToGlocLoc(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_llocToGlocLoc(self)
    def MatView(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_MatView(self)
    def LOC_MatView(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_LOC_MatView(self)

    # --- Loc_* variants operating on the purely-local matrix ---------------
    def Loc_SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseComplexSingleLongInt_Loc_SetValueLocal(self, row, col, value)
    def Loc_SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseComplexSingleLongInt_Loc_SetValuesLocal(self, nindex, rows, cols, values)
    def Loc_SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseComplexSingleLongInt_Loc_SetValue(self, row, col, value)
    def Loc_GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseComplexSingleLongInt_Loc_GetLocalValue(self, row, col)
    def Loc_GetValue(self, row, col):
        return _smg2s.parMatrixSparseComplexSingleLongInt_Loc_GetValue(self, row, col)

    # --- Diagonal, scaling and AXPY/AYPX-style updates ----------------------
    def SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseComplexSingleLongInt_SetDiagonal(self, diag)
    def Loc_SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseComplexSingleLongInt_Loc_SetDiagonal(self, diag)
    def MatScale(self, scale):
        return _smg2s.parMatrixSparseComplexSingleLongInt_MatScale(self, scale)
    def Loc_MatScale(self, scale):
        return _smg2s.parMatrixSparseComplexSingleLongInt_Loc_MatScale(self, scale)
    def Loc_MatAXPY(self, X, scale):
        return _smg2s.parMatrixSparseComplexSingleLongInt_Loc_MatAXPY(self, X, scale)
    def Loc_MatAYPX(self, X, scale):
        return _smg2s.parMatrixSparseComplexSingleLongInt_Loc_MatAYPX(self, X, scale)

    # --- CSR conversion, reset, and nilpotent products ----------------------
    def ConvertToCSR(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_ConvertToCSR(self)
    def Loc_ConvertToCSR(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_Loc_ConvertToCSR(self)
    def ZeroEntries(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_ZeroEntries(self)
    def Loc_ZeroEntries(self):
        return _smg2s.parMatrixSparseComplexSingleLongInt_Loc_ZeroEntries(self)
    def MA(self, nilp, prod):
        return _smg2s.parMatrixSparseComplexSingleLongInt_MA(self, nilp, prod)
    def AM(self, nilp, prod):
        return _smg2s.parMatrixSparseComplexSingleLongInt_AM(self, nilp, prod)
# Register the proxy class with the SWIG runtime so native objects returned
# by the extension are wrapped as parMatrixSparseComplexSingleLongInt.
parMatrixSparseComplexSingleLongInt_swigregister = _smg2s.parMatrixSparseComplexSingleLongInt_swigregister
parMatrixSparseComplexSingleLongInt_swigregister(parMatrixSparseComplexSingleLongInt)

def smg2sComplexSingleLongInt(probSize, nilp, lbandwidth, spectrum, comm):
    return _smg2s.smg2sComplexSingleLongInt(probSize, nilp, lbandwidth, spectrum, comm)
# NOTE: the def above is immediately shadowed by the raw extension function
# (standard SWIG fast-proxy pattern); callers get the C implementation.
smg2sComplexSingleLongInt = _smg2s.smg2sComplexSingleLongInt
# This file is compatible with both classic and new-style classes.
| 66,793 | 24,200 |
from django.test import TestCase
from django.contrib.admin.util import NestedObjects
from models import Count
class NestedObjectsTests(TestCase):
    """
    Tests for the ``NestedObjects`` utility collection.
    """

    def setUp(self):
        """Create a fresh collector and five ``Count`` rows (num 0..4)."""
        self.n = NestedObjects()
        self.objs = [Count.objects.create(num=i) for i in range(5)]

    def _check(self, target):
        """Assert the collector nests into ``target``, mapping objects to ``num``."""
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(self.n.nested(lambda obj: obj.num), target)

    def _add(self, obj, parent=None):
        # don't bother providing the extra args that NestedObjects ignores
        self.n.add(None, None, obj, None, parent)

    def test_unrelated_roots(self):
        """Two roots; a child appears nested under its own parent only."""
        self._add(self.objs[0])
        self._add(self.objs[1])
        self._add(self.objs[2], self.objs[1])
        self._check([0, 1, [2]])

    def test_siblings(self):
        """Two children of the same parent share one nested list."""
        self._add(self.objs[0])
        self._add(self.objs[1], self.objs[0])
        self._add(self.objs[2], self.objs[0])
        self._check([0, [1, 2]])

    def test_duplicate_instances(self):
        """A re-fetched instance of an already-added object is not duplicated."""
        self._add(self.objs[0])
        self._add(self.objs[1])
        dupe = Count.objects.get(num=1)
        self._add(dupe, self.objs[0])
        self._check([0, 1])

    def test_non_added_parent(self):
        """A parent never added in its own right does not appear in the output."""
        self._add(self.objs[0], self.objs[1])
        self._check([0])

    def test_cyclic(self):
        """A parent cycle terminates instead of recursing forever."""
        self._add(self.objs[0], self.objs[2])
        self._add(self.objs[1], self.objs[0])
        self._add(self.objs[2], self.objs[1])
        self._add(self.objs[0], self.objs[2])
        self._check([0, [1, [2]]])
| 1,545 | 576 |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
from sklearn.impute import SimpleImputer

# Auto-exported TPOT pipeline script: load data, impute, fit, predict.
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1)
# Default 75/25 train/test split with a fixed seed for reproducibility.
training_features, testing_features, training_target, testing_target = \
            train_test_split(features, tpot_data['target'], random_state=42)

# Median imputation fitted on the training split only, then applied to both.
imputer = SimpleImputer(strategy="median")
imputer.fit(training_features)
training_features = imputer.transform(training_features)
testing_features = imputer.transform(testing_features)

# Average CV score on the training set was: 0.889950753668092
exported_pipeline = XGBClassifier(learning_rate=1.0, max_depth=1, min_child_weight=10, n_estimators=100, n_jobs=1, subsample=0.6000000000000001, verbosity=0)
# Fix random state in exported estimator
if hasattr(exported_pipeline, 'random_state'):
    setattr(exported_pipeline, 'random_state', 42)

exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
| 1,188 | 414 |
"""Unittest for all present urls
"""
from django.test import TestCase
from django.contrib.auth.models import User
from espressodb.base.utilities.apps import get_apps_slug_map
import espressodb.base.utilities.blackmagicsorcery as re
URLS = ["/", "/populate/", "/populate-result/"]
LOGGED_IN_URLS = [
"/notifications/",
"/notifications/debug/",
"/notifications/info/",
"/notifications/warning/",
"/notifications/error/",
"/admin/",
"/admin/auth/group/",
"/admin/auth/user/",
"/admin/notifications/notification/",
]
class URLViewTest(TestCase):
    """Tests if all urls are present
    """

    # Regex patterns of URLs to skip in this test class (none by default).
    exclude_urls = []

    @classmethod
    def url_excluded(cls, url: str) -> bool:
        """Checks if the url is in the exclude_urls pattern list

        Arguments:
            url: Regex pattern to match.
        """
        # NOTE(review): `re` here is espressodb's blackmagicsorcery module
        # (imported as `re` at the top of this file), not the stdlib module.
        # Generator instead of a temporary list inside any().
        return any(re.match(pattern, url) is not None for pattern in cls.exclude_urls)

    def setUp(self):
        """Create a user for the test
        """
        self.username = "test user"
        self.password = "admin1234"
        user = User.objects.create(username=self.username)
        user.set_password(self.password)
        user.save()

    def test_open_urls(self):
        """Tests the HTTP status of the client.
        """
        for url in URLS:
            if self.url_excluded(url):
                continue
            with self.subTest(url=url):
                response = self.client.get(url)
                self.assertEqual(response.status_code, 200)

    def test_logged_in_urls_as_logged_out(self):
        """Tests whether login required URLS are present but require login.
        """
        for url in LOGGED_IN_URLS:
            if self.url_excluded(url):
                continue
            with self.subTest(url=url):
                with self.subTest(follow=False):
                    # Unauthenticated access must redirect (302) ...
                    response = self.client.get(url, follow=False)
                    self.assertEqual(response.status_code, 302)
                with self.subTest(follow=True):
                    # ... and following the redirect lands on a login page.
                    response = self.client.get(url, follow=True)
                    self.assertEqual(response.status_code, 200)
                    self.assertEqual(
                        response.redirect_chain[-1][0],
                        ("/admin" if "admin" in url else "") + f"/login/?next={url}",
                    )

    def test_logged_in_urls_as_logged_in(self):
        """Tests whether login required URLS are present and viewable by logged in user.
        """
        login = self.client.login(username=self.username, password=self.password)
        self.assertTrue(login)
        for url in LOGGED_IN_URLS:
            if self.url_excluded(url):
                continue
            with self.subTest(url=url):
                response = self.client.get(url)
                # Admin pages still redirect the non-staff user; others render.
                self.assertEqual(response.status_code, 302 if "admin" in url else 200)

    def test_documentation_pages(self):
        """Tests whether documentation pages are present for each project app with models.
        """
        for app_slug, app in get_apps_slug_map().items():
            if not app.get_models():
                continue
            url = f"/documentation/{app_slug}/"
            with self.subTest(app=app, url=url):
                response = self.client.get(url)
                self.assertEqual(response.status_code, 200)
| 3,379 | 985 |
###########################
#
# #616 Creative numbers - Project Euler
# https://projecteuler.net/problem=616
#
# Code by Kevin Marciniak
#
###########################
| 167 | 51 |
# !/usr/bin/env python
# -*- coding: UTF-8 -*-
from base import BaseObject
class DigraphTextCleanser(BaseObject):
    """Prepares label text for a graphviz.Digraph.

    Traceability:
        https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1426#issuecomment-16165027
    """

    def __init__(self,
                 graph_style: dict,
                 is_debug: bool = True):
        """Initialize the cleanser.

        Created:
            21-Nov-2019
            craig.trim@ibm.com
            *   https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1426#issuecomment-16165027
        :param graph_style:
            a graph style defined in a graph stylesheet
            e.g.:
            -   resources/config/graph/graphviz_nlp_graph.yml
            -   resources/config/graph/graphviz_big_graph.yml
        :param is_debug:
            True to increase log output at DEBUG level
        """
        BaseObject.__init__(self, __name__)
        self._graph_style = graph_style
        self._is_debug = is_debug

    def process(self,
                some_text: str) -> str:
        """Optionally split the text onto two lines for readability.

        The split only happens when the active stylesheet enables
        ``graph.split_text`` and the text contains a space; otherwise the
        input is returned unchanged.

        :param some_text:
            input text
        :return:
            (optionally) processed text
        """
        if "graph" not in self._graph_style:
            return some_text
        graph_cfg = self._graph_style["graph"]
        if "split_text" not in graph_cfg or not graph_cfg["split_text"]:
            return some_text
        if " " not in some_text:
            return some_text
        head, _, tail = some_text.partition(" ")
        return "{}\\n{}".format(head, tail)
| 1,723 | 526 |
import itertools
from collections import Counter

import sqlglot.expressions as exp
from sqlglot.errors import OptimizeError
from sqlglot.optimizer.schema import ensure_schema
from sqlglot.optimizer.scope import traverse_scope
def qualify_columns(expression, schema):
    """
    Rewrite sqlglot AST to have fully qualified columns.

    Example:
        >>> import sqlglot
        >>> schema = {"tbl": {"col": "INT"}}
        >>> expression = sqlglot.parse_one("SELECT col FROM tbl")
        >>> qualify_columns(expression, schema).sql()
        'SELECT tbl.col AS col FROM tbl'

    Args:
        expression (sqlglot.Expression): expression to qualify
        schema (dict|sqlglot.optimizer.Schema): Database schema
    Returns:
        sqlglot.Expression: qualified expression
    """
    schema = ensure_schema(schema)
    # Counter used to invent aliases for anonymous derived tables.
    alias_sequence = itertools.count()

    for scope in traverse_scope(expression):
        # The order below matters: derived tables must be aliased before
        # column references are resolved, and stars must be expanded before
        # outputs are aliased.
        _check_union_outputs(scope)
        _qualify_derived_tables(scope.ctes, scope, alias_sequence)
        _qualify_derived_tables(scope.derived_tables, scope, alias_sequence)
        _qualify_columns(scope, schema)
        _expand_stars(scope, schema)
        _qualify_outputs(scope)
        _check_unknown_tables(scope)

    return expression
def _check_union_outputs(scope):
    """Assert that the outputs of both sides of a UNION are the same."""
    if not isinstance(scope.expression, exp.Union):
        return
    left, right = scope.union
    if left.outputs != right.outputs:
        # BUGFIX: the message previously interpolated `left.outputs` twice,
        # so the right-hand side was never shown.
        raise OptimizeError(
            f"UNION outputs not equal: {left.outputs} vs. {right.outputs}"
        )
def _qualify_derived_tables(derived_tables, scope, sequence):
    """Ensure every derived table (CTE / subquery) carries a table alias."""
    for derived_table in derived_tables:
        alias_node = derived_table.args.get("alias")
        if not alias_node:
            alias_node = exp.TableAlias()
            derived_table.set("alias", alias_node)

        identifier = alias_node.args.get("this")
        if not identifier:
            # Invent a deterministic alias, e.g. _q_0, _q_1, ...
            identifier = exp.to_identifier(f"_q_{next(sequence)}")
            scope.rename_selectable(None, identifier.name)
            alias_node.set("this", identifier)

        # Remove any alias column list
        # (e.g. SELECT ... FROM (SELECT ...) AS foo(col1, col2)
        alias_node.args.pop("columns", None)
def _qualify_columns(scope, schema):
    """Disambiguate columns, ensuring each column reference specifies a selectable"""
    unambiguous_columns = None  # lazily loaded
    for column in scope.references:
        column_table = column.text("table")
        column_name = column.text("this")
        # An explicitly qualified column must actually exist in its table.
        if (
            column_table
            and column_table in scope.selectables
            and column_name
            not in _get_selectable_columns(column_table, scope.selectables, schema)
        ):
            raise OptimizeError(f"Unknown column: {column_name}")
        if not column_table:
            # Unqualified reference: resolve it via the set of columns that
            # appear in exactly one selectable (computed once, lazily).
            if unambiguous_columns is None:
                selectable_columns = {
                    k: _get_selectable_columns(k, scope.selectables, schema)
                    for k in scope.referenced_selectables
                }
                unambiguous_columns = _get_unambiguous_columns(selectable_columns)
            column_table = unambiguous_columns.get(column_name)
            # Subqueries may legitimately reference outer-scope columns, so
            # only non-subquery scopes treat an unresolved name as an error.
            if not column_table and not scope.is_subquery:
                raise OptimizeError(f"Ambiguous column: {column_name}")
            column.set("table", exp.to_identifier(column_table))
def _expand_stars(scope, schema):
    """Expand stars to lists of column selections"""
    all_new_columns = []
    for expression in scope.selects:
        if isinstance(expression, exp.Star):
            # Bare `SELECT *` expands over every referenced selectable.
            tables = list(scope.referenced_selectables)
        elif isinstance(expression, exp.Column) and isinstance(
            expression.this, exp.Star
        ):
            # Qualified `SELECT tbl.*` expands over that one table.
            tables = [expression.text("table")]
        else:
            continue
        new_columns = []
        for table in tables:
            if table not in scope.selectables:
                raise OptimizeError(f"Unknown table: {table}")
            columns = _get_selectable_columns(table, scope.selectables, schema)
            for column in columns:
                new_columns.append(
                    exp.Column(
                        this=exp.to_identifier(column), table=exp.to_identifier(table)
                    )
                )
        # Replace the star node in-place with the expanded column list.
        expression.replace(*new_columns)
        all_new_columns.extend(new_columns)
    # Keep the scope's column bookkeeping in sync with the expansion.
    scope.columns.extend(all_new_columns)
def _qualify_outputs(scope):
    """Ensure all output columns are aliased.

    Column references are aliased with their own name; other unaliased
    expressions get a generated positional name (_col_0, _col_1, ...).
    Outer column lists (e.g. CTE alias columns) override the alias.
    """
    for i, (selection, aliased_column) in enumerate(
        itertools.zip_longest(scope.selects, scope.outer_column_list)
    ):
        if isinstance(selection, exp.Column):
            # Alias column refs with their own name so output naming is explicit.
            alias_name = selection.text("this")
        elif not isinstance(selection, exp.Alias):
            # Non-column expressions without an alias get a positional one.
            alias_name = f"_col_{i}"
        else:
            alias_name = None

        # Deduplicated alias-wrapping (the two branches previously repeated it).
        if alias_name is not None:
            new_selection = exp.alias_(selection.copy(), alias_name)
            selection.replace(new_selection)
            selection = new_selection

        if aliased_column:
            selection.set("alias", exp.to_identifier(aliased_column))
def _check_unknown_tables(scope):
    """Raise if the scope references tables that are not defined anywhere."""
    if not scope.external_references or scope.is_correlated_subquery:
        return
    unknown = scope.external_references[0]
    raise OptimizeError(f"Unknown table: {unknown.text('table')}")
def _get_unambiguous_columns(selectable_columns):
    """
    Find all the unambiguous columns in selectables.

    Args:
        selectable_columns (dict): Mapping of names to selectable columns
    Returns:
        dict: Mapping of column name to selectable name
    """
    if not selectable_columns:
        return {}

    items = iter(selectable_columns.items())
    first_table, first_columns = next(items)

    # Seed with the first selectable's internally-unique columns ...
    unambiguous_columns = {
        column: first_table for column in _find_unique_columns(first_columns)
    }

    # ... then drop any column another selectable also provides.
    for table, columns in items:
        unique = _find_unique_columns(columns)
        ambiguous = set(unambiguous_columns).intersection(unique)
        for column in ambiguous:
            unambiguous_columns.pop(column)
        for column in unique.difference(ambiguous):
            unambiguous_columns[column] = table

    return unambiguous_columns
def _find_unique_columns(columns):
"""
Find the unique columns in a list of columns.
Example:
>>> sorted(_find_unique_columns(["a", "b", "b", "c"]))
['a', 'c']
This is necessary because duplicate column names are ambiguous.
"""
counts = {}
for column in columns:
counts[column] = counts.get(column, 0) + 1
return {column for column, count in counts.items() if count == 1}
def _get_selectable_columns(name, selectables, schema):
    """Resolve the columns produced by the selectable registered as `name`.

    Physical tables resolve through the schema; any other selectable
    (subquery, CTE) reports its own output columns.
    """
    if name not in selectables:
        raise OptimizeError(f"Unknown table: {name}")

    selectable = selectables[name]

    if isinstance(selectable, exp.Table):
        # Table: ask the schema for its column names.
        try:
            return schema.column_names(selectable)
        except Exception as e:
            raise OptimizeError(str(e)) from e

    # Otherwise, if referencing another scope, return that scope's outputs
    return selectable.outputs
| 7,550 | 2,114 |
import tensorflow as tf
from elasticdl.python.common.constants import Mode
class CustomModel(tf.keras.Model):
    """VGG-style CNN for CIFAR-10: three blocks of (Conv-BN-ReLU x2, max-pool,
    dropout) with 32/64/128 filters, followed by a 10-way dense head.

    `call` consumes a feature dict and reads its "image" entry.
    """

    def __init__(self, channel_last=True):
        # NOTE(review): `channel_last` is unused in this body -- the BatchNorm
        # axis is hard-coded to -1 (channels-last). Confirm before relying on it.
        super(CustomModel, self).__init__(name="cifar10_model")
        use_bias = True
        # --- Block 1: 2 x (Conv 32 -> BN -> ReLU), pool, dropout 0.2 ---
        self._conv_1 = tf.keras.layers.Conv2D(
            32,
            kernel_size=(3, 3),
            padding="same",
            use_bias=use_bias,
            activation=None,
        )
        self._bn_1 = tf.keras.layers.BatchNormalization(
            epsilon=1e-06, axis=-1, momentum=0.9
        )
        self._relu_1 = tf.keras.layers.Activation(tf.nn.relu)
        self._conv_2 = tf.keras.layers.Conv2D(
            32,
            kernel_size=(3, 3),
            padding="same",
            use_bias=use_bias,
            activation=None,
        )
        self._bn_2 = tf.keras.layers.BatchNormalization(
            epsilon=1e-06, axis=-1, momentum=0.9
        )
        self._relu_2 = tf.keras.layers.Activation(tf.nn.relu)
        self._max_pool_1 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self._dropout_1 = tf.keras.layers.Dropout(0.2)
        # --- Block 2: 2 x (Conv 64 -> BN -> ReLU), pool, dropout 0.3 ---
        self._conv_3 = tf.keras.layers.Conv2D(
            64,
            kernel_size=(3, 3),
            padding="same",
            use_bias=use_bias,
            activation=None,
        )
        self._bn_3 = tf.keras.layers.BatchNormalization(
            epsilon=1e-06, axis=-1, momentum=0.9
        )
        self._relu_3 = tf.keras.layers.Activation(tf.nn.relu)
        self._conv_4 = tf.keras.layers.Conv2D(
            64,
            kernel_size=(3, 3),
            padding="same",
            use_bias=use_bias,
            activation=None,
        )
        self._bn_4 = tf.keras.layers.BatchNormalization(
            epsilon=1e-06, axis=-1, momentum=0.9
        )
        self._relu_4 = tf.keras.layers.Activation(tf.nn.relu)
        self._max_pool_2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self._dropout_2 = tf.keras.layers.Dropout(0.3)
        # --- Block 3: 2 x (Conv 128 -> BN -> ReLU), pool, dropout 0.4 ---
        self._conv_5 = tf.keras.layers.Conv2D(
            128,
            kernel_size=(3, 3),
            padding="same",
            use_bias=use_bias,
            activation=None,
        )
        self._bn_5 = tf.keras.layers.BatchNormalization(
            epsilon=1e-06, axis=-1, momentum=0.9
        )
        self._relu_5 = tf.keras.layers.Activation(tf.nn.relu)
        self._conv_6 = tf.keras.layers.Conv2D(
            128,
            kernel_size=(3, 3),
            padding="same",
            use_bias=use_bias,
            activation=None,
        )
        self._bn_6 = tf.keras.layers.BatchNormalization(
            epsilon=1e-06, axis=-1, momentum=0.9
        )
        self._relu_6 = tf.keras.layers.Activation(tf.nn.relu)
        self._max_pool_3 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self._dropout_3 = tf.keras.layers.Dropout(0.4)
        # --- Classifier head: flatten -> 10 logits (no softmax here) ---
        self._flatten_1 = tf.keras.layers.Flatten()
        self._dense_1 = tf.keras.layers.Dense(10, name="output")

    def call(self, inputs, training=False):
        """Run the forward pass on `inputs["image"]`; returns raw logits."""
        # Block 1
        x = self._conv_1(inputs["image"])
        x = self._bn_1(x)
        x = self._relu_1(x)
        x = self._conv_2(x)
        x = self._bn_2(x)
        x = self._relu_2(x)
        x = self._max_pool_1(x)
        x = self._dropout_1(x)
        # Block 2
        x = self._conv_3(x)
        x = self._bn_3(x)
        x = self._relu_3(x)
        x = self._conv_4(x)
        x = self._bn_4(x)
        x = self._relu_4(x)
        x = self._max_pool_2(x)
        x = self._dropout_2(x)
        # Block 3
        x = self._conv_5(x)
        x = self._bn_5(x)
        x = self._relu_5(x)
        x = self._conv_6(x)
        x = self._bn_6(x)
        x = self._relu_6(x)
        x = self._max_pool_3(x)
        x = self._dropout_3(x)
        # Head
        x = self._flatten_1(x)
        return self._dense_1(x)
def loss(output, labels):
    """Sparse softmax cross-entropy between logits and labels, batch-averaged."""
    flat_labels = tf.reshape(labels, [-1])
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=output, labels=flat_labels
    )
    return tf.reduce_mean(input_tensor=per_example)
def optimizer(lr=0.1):
    """Build the SGD optimizer used for training (learning rate `lr`)."""
    return tf.optimizers.SGD(lr)
def dataset_fn(dataset, mode):
    """Decode TFRecord examples into model inputs.

    Pixel values are scaled from [0, 255] into [0, 1]. In prediction mode
    only the feature dict is produced; otherwise (features, int32 label)
    pairs are produced and the dataset is shuffled.
    """
    def _parse_data(record):
        # The "label" feature only exists outside of prediction mode.
        if mode == Mode.PREDICTION:
            feature_description = {
                "image": tf.io.FixedLenFeature([32, 32, 3], tf.float32)
            }
        else:
            feature_description = {
                "image": tf.io.FixedLenFeature([32, 32, 3], tf.float32),
                "label": tf.io.FixedLenFeature([1], tf.int64),
            }
        r = tf.io.parse_single_example(record, feature_description)
        # Normalize pixels to [0, 1].
        features = {
            "image": tf.math.divide(tf.cast(r["image"], tf.float32), 255.0)
        }
        if mode == Mode.PREDICTION:
            return features
        else:
            return features, tf.cast(r["label"], tf.int32)

    dataset = dataset.map(_parse_data)
    if mode != Mode.PREDICTION:
        # Shuffle only when labels are consumed (training/evaluation).
        dataset = dataset.shuffle(buffer_size=1024)
    return dataset
def eval_metrics_fn(predictions, labels):
    """Accuracy of argmax(predictions) against the flattened int labels."""
    flat_labels = tf.reshape(labels, [-1])
    predicted_classes = tf.argmax(predictions, 1, output_type=tf.dtypes.int32)
    matches = tf.cast(tf.equal(predicted_classes, flat_labels), tf.float32)
    return {"accuracy": tf.reduce_mean(input_tensor=matches)}
| 5,314 | 1,931 |
__author__ = "Sunil Kumar (kumar.sunil.p@gmail.com)"
__copyright__ = "Copyright 2014, Washington University in St. Louis"
__credits__ = ["Sunil Kumar", "Steve Pieper", "Dan Marcus"]
__license__ = "XNAT Software License Agreement " + \
"(see: http://xnat.org/about/license.php)"
__version__ = "2.1.1"
__maintainer__ = "Rick Herrick"
__email__ = "herrickr@mir.wustl.edu"
__status__ = "Production"
from __main__ import qt
comment = """
HoverButton is a customized QWidget where the
user can set the style of the button upon hovering.
TODO:
"""
class HoverButton (qt.QPushButton):
    """QPushButton that swaps its stylesheet while the mouse hovers over it.

    Configure the two visual states with setDefaultStyleSheet and
    setHoverStyleSheet; an installed event filter applies them on the
    Enter/Leave events.
    """

    def __init__(self, parent = None):
        """Initialize the button, install the hover event filter and clear
        the two tracked stylesheets.
        """
        #--------------------
        # Call parent init. Bug fix: the original passed `self` as its own
        # parent when no parent was given; a widget must not parent itself.
        #--------------------
        if parent:
            super(HoverButton, self).__init__(parent)
        else:
            super(HoverButton, self).__init__()

        #--------------------
        # Install the event filter to
        # interpret the hovers.
        #--------------------
        self.installEventFilter(self)

        #--------------------
        # Track the stylesheets for
        # the hover/not-hovered states.
        #--------------------
        self.defaultStyleSheet = None
        self.hoverStyleSheet = None

    def setDefaultStyleSheet(self, styleSheet):
        """ Set stylesheet for when the mouse is
            not hovering over the button.
        """
        self.defaultStyleSheet = styleSheet
        self.setStyleSheet(styleSheet)

    def setHoverStyleSheet(self, styleSheet):
        """ Set stylesheet for when the mouse is
            hovering over the button.
        """
        self.hoverStyleSheet = styleSheet

    def eventFilter(self, widget, event):
        """ Event filter function inherited from QObject. Specifically
            targets the 'Enter' and 'Leave' events for hovering purposes.
        """
        if event.type() == qt.QEvent.Enter:
            self.onHoverEnter()
        elif event.type() == qt.QEvent.Leave:
            self.onHoverLeave()
        # Return False so events keep propagating normally; the original
        # implicitly returned None, relying on it being treated as False.
        return False

    def onHoverEnter(self):
        """ Callback when the mouse begins hovering over the button:
            applies the 'hoverStyleSheet'.
        """
        # Guard against applying an unset (None) stylesheet.
        if self.hoverStyleSheet is not None:
            self.setStyleSheet(self.hoverStyleSheet)

    def onHoverLeave(self):
        """ Callback when the mouse leaves hovering over the button:
            applies the 'defaultStyleSheet'.
        """
        if self.defaultStyleSheet is not None:
            self.setStyleSheet(self.defaultStyleSheet)
| 2,686 | 761 |
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.utils.timezone import now
from .models import Task, TaskUpdate
@receiver(pre_delete, sender=Task)
def close_task_updates(sender, instance:Task, **kwargs):
    """Before a Task is deleted, close all of its still-open TaskUpdates.

    An update is "open" when its valid_until lies in the future; each one
    is clamped to the deletion moment and saved individually so per-row
    save() logic still runs.
    """
    cutoff = now()
    open_updates = TaskUpdate.objects.filter(task=instance, valid_until__gt=cutoff)
    for update in open_updates:
        update.valid_until = cutoff
        update.save()
# post_save.connect(close_task_updates, sender=Task)
| 481 | 153 |
from unittest import TestCase
import os
from musicscore.musicstream.streamvoice import SimpleFormat
from musicscore.musictree.treebeat import TreeBeat
from musicscore.musictree.treechordflags1 import PizzFlag1, PercussionFlag1
from musicscore.musictree.treemeasure import TreeMeasure
from musicscore.musictree.treepart import TreePart
from musicscore.musictree.treescoretimewise import TreeScoreTimewise
from musicscore.musicxml.elements.note import Type
from tests.score_templates.xml_test_score import TestScore
path = os.path.abspath(__file__).split('.')[0]
class Test(TestCase):
    """Renders two percussion-flagged whole-note chords to MusicXML."""

    def setUp(self) -> None:
        self.score = TreeScoreTimewise()

    def test_1(self):
        fmt = SimpleFormat(quarter_durations=[4, 4])
        # Flag every chord so it is rendered as a percussion event.
        for chord in fmt.chords:
            chord.add_flag(PercussionFlag1())
        voice = fmt.to_stream_voice()
        self.score.set_time_signatures([4, 3, 1])
        voice.add_to_score(self.score)
        result_path = path + '_test_1'
        self.score.write(path=result_path)
# TestScore().assert_template(result_path=result_path)
| 2,904 | 872 |
def abreMochila(mochila):
    """Show the backpack contents and let the user pick one item by number.

    Returns False when the backpack is empty; otherwise prints a numbered
    menu, reads the user's choice via escutar() (defined elsewhere in this
    file) and returns the chosen item.
    """
    if not mochila:
        print('Bolso Vazio!')
        return False
    print("Itens no bolso:\nFale o numero correspondente dele para escolher")
    # Bug fix: enumerate keeps numbering correct even with duplicate items;
    # list.index() always reported the first occurrence's position.
    for numero, item in enumerate(mochila, start=1):
        print(numero, "-", item)
    i = escutar()
    escolha = mochila[int(i) - 1]
    return escolha
import json
import uuid
import datetime
import os
import socket
class jsonprocesser:
    """Creates, updates and uploads a per-client JSON results document.

    The file lives under results/ and is named from the client's MAC
    address plus a timestamp. A value of -1 marks a measurement that has
    not been taken yet.
    """

    def __init__(self):
        self.client_mac = str(hex(uuid.getnode()))
        self.timestamp = datetime.datetime.now().strftime("%H-%M_%d-%m-%y")
        self.filename = os.path.abspath('results/' + self.client_mac + self.timestamp + '.json')
        # Skeleton document with placeholder (-1) measurements.
        skeleton = {
            "UserInfo": {"user id": self.client_mac, "timestamp": self.timestamp, "ip": "null"},
            "IPERF": {"TCP": {"upload": -1, "download": -1},
                      "UDP": {"upload": -1, "download": -1, "packetloss": -1, "jitter": -1}},
            "HTTP": {"GET": {"site1": -1, "site2": -1, "site3": -1, "avg": -1},
                     "POST": {"site1": -1, "site2": -1, "site3": -1, "avg": -1}},
            "TRACEROUTE": {},
        }
        self._save(skeleton)
        print(self.filename)

    def _load(self):
        """Read the current results document from disk."""
        # with-blocks fix the original's leaked file handles.
        with open(self.filename, "r") as json_file:
            return json.load(json_file)

    def _save(self, data):
        """Persist the results document back to disk."""
        with open(self.filename, "w+") as json_file:
            json_file.write(json.dumps(data))

    def json_update_iperf(self, iperf_results):
        """Record iperf TCP/UDP throughput, packet loss and jitter."""
        data = self._load()
        data["IPERF"]["TCP"]["upload"] = iperf_results["tcp_upload"]
        data["IPERF"]["TCP"]["download"] = iperf_results['tcp_download']
        data["IPERF"]["UDP"]["upload"] = iperf_results['udp_upload']
        data["IPERF"]["UDP"]["download"] = iperf_results['udp_download']
        data["IPERF"]["UDP"]["packetloss"] = iperf_results['udp_download_loss']
        data["IPERF"]["UDP"]["jitter"] = iperf_results['udp_download_jitter']
        self._save(data)
        return 0

    def json_update_http(self, s1get, s2get, s3get, gavg, s1post, s2post, s3post, pavg):
        """Record HTTP GET/POST timings for the three probe sites plus averages."""
        data = self._load()
        data["HTTP"]["GET"]["site1"] = s1get
        data["HTTP"]["GET"]["site2"] = s2get
        data["HTTP"]["GET"]["site3"] = s3get
        data["HTTP"]["GET"]["avg"] = gavg
        data["HTTP"]["POST"]["site1"] = s1post
        data["HTTP"]["POST"]["site2"] = s2post
        data["HTTP"]["POST"]["site3"] = s3post
        data["HTTP"]["POST"]["avg"] = pavg
        self._save(data)
        return 0

    def json_update_ftp(self, down, up):
        """Record FTP transfer results.

        NOTE(review): the skeleton written by __init__ has no "FTP" key, so
        this raises KeyError unless something else adds it first — confirm.
        """
        data = self._load()
        data["FTP"]["download"] = down
        data["FTP"]["upload"] = up
        self._save(data)
        return 0

    def json_upload(self, server_ip, port):
        """Stream the results file to the collection server over TCP."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((server_ip, port))
            with open(self.filename, 'rb') as payload:
                chunk = payload.read(1024)
                while chunk:
                    # sendall retries until the whole chunk is transmitted;
                    # the original's bare send() could short-write.
                    sock.sendall(chunk)
                    chunk = payload.read(1024)
        finally:
            # Bug fix: the original wrote "s.close" without calling it,
            # leaking the socket.
            sock.close()

    def print_json(self):
        """Dump the current results document to stdout."""
        print(self._load())
| 3,239 | 1,120 |
import copy
import os
import csv
import json
import torch
import logging
from transformers.file_utils import is_tf_available, is_torch_available
from functools import wraps
from .utils import CACHE_PARAMS
logger = logging.getLogger(__name__)
class InputExample(object):
    """A single training/test example for simple sequence classification.

    Args:
        guid: Unique id for the example.
        text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
        text_b: (Optional) string. The untokenized text of the second sequence.
            Only must be specified for sequence pair tasks.
        label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
    """

    def __init__(self, guid, text_a, text_b=None, label=None):
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label

    def __repr__(self):
        return self.to_json_string()

    def to_dict(self):
        """Return a deep-copied dict of this example's attributes."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Return this example as a pretty-printed, key-sorted JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class InputFeatures(object):
    """A single set of features of data.

    Args:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid performing attention on padding token
            indices. Values in ``[0, 1]``: usually ``1`` for real tokens and
            ``0`` for padding.
        token_type_ids: Segment indices marking first/second input portions.
        label: Label corresponding to the input.
    """

    def __init__(self, input_ids, attention_mask=None, token_type_ids=None, label=None):
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        self.label = label

    def __repr__(self):
        return self.to_json_string()

    def to_dict(self):
        """Return a deep-copied dict of this feature set's attributes."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Return this feature set as a pretty-printed, key-sorted JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """Build an example from a dict of tensorflow tensors.

        Args:
            tensor_dict: Keys and values should match the corresponding Glue
                tensorflow_dataset examples.
        """
        raise NotImplementedError()

    def get_train_examples(self, data_dir):
        """Return the `InputExample`s of the training split."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Return the `InputExample`s of the dev split."""
        raise NotImplementedError()

    def get_labels(self):
        """Return the list of labels for this data set."""
        raise NotImplementedError()

    def tfds_map(self, example):
        """Convert a tensorflow_datasets-style example to this format.

        TFDS stores labels as integer indices; map them back to label names
        whenever the task defines more than one label.
        """
        labels = self.get_labels()
        if len(labels) > 1:
            example.label = labels[int(example.label)]
        return example

    @classmethod
    def _read_csv(cls, input_file, delimiter="\t", quotechar=None):
        """Read a delimited (csv/tsv) file and return its rows as lists."""
        with open(input_file, "r", encoding="utf-8-sig") as handle:
            reader = csv.reader(handle, delimiter=delimiter, quotechar=quotechar)
            return list(reader)
class SingleSentenceClassificationProcessor(DataProcessor):
    """ Generic processor for a single sentence classification data set."""

    def __init__(self, labels=None, examples=None, mode="classification", verbose=False):
        # Fresh containers when not supplied (avoids mutable default arguments).
        self.labels = [] if labels is None else labels
        self.examples = [] if examples is None else examples
        # mode: "classification" maps labels to integer ids,
        # "regression" casts them to float.
        self.mode = mode
        self.verbose = verbose

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, idx):
        # A slice returns a new processor over the example subset (keeping the
        # full label list); a plain index returns the bare InputExample.
        if isinstance(idx, slice):
            return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx])
        return self.examples[idx]

    @classmethod
    def create_from_csv(
        cls, file_name, delimiter, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs
    ):
        """Build a processor and populate it from a delimited file.

        Args:
            file_name: path of the csv/tsv file to read.
            delimiter: field delimiter passed to the csv reader.
            split_name: optional split tag used when generating example guids.
            column_label / column_text / column_id: column indices of the
                label, the text, and (optionally) an explicit example id.
            skip_first_row: set True when the file starts with a header row.
            **kwargs: forwarded to the processor constructor.
        """
        processor = cls(**kwargs)
        processor.add_examples_from_csv(
            file_name,
            delimiter,
            split_name=split_name,
            column_label=column_label,
            column_text=column_text,
            column_id=column_id,
            skip_first_row=skip_first_row,
            overwrite_labels=True,
            overwrite_examples=True,
        )
        return processor

    @classmethod
    def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
        """Build a processor from in-memory texts or (text, label) pairs."""
        processor = cls(**kwargs)
        processor.add_examples(texts_or_text_and_labels, labels=labels)
        return processor

    def add_examples_from_csv(
        self,
        file_name,
        delimiter,
        split_name="",
        column_label=0,
        column_text=1,
        column_id=None,
        skip_first_row=False,
        overwrite_labels=False,
        overwrite_examples=False,
    ):
        """Read a delimited file and append (or replace) the stored examples."""
        lines = self._read_csv(file_name, delimiter=delimiter)
        if skip_first_row:
            lines = lines[1:]
        texts = []
        labels = []
        ids = []
        for (i, line) in enumerate(lines):
            texts.append(line[column_text])
            labels.append(line[column_label])
            if column_id is not None:
                ids.append(line[column_id])
            else:
                # No id column: synthesize a guid from the split name and row.
                guid = "%s-%s" % (split_name, i) if split_name else "%s" % i
                ids.append(guid)
        return self.add_examples(
            texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples
        )

    def add_examples(
        self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False
    ):
        """Append (or replace) examples and merge the observed label set.

        Each element of texts_or_text_and_labels is either a bare text or a
        (text, label) pair (used only when no separate labels are given).
        Returns the processor's full example list.
        """
        assert labels is None or len(texts_or_text_and_labels) == len(labels)
        assert ids is None or len(texts_or_text_and_labels) == len(ids)
        if ids is None:
            ids = [None] * len(texts_or_text_and_labels)
        if labels is None:
            labels = [None] * len(texts_or_text_and_labels)
        examples = []
        added_labels = set()
        for (text_or_text_and_label, label, guid) in zip(texts_or_text_and_labels, labels, ids):
            if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
                text, label = text_or_text_and_label
            else:
                text = text_or_text_and_label
            added_labels.add(label)
            examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))
        # Update examples
        if overwrite_examples:
            self.examples = examples
        else:
            self.examples.extend(examples)
        # Update labels
        if overwrite_labels:
            self.labels = list(added_labels)
        else:
            self.labels = list(set(self.labels).union(added_labels))
        return self.examples

    def get_features(
        self,
        tokenizer,
        max_length=None,
        pad_on_left=False,
        pad_token=0,
        mask_padding_with_zero=True,
        return_tensors="pt",
    ):
        """
        Convert examples in a list of ``InputFeatures``
        Args:
            tokenizer: Instance of a tokenizer that will tokenize the examples
            max_length: Maximum example length
            pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
            pad_token: Padding token
            mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
                and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
                actual values)
            return_tensors: ``None`` for a plain feature list, ``"tf"`` for a
                tf.data.Dataset, ``"pt"`` for a torch TensorDataset.
        Returns:
            A list of ``InputFeatures``, or the corresponding tf/pt dataset
            depending on ``return_tensors``.
        """
        if max_length is None:
            max_length = tokenizer.max_len

        label_map = {label: i for i, label in enumerate(self.labels)}

        # First pass: tokenize everything so padding can target the longest
        # example in this batch rather than max_length.
        all_input_ids = []
        for (ex_index, example) in enumerate(self.examples):
            if ex_index % 10000 == 0:
                logger.info("Tokenizing example %d", ex_index)
            input_ids = tokenizer.encode(
                example.text_a, add_special_tokens=True, max_length=min(max_length, tokenizer.max_len),
            )
            all_input_ids.append(input_ids)

        batch_length = max(len(input_ids) for input_ids in all_input_ids)

        # Second pass: build attention masks and pad to batch_length.
        features = []
        for (ex_index, (input_ids, example)) in enumerate(zip(all_input_ids, self.examples)):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d/%d" % (ex_index, len(self.examples)))
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = batch_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
            else:
                input_ids = input_ids + ([pad_token] * padding_length)
                attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)

            assert len(input_ids) == batch_length, "Error with input length {} vs {}".format(
                len(input_ids), batch_length
            )
            assert len(attention_mask) == batch_length, "Error with input length {} vs {}".format(
                len(attention_mask), batch_length
            )

            if self.mode == "classification":
                label = label_map[example.label]
            elif self.mode == "regression":
                label = float(example.label)
            else:
                raise ValueError(self.mode)

            if ex_index < 5 and self.verbose:
                logger.info("*** Example ***")
                logger.info("guid: %s" % (example.guid))
                logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
                logger.info("label: %s (id = %d)" % (example.label, label))

            features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))

        if return_tensors is None:
            return features
        elif return_tensors == "tf":
            if not is_tf_available():
                raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
            import tensorflow as tf

            def gen():
                for ex in features:
                    yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label)

            dataset = tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
            )
            return dataset
        elif return_tensors == "pt":
            if not is_torch_available():
                raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
            import torch
            from torch.utils.data import TensorDataset

            all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
            all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
            # NOTE(review): with no examples the loop above never validates
            # self.mode, so an unknown mode would hit an unbound all_labels
            # here — confirm callers always pass a non-empty example list.
            if self.mode == "classification":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
            elif self.mode == "regression":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
            dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
            return dataset
        else:
            raise ValueError("return_tensors should be one of 'tf' or 'pt'")
class SequnceTokenClassificationProcessor(DataProcessor):
    """Processor for token-level (sequence labeling) classification data.

    NOTE(review): the class name misspells "Sequence"; renaming would break
    existing importers, so it is kept as-is.
    """

    def __init__(self, labels=None, examples=None, mode="classification", verbose=False):
        # Fresh containers when not supplied (avoids mutable default arguments).
        self.labels = [] if labels is None else labels
        self.examples = [] if examples is None else examples
        self.mode = mode
        self.verbose = verbose

    @classmethod
    def create_from_txt(cls, file_name, delimiter, **kwargs):
        """Build a processor and populate it from a CoNLL-style text file."""
        processor = cls(**kwargs)
        processor.read_examples_from_txt(file_name, delimiter)
        return processor

    def read_examples_from_txt(self, file_name, delimiter):
        """Parse a CoNLL-style file into examples.

        One "word<delimiter>label" pair per line; blank lines or -DOCSTART-
        markers end the current sentence. text_a holds the word list and
        label the parallel label list.
        """
        examples = []
        guid_index = 1
        with open(file_name, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    # Sentence boundary: flush the words collected so far.
                    if words:
                        examples.append(InputExample(guid=guid_index,
                                                     text_a=words,
                                                     label=labels))
                        words = []
                        labels = []
                        guid_index += 1
                else:
                    splits = line.split(delimiter)
                    if len(splits) > 1:
                        words.append(splits[0])
                        labels.append(splits[-1].replace("\n", ""))
                    else:
                        # NOTE(review): a line without the delimiter appends a
                        # default "O" label but no word, so words/labels can
                        # drift out of sync — confirm this is intended.
                        labels.append("O")
            # Flush the final sentence when the file does not end blank.
            if words:
                examples.append(InputExample(guid=guid_index, text_a=words,
                                             label=labels))
        self.examples = examples

    def get_features(
        self,
        max_seq_length,
        tokenizer,
        return_tensors,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=0,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ):
        """ Loads a data file into a list of `InputBatch`s
        `cls_token_at_end` define the location of the CLS token:
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
        """
        label_map = {label: i for i, label in enumerate(self.labels)}
        features = []  # [[inout_ids, input_mask, segment_ids, label_id]]
        for (ex_index, example) in enumerate(self.examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(self.examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.text_a, example.label):
                word_tokens = tokenizer.tokenize(word)
                tokens.extend(word_tokens)
                # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))  # in some languages (e.g. German) one word is tokenized into several subwords

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = 3 if sep_token_extra else 2
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            # NOTE(review): segments are filled with pad_token_segment_id
            # rather than sequence_a_segment_id (which is otherwise unused)
            # — confirm this is intentional.
            segment_ids = [pad_token_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)  # takes a list of tokens, returns a list of ids

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            # Sanity check: all four sequences must be padded to the same length.
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            features.append(
                InputFeatures(input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label=label_ids)
            )  # note segement_ids has 1 in the front? but we don't use it right now

        # features to dataset
        if return_tensors is None:
            return features
        elif return_tensors == "tf":
            if not is_tf_available():
                raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
            import tensorflow as tf

            def gen():
                for ex in features:
                    yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label)

            dataset = tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
            )
            return dataset
        elif return_tensors == "pt":
            if not is_torch_available():
                raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
            import torch
            from torch.utils.data import TensorDataset

            all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
            all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
            if self.mode == "classification":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
            elif self.mode == "regression":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
            dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
            return dataset
        else:
            raise ValueError("return_tensors should be one of 'tf' or 'pt'")
############## help functions ######
def load_and_cache_dataset(func):
    """Decorator that caches the dataset built by *func* on disk.

    The cache path is derived from CACHE_PARAMS (mode, model name, max
    sequence length). When the cache file exists it is loaded with
    torch.load; otherwise *func* runs and its result is saved.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        cache_logger = logging.getLogger("Load-Cache_Dataset")
        model_tag = list(filter(None, CACHE_PARAMS["model_name_or_path"].split("/"))).pop()
        cached_features_file = os.path.join(
            CACHE_PARAMS["data_dir"],
            "cached_{}_{}_{}".format(
                CACHE_PARAMS["mode"], model_tag, str(CACHE_PARAMS["max_seq_length"])
            ),
        )
        if os.path.exists(cached_features_file):
            dataset = torch.load(cached_features_file)
            cache_logger.info("Load dataset from {}".format(cached_features_file))
            return dataset
        cache_logger.info("Read data and preparing dataset")
        dataset = func(*args, **kwargs)
        torch.save(dataset, cached_features_file)
        cache_logger.info("Cached dataset at {}".format(cached_features_file))
        return dataset
    return inner
# if __name__ == "__main__":
# from transformers import BertTokenizer
# processer = SingleSentenceClassificationProcessor.create_from_csv(file_name="test_data.tsv", delimiter=",")
# tokenizer = BertTokenizer.from_pretrained("/Users/jiangchaodi/Code/NLP/transformer_examples/models/chinese_L-12_H-768_A-12")
# dataset = processer.get_features(tokenizer=tokenizer, max_length=128)
# print(dataset)
# test torch.load(dataset)
# import torch
# torch.save(dataset, "test_cached")
# dataset2 = torch.load("test_cached")
# print(dataset2)
# if __name__ == "__main__":
# from transformers import BertTokenizer
# from pipelines import get_labels
# processer = SequnceTokenClassificationProcessor.create_from_txt(file_name="/Users/jiangchaodi/Code/NLP/fasttransformer/fst2/data/train.txt", delimiter="\t", labels=get_labels("/Users/jiangchaodi/Code/NLP/fasttransformer/fst2/data/labels.txt"))
# tokenizer = BertTokenizer.from_pretrained("/Users/jiangchaodi/Code/NLP/transformer_examples/models/chinese_L-12_H-768_A-12")
# dataset = processer.get_features(tokenizer=tokenizer, max_seq_length=128, return_tensors="pt")
# print(dataset)
| 24,101 | 7,001 |
from .dbg import run_in_debugger, start_debugger
from .utils import debug_sample
# Declare the re-exported debugger helpers explicitly; __all__ both documents
# the public API and silences "imported but unused" linter warnings (the
# original used bare no-op expressions for the same purpose).
__all__ = ["run_in_debugger", "start_debugger", "debug_sample"]
| 180 | 61 |
import logging
import click
class ClickLogHandler(logging.Handler):
    """Logging handler that routes formatted records through click.echo."""

    # Emit to stderr so log lines do not mix with command output.
    _use_stderr = True

    def emit(self, record):
        try:
            formatted = self.format(record)
            click.echo(formatted, err=self._use_stderr)
        except Exception:
            # Delegate to the standard error-reporting hook.
            self.handleError(record)
def setup_logger(logger: logging.Logger) -> None:
    """Add handler to log to click.

    Prefers click_log when installed; otherwise falls back to the local
    ClickLogHandler.
    """
    try:
        import click_log
    except ImportError:
        click_log = None
    if click_log is None:
        logger.addHandler(ClickLogHandler())
    else:
        click_log.basic_config(logger)
| 530 | 155 |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2021 Showa Denko Materials co., Ltd. All rights reserved.
This software is for non-profit use only.
THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THIS SOFTWARE OR THE USE OR OTHER DEALINGS IN THIS SOFTWARE.
"""
import GPyOpt
from GPyOpt.optimization.optimizer import OptLbfgs, OptDirect, OptCma, apply_optimizer
from GPyOpt.optimization.anchor_points_generator import ObjectiveAnchorPointsGenerator, ThompsonSamplingAnchorPointsGenerator
# Anchor-point selection strategies and design types. These strings mirror
# the constants used inside GPyOpt; in particular "thompsom_sampling"
# reproduces GPyOpt's own (misspelled) value and must not be "corrected",
# otherwise the comparison against type_anchor_points_logic would fail.
max_objective_anchor_points_logic = "max_objective"
thompson_sampling_anchor_points_logic = "thompsom_sampling"
sobol_design_type = "sobol"
random_design_type = "random"
class InvalidArgumentError(Exception):
    """Raised when an unsupported optimizer name is requested."""
class AcquisitionOptimizer(GPyOpt.optimization.AcquisitionOptimizer):
    """
    AcquisitionOptimizer of GPyOpt was modified to control some parameters including, max_AcOpt_iter and num_anchor_points.
    Note that the default parameters of GPyOpt were used in the study of the goal-oriented Bayesian optimization.
    :param space: design space class from GPyOpt.
    :param optimizer: optimizer to use. Can be selected among:
        - 'lbfgs': L-BFGS.
        - 'DIRECT': Dividing Rectangles.
        - 'CMA': covariance matrix adaptation.
    :param max_AcOpt_iter: maximum number of optimization steps.
    :param num_anchor_points: number of initial search points.
    """

    def __init__(self, space, optimizer='lbfgs', max_AcOpt_iter=1000, num_anchor_points=1000, **kwargs):
        super(AcquisitionOptimizer, self).__init__(space, optimizer, **kwargs)
        # Extra knobs not exposed by the GPyOpt base class.
        self.max_AcOpt_iter = max_AcOpt_iter
        self.num_anchor_points = num_anchor_points

    def optimize(self, f=None, df=None, f_df=None, duplicate_manager=None):
        """
        Optimizes the input function.
        :param f: function to optimize.
        :param df: gradient of the function to optimize.
        :param f_df: returns both the function to optimize and its gradient.
        """
        self.f = f
        self.df = df
        self.f_df = f_df

        ## --- Update the optimizer, in case context has been passed.
        self.optimizer = self.choose_optimizermod(self.optimizer_name, self.context_manager.noncontext_bounds)

        ## --- Selecting the anchor points and removing duplicates
        # NOTE(review): if type_anchor_points_logic matches neither constant,
        # anchor_points_generator is left unbound and the call below raises
        # NameError — confirm the parent class restricts the value.
        if self.type_anchor_points_logic == max_objective_anchor_points_logic:
            anchor_points_generator = ObjectiveAnchorPointsGenerator(self.space, random_design_type, f, num_samples=self.num_anchor_points)
        elif self.type_anchor_points_logic == thompson_sampling_anchor_points_logic:
            anchor_points_generator = ThompsonSamplingAnchorPointsGenerator(self.space, sobol_design_type, self.model)

        ## -- Select the anchor points (with context)
        # num_anchor=5 matches GPyOpt's default; num_samples above controls
        # how many candidates the best 5 anchors are drawn from.
        anchor_points = anchor_points_generator.get(num_anchor=5, duplicate_manager=duplicate_manager, context_manager=self.context_manager)

        ## --- Applying local optimizers at the anchor points and update bounds of the optimizer (according to the context)
        optimized_points = [apply_optimizer(self.optimizer, a, f=f, df=df, f_df=f_df, duplicate_manager=duplicate_manager, context_manager=self.context_manager, space = self.space) for a in anchor_points]
        # Keep the (x, f(x)) pair with the smallest objective value.
        x_min, fx_min = min(optimized_points, key=lambda t:t[1])
        return x_min, fx_min

    def choose_optimizermod(self, optimizer_name, bounds):
        """
        Selects the type of local optimizer, honoring max_AcOpt_iter.
        """
        if optimizer_name == 'lbfgs':
            optimizer = OptLbfgs(bounds, self.max_AcOpt_iter)
        elif optimizer_name == 'DIRECT':
            optimizer = OptDirect(bounds, self.max_AcOpt_iter)
        elif optimizer_name == 'CMA':
            optimizer = OptCma(bounds, self.max_AcOpt_iter)
        else:
            print(optimizer_name)
            raise InvalidArgumentError('Invalid optimizer selected.')
        return optimizer
| 4,401 | 1,397 |
"""Tests for credit addition, subtraction and querying.
See also tests in test_user_api
"""
import unittest
import requests
from eos_db.server import choose_engine, create_user, touch_to_add_credit
from eos_db.server import check_credit, check_actor_id
class TestCreditFunctions(unittest.TestCase):
    """Exercises the credit helpers exposed by eos_db.server."""

    def setUp(self):
        choose_engine('SQLite')

    def test_create_user(self):
        """
        A freshly created user can be looked up by its own actor id.
        """
        user = create_user('user','testuser','testuser','testuser')
        self.assertEqual(check_actor_id(user), user)

    def test_add(self):
        """
        Behaviour: adding credit through the API is reflected by a
        subsequent balance query.
        """
        user = create_user('user','testuser2','testuser2','testuser2')
        touch_to_add_credit(user, 1000)
        self.assertEqual(check_credit(user), 1000)

    def test_subtract(self):
        """
        Behaviour: a negative amount reduces the balance below zero.
        """
        user = create_user('user', 'testuser3', 'testuser3', 'testuser3')
        touch_to_add_credit(user, -500)
        self.assertEqual(check_credit(user), -500)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 1,354 | 424 |
# If we store the number as a single string, the problem becomes very easy.
stevilka = "37107287533902102798797998220837590246510135740250 \n\
46376937677490009712648124896970078050417018260538 \n\
74324986199524741059474233309513058123726617309629 \n\
91942213363574161572522430563301811072406154908250 \n\
23067588207539346171171980310421047513778063246676 \n\
89261670696623633820136378418383684178734361726757 \n\
28112879812849979408065481931592621691275889832738 \n\
44274228917432520321923589422876796487670272189318 \n\
47451445736001306439091167216856844588711603153276 \n\
70386486105843025439939619828917593665686757934951 \n\
62176457141856560629502157223196586755079324193331 \n\
64906352462741904929101432445813822663347944758178 \n\
92575867718337217661963751590579239728245598838407 \n\
58203565325359399008402633568948830189458628227828 \n\
80181199384826282014278194139940567587151170094390 \n\
35398664372827112653829987240784473053190104293586 \n\
86515506006295864861532075273371959191420517255829 \n\
71693888707715466499115593487603532921714970056938 \n\
54370070576826684624621495650076471787294438377604 \n\
53282654108756828443191190634694037855217779295145 \n\
36123272525000296071075082563815656710885258350721 \n\
45876576172410976447339110607218265236877223636045 \n\
17423706905851860660448207621209813287860733969412 \n\
81142660418086830619328460811191061556940512689692 \n\
51934325451728388641918047049293215058642563049483 \n\
62467221648435076201727918039944693004732956340691 \n\
15732444386908125794514089057706229429197107928209 \n\
55037687525678773091862540744969844508330393682126 \n\
18336384825330154686196124348767681297534375946515 \n\
80386287592878490201521685554828717201219257766954 \n\
78182833757993103614740356856449095527097864797581 \n\
16726320100436897842553539920931837441497806860984 \n\
48403098129077791799088218795327364475675590848030 \n\
87086987551392711854517078544161852424320693150332 \n\
59959406895756536782107074926966537676326235447210 \n\
69793950679652694742597709739166693763042633987085 \n\
41052684708299085211399427365734116182760315001271 \n\
65378607361501080857009149939512557028198746004375 \n\
35829035317434717326932123578154982629742552737307 \n\
94953759765105305946966067683156574377167401875275 \n\
88902802571733229619176668713819931811048770190271 \n\
25267680276078003013678680992525463401061632866526 \n\
36270218540497705585629946580636237993140746255962 \n\
24074486908231174977792365466257246923322810917141 \n\
91430288197103288597806669760892938638285025333403 \n\
34413065578016127815921815005561868836468420090470 \n\
23053081172816430487623791969842487255036638784583 \n\
11487696932154902810424020138335124462181441773470 \n\
63783299490636259666498587618221225225512486764533 \n\
67720186971698544312419572409913959008952310058822 \n\
95548255300263520781532296796249481641953868218774 \n\
76085327132285723110424803456124867697064507995236 \n\
37774242535411291684276865538926205024910326572967 \n\
23701913275725675285653248258265463092207058596522 \n\
29798860272258331913126375147341994889534765745501 \n\
18495701454879288984856827726077713721403798879715 \n\
38298203783031473527721580348144513491373226651381 \n\
34829543829199918180278916522431027392251122869539 \n\
40957953066405232632538044100059654939159879593635 \n\
29746152185502371307642255121183693803580388584903 \n\
41698116222072977186158236678424689157993532961922 \n\
62467957194401269043877107275048102390895523597457 \n\
23189706772547915061505504953922979530901129967519 \n\
86188088225875314529584099251203829009407770775672 \n\
11306739708304724483816533873502340845647058077308 \n\
82959174767140363198008187129011875491310547126581 \n\
97623331044818386269515456334926366572897563400500 \n\
42846280183517070527831839425882145521227251250327 \n\
55121603546981200581762165212827652751691296897789 \n\
32238195734329339946437501907836945765883352399886 \n\
75506164965184775180738168837861091527357929701337 \n\
62177842752192623401942399639168044983993173312731 \n\
32924185707147349566916674687634660915035914677504 \n\
99518671430235219628894890102423325116913619626622 \n\
73267460800591547471830798392868535206946944540724 \n\
76841822524674417161514036427982273348055556214818 \n\
97142617910342598647204516893989422179826088076852 \n\
87783646182799346313767754307809363333018982642090 \n\
10848802521674670883215120185883543223812876952786 \n\
71329612474782464538636993009049310363619763878039 \n\
62184073572399794223406235393808339651327408011116 \n\
66627891981488087797941876876144230030984490851411 \n\
60661826293682836764744779239180335110989069790714 \n\
85786944089552990653640447425576083659976645795096 \n\
66024396409905389607120198219976047599490197230297 \n\
64913982680032973156037120041377903785566085089252 \n\
16730939319872750275468906903707539413042652315011 \n\
94809377245048795150954100921645863754710598436791 \n\
78639167021187492431995700641917969777599028300699 \n\
15368713711936614952811305876380278410754449733078 \n\
40789923115535562561142322423255033685442488917353 \n\
44889911501440648020369068063960672322193204149535 \n\
41503128880339536053299340368006977710650566631954 \n\
81234880673210146739058568557934581403627822703280 \n\
82616570773948327592232845941706525094512325230608 \n\
22918802058777319719839450180888072429661980811197 \n\
77158542502016545090413245809786882778948721859617 \n\
72107838435069186155435662884062257473692284509516 \n\
20849603980134001723930671666823555245252804609722 \n\
53503534226472524250874054075591789781264330331690"
# Parse the 50-digit numbers, add them up, then keep only the first
# ten digits of the total (Project Euler problem 13).
stevilka = [int(chunk) for chunk in stevilka.split(" \n")]
vsota = sum(stevilka)
# Repeatedly drop the last digit until ten digits remain.
while len(str(vsota)) > 10:
    vsota //= 10
print(vsota)
| 5,727 | 5,500 |
import gym # type: ignore
import matplotlib # type: ignore
import matplotlib.pyplot as plt # type: ignore
import numpy as np
from pathlib import Path # type: ignore
from typing import List, Union, Literal, Dict, Any
from visualization.data_parser import Records, D3rlpyCSVDataParser
def plot_records_list(
    axes: matplotlib.axes.Axes,
    records_list: List[Records],
    env_name: str,
    value_description: str = 'loss',
    horizon_name: Literal['epochs', 'steps'] = 'epochs',
    **kwargs: Any  # arguments to the plot function
) -> None:
    """
    Plot the mean +/- one std of a metric for several algorithms on one Axes.

    Each algorithm may contribute multiple experiment runs; all runs are
    assumed to come from the same environment and every algorithm must have
    the same number of runs.

    Args:
        axes: target matplotlib Axes.
        records_list: one Records object per experiment run.
        env_name: environment name, used in the plot title.
        value_description: metric name, used as the y-axis label.
        horizon_name: x-axis key, either 'epochs' or 'steps'.
        **kwargs: forwarded to ``axes.plot``.
    """
    assert len(records_list) > 0, "Can not pass in empty records."
    # group runs by algorithm name
    algo_to_records: Dict[str, List[Records]] = {}
    for records in records_list:
        algo_to_records.setdefault(records.algo_name, []).append(records)
    # make sure all algorithms have the same number of experiments
    experiment_counts = {len(data) for data in algo_to_records.values()}
    assert len(experiment_counts) == 1, \
        "All algorithms should have the same number of experiments"
    # truncate every run to the shortest horizon (assuming monotonic increasing)
    min_horizon = min(len(records.get_data()[horizon_name]) for records in records_list)
    for algo_name in sorted(algo_to_records):
        print(algo_name)
        algo_records_list = algo_to_records[algo_name]
        horizon = algo_records_list[0].get_data(min_horizon)[horizon_name]
        values = np.array([records.get_data(min_horizon)['values'] for records in algo_records_list])
        value_mean = np.mean(values, axis=0)
        value_std = np.std(values, axis=0)
        axes.plot(horizon, value_mean, **kwargs)
        # shaded band of +/- one standard deviation across runs
        axes.fill_between(horizon, value_mean - value_std, value_mean + value_std, alpha=0.2, interpolate=True)
    axes.set_title('{}: {} plots of {} over {} trials'.format(
        env_name, value_description, horizon_name, next(iter(experiment_counts))))
    axes.set_ylabel(value_description)
    axes.set_xlabel(horizon_name)
    axes.legend(sorted(algo_to_records))
def plot_records_in_dir(
    log_dir: str,
    env_name: str,
    value_description: str = 'loss',
    horizon_name: Union[Literal['epochs', 'steps']] = 'epochs',
    **kwargs: Any
) -> None:
    """Parse every experiment sub-directory under *log_dir* and plot them
    on the current matplotlib Axes, then show the figure."""
    root = Path(log_dir)
    assert root.is_dir(), "Invalid log dir."
    parser = D3rlpyCSVDataParser()
    records_list: List[Records] = [
        parser.parse(str(sub_dir), value_description=value_description)
        for sub_dir in root.iterdir()
    ]
    plot_records_list(plt.gca(), records_list, env_name, value_description, horizon_name, **kwargs)
    plt.show()
| 2,733 | 1,010 |
import numpy
import random
from enum import Enum
class Direction(Enum):
    """The four slide directions of a 2048 move."""
    kUp = 0
    kRight = 1
    kDown = 2
    kLeft = 3


class GameMatrix(object):
    """A 2048 board of size dim x dim stored in a numpy array.

    Empty cells hold 0; occupied cells hold powers of two.
    """

    def __init__(self, dim):
        self.dim = dim
        self.matrix = numpy.zeros((dim, dim))
        self.init_matrix()
        # scratch list used while compressing one row/column during a move
        self.tube = []
        # True while tube[-1] was produced by a merge this line; prevents a
        # tile from merging twice in a single move
        self.tube_merged = False

    def init_matrix(self):
        """Place two starting tiles (2 or 4) on distinct random cells."""
        first, second = random.sample(range(self.dim * self.dim), 2)
        self.matrix[first // self.dim][first % self.dim] = random.choice([2, 4])
        self.matrix[second // self.dim][second % self.dim] = random.choice([2, 4])

    def random_pick_empty(self):
        """Return the [row, col] of a uniformly random empty cell."""
        empty_spots = [[i, j]
                       for i in range(self.dim)
                       for j in range(self.dim)
                       if self.matrix[i][j] == 0]
        return random.choice(empty_spots)

    def random_add_one(self):
        """Spawn a new 2 or 4 (50/50) on a random empty cell."""
        row, col = self.random_pick_empty()
        self.matrix[row][col] = random.choice([2, 4])

    def tube_append(self, elem):
        """Push a non-zero tile onto the tube, merging with the previous tile
        when both are equal and the previous tile was not itself a merge.

        BUGFIX: the original merged unconditionally, so a line like
        [2, 2, 4] collapsed to [8]; 2048 merges each tile at most once per
        move, giving [4, 4].
        """
        if elem == 0:
            return
        if self.tube and self.tube[-1] == elem and not self.tube_merged:
            self.tube[-1] *= 2
            self.tube_merged = True
        else:
            self.tube.append(elem)
            self.tube_merged = False

    def _line_cells(self, direction, line):
        """Return the (row, col) coordinates of one line, ordered from the
        edge the tiles slide towards to the opposite edge."""
        if direction == Direction.kDown:
            return [(self.dim - 1 - k, line) for k in range(self.dim)]
        if direction == Direction.kUp:
            return [(k, line) for k in range(self.dim)]
        if direction == Direction.kLeft:
            return [(line, k) for k in range(self.dim)]
        if direction == Direction.kRight:
            return [(line, self.dim - 1 - k) for k in range(self.dim)]
        return []

    def move(self, direction):
        """Slide and merge every row/column in *direction*."""
        for line in range(self.dim):
            cells = self._line_cells(direction, line)
            # pull every tile of this line into the tube, merging as we go
            for r, c in cells:
                self.tube_append(self.matrix[r][c])
                self.matrix[r][c] = 0
            # write the compressed line back, packed towards the slide edge
            for k in range(len(self.tube)):
                r, c = cells[k]
                self.matrix[r][c] = self.tube[k]
            self.tube.clear()
            self.tube_merged = False
| 2,947 | 964 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
def parse_json(value):
    """Strip everything up to and including a '#STARTJSON' marker line, then
    pretty-print the remaining JSON payload with sorted keys."""
    payload = re.sub('^.*\n#STARTJSON\n', '', value, flags=re.DOTALL)
    return json.dumps(json.loads(payload), indent=4, sort_keys=True)
class FilterModule(object):
    """Ansible filter plugin exposing the parse_json filter."""

    def filters(self):
        """Return the mapping of filter names to their implementations."""
        return {'parse_json': parse_json}
| 372 | 126 |
"""
pytracemalloc.py
Copyright 2015 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import cPickle
import functools
import gc
import os
import sys
def user_wants_pytracemalloc():
    """Return True when the user opted in to tracemalloc profiling by
    setting the W3AF_PYTRACEMALLOC environment variable to 1."""
    flag = os.environ.get('W3AF_PYTRACEMALLOC', '0')
    return flag.isdigit() and int(flag) == 1
# Only attempt the import when profiling was requested: the module is an
# optional dependency and the process exits hard if it is missing.
if user_wants_pytracemalloc():
    try:
        # Users don't need this module, and installation is complex
        # http://pytracemalloc.readthedocs.org/install.html
        import tracemalloc
    except ImportError, ie:
        print('Failed to import tracemalloc: %s' % ie)
        sys.exit(-1)
from .utils import get_filename_fmt, dump_data_every_thread, cancel_thread
# Template for snapshot dump file names; the two %s are filled in by
# get_filename_fmt().
PROFILING_OUTPUT_FMT = '/tmp/w3af-%s-%s.tracemalloc'
# Minutes between two consecutive snapshot dumps.
DELAY_MINUTES = 2
# Holds the dump-thread reference so stop_tracemalloc_dump() can cancel it.
SAVE_TRACEMALLOC_PTR = []
def should_dump_tracemalloc(wrapped):
    """Decorator that runs *wrapped* only when tracemalloc profiling was
    enabled via the W3AF_PYTRACEMALLOC environment variable.

    When profiling is disabled the call is a no-op returning None.
    """
    # functools.wraps preserves the wrapped function's name/docstring, and
    # *args/**kwargs forwarding keeps the decorator generic.
    @functools.wraps(wrapped)
    def inner(*args, **kwargs):
        if user_wants_pytracemalloc():
            return wrapped(*args, **kwargs)
    return inner
@should_dump_tracemalloc
def start_tracemalloc_dump():
    """
    If the environment variable W3AF_PYTRACEMALLOC is set to 1, then we start
    the thread that will dump the memory usage data which can be retrieved
    using tracemalloc module.

    :return: None
    """
    # save 25 frames per allocation traceback in each snapshot
    tracemalloc.start(25)
    # periodically write snapshots; the thread handle lands in SAVE_TRACEMALLOC_PTR
    dump_data_every_thread(dump_tracemalloc, DELAY_MINUTES, SAVE_TRACEMALLOC_PTR)
def dump_tracemalloc():
    """
    Write the current tracemalloc snapshot to a pickle file in /tmp.
    """
    # Collect garbage first so the snapshot only contains live objects.
    gc.collect()
    snapshot = tracemalloc.take_snapshot()
    dump_path = PROFILING_OUTPUT_FMT % get_filename_fmt()
    with open(dump_path, 'wb') as fp:
        cPickle.dump(snapshot, fp, 2)
    # Drop the reference so the (potentially large) snapshot can be freed.
    snapshot = None
@should_dump_tracemalloc
def stop_tracemalloc_dump():
    """
    Save profiling information (if available)
    """
    # Cancel the periodic dump thread, then write one final snapshot.
    cancel_thread(SAVE_TRACEMALLOC_PTR)
    dump_tracemalloc()
| 2,509 | 872 |
import json
from collections import defaultdict
from typing import Dict, List, Union
import numpy as np
import pandas as pd
from marshmallow import EXCLUDE, Schema, ValidationError, fields, validate
from cognite.model_hosting._cognite_model_hosting_common.utils import timestamp_to_ms
from cognite.model_hosting.schedules.exceptions import DuplicateAliasInScheduledOutput, InvalidScheduleOutputFormat
def to_output(dataframe: Union[pd.DataFrame, List[pd.DataFrame]]) -> Dict:
    """Converts your data to a json serializable output format complying with the schedules feature.

    Args:
        dataframe (Union[pd.DataFrame, List[pd.DataFrame]]): A dataframe or list of dataframes.

    Returns:
        Dict: The data on a json serializable and schedules compliant output format.

    Raises:
        DuplicateAliasInScheduledOutput: If the same column alias appears in
            more than one of the provided dataframes.
        TypeError: If *dataframe* is neither a DataFrame nor a list of them.

    Examples:
        The correct output format looks like this::

            {
                "timeSeries":
                    {
                        "my-alias-1": [(t0, p0), (t1, p1), ...],
                        "my-alias-2": [(t0, p0), (t1, p1), ...],
                    }
            }
    """
    output = defaultdict(dict)
    if isinstance(dataframe, pd.DataFrame):
        output["timeSeries"] = _convert_df_to_output_format(dataframe)
    elif isinstance(dataframe, List):
        for df in dataframe:
            # An alias may only be provided by one dataframe in the list.
            duplicates = set(df.columns) & set(output["timeSeries"])
            if duplicates:
                raise DuplicateAliasInScheduledOutput("An alias has been provided multiple times")
            output["timeSeries"].update(_convert_df_to_output_format(df))
    else:
        raise TypeError("dataframe should be a pandas DataFrame or list of pandas DataFrames")
    return output
def _convert_df_to_output_format(df: pd.DataFrame):
    """Convert a dataframe to {alias: [(timestamp_ms, value), ...]}."""
    converted = {}
    for name in df.columns:
        ms_index = [timestamp_to_ms(ts) for ts in df.index]
        converted[name] = list(zip(ms_index, df[name]))
    return converted
class _ScheduleOutputSchema(Schema):
    """Marshmallow schema validating the scheduled-prediction output format."""
    class Meta:
        # Ignore unknown top-level keys instead of failing validation.
        unknown = EXCLUDE
    # "timeSeries" maps alias -> list of [timestamp, value] two-element pairs.
    timeSeries = fields.Dict(
        keys=fields.Str(), values=fields.List(fields.List(fields.Float(), validate=validate.Length(equal=2)))
    )
# Shared schema instance used by ScheduleOutput._load().
_schedule_output_schema = _ScheduleOutputSchema(unknown=EXCLUDE)
class ScheduleOutput:
    """Helper class for parsing and converting output from scheduled predictions.

    Args:
        output (Dict): The output returned from the scheduled prediction.
    """

    def __init__(self, output: Dict):
        self._output = self._load(output)

    def __str__(self):
        return json.dumps(self._output, indent=4, sort_keys=True)

    @staticmethod
    def _load(output):
        """Validate raw output against the schedule schema.

        Raises InvalidScheduleOutputFormat when the payload does not match.
        """
        try:
            return _schedule_output_schema.load(output)
        except ValidationError as e:
            raise InvalidScheduleOutputFormat(e.messages) from e

    def _validate_alias(self, output_type: str, alias: str):
        # Parameter renamed from `type` to avoid shadowing the builtin.
        # NOTE: assert-based validation is stripped under `python -O`.
        assert self._output.get(output_type, {}).get(alias) is not None, "{} is not a valid alias".format(alias)

    def _validate_aligned(self, aliases: List[str]):
        """Assert every alias exists and all share identical timestamps."""
        timestamps = set()
        for alias in aliases:
            self._validate_alias("timeSeries", alias)
            timestamps.add(tuple(point[0] for point in self._output["timeSeries"][alias]))
        assert 1 == len(timestamps), "Timestamps for aliases {} are not aligned".format(aliases)

    def _get_dataframe_single_alias(self, alias) -> pd.DataFrame:
        """Build a one-column dataframe (indexed by ms timestamps) for *alias*."""
        self._validate_alias("timeSeries", alias)
        data = self._output["timeSeries"][alias]
        timestamps = [int(point[0]) for point in data]
        datapoints = [point[1] for point in data]
        return pd.DataFrame({alias: datapoints}, index=np.array(timestamps, dtype="datetime64[ms]"))

    def _get_dataframe_multiple_aliases(self, aliases: List[str]) -> pd.DataFrame:
        """Build one time-aligned dataframe with a column per alias."""
        self._validate_aligned(aliases)
        # All aliases are aligned, so the first alias supplies the index.
        timestamps = [int(p[0]) for p in self._output["timeSeries"][aliases[0]]]
        data = {a: [p[1] for p in self._output["timeSeries"][a]] for a in aliases}
        return pd.DataFrame(data, index=np.array(timestamps, dtype="datetime64[ms]"))

    def get_dataframe(self, alias: Union[str, List[str]]) -> pd.DataFrame:
        """Returns a time-aligned dataframe of the specified alias(es).

        Assumes that all aliases specify output time series with matching timestamps.

        Args:
            alias (Union[str, List[str]]): alias or list of aliases.

        Returns:
            pd.DataFrame: The dataframe containing the time series for the specified alias(es).
        """
        if isinstance(alias, str):
            return self._get_dataframe_single_alias(alias)
        elif isinstance(alias, List):
            return self._get_dataframe_multiple_aliases(alias)
        raise TypeError("alias must be a string or list of strings")

    def get_datapoints(self, alias: Union[str, List[str]]) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
        """Returns the dataframes for the specified alias(es).

        Args:
            alias (Union[str, List[str]]): alias or list of aliases.

        Returns:
            Union[pd.DataFrame, Dict[str, pd.DataFrame]]: A single dataframe if a single alias has been
                specified, or a dictionary mapping alias to dataframe if a list of aliases has been provided.
        """
        if isinstance(alias, str):
            return self._get_dataframe_single_alias(alias)
        elif isinstance(alias, List):
            return {a: self._get_dataframe_single_alias(a) for a in alias}
        raise TypeError("alias must be a string or list of strings")
| 5,581 | 1,600 |
import os
import psutil
import multiprocessing as mp
from mtcnn import MTCNN
class Watcher():
    """Supervises the face-detection worker process.

    Acts as the observer for MtcnnDetectFaces: when the worker reports it
    must be recycled (memory limit reached), update() reaps it and starts
    a fresh process.
    """

    def __init__(self):
        self._manager = mp.Manager()
        self._generate_queues()
        self._face_detector = MtcnnDetectFaces(
            self._images_queue,
            self._detected_faces_queue,
            self._process_intercommunication_queue
        )
        self._face_detector.attach(self)

    def _generate_queues(self):
        """Create the shared queues used to talk to the worker process."""
        self._images_queue = self._manager.Queue()
        self._detected_faces_queue = self._manager.Queue()
        self._process_intercommunication_queue = self._manager.Queue()

    @property
    def images_queue(self):
        return self._images_queue

    @property
    def detected_faces_queue(self):
        return self._detected_faces_queue

    @property
    def process_intercommunication_queue(self):
        return self._process_intercommunication_queue

    def run(self):
        """Start the face-detection worker process."""
        self._face_detector.run()

    def update(self, process_join=None):
        """Observer callback: join the finished worker, then restart it.

        BUGFIX: the original default was ``False``, so calling update()
        without an argument crashed with "'bool' object is not callable".
        The worker passes its process' join method here.
        """
        if process_join is not None:
            process_join()
        self._face_detector.run()

    def stop(self):
        """Ask the worker to shut down and wait for it."""
        self._face_detector.stop()
class MtcnnDetectFaces():
    """Runs MTCNN face detection in a separate process.

    Pulls images from ``images_queue`` and pushes
    ``(image_container, detections)`` pairs to ``detected_faces_queue``.
    The worker stops when it receives 'Stop' on the intercommunication
    queue, or notifies its observer and exits when memory grows too large.
    """

    def __init__(
            self,
            images_queue,
            detected_faces_queue,
            process_intercommunication_queue
    ):
        self._observer = None
        self._process = None
        self._images_queue = images_queue
        self._detected_faces_queue = detected_faces_queue
        self._process_intercommunication_queue = process_intercommunication_queue

    def attach(self, observer):
        """Register the observer notified when this worker must be recycled."""
        self._observer = observer

    def _notify(self):
        # Hand the observer our process' join so it can reap us before restarting.
        self._observer.update(self._process.join)

    def _memory_usage(self):
        """Return this process' resident set size in MiB (and print it)."""
        pid = os.getpid()
        process = psutil.Process(pid)
        mem = process.memory_info().rss / float(2 ** 20)
        print(f'PID: {pid} -- Mem: {mem}')
        return mem

    def _detect_faces(self):
        # Imports TF every time, necessary for multiprocessing the pipeline
        print('_mtcnn_detect_faces started')
        import tensorflow as tf
        gpus = tf.config.experimental.get_visible_devices()
        # NOTE(review): indexing [-2] and [-1] assumes at least two visible
        # devices — confirm on single-GPU machines.
        tf.config.experimental.set_memory_growth(gpus[-2], True)
        tf.config.experimental.set_memory_growth(gpus[-1], True)
        # PERF BUGFIX: build the detector once instead of re-creating the
        # whole MTCNN network for every single image.
        detector = MTCNN()
        while True:
            if self._stopping_condition():
                break
            if not self._images_queue.empty():
                image_container = self._images_queue.get(True, 5)
                detected_face = detector.detect_faces(image_container.image)
                self._detected_faces_queue.put((image_container, detected_face))
        print('_mtcnn_detect_faces Done!')

    def _stopping_condition(self):
        """Return True when memory is exhausted (also notifies the observer)
        or a 'Stop' message has arrived."""
        if self._memory_usage() >= 3_000:
            self._notify()
            return True
        if not self._process_intercommunication_queue.empty():
            if self._process_intercommunication_queue.get() == 'Stop':
                return True
        return False

    def run(self):
        """Spawn the detection loop in a new process."""
        self._process = mp.Process(
            target=self._detect_faces
        )
        self._process.start()

    def stop(self):
        """Request a graceful shutdown and wait for the worker to exit."""
        self._process_intercommunication_queue.put('Stop')
        self._process.join()
if __name__ == '__main__':
    # 'spawn' gives the worker a clean interpreter — presumably to avoid
    # forking TF/CUDA state into the child process; confirm.
    mp.set_start_method('spawn')
| 3,262 | 956 |
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 4 14:56:00 2017
@author: AutuanLiu
"""
import numpy as np
import matplotlib.pyplot as plt
# Quick smoke test: plot a small list, label the y-axis and save the figure.
plt.plot([3, 1, 4, -5, 6])
plt.ylabel("grade")
plt.savefig("test", dpi = 600)
plt.show()
def f(x):
    """Damped oscillation: exp(-x) * cos(2*pi*x)."""
    decay = np.exp(-x)
    oscillation = np.cos(2 * np.pi * x)
    return decay * oscillation
a = np.arange(0, 5, .02)
# Two stacked subplots: the damped cosine on top, a plain cosine below.
plt.subplot(2, 1, 1)
plt.plot(a, f(a))
plt.subplot(2, 1, 2)
plt.plot(a, np.cos(2 * np.pi * a), 'r--')
plt.show()
# plot several curves with one call
plt.plot(a, np.sin(a), a, np.sinh(a), a, np.exp(a), a, a ** 3)
plt.xlabel('x axis')
plt.ylabel('y axis')
plt.show()
| 578 | 314 |
import random
import autopy
import pyautogui
import time
import json
from threading import Timer
import cv2
import pytesseract
def click_readyup_button():
    """Move the mouse to the ready-up button and click it when the OCR
    check reports the button is visible."""
    print('CHECKING...')
    if not should_click():
        return
    try:
        autopy.mouse.move(230, 928)
        time.sleep(.2)
        autopy.mouse.click()
    except TypeError:
        print('INTERNAL ERROR OCCURED. CONTACT DEVELOPER.')
def get_position():
    """Print the current mouse coordinates (helper for calibrating clicks)."""
    current = pyautogui.position()
    print(current)
def should_click():
    """Screenshot the button region, OCR it, and decide whether to click.

    Returns:
        True when the OCR text contains 'READY' or 'CANCEL', False otherwise.
    Raises:
        Exception: when the OCR result cannot be inspected.
    """
    # region of the screen where the button text appears: ((x, y), (w, h))
    box = ((140, 950), (170, 42))
    screenshot = autopy.bitmap.capture_screen(box)
    screenshot.save('screenshot.png')
    img = cv2.imread('screenshot.png')
    # NOTE(review): the original computed a grayscale threshold image here
    # but never fed it to the OCR call; the dead code was removed. If OCR
    # accuracy is poor, consider passing a binarized image to pytesseract.
    scanned_text = pytesseract.image_to_string(img, lang='eng', config='--psm 6')
    try:
        print(scanned_text)
        if 'READY' in scanned_text:
            print('read READY in screenshot')
            return True
        elif 'CANCEL' in scanned_text:
            print('read CANCEL in screenshot')
            return True
        else:
            print('read nothing in screenshot')
            return False
    except AttributeError:
        raise Exception('OCR MODULE: COULD NOT RETRIEVE TEXT')
# Manual check: print the OCR decision when run as a script.
if __name__ == '__main__':
    print(
        should_click()
    )
# imports
from .ball import Pallino
from .throw import Throw
from .cv.ballfinder import BallFinder
from scipy.spatial import distance as dist
# for now, these are "pixels" (not "inches" or "cm")
TOO_CLOSE_MARGIN = 5
class Frame:
    """State machine for a single bocce frame (one end of play).

    Tracks the pallino, both teams' throws, and scoring. Ball positions and
    distances come from computer vision (BallFinder) via the ``cam`` object.
    """

    def __init__(self, frameNumber, throwingEnd, pallinoThrowingTeam,
                 teamHome, teamAway, cam):
        # NOTE(review): keeps the original misspelled attribute name
        # 'frameNumer' because other code may already read it.
        self.frameNumer = frameNumber
        self.throwingEnd = throwingEnd
        self.frameWinner = None
        self.pallinoThrowingTeam = pallinoThrowingTeam
        self.teamHome = teamHome
        self.teamAway = teamAway
        # todo
        self.cam = cam
        self.pallinoInPlay = False
        self.ballMotion = False
        self.whoseIn = None   # team currently closest to the pallino
        self.inPoints = 0     # provisional points for the team that is in
        self.framePoints = 0  # final points once the frame is scored
        #### throws ####
        self.throw = None
        self.throws = []
        self.first_bocce_thrown = False
        self.second_bocce_thrown = False
        self.numThrowsTeamHome = 0
        self.numThrowsTeamAway = 0
        self.throw_trigger = False
        self.num_total_team_balls = None

    def initialize_balls(self, playersPerTeam):
        """Create the pallino and hand each team its bocce balls.

        Raises ValueError unless playersPerTeam is 1, 2 or 4.
        """
        self.pallino = Pallino("yellow")
        if playersPerTeam == 1:
            self.num_total_team_balls = 2
        elif playersPerTeam in (2, 4):
            self.num_total_team_balls = 4
        else:
            self.num_total_team_balls = None
            raise ValueError("valid playersPerTeam must be 1, 2, or 4")
        self.teamHome.add_balls(self.num_total_team_balls)
        self.teamAway.add_balls(self.num_total_team_balls)

    def start(self):
        """Announce the start of the frame."""
        print("Frame {} is started".format(str(self.frameNumer)))

    def throw_pallino(self, team):
        """Have *team* throw the pallino; returns whether the throw was valid."""
        # todo: determine throwing player; currently gets RANDOM player
        self.pallino.set_thrower(team.get_random_player())
        self.throw = Throw(self.pallino.thrownBy, self.pallino)
        self.throw.throw()
        valid = self.throw.valid
        # debug
        print("{} threw the pallino. Throw is {}.".format(
            self.pallino.thrownBy, "valid" if valid else "invalid"))
        if valid:
            self.pallino.isThrown = True
            self.pallinoInPlay = True
        return valid

    def increment_team_throw_count(self, team):
        """Bump the per-team throw counter for *team*."""
        if team == self.teamHome:
            self.numThrowsTeamHome += 1
        elif team == self.teamAway:
            self.numThrowsTeamAway += 1

    def throw_bocce(self, team, followPallino=False):
        """Throw one bocce ball and update who is in.

        When followPallino is True the pallino-throwing team throws,
        regardless of *team*. Returns whether the throw was valid.
        """
        thrower = None
        # whichever team threw the pallino throws again
        if followPallino:
            print("Following the pallino")
            team = self.pallinoThrowingTeam
        # otherwise, it is the furthest team's throw
        else:
            # if the furthest team has no more balls, then switch teams
            if self.get_num_remaining_team_balls(team) <= 0:
                team = self.get_other_team(team)
        # grab a bocce ball from the team
        ball = self.get_a_team_ball(team.balls)
        # todo: determine the throwing player; currently gets a random player with ball
        thrower = team.get_random_player_with_balls()
        # throw the bocce ball
        ball.set_thrower(thrower)
        self.throw = Throw(thrower, ball)
        self.throw.throw()
        self.increment_team_throw_count(team)
        valid = self.throw.valid
        # update who is in
        if followPallino:
            self.whoseIn = self.get_other_team(self.pallinoThrowingTeam)
        else:
            # BUGFIX: determine_whose_in returns (points, team); the original
            # assigned the whole tuple to whoseIn.
            self.inPoints, self.whoseIn = self.determine_whose_in(self.cam.last_frame)
        # debug
        print("{}({}) threw a bocce. Throw is {}. {} is in with points={}. {} remaining balls={}".format(
            str(thrower), team.teamBallColor,
            "valid" if valid else "invalid", self.whoseIn,
            self.inPoints, str(team), self.get_num_remaining_team_balls(team)))
        return valid

    def get_a_team_ball(self, balls):
        """Return the first unthrown ball in *balls*, or None when the team
        has no balls left to throw."""
        for ball in balls:
            if not ball.isThrown:
                return ball
        return None

    def either_team_has_balls(self):
        """True while at least one team still has an unthrown ball."""
        return (self.get_num_remaining_team_balls(self.teamHome) > 0
                or self.get_num_remaining_team_balls(self.teamAway) > 0)

    def get_num_remaining_team_balls(self, team):
        """Count the team's unthrown balls.

        BUGFIX: the original started from a hard-coded 4, which over-counts
        when teams play with 2 balls each (1 player per team).
        """
        return sum(1 for ball in team.balls if not ball.isThrown)

    def handle_throw(self):
        """Advance the frame by one throw: pallino first, then the pallino
        thrower's first bocce, then the opponent's first, then whichever
        team is currently out, until the balls run out."""
        if not self.pallino.isThrown:
            # throw the pallino
            self.throw_pallino(self.pallinoThrowingTeam)
            # check to see if the pallino is in play
            if not self.pallinoInPlay:
                # invalid pallino throw: swap the pallino throwing team
                if self.pallinoThrowingTeam == self.teamHome:
                    self.pallinoThrowingTeam = self.teamAway
                elif self.pallinoThrowingTeam == self.teamAway:
                    self.pallinoThrowingTeam = self.teamHome
                # indicate that the pallino hasn't been thrown
                self.pallino.isThrown = False
            return
        # the pallino thrower NEEDS to throw their first ball
        if not self.first_bocce_thrown:
            self.throw_bocce(self.pallinoThrowingTeam,
                             followPallino=True)
            self.first_bocce_thrown = True
            self.update_in_points(1)  # force to one point
            return
        # the other team ALWAYS throws their first ball next
        if not self.second_bocce_thrown:
            print("The other team ALWAYS throws their first ball next")
            self.throw_bocce(self.get_other_team(self.pallinoThrowingTeam), followPallino=False)
            self.second_bocce_thrown = True
            # todo we need to determine who is in but kmeans fails if all dff ball nums = 1
            self.update_in_points(1)
            return
        if self.either_team_has_balls():
            # throw all remaining balls
            # NOTE(review): other call sites pass self.cam.last_frame here —
            # confirm which object BallFinder.pipeline() expects.
            self.inPoints, self.whoseIn = self.determine_whose_in(self.cam)
            # the other team (furthest team) throws
            valid = self.throw_bocce(self.get_other_team(self.whoseIn),
                                     followPallino=False)
        else:
            print("Please score the frame")
            # if we reach this, then the frame is done, so cleanup
            self.set_frame_points(self.whoseIn, self.inPoints)

    def get_other_team(self, team):
        """Return the opponent of *team*."""
        if team == self.teamHome:
            return self.teamAway
        return self.teamHome

    def determine_whose_in(self, court):
        """Find the closest ball with computer vision.

        Runs the BallFinder pipeline on *court*, refreshes the cached ball
        positions, and returns (points, frameLeader).
        """
        bf = BallFinder()
        bf.pipeline(court, self.numThrowsTeamHome, self.numThrowsTeamAway)
        self.pallino = bf.pallino
        self.teamHome.balls = bf.homeBalls
        self.teamAway.balls = bf.awayBalls
        points, frameLeader = self.get_frame_points_and_frame_leader(
            self.pallino, self.teamHome.balls, self.teamAway.balls)
        return points, frameLeader

    def get_frame_points_and_frame_leader(self, pallino, homeBalls, awayBalls):
        """Score the current positions; returns (points, leading_team).

        Standard bocce scoring: the leading team earns one point for every
        ball closer to the pallino than the opponent's single closest ball.
        """
        def get_frame_points(closerDistances, otherDistances):
            # BUGFIX: the original nested loop broke out after a single
            # comparison and could therefore never award more than 1 point.
            if not otherDistances:
                return 0
            opponent_best = otherDistances[0]
            return sum(1 for d in closerDistances if d < opponent_best)

        if pallino is None:
            print("not annotating; couldn't find pallino")
            # BUGFIX: without a pallino there is nothing to measure against;
            # the original fell through and crashed on pallino.coordinates.
            return None, None
        # calculate Euclidean distance for each ball to the pallino
        homeBallsDistances = [dist.euclidean(pallino.coordinates, ball.coordinates)
                              for ball in homeBalls]
        awayBallsDistances = [dist.euclidean(pallino.coordinates, ball.coordinates)
                              for ball in awayBalls]
        # sort balls and distances together, closest first; keying on the
        # distance alone avoids comparing ball objects on ties
        homePairs = sorted(zip(homeBallsDistances, homeBalls), key=lambda p: p[0])
        awayPairs = sorted(zip(awayBallsDistances, awayBalls), key=lambda p: p[0])
        homeBallsDistances = [p[0] for p in homePairs]
        awayBallsDistances = [p[0] for p in awayPairs]
        # grab each min distance (the 0th element in the sorted list)
        homeBallsMinDistance = homeBallsDistances[0]
        awayBallsMinDistance = awayBallsDistances[0]
        # who is closer?
        homeIsCloser = homeBallsMinDistance < awayBallsMinDistance
        awayIsCloser = awayBallsMinDistance < homeBallsMinDistance
        equidistant = homeBallsMinDistance == awayBallsMinDistance
        # check if it is "too close to call"
        tooCloseToCall = abs(homeBallsMinDistance - awayBallsMinDistance) <= TOO_CLOSE_MARGIN
        # determine framePoints and frameLeader
        framePoints = None
        frameLeader = None
        if homeIsCloser:
            framePoints = get_frame_points(homeBallsDistances, awayBallsDistances)
            frameLeader = self.teamHome
        elif awayIsCloser:
            framePoints = get_frame_points(awayBallsDistances, homeBallsDistances)
            frameLeader = self.teamAway
        elif equidistant or tooCloseToCall:
            # todo how do we handle when both teams' closest ball is equidistant
            framePoints = None
        return framePoints, frameLeader

    def update_in_points(self, points=None):
        """Determine who is in and account for their points.

        When *points* is given it is forced; otherwise the computer-vision
        pipeline decides (once at least two bocce balls are thrown).
        """
        if points is not None:
            self.inPoints = points
            return
        # check for balls closest to pallino (pallino counts as one throw)
        ballsThrown = 1 + self.numThrowsTeamHome + self.numThrowsTeamAway
        # if at least two bocce balls are thrown
        if ballsThrown >= 2:
            self.inPoints, self.whoseIn = self.determine_whose_in(self.cam.last_frame)
        else:
            self.inPoints = 0

    def set_frame_points(self, inTeam, inPoints):
        """Record the frame result."""
        self.framePoints = inPoints
        self.frameWinner = inTeam

    def end(self):
        """Announce the winner, reset the ball lists, and return
        (frameWinner, framePoints)."""
        print("[INFO] frame winner is {} with points={}".format(
            self.frameWinner, self.framePoints))
        self.teamAway.balls = []
        self.teamHome.balls = []
        return self.frameWinner, self.framePoints
# Copyright (c) 2015, Dataent Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import dataent
def execute():
    """Patch: reload the Print Settings doctype and enable letterheads."""
    dataent.reload_doc("core", "doctype", "print_settings")
    settings_doc = dataent.get_doc("Print Settings")
    settings_doc.with_letterhead = 1
    settings_doc.save()
| 341 | 116 |
import threading, time, signal
from datetime import timedelta
from PyQt5 import QtWidgets
from playsound import playsound
import win10toast
from view import Dial
isActive = False # global variable that says if timer is set
class MyTimer(threading.Thread):
    """Counts the timer down once per second and moves the dial back to its
    default position when time runs out."""

    def __init__(self, hours: QtWidgets.QLabel, minutes: QtWidgets.QLabel, seconds: QtWidgets.QLabel, dial: QtWidgets.QDial, set_default):
        threading.Thread.__init__(self)
        self.counter = 0         # ticks since the last minute rollover
        self.dial_controller = True
        self.fun = set_default   # callback restoring the UI defaults
        self.dial = dial
        self.hours_label = hours
        self.minutes_label = minutes
        self.seconds_label = seconds
        self.seconds_label.show()
        self.hours = int(hours.text())
        self.min = int(minutes.text())
        self.sec = 59
        self.daemon = True  # True: if the main thread is killed this thread will be killed too
        self.stopped = threading.Event()
        self.interval = timedelta(seconds=1)
        self.execute = self.count_down

    def count_down(self):
        """Tick one second: update the labels, roll minutes/hours over, and
        fire times_up() when the timer reaches zero."""
        self.sec -= 1
        self.seconds_label.setText(str(self.sec))
        if self.sec == 0:
            self.sec = 59
        if self.counter == 60:
            self.min -= 1
            if self.min == -1:
                if self.hours > 0:
                    self.hours -= 1
                    self.min = 59
                    self.hours_label.setText(str(self.hours))
                else:
                    # no time left: alert the user and stop this thread
                    self.times_up()
                    self.stop()
            self.dial.setValue(self.hours * 60 + self.min)
            self.minutes_label.setText(str(self.min))
            self.counter = 0
        self.counter += 1

    def times_up(self):
        """Restore the UI, run the callback, and alert the user with a sound
        and a Windows toast notification."""
        self.back_to_default()
        self.fun()
        playsound("Sounds/alarm2.mp3", False)
        toaster = win10toast.ToastNotifier()
        # BUGFIX: the toast message read "Times's up!"; corrected the typo.
        toaster.show_toast("Timer", "Time's up!", icon_path="Graphic/timer_icon.ico", duration=5)

    def back_to_default(self):
        """Clear the global active flag, re-enable the dial and hide the
        seconds label."""
        global isActive
        isActive = False
        self.seconds_label.setText("59")
        self.dial.setDisabled(False)
        self.seconds_label.hide()

    def stop(self):
        self.back_to_default()
        self.stopped.set()
        # NOTE(review): when stop() runs on the timer thread itself (via the
        # count_down -> times_up path), join() raises RuntimeError, which
        # run() then swallows — confirm this is the intended shutdown path.
        self.join()

    def run(self):
        while not self.stopped.wait(self.interval.total_seconds()):
            try:
                self.execute()
            except RuntimeError:
                # raised when the program closes while the widgets are destroyed
                self.stopped.set()
| 2,738 | 872 |